-
Notifications
You must be signed in to change notification settings - Fork 10
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
hw/misc/aspeed_hace: Fix SG Accumulative hashing
Make the Aspeed HACE module use the new qcrypto accumulative hashing functions when in scatter-gather accumulative mode. A hash context will maintain a running hash as each scatter-gather chunk is received. Previously, each scatter-gather chunk was cached so the hash could be computed once the final chunk was received. However, the cache was a shallow copy, so once the guest overwrote the memory provided to HACE, the final hash would not be correct. Possibly related to: https://gitlab.com/qemu-project/qemu/-/issues/1121 Buglink: openbmc#36 Signed-off-by: Alejandro Zeise <[email protected]>
- Loading branch information
Showing
2 changed files
with
51 additions
and
44 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,7 @@ | ||
/* | ||
* ASPEED Hash and Crypto Engine | ||
* | ||
* Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates | ||
* Copyright (C) 2021 IBM Corp. | ||
* | ||
* Joel Stanley <[email protected]> | ||
|
@@ -151,47 +152,15 @@ static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id, | |
return iov_count; | ||
} | ||
|
||
/** | ||
* Generate iov for accumulative mode. | ||
* | ||
* @param s aspeed hace state object | ||
* @param iov iov of the current request | ||
* @param id index of the current iov | ||
* @param req_len length of the current request | ||
* | ||
* @return count of iov | ||
*/ | ||
static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id, | ||
hwaddr *req_len) | ||
{ | ||
uint32_t pad_offset; | ||
uint32_t total_msg_len; | ||
s->total_req_len += *req_len; | ||
|
||
if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) { | ||
if (s->iov_count) { | ||
return reconstruct_iov(s, iov, id, &pad_offset); | ||
} | ||
|
||
*req_len -= s->total_req_len - total_msg_len; | ||
s->total_req_len = 0; | ||
iov[id].iov_len = *req_len; | ||
} else { | ||
s->iov_cache[s->iov_count].iov_base = iov->iov_base; | ||
s->iov_cache[s->iov_count].iov_len = *req_len; | ||
++s->iov_count; | ||
} | ||
|
||
return id + 1; | ||
} | ||
|
||
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, | ||
bool acc_mode) | ||
{ | ||
struct iovec iov[ASPEED_HACE_MAX_SG]; | ||
uint32_t total_msg_len; | ||
uint32_t pad_offset; | ||
g_autofree uint8_t *digest_buf = NULL; | ||
size_t digest_len = 0; | ||
int niov = 0; | ||
bool sg_acc_mode_final_request = false; | ||
int i; | ||
void *haddr; | ||
|
||
|
@@ -226,8 +195,15 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, | |
} | ||
iov[i].iov_base = haddr; | ||
if (acc_mode) { | ||
niov = gen_acc_mode_iov(s, iov, i, &plen); | ||
|
||
s->total_req_len += plen; | ||
|
||
if (has_padding(s, &iov[i], plen, &total_msg_len, &pad_offset)) { | ||
/* Padding being present indicates the final request */ | ||
sg_acc_mode_final_request = true; | ||
iov[i].iov_len = pad_offset; | ||
} else { | ||
iov[i].iov_len = plen; | ||
} | ||
} else { | ||
iov[i].iov_len = plen; | ||
} | ||
|
@@ -252,20 +228,42 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, | |
* required to check whether cache is empty. If no, we should | ||
* combine cached iov and the current iov. | ||
*/ | ||
uint32_t total_msg_len; | ||
uint32_t pad_offset; | ||
s->total_req_len += len; | ||
if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) { | ||
niov = reconstruct_iov(s, iov, 0, &pad_offset); | ||
i = reconstruct_iov(s, iov, 0, &pad_offset); | ||
} | ||
} | ||
} | ||
|
||
if (niov) { | ||
i = niov; | ||
} | ||
if (acc_mode) { | ||
if (s->qcrypto_hash_context == NULL && | ||
qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) { | ||
qemu_log_mask(LOG_GUEST_ERROR, | ||
"%s: qcrypto failed to create hash context\n", | ||
__func__); | ||
return; | ||
} | ||
|
||
if (qcrypto_hash_accumulate_bytesv(algo, | ||
s->qcrypto_hash_context, iov, i, | ||
&digest_buf, | ||
&digest_len, NULL) < 0) { | ||
|
||
qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); | ||
return; | ||
} | ||
|
||
if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) { | ||
if (sg_acc_mode_final_request) { | ||
if (qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL) < 0) { | ||
qemu_log_mask(LOG_GUEST_ERROR, | ||
"%s: qcrypto failed to free context\n", __func__); | ||
} | ||
|
||
s->qcrypto_hash_context = NULL; | ||
s->iov_count = 0; | ||
s->total_req_len = 0; | ||
} | ||
} else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) { | ||
qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); | ||
return; | ||
} | ||
|
@@ -397,6 +395,11 @@ static void aspeed_hace_reset(DeviceState *dev) | |
{ | ||
struct AspeedHACEState *s = ASPEED_HACE(dev); | ||
|
||
if (s->qcrypto_hash_context != NULL) { | ||
qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL); | ||
s->qcrypto_hash_context = NULL; | ||
} | ||
|
||
memset(s->regs, 0, sizeof(s->regs)); | ||
s->iov_count = 0; | ||
s->total_req_len = 0; | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters