/*
* Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
};
struct cc_user_key_info {
- uint8_t *key;
+ u8 *key;
dma_addr_t key_dma_addr;
};
struct cc_hw_key_info {
- enum HwCryptoKey key1_slot;
- enum HwCryptoKey key2_slot;
+ enum cc_hw_crypto_key key1_slot;
+ enum cc_hw_crypto_key key2_slot;
};
struct ssi_ablkcipher_ctx {
static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size) {
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
switch (ctx_p->flow_mode){
case S_DIN_to_AES:
switch (size){
switch (ctx_p->cipher_mode){
case DRV_CIPHER_XTS:
if ((size >= SSI_MIN_AES_XTS_SIZE) &&
- (size <= SSI_MAX_AES_XTS_SIZE) &&
+ (size <= SSI_MAX_AES_XTS_SIZE) &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
CHECK_AND_RETURN_UPON_FIPS_ERROR();
max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
}
- SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr, max_key_buf_size);
SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
max_key_buf_size, ctx_p->user.key,
(unsigned long long)ctx_p->user.key_dma_addr);
}
/* Unmap key buffer */
- SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
+ SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
(unsigned long long)ctx_p->user.key_dma_addr);
/* Free key buffer in context */
u8 key3[DES_KEY_SIZE];
} tdes_keys_t;
-static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/* The function verifies that tdes keys are not weak. */
tdes_keys_t *tdes_key = (tdes_keys_t*)key;
/* verify key1 != key2 and key3 != key2*/
- if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
+ if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
(memcmp((u8*)tdes_key->key3, (u8*)tdes_key->key2, sizeof(tdes_key->key3)) == 0) )) {
return -ENOEXEC;
}
return 0;
}
-static enum HwCryptoKey hw_key_to_cc_hw_key(int slot_num)
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
{
switch (slot_num) {
case 0:
return END_OF_KEYS;
}
-static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
- const u8 *key,
+static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
+ const u8 *key,
unsigned int keylen)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = &ctx_p->drvdata->plat_dev->dev;
u32 tmp[DES_EXPKEY_WORDS];
unsigned int max_key_buf_size = get_max_keysize(tfm);
- DECL_CYCLE_COUNT_RESOURCES;
SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
- dump_byte_array("key", (uint8_t *)key, keylen);
+ dump_byte_array("key", (u8 *)key, keylen);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
-
+
/* STAT_PHASE_0: Init and sanity checks */
- START_CYCLE_COUNT();
#if SSI_CC_HAS_MULTI2
	/* last byte of key buffer is round number and should not be a part of key size */
}
ctx_p->keylen = keylen;
- END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");
return 0;
return -EINVAL;
}
}
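	/* FIPS: reject weak XTS keys (the two key halves must not be identical) */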
- if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
+ if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
ssi_fips_verify_xts_keys(key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
return -EINVAL;
}
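	/* FIPS: reject weak 3DES keys (key1 == key2 or key3 == key2) */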
- if ((ctx_p->flow_mode == S_DIN_to_DES) &&
- (keylen == DES3_EDE_KEY_SIZE) &&
+ if ((ctx_p->flow_mode == S_DIN_to_DES) &&
+ (keylen == DES3_EDE_KEY_SIZE) &&
ssi_fips_verify_3des_keys(key, keylen) != 0) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
return -EINVAL;
}
- END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
/* STAT_PHASE_1: Copy key to ctx */
- START_CYCLE_COUNT();
- SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
- dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
#if SSI_CC_HAS_MULTI2
if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
return -EINVAL;
}
- } else
+ } else
#endif /*SSI_CC_HAS_MULTI2*/
{
memcpy(ctx_p->user.key, key, keylen);
}
}
}
- dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+ dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
- SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr ,max_key_buf_size);
ctx_p->keylen = keylen;
-
- END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
+
SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
return 0;
struct blkcipher_req_ctx *req_ctx,
unsigned int ivsize,
unsigned int nbytes,
- HwDesc_s desc[],
+ struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
/* Load cipher state */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- iv_dma_addr, ivsize,
- NS_BIT);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
- if ((cipher_mode == DRV_CIPHER_CTR) ||
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+ NS_BIT);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if ((cipher_mode == DRV_CIPHER_CTR) ||
(cipher_mode == DRV_CIPHER_OFB) ) {
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
- SETUP_LOAD_STATE1);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
} else {
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
- SETUP_LOAD_STATE0);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
/*FALLTHROUGH*/
case DRV_CIPHER_ECB:
/* Load key */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
if (flow_mode == S_DIN_to_AES) {
if (ssi_is_hw_key(tfm)) {
- HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
} else {
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- key_dma_addr,
- ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
- NS_BIT);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, ((key_len == 24) ?
+ AES_MAX_KEY_SIZE :
+ key_len), NS_BIT);
}
- HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len);
+ set_key_size_aes(&desc[*seq_size], key_len);
} else {
/*des*/
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, key_len,
- NS_BIT);
- HW_DESC_SET_KEY_SIZE_DES(&desc[*seq_size], key_len);
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
}
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
(*seq_size)++;
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
/* Load AES key */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
if (ssi_is_hw_key(tfm)) {
- HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
} else {
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, key_len/2,
- NS_BIT);
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
}
- HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
(*seq_size)++;
/* load XEX key */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
if (ssi_is_hw_key(tfm)) {
- HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key2_slot);
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key2_slot);
} else {
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- (key_dma_addr+key_len/2), key_len/2,
- NS_BIT);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr + (key_len / 2)),
+ (key_len / 2), NS_BIT);
}
- HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[*seq_size], du_size);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], S_DIN_to_AES2);
- HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
-
+
/* Set state */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
- HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- iv_dma_addr, CC_AES_BLOCK_SIZE,
- NS_BIT);
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
(*seq_size)++;
break;
default:
struct crypto_tfm *tfm,
struct blkcipher_req_ctx *req_ctx,
unsigned int ivsize,
- HwDesc_s desc[],
+ struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
+
int direction = req_ctx->gen_ctx.op_type;
/* Load system key */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
- CC_MULTI2_SYSTEM_KEY_SIZE,
- NS_BIT);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
+ CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
+ set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
(*seq_size)++;
/* load data key */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- (ctx_p->user.key_dma_addr +
- CC_MULTI2_SYSTEM_KEY_SIZE),
- CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
- HW_DESC_SET_MULTI2_NUM_ROUNDS(&desc[*seq_size],
- ctx_p->key_round_number);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE0 );
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
+ CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
+ set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
+ set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+ set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
(*seq_size)++;
-
-
+
+
/* Set state */
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- req_ctx->gen_ctx.iv_dma_addr,
- ivsize, NS_BIT);
- HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
- HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
- HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+ ivsize, NS_BIT);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+ set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
(*seq_size)++;
-
+
}
#endif /*SSI_CC_HAS_MULTI2*/
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes,
void *areq,
- HwDesc_s desc[],
+ struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
(unsigned long long)sg_dma_address(dst),
nbytes);
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- sg_dma_address(src),
- nbytes, NS_BIT);
- HW_DESC_SET_DOUT_DLLI(&desc[*seq_size],
- sg_dma_address(dst),
- nbytes,
- NS_BIT, (areq == NULL)? 0:1);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!areq ? 0 : 1));
if (areq != NULL) {
- HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
+ set_queue_last_ind(&desc[*seq_size]);
}
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
} else {
/* bypass */
(unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
- req_ctx->mlli_params.mlli_dma_addr,
- req_ctx->mlli_params.mlli_len,
- NS_BIT);
- HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
- ctx_p->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
(*seq_size)++;
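	/* Process the data via the MLLI table that was just copied into SRAM */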
- HW_DESC_INIT(&desc[*seq_size]);
- HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_MLLI,
- ctx_p->drvdata->mlli_sram_addr,
- req_ctx->in_mlli_nents, NS_BIT);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_MLLI,
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT);
if (req_ctx->out_nents == 0) {
SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
"addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
- HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
- ctx_p->drvdata->mlli_sram_addr,
- req_ctx->in_mlli_nents,
- NS_BIT,(areq == NULL)? 0:1);
+ set_dout_mlli(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
} else {
SSI_LOG_DEBUG(" din/dout params "
"addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr +
- (uint32_t)LLI_ENTRY_BYTE_SIZE *
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ (u32)LLI_ENTRY_BYTE_SIZE *
req_ctx->in_nents);
- HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
- (ctx_p->drvdata->mlli_sram_addr +
- LLI_ENTRY_BYTE_SIZE *
- req_ctx->in_mlli_nents),
- req_ctx->out_mlli_nents, NS_BIT,(areq == NULL)? 0:1);
+ set_dout_mlli(&desc[*seq_size],
+ (ctx_p->drvdata->mlli_sram_addr +
+ (LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_mlli_nents)),
+ req_ctx->out_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
}
if (areq != NULL) {
- HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
+ set_queue_last_ind(&desc[*seq_size]);
}
- HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
}
}
static int ssi_blkcipher_complete(struct device *dev,
- struct ssi_ablkcipher_ctx *ctx_p,
+ struct ssi_ablkcipher_ctx *ctx_p,
struct blkcipher_req_ctx *req_ctx,
struct scatterlist *dst, struct scatterlist *src,
- void *info, //req info
unsigned int ivsize,
void *areq,
void __iomem *cc_base)
{
int completion_error = 0;
- uint32_t inflight_counter;
- DECL_CYCLE_COUNT_RESOURCES;
+ u32 inflight_counter;
- START_CYCLE_COUNT();
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- info = req_ctx->backup_info;
- END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
	/* Set the inflight counter value to local variable */
unsigned int nbytes,
void *info, //req info
unsigned int ivsize,
- void *areq,
+ void *areq,
enum drv_crypto_direction direction)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = &ctx_p->drvdata->plat_dev->dev;
- HwDesc_s desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct ssi_crypto_req ssi_req = {};
int rc, seq_len = 0,cts_restore_flag = 0;
- DECL_CYCLE_COUNT_RESOURCES;
SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
((direction==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"),
CHECK_AND_RETURN_UPON_FIPS_ERROR();
/* STAT_PHASE_0: Init and sanity checks */
- START_CYCLE_COUNT();
-
+
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
-
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
+
/* STAT_PHASE_1: Map buffers */
- START_CYCLE_COUNT();
-
+
rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
if (unlikely(rc != 0)) {
SSI_LOG_ERR("map_request() failed\n");
goto exit_process;
}
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
/* STAT_PHASE_2: Create sequence */
- START_CYCLE_COUNT();
/* Setup processing */
#if SSI_CC_HAS_MULTI2
}
/* Data processing */
ssi_blkcipher_create_data_desc(tfm,
- req_ctx,
+ req_ctx,
dst, src,
nbytes,
areq,
/* set the IV size (8/16 B long)*/
ssi_req.ivgen_size = ivsize;
}
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
/* STAT_PHASE_3: Lock HW and push sequence */
- START_CYCLE_COUNT();
-
+
rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL)? 0:1);
if(areq != NULL) {
if (unlikely(rc != -EINPROGRESS)) {
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
}
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
} else {
if (rc != 0) {
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
} else {
- END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
- rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst, src, info, ivsize, NULL, ctx_p->drvdata->cc_base);
- }
+ rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
+ src, ivsize, NULL,
+ ctx_p->drvdata->cc_base);
+ }
}
exit_process:
if (cts_restore_flag != 0)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
+
return rc;
}
CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();
- ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src, areq->info, ivsize, areq, cc_base);
+ ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
+ ivsize, areq, cc_base);
}
static void ssi_sblkcipher_exit(struct crypto_tfm *tfm)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
+
kfree(ctx_p->sync_ctx);
SSI_LOG_DEBUG("Free sync ctx buffer in context ctx_p->sync_ctx=@%p\n", ctx_p->sync_ctx);
static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
{
struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
-
+
ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
return ssi_blkcipher_init(tfm);
}
-static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key,
+static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key,
unsigned int keylen)
{
return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
#endif /*SSI_CC_HAS_MULTI2*/
};
-static
+static
struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
{
struct ssi_crypto_alg *t_alg;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
-
+
alg->cra_init = template->synchronous? ssi_sblkcipher_init:ssi_ablkcipher_init;
alg->cra_exit = template->synchronous? ssi_sblkcipher_exit:ssi_blkcipher_exit;
alg->cra_type = template->synchronous? &crypto_blkcipher_type:&crypto_ablkcipher_type;
int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_alg *t_alg, *n;
- struct ssi_blkcipher_handle *blkcipher_handle =
+ struct ssi_blkcipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
struct device *dev;
dev = &drvdata->plat_dev->dev;
kfree(t_alg);
goto fail0;
} else {
- list_add_tail(&t_alg->entry,
+ list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
- SSI_LOG_DEBUG("Registered %s\n",
+ SSI_LOG_DEBUG("Registered %s\n",
t_alg->crypto_alg.cra_driver_name);
}
}