/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/platform_device.h>
#include <crypto/ctr.h>
#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_ivgen.h"
#include "ssi_request_mgr.h"
#include "ssi_sram_mgr.h"
#include "ssi_buffer_mgr.h"
26 /* The max. size of pool *MUST* be <= SRAM total size */
27 #define SSI_IVPOOL_SIZE 1024
28 /* The first 32B fraction of pool are dedicated to the
29 next encryption "key" & "IV" for pool regeneration */
30 #define SSI_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
31 #define SSI_IVPOOL_GEN_SEQ_LEN 4
34 * struct ssi_ivgen_ctx -IV pool generation context
35 * @pool: the start address of the iv-pool resides in internal RAM
36 * @ctr_key_dma: address of pool's encryption key material in internal RAM
37 * @ctr_iv_dma: address of pool's counter iv in internal RAM
38 * @next_iv_ofs: the offset to the next available IV in pool
39 * @pool_meta: virt. address of the initial enc. key/IV
40 * @pool_meta_dma: phys. address of the initial enc. key/IV
42 struct ssi_ivgen_ctx {
44 ssi_sram_addr_t ctr_key;
45 ssi_sram_addr_t ctr_iv;
48 dma_addr_t pool_meta_dma;
52 * Generates SSI_IVPOOL_SIZE of random bytes by
53 * encrypting 0's using AES128-CTR.
55 * \param ivgen iv-pool context
56 * \param iv_seq IN/OUT array to the descriptors sequence
57 * \param iv_seq_len IN/OUT pointer to the sequence length
59 static int ssi_ivgen_generate_pool(
60 struct ssi_ivgen_ctx *ivgen_ctx,
62 unsigned int *iv_seq_len)
64 unsigned int idx = *iv_seq_len;
66 if ( (*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
67 /* The sequence will be longer than allowed */
71 HW_DESC_INIT(&iv_seq[idx]);
72 HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
73 HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_KEY0);
74 HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
75 HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
76 HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
77 HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
80 /* Setup cipher state */
81 HW_DESC_INIT(&iv_seq[idx]);
82 HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
83 HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
84 HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
85 HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_STATE1);
86 HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
87 HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
90 /* Perform dummy encrypt to skip first block */
91 HW_DESC_INIT(&iv_seq[idx]);
92 HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, CC_AES_IV_SIZE);
93 HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
94 HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
97 /* Generate IV pool */
98 HW_DESC_INIT(&iv_seq[idx]);
99 HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
100 HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
101 HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
104 *iv_seq_len = idx; /* Update sequence length */
106 /* queue ordering assures pool readiness */
107 ivgen_ctx->next_iv_ofs = SSI_IVPOOL_META_SIZE;
113 * Generates the initial pool in SRAM.
114 * This function should be invoked when resuming DX driver.
118 * \return int Zero for success, negative value otherwise.
120 int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
122 struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
123 HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
124 unsigned int iv_seq_len = 0;
127 /* Generate initial enc. key/iv */
128 get_random_bytes(ivgen_ctx->pool_meta, SSI_IVPOOL_META_SIZE);
130 /* The first 32B reserved for the enc. Key/IV */
131 ivgen_ctx->ctr_key = ivgen_ctx->pool;
132 ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
134 /* Copy initial enc. key and IV to SRAM at a single descriptor */
135 HW_DESC_INIT(&iv_seq[iv_seq_len]);
136 HW_DESC_SET_DIN_TYPE(&iv_seq[iv_seq_len], DMA_DLLI,
137 ivgen_ctx->pool_meta_dma, SSI_IVPOOL_META_SIZE,
139 HW_DESC_SET_DOUT_SRAM(&iv_seq[iv_seq_len], ivgen_ctx->pool,
140 SSI_IVPOOL_META_SIZE);
141 HW_DESC_SET_FLOW_MODE(&iv_seq[iv_seq_len], BYPASS);
144 /* Generate initial pool */
145 rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
146 if (unlikely(rc != 0)) {
149 /* Fire-and-forget */
150 return send_request_init(drvdata, iv_seq, iv_seq_len);
154 * Free iv-pool and ivgen context.
158 void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
160 struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
161 struct device *device = &(drvdata->plat_dev->dev);
163 if (ivgen_ctx == NULL)
166 if (ivgen_ctx->pool_meta != NULL) {
167 memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
168 SSI_RESTORE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma);
169 dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
170 ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma);
173 ivgen_ctx->pool = NULL_SRAM_ADDR;
175 /* release "this" context */
180 * Allocates iv-pool and maps resources.
181 * This function generates the first IV pool.
183 * \param drvdata Driver's private context
185 * \return int Zero for success, negative value otherwise.
187 int ssi_ivgen_init(struct ssi_drvdata *drvdata)
189 struct ssi_ivgen_ctx *ivgen_ctx;
190 struct device *device = &drvdata->plat_dev->dev;
193 /* Allocate "this" context */
194 drvdata->ivgen_handle = kzalloc(sizeof(struct ssi_ivgen_ctx), GFP_KERNEL);
195 if (!drvdata->ivgen_handle) {
196 SSI_LOG_ERR("Not enough memory to allocate IVGEN context "
197 "(%zu B)\n", sizeof(struct ssi_ivgen_ctx));
201 ivgen_ctx = drvdata->ivgen_handle;
203 /* Allocate pool's header for intial enc. key/IV */
204 ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
205 &ivgen_ctx->pool_meta_dma, GFP_KERNEL);
206 if (!ivgen_ctx->pool_meta) {
207 SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
208 "(%u B)\n", SSI_IVPOOL_META_SIZE);
212 SSI_UPDATE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma,
213 SSI_IVPOOL_META_SIZE);
214 /* Allocate IV pool in SRAM */
215 ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
216 if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
217 SSI_LOG_ERR("SRAM pool exhausted\n");
222 return ssi_ivgen_init_sram_pool(drvdata);
225 ssi_ivgen_fini(drvdata);
230 * Acquires 16 Bytes IV from the iv-pool
232 * \param drvdata Driver private context
233 * \param iv_out_dma Array of physical IV out addresses
234 * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignore)
235 * \param iv_out_size May be 8 or 16 bytes long
236 * \param iv_seq IN/OUT array to the descriptors sequence
237 * \param iv_seq_len IN/OUT pointer to the sequence length
239 * \return int Zero for success, negative value otherwise.
242 struct ssi_drvdata *drvdata,
243 dma_addr_t iv_out_dma[],
244 unsigned int iv_out_dma_len,
245 unsigned int iv_out_size,
247 unsigned int *iv_seq_len)
249 struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
250 unsigned int idx = *iv_seq_len;
253 if ((iv_out_size != CC_AES_IV_SIZE) &&
254 (iv_out_size != CTR_RFC3686_IV_SIZE)) {
257 if ( (iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
258 /* The sequence will be longer than allowed */
262 //check that number of generated IV is limited to max dma address iv buffer size
263 if ( iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
264 /* The sequence will be longer than allowed */
268 for (t = 0; t < iv_out_dma_len; t++) {
269 /* Acquire IV from pool */
270 HW_DESC_INIT(&iv_seq[idx]);
271 HW_DESC_SET_DIN_SRAM(&iv_seq[idx],
272 ivgen_ctx->pool + ivgen_ctx->next_iv_ofs,
274 HW_DESC_SET_DOUT_DLLI(&iv_seq[idx], iv_out_dma[t],
275 iv_out_size, NS_BIT, 0);
276 HW_DESC_SET_FLOW_MODE(&iv_seq[idx], BYPASS);
280 /* Bypass operation is proceeded by crypto sequence, hence must
281 * assure bypass-write-transaction by a memory barrier */
282 HW_DESC_INIT(&iv_seq[idx]);
283 HW_DESC_SET_DIN_NO_DMA(&iv_seq[idx], 0, 0xfffff0);
284 HW_DESC_SET_DOUT_NO_DMA(&iv_seq[idx], 0, 0, 1);
287 *iv_seq_len = idx; /* update seq length */
289 /* Update iv index */
290 ivgen_ctx->next_iv_ofs += iv_out_size;
292 if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
293 SSI_LOG_DEBUG("Pool exhausted, regenerating iv-pool\n");
294 /* pool is drained -regenerate it! */
295 return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);