diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ba79d638f78200a189bda8763f2997222d60709d..ea8189f4b0212cc038f5f4363cf10f5e8a54099a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1705,14 +1705,131 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        return ret;
 }
 
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+                                const u8 *key, unsigned int keylen)
+{
+       struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+       struct device *jrdev = ctx->jrdev;
+       u32 *key_jump_cmd, *desc;
+       __be64 sector_size = cpu_to_be64(512);
+
+       if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+               crypto_ablkcipher_set_flags(ablkcipher,
+                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
+               dev_err(jrdev, "key size mismatch\n");
+               return -EINVAL;
+       }
+
+       memcpy(ctx->key, key, keylen);
+       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->key_dma)) {
+               dev_err(jrdev, "unable to map key i/o memory\n");
+               return -ENOMEM;
+       }
+       ctx->enckeylen = keylen;
+
+       /* xts_ablkcipher_encrypt shared descriptor */
+       desc = ctx->sh_desc_enc;
+       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+       /* Skip if already shared */
+       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+                                  JUMP_COND_SHRD);
+
+       /* Load class1 keys only */
+       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+       /* Load sector size with index 40 bytes (0x28) */
+       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
+                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
+       append_data(desc, (void *)&sector_size, 8);
+
+       set_jump_tgt_here(desc, key_jump_cmd);
+
+       /*
+        * create sequence for loading the sector index
+        * Upper 8B of IV - will be used as sector index
+        * Lower 8B of IV - will be discarded
+        */
+       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
+       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+       /* Load operation */
+       append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
+                        OP_ALG_ENCRYPT);
+
+       /* Perform operation */
+       ablkcipher_append_src_dst(desc);
+
+       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+                                             DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+               dev_err(jrdev, "unable to map shared descriptor\n");
+               return -ENOMEM;
+       }
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR,
+                      "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+       /* xts_ablkcipher_decrypt shared descriptor */
+       desc = ctx->sh_desc_dec;
+
+       init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+       /* Skip if already shared */
+       key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+                                  JUMP_COND_SHRD);
+
+       /* Load class1 key only */
+       append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+                         ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+       /* Load sector size with index 40 bytes (0x28) */
+       append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
+                  LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
+       append_data(desc, (void *)&sector_size, 8);
+
+       set_jump_tgt_here(desc, key_jump_cmd);
+
+       /*
+        * create sequence for loading the sector index
+        * Upper 8B of IV - will be used as sector index
+        * Lower 8B of IV - will be discarded
+        */
+       append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+                  LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
+       append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+       /* Load operation */
+       append_dec_op1(desc, ctx->class1_alg_type);
+
+       /* Perform operation */
+       ablkcipher_append_src_dst(desc);
+
+       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+                                             DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+               dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
+                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+               dev_err(jrdev, "unable to map shared descriptor\n");
+               return -ENOMEM;
+       }
+#ifdef DEBUG
+       print_hex_dump(KERN_ERR,
+                      "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+                      DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+       return 0;
+}
+
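(For reference only, not part of the patch: a minimal sketch of how a caller might lay out the 16-byte XTS IV these descriptors expect. Per the descriptor comments above, the first 8 bytes of the IV carry the sector index and the trailing 8 bytes are skipped; big-endian packing is an assumption made here to match the cpu_to_be64() sector size, and the helper name is hypothetical.)

static inline void xts_caam_fill_iv(u8 iv[AES_BLOCK_SIZE], u64 sector)
{
	__be64 idx = cpu_to_be64(sector);	/* byte order assumed */

	memset(iv, 0, AES_BLOCK_SIZE);
	memcpy(iv, &idx, sizeof(idx));		/* first 8 bytes: sector index */
	/* remaining 8 bytes are discarded by the descriptor's FIFO skip */
}
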
 /*
  * aead_edesc - s/w-extended aead descriptor
  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @assoc_chained: if source is chained
  * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1721,11 +1838,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  */
 struct aead_edesc {
        int assoc_nents;
-       bool assoc_chained;
        int src_nents;
-       bool src_chained;
        int dst_nents;
-       bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
@@ -1736,9 +1850,7 @@ struct aead_edesc {
 /*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -1747,9 +1859,7 @@ struct aead_edesc {
  */
 struct ablkcipher_edesc {
        int src_nents;
-       bool src_chained;
        int dst_nents;
-       bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
@@ -1759,18 +1869,15 @@ struct ablkcipher_edesc {
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents,
-                      bool src_chained, int dst_nents, bool dst_chained,
+                      int dst_nents,
                       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                       int sec4_sg_bytes)
 {
        if (dst != src) {
-               dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
-                                    src_chained);
-               dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
-                                    dst_chained);
+               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
+               dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
        } else {
-               dma_unmap_sg_chained(dev, src, src_nents ? : 1,
-                                    DMA_BIDIRECTIONAL, src_chained);
+               dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
        }
 
        if (iv_dma)
@@ -1785,8 +1892,7 @@ static void aead_unmap(struct device *dev,
                       struct aead_request *req)
 {
        caam_unmap(dev, req->src, req->dst,
-                  edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-                  edesc->dst_chained, 0, 0,
+                  edesc->src_nents, edesc->dst_nents, 0, 0,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
@@ -1798,8 +1904,8 @@ static void ablkcipher_unmap(struct device *dev,
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
        caam_unmap(dev, req->src, req->dst,
-                  edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-                  edesc->dst_chained, edesc->iv_dma, ivsize,
+                  edesc->src_nents, edesc->dst_nents,
+                  edesc->iv_dma, ivsize,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
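(Side note, a sketch rather than part of this hunk: the converted call sites below assume a chain-agnostic sg_count() in the driver's sg_sw_sec4.h, roughly along these lines. sg_nents_for_len() does the plain scatterlist walk now that dma_map_sg() handles chained lists, and returning 0 for a single segment is what keeps the "src_nents ? : 1" idiom in the mapping calls working.)

static inline int sg_count(struct scatterlist *sg_list, int nbytes)
{
	int sg_nents = sg_nents_for_len(sg_list, nbytes);

	/* a single segment needs no sec4 S/G table, so report it as 0 */
	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
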
@@ -2169,22 +2275,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        struct aead_edesc *edesc;
        int sgc;
        bool all_contig = true;
-       bool src_chained = false, dst_chained = false;
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
        unsigned int authsize = ctx->authsize;
 
        if (unlikely(req->dst != req->src)) {
-               src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
-                                    &src_chained);
+               src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
                dst_nents = sg_count(req->dst,
                                     req->assoclen + req->cryptlen +
-                                       (encrypt ? authsize : (-authsize)),
-                                    &dst_chained);
+                                       (encrypt ? authsize : (-authsize)));
        } else {
                src_nents = sg_count(req->src,
                                     req->assoclen + req->cryptlen +
-                                       (encrypt ? authsize : 0),
-                                    &src_chained);
+                                       (encrypt ? authsize : 0));
        }
 
        /* Check if data are contiguous. */
@@ -2207,37 +2309,35 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        }
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_BIDIRECTIONAL, src_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_BIDIRECTIONAL);
                if (unlikely(!sgc)) {
                        dev_err(jrdev, "unable to map source\n");
                        kfree(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_TO_DEVICE, src_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_TO_DEVICE);
                if (unlikely(!sgc)) {
                        dev_err(jrdev, "unable to map source\n");
                        kfree(edesc);
                        return ERR_PTR(-ENOMEM);
                }
 
-               sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-                                        DMA_FROM_DEVICE, dst_chained);
+               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+                                DMA_FROM_DEVICE);
                if (unlikely(!sgc)) {
                        dev_err(jrdev, "unable to map destination\n");
-                       dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                            DMA_TO_DEVICE, src_chained);
+                       dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+                                    DMA_TO_DEVICE);
                        kfree(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        }
 
        edesc->src_nents = src_nents;
-       edesc->src_chained = src_chained;
        edesc->dst_nents = dst_nents;
-       edesc->dst_chained = dst_chained;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
        *all_contig_ptr = all_contig;
@@ -2467,22 +2567,21 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        bool iv_contig = false;
        int sgc;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       bool src_chained = false, dst_chained = false;
        int sec4_sg_index;
 
-       src_nents = sg_count(req->src, req->nbytes, &src_chained);
+       src_nents = sg_count(req->src, req->nbytes);
 
        if (req->dst != req->src)
-               dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+               dst_nents = sg_count(req->dst, req->nbytes);
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_BIDIRECTIONAL, src_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_BIDIRECTIONAL);
        } else {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_TO_DEVICE, src_chained);
-               sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-                                        DMA_FROM_DEVICE, dst_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_TO_DEVICE);
+               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+                                DMA_FROM_DEVICE);
        }
 
        iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
@@ -2511,9 +2610,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        }
 
        edesc->src_nents = src_nents;
-       edesc->src_chained = src_chained;
        edesc->dst_nents = dst_nents;
-       edesc->dst_chained = dst_chained;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
@@ -2646,22 +2743,21 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        bool iv_contig = false;
        int sgc;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-       bool src_chained = false, dst_chained = false;
        int sec4_sg_index;
 
-       src_nents = sg_count(req->src, req->nbytes, &src_chained);
+       src_nents = sg_count(req->src, req->nbytes);
 
        if (unlikely(req->dst != req->src))
-               dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+               dst_nents = sg_count(req->dst, req->nbytes);
 
        if (likely(req->src == req->dst)) {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_BIDIRECTIONAL, src_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_BIDIRECTIONAL);
        } else {
-               sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-                                        DMA_TO_DEVICE, src_chained);
-               sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-                                        DMA_FROM_DEVICE, dst_chained);
+               sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+                                DMA_TO_DEVICE);
+               sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+                                DMA_FROM_DEVICE);
        }
 
        /*
@@ -2690,9 +2786,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        }
 
        edesc->src_nents = src_nents;
-       edesc->src_chained = src_chained;
        edesc->dst_nents = dst_nents;
-       edesc->dst_chained = dst_chained;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
@@ -2871,7 +2965,23 @@ static struct caam_alg_template driver_algs[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        },
                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-       }
+       },
+       {
+               .name = "xts(aes)",
+               .driver_name = "xts-aes-caam",
+               .blocksize = AES_BLOCK_SIZE,
+               .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+               .template_ablkcipher = {
+                       .setkey = xts_ablkcipher_setkey,
+                       .encrypt = ablkcipher_encrypt,
+                       .decrypt = ablkcipher_decrypt,
+                       .geniv = "eseqiv",
+                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
+                       .ivsize = AES_BLOCK_SIZE,
+                       },
+               .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+       },
 };
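
(A sketch of a consumer picking up the new "xts(aes)" entry through the ablkcipher API of this kernel generation; it is not from the patch, and the function name and the buf/buflen/sector parameters are hypothetical. The key is two concatenated AES keys, so only 32- or 64-byte keys pass the setkey check added above.)

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int xts_caam_encrypt_sector(void *buf, unsigned int buflen, u64 sector)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[64] = { 0 };		/* AES-256 in XTS: two 32-byte keys */
	u8 iv[16] = { 0 };
	__be64 idx = cpu_to_be64(sector);
	int err;

	tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	memcpy(iv, &idx, sizeof(idx));	/* first 8 bytes of IV: sector index */
	sg_init_one(&sg, buf, buflen);
	ablkcipher_request_set_crypt(req, &sg, &sg, buflen, iv);

	/* may return -EINPROGRESS; a real caller waits on the completion */
	err = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}
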
 
 static struct caam_aead_alg driver_aeads[] = {