/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "qat_rsapubkey-asn1.h"
#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
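/*
 * algs_lock serializes qat_asym_algs_register()/qat_asym_algs_unregister()
 * and active_devs counts how many accelerator devices currently use the
 * algorithms, so the "qat-rsa" akcipher below is registered with the crypto
 * API only on the first call and unregistered only on the last one.
 */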
struct qat_rsa_input_params {
        union {
                struct { dma_addr_t m, e, n; } enc;
                struct { dma_addr_t c, d, n; } dec;
                u64 in_tab[8];
        };
} __packed __aligned(64);

struct qat_rsa_output_params {
        union {
                struct { dma_addr_t c; } enc;
                struct { dma_addr_t m; } dec;
                u64 out_tab[8];
        };
} __packed __aligned(64);

struct qat_rsa_ctx {
        char *n, *e, *d;
        dma_addr_t dma_n, dma_e, dma_d;
        unsigned int key_sz;
        struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_rsa_request {
        struct qat_rsa_input_params in;
        struct qat_rsa_output_params out;
        dma_addr_t phy_in, phy_out;
        char *src_align, *dst_align;
        struct icp_qat_fw_pke_request req;
        struct qat_rsa_ctx *ctx;
} __aligned(64);
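/*
 * The input/output parameter blocks are flat tables of DMA addresses handed
 * to the firmware (QAT_COMN_PTR_TYPE_FLAT below), which is why they are
 * packed and 64-byte aligned.  struct qat_rsa_request itself lives in the
 * akcipher request context; .reqsize reserves sizeof(struct qat_rsa_request)
 * + 64 bytes so that PTR_ALIGN(akcipher_request_ctx(req), 64) can always
 * carve out a properly aligned request.
 */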
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
        struct akcipher_request *areq = (void *)(__force long)resp->opaque;
        struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
        struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
        int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags);

        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

        if (req->src_align)
                dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
                                  req->in.enc.m);
        else
                dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
                                 DMA_TO_DEVICE);

        areq->dst_len = req->ctx->key_sz;
        if (req->dst_align) {
                char *ptr = req->dst_align;

                /* Drop leading zero bytes so dst_len reflects the result. */
                while (!(*ptr) && areq->dst_len) {
                        areq->dst_len--;
                        ptr++;
                }

                if (areq->dst_len != req->ctx->key_sz)
                        memmove(req->dst_align, ptr, areq->dst_len);

                scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
                                         areq->dst_len, 1);

                dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
                                  req->out.enc.c);
        } else {
                char *ptr = sg_virt(areq->dst);

                while (!(*ptr) && areq->dst_len) {
                        areq->dst_len--;
                        ptr++;
                }

                if (sg_virt(areq->dst) != ptr && areq->dst_len)
                        memmove(sg_virt(areq->dst), ptr, areq->dst_len);

                dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
                                 DMA_FROM_DEVICE);
        }

        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
                         sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE);

        akcipher_request_complete(areq, err);
}
void qat_alg_asym_callback(void *_resp)
{
        struct icp_qat_fw_pke_resp *resp = _resp;

        qat_rsa_cb(resp);
}
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e
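/*
 * The PKE_RSA_EP_* (and PKE_RSA_DP1_* below) constants are opaque QAT
 * firmware function IDs, one per supported modulus size; the matching ID is
 * written into pke_hdr.cd_pars.func_id to select the modular-exponentiation
 * routine the firmware runs for the request.
 */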
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_EP_512;
        case 1024:
                return PKE_RSA_EP_1024;
        case 1536:
                return PKE_RSA_EP_1536;
        case 2048:
                return PKE_RSA_EP_2048;
        case 3072:
                return PKE_RSA_EP_3072;
        case 4096:
                return PKE_RSA_EP_4096;
        default:
                return 0;
        }
}
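/*
 * For example, a 2048-bit key gives key_sz == 256 bytes, so
 * qat_rsa_enc_fn_id(256) returns PKE_RSA_EP_2048; any unsupported size
 * returns 0, which the callers treat as an invalid key.
 */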
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_DP1_512;
        case 1024:
                return PKE_RSA_DP1_1024;
        case 1536:
                return PKE_RSA_DP1_1536;
        case 2048:
                return PKE_RSA_DP1_2048;
        case 3072:
                return PKE_RSA_DP1_3072;
        case 4096:
                return PKE_RSA_DP1_4096;
        default:
                return 0;
        }
}
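/*
 * The DP1 IDs presumably select the plain, non-CRT private-key form
 * (m = c^d mod n): qat_rsa_dec() below only loads c, d and n into the input
 * table, so the routine chosen here cannot be a CRT variant.
 */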
static int qat_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_rsa_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->e))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->ctx = ctx;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        qat_req->in.enc.e = ctx->dma_e;
        qat_req->in.enc.n = ctx->dma_n;
        ret = -ENOMEM;

        /*
         * The source can be any size within the valid range, but the HW
         * expects it to be the same size as the modulus n, so if it is
         * shorter we allocate a new buffer and copy the data into it
         * left-padded with zeros; otherwise we just map the user-provided
         * buffer.  Either way the firmware needs a single contiguous buffer.
         */
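        /*
         * For example, with a 2048-bit key (key_sz == 256) and a 200-byte
         * source, shift is 56, so the data is copied to src_align + 56 and
         * the first 56 bytes of the zeroed bounce buffer act as padding.
         */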
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
                                                   req->src_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.enc.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
                                                    req->dst_len,
                                                    DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.enc.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;

        }
        qat_req->in.in_tab[3] = 0;
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)req;
        msg->input_param_count = 3;
        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);
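        /*
         * adf_send_message() returns -EBUSY while the PKE transmit ring is
         * full, so the submission is retried a bounded number of times; on
         * any other failure (or once the retries are exhausted) execution
         * falls through to the unmap labels below to undo the DMA mappings.
         */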
        if (!ret)
                return -EINPROGRESS;
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.enc.m);
        else
                if (!dma_mapping_error(dev, qat_req->in.enc.m))
                        dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
                                         DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.enc.c);
        else
                if (!dma_mapping_error(dev, qat_req->out.enc.c))
                        dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
                                         DMA_FROM_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
        return ret;
}
static int qat_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_rsa_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->d))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->ctx = ctx;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        qat_req->in.dec.d = ctx->dma_d;
        qat_req->in.dec.n = ctx->dma_n;
        ret = -ENOMEM;

        /*
         * The source can be any size within the valid range, but the HW
         * expects it to be the same size as the modulus n, so if it is
         * shorter we allocate a new buffer and copy the data into it
         * left-padded with zeros; otherwise we just map the user-provided
         * buffer.  Either way the firmware needs a single contiguous buffer.
         */
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
                                                   req->src_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.dec.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
                                                    req->dst_len,
                                                    DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.dec.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;

        }

        qat_req->in.in_tab[3] = 0;
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)req;
        msg->input_param_count = 3;
        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);
        if (!ret)
                return -EINPROGRESS;
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.dec.c);
        else
                if (!dma_mapping_error(dev, qat_req->in.dec.c))
                        dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
                                         DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.dec.m);
        else
                if (!dma_mapping_error(dev, qat_req->out.dec.m))
                        dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
                                         DMA_FROM_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
        return ret;
}
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ctx->key_sz = vlen;
        ret = -EINVAL;
        /* In FIPS mode only allow 2048- and 3072-bit keys */
        if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
                pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
                goto err;
        }
        /* invalid key size provided */
        if (!qat_rsa_enc_fn_id(ctx->key_sz))
                goto err;

        ret = -ENOMEM;
        ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
        if (!ctx->n)
                goto err;

        memcpy(ctx->n, ptr, ctx->key_sz);
        return 0;
err:
        ctx->key_sz = 0;
        ctx->n = NULL;
        return ret;
}
int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
                ctx->e = NULL;
                return -EINVAL;
        }

        ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
        if (!ctx->e) {
                ctx->e = NULL;
                return -ENOMEM;
        }
        memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
}
int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ret = -EINVAL;
        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
                goto err;

        /* In FIPS mode only allow 2048- and 3072-bit keys */
        if (fips_enabled && (vlen != 256 && vlen != 384)) {
                pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
                goto err;
        }

        ret = -ENOMEM;
        ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
        if (!ctx->d)
                goto err;

        memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
err:
        ctx->d = NULL;
        return ret;
}
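/*
 * qat_rsa_get_n(), qat_rsa_get_e() and qat_rsa_get_d() are the action
 * callbacks referenced by the qat_rsapubkey/qat_rsaprivkey ASN.1 grammars;
 * qat_rsa_setkey() below feeds the raw key through asn1_ber_decoder(), which
 * invokes them once per decoded integer.  Leading zero bytes are stripped
 * first, so ctx->key_sz ends up as the real byte length of the modulus.
 */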
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                          unsigned int keylen, bool private)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
        int ret;

        /* Free the old key if any */
        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }

        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;

        if (private)
                ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
                                       keylen);
        else
                ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
                                       keylen);
        if (ret < 0)
                goto free;

        if (!ctx->n || !ctx->e) {
                /* invalid key provided */
                ret = -EINVAL;
                goto free;
        }
        if (private && !ctx->d) {
                /* invalid private key provided */
                ret = -EINVAL;
                goto free;
        }

        return 0;
free:
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
                ctx->d = NULL;
        }
        if (ctx->e) {
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
                ctx->e = NULL;
        }
        if (ctx->n) {
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
                ctx->n = NULL;
                ctx->key_sz = 0;
        }
        return ret;
}
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
                             unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
                              unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, true);
}
static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        return (ctx->n) ? ctx->key_sz : -EINVAL;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst =
                        qat_crypto_get_instance_node(get_current_node());

        if (!inst)
                return -EINVAL;

        ctx->key_sz = 0;
        ctx->inst = inst;
        return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }
        qat_crypto_put_instance(ctx->inst);
        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;
}
static struct akcipher_alg rsa = {
        .encrypt = qat_rsa_enc,
        .decrypt = qat_rsa_dec,
        .sign = qat_rsa_dec,
        .verify = qat_rsa_enc,
        .set_pub_key = qat_rsa_setpubkey,
        .set_priv_key = qat_rsa_setprivkey,
        .max_size = qat_rsa_max_size,
        .init = qat_rsa_init_tfm,
        .exit = qat_rsa_exit_tfm,
        .reqsize = sizeof(struct qat_rsa_request) + 64,
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "qat-rsa",
                .cra_priority = 1000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct qat_rsa_ctx),
        },
};
int qat_asym_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs == 1) {
                rsa.base.cra_flags = 0;
                ret = crypto_register_akcipher(&rsa);
        }
        mutex_unlock(&algs_lock);
        return ret;
}
void qat_asym_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs == 0)
                crypto_unregister_akcipher(&rsa);
        mutex_unlock(&algs_lock);
}