2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include "adf_accel_devices.h"
50 #include "adf_common_drv.h"
51 #include "adf_transport.h"
53 #include "adf_cfg_strings.h"
54 #include "qat_crypto.h"
55 #include "icp_qat_fw.h"
/* Configuration section queried for crypto parameters. */
#define SEC ADF_KERNEL_SEC

/* Service handle registered with the ADF framework; filled in by
 * qat_crypto_register() before adf_service_register() is called. */
static struct service_hndl qat_crypto;
/*
 * qat_crypto_put_instance() - drop one reference to a crypto instance.
 * @inst: instance previously obtained via qat_crypto_get_instance_node()
 *
 * When the instance refcount hits zero, the reference held on the
 * owning accelerator device is released as well.
 *
 * NOTE(review): the function's braces are not visible in this chunk
 * (source lines missing); code left byte-identical.
 */
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
if (atomic_sub_return(1, &inst->refctr) == 0)
adf_dev_put(inst->accel_dev);
/*
 * qat_crypto_free_instances() - tear down all crypto instances of a device.
 * @accel_dev: accelerator device whose crypto_list is being emptied
 *
 * For each instance: drop every outstanding reference, then remove the
 * four transport rings the instance owns (sym/pke, tx/rx).
 *
 * NOTE(review): the list unlink, instance free and the return statement
 * are not visible in this chunk (source lines missing); code left
 * byte-identical.
 */
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
struct qat_crypto_instance *inst;
struct list_head *list_ptr, *tmp;
/* _safe iteration: entries are presumably unlinked/freed inside the
 * loop (that code is not visible here — TODO confirm). */
list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
inst = list_entry(list_ptr, struct qat_crypto_instance, list);
/* Release every reference still held on this instance. */
for (i = 0; i < atomic_read(&inst->refctr); i++)
qat_crypto_put_instance(inst);
/* Remove the instance's transport rings. */
adf_remove_ring(inst->sym_tx);
adf_remove_ring(inst->sym_rx);
adf_remove_ring(inst->pke_tx);
adf_remove_ring(inst->pke_rx);
/*
 * qat_crypto_get_instance_node() - pick a crypto instance near a NUMA node.
 * @node: NUMA node the caller wants locality with
 *
 * Scans the device-manager list for a started accelerator device that is
 * on @node (or has no NUMA affinity, dev_to_node() < 0) and has crypto
 * instances; falls back to the first registered device otherwise.  Among
 * that device's instances, the one with the lowest refcount is chosen
 * (the comparison against 'best' is not visible in this chunk — TODO
 * confirm).  The chosen instance's refcount is bumped, and on the 0->1
 * transition a reference on the device itself is taken; if that fails
 * the refcount bump is rolled back.
 *
 * NOTE(review): several lines (loop 'break's, the best-instance
 * selection, the return statements) are missing from this view; code
 * left byte-identical.
 */
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
struct adf_accel_dev *accel_dev = NULL;
struct qat_crypto_instance *inst_best = NULL;
struct list_head *itr;
unsigned long best = ~0;
/* First pass: prefer a started device local to @node (or NUMA-less). */
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
dev_to_node(&GET_DEV(accel_dev)) < 0) &&
adf_dev_started(accel_dev) &&
!list_empty(&accel_dev->crypto_list))
/* Fallback: no node-local device found, take the first one. */
pr_err("QAT: Could not find a device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
if (!accel_dev || !adf_dev_started(accel_dev))
/* Second pass: pick the least-referenced instance on the device. */
list_for_each(itr, &accel_dev->crypto_list) {
struct qat_crypto_instance *inst;
inst = list_entry(itr, struct qat_crypto_instance, list);
cur = atomic_read(&inst->refctr);
/* 0->1 transition: also pin the owning device. */
if (atomic_add_return(1, &inst_best->refctr) == 1) {
if (adf_dev_get(accel_dev)) {
/* Roll back the instance refcount on failure. */
atomic_dec(&inst_best->refctr);
dev_err(&GET_DEV(accel_dev),
"Could not increment dev refctr\n");
/*
 * qat_crypto_create_instances() - build crypto instances from device config.
 * @accel_dev: accelerator device to attach the instances to
 *
 * Reads the number of crypto instances (ADF_NUM_CY) from the SEC
 * configuration section, then for each instance reads its ring bank and
 * sym/asym ring sizes and creates the four transport rings (sym/asym,
 * tx/rx).  RX rings get the qat_alg_callback / qat_alg_asym_callback
 * completion handlers.  On any failure all instances created so far are
 * freed via qat_crypto_free_instances().
 *
 * NOTE(review): error 'goto' paths, labels and return statements are not
 * visible in this chunk (source lines missing); code left byte-identical.
 */
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
unsigned long num_inst, num_msg_sym, num_msg_asym;
struct qat_crypto_instance *inst;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
INIT_LIST_HEAD(&accel_dev->crypto_list);
/* How many crypto instances does the configuration ask for? */
strlcpy(key, ADF_NUM_CY, sizeof(key));
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
if (kstrtoul(val, 0, &num_inst))
for (i = 0; i < num_inst; i++) {
/* Allocate the instance on the device's NUMA node. */
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
list_add_tail(&inst->list, &accel_dev->crypto_list);
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
/* Ring bank this instance's rings live on. */
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
if (kstrtoul(val, 10, &bank))
/* Symmetric ring size; halved — presumably split between a
 * tx/rx ring pair (TODO confirm rationale). */
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
if (kstrtoul(val, 10, &num_msg_sym))
num_msg_sym = num_msg_sym >> 1;
/* Asymmetric (PKE) ring size, same halving. */
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
if (kstrtoul(val, 10, &num_msg_asym))
num_msg_asym = num_msg_asym >> 1;
/* TX rings carry firmware requests; no completion callback. */
msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
msg_size, key, NULL, 0, &inst->sym_tx))
/* Asym TX uses half the request message size. */
msg_size = msg_size >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
msg_size, key, NULL, 0, &inst->pke_tx))
/* RX rings carry responses and dispatch to the alg callbacks. */
msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
msg_size, key, qat_alg_callback, 0,
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
msg_size, key, qat_alg_asym_callback, 0,
/* Error path: undo everything created so far. */
qat_crypto_free_instances(accel_dev);
/*
 * qat_crypto_init() - ADF_EVENT_INIT handler: create the device's
 * crypto instances.
 *
 * NOTE(review): the return statements of this function are not visible
 * in this chunk (source lines missing); code left byte-identical.
 */
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
if (qat_crypto_create_instances(accel_dev))
/*
 * qat_crypto_shutdown() - ADF_EVENT_SHUTDOWN handler: free all crypto
 * instances of the device.
 *
 * NOTE(review): the function's braces are not visible in this chunk
 * (source lines missing); code left byte-identical.
 */
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
return qat_crypto_free_instances(accel_dev);
/*
 * qat_crypto_event_handler() - dispatch ADF lifecycle events for the
 * crypto service.
 * @accel_dev: device the event applies to
 * @event: lifecycle event (init/shutdown/start/restart...)
 *
 * Init events create the crypto instances, shutdown events free them;
 * the remaining events appear to be no-ops here.
 *
 * NOTE(review): the switch statement's scaffolding (switch/case INIT/
 * break/default/return) is not visible in this chunk (source lines
 * missing); code left byte-identical.
 */
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
enum adf_event event)
ret = qat_crypto_init(accel_dev);
case ADF_EVENT_SHUTDOWN:
ret = qat_crypto_shutdown(accel_dev);
/* Restart/start events need no instance work here. */
case ADF_EVENT_RESTARTING:
case ADF_EVENT_RESTARTED:
case ADF_EVENT_START:
/*
 * qat_crypto_register() - register the crypto service with the ADF
 * framework so it receives device lifecycle events.
 *
 * Return: result of adf_service_register().
 *
 * NOTE(review): the function's braces are not visible in this chunk
 * (source lines missing); code left byte-identical.
 */
int qat_crypto_register(void)
memset(&qat_crypto, 0, sizeof(qat_crypto));
qat_crypto.event_hld = qat_crypto_event_handler;
qat_crypto.name = "qat_crypto";
return adf_service_register(&qat_crypto);
/*
 * qat_crypto_unregister() - unregister the crypto service from the ADF
 * framework.
 *
 * Return: result of adf_service_unregister().
 *
 * NOTE(review): the function's braces are not visible in this chunk
 * (source lines missing); code left byte-identical.
 */
int qat_crypto_unregister(void)
return adf_service_unregister(&qat_crypto);