/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from the engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ahash_request *hreq;
        struct ablkcipher_request *breq;
        unsigned long flags;
        bool was_busy = false;
        int ret, rtype;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
        /* By this point we have successfully dequeued the request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                if (engine->prepare_hash_request) {
                        ret = engine->prepare_hash_request(engine, hreq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->hash_one_request(engine, hreq);
                if (ret) {
                        pr_err("failed to hash one request from queue\n");
                        goto req_err;
                }
                return;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                if (engine->prepare_cipher_request) {
                        ret = engine->prepare_cipher_request(engine, breq);
                        if (ret) {
                                pr_err("failed to prepare request: %d\n", ret);
                                goto req_err;
                        }
                        engine->cur_req_prepared = true;
                }
                ret = engine->cipher_one_request(engine, breq);
                if (ret) {
                        pr_err("failed to cipher one request from queue\n");
                        goto req_err;
                }
                return;
        default:
                pr_err("failed to prepare request of unknown type\n");
                return;
        }

req_err:
        switch (rtype) {
        case CRYPTO_ALG_TYPE_AHASH:
                hreq = ahash_request_cast(engine->cur_req);
                crypto_finalize_hash_request(engine, hreq, ret);
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                breq = ablkcipher_request_cast(engine->cur_req);
                crypto_finalize_cipher_request(engine, breq, ret);
                break;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}
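
/*
 * Driver side of the contract invoked above: ->cipher_one_request() (and
 * ->hash_one_request()) are expected to kick off the operation, typically
 * asynchronously, and return. A minimal, hypothetical sketch (foo_dev,
 * foo_hw_start and the field names are illustrative, not a real driver):
 *
 *	static int foo_cipher_one_request(struct crypto_engine *engine,
 *					  struct ablkcipher_request *req)
 *	{
 *		struct foo_dev *foo = engine->priv_data;
 *
 *		foo->cur_req = req;
 *		foo_hw_start(foo, req->src, req->dst, req->nbytes);
 *		return 0;	// completion is signalled later, e.g. by IRQ
 *	}
 *
 * The request is finalized later (from whatever context observes hardware
 * completion) with crypto_finalize_cipher_request().
 */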

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 * @need_pump: if true, queue the pump work to process the request as soon
 * as the engine is not busy
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
                                   struct ablkcipher_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and let the engine process it
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
                                             struct ablkcipher_request *req)
{
        return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
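
/*
 * Typical caller: an algorithm's ->encrypt()/->decrypt() handler hands the
 * request off to the engine instead of touching the hardware directly. A
 * minimal sketch, assuming a hypothetical driver that keeps its engine in
 * a foo_dev structure reachable from the tfm context (foo_ctx and the field
 * names are illustrative):
 *
 *	static int foo_aes_ecb_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct crypto_ablkcipher *tfm =
 *				crypto_ablkcipher_reqtfm(req);
 *		struct foo_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 *
 *		return crypto_transfer_cipher_request_to_engine(
 *						ctx->foo->engine, req);
 *	}
 *
 * The return value is usually -EINPROGRESS (or -EBUSY for a backlogged
 * request), which the handler propagates to its caller.
 */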

/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 * @need_pump: if true, queue the pump work to process the request as soon
 * as the engine is not busy
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
                                 struct ahash_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ahash_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and let the engine process it
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
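
/*
 * The hash path mirrors the cipher one: an ahash ->digest() (or ->update(),
 * ->final()) handler queues the request rather than driving the hardware
 * itself. A minimal, hypothetical sketch (foo_ctx is illustrative):
 *
 *	static int foo_sha1_digest(struct ahash_request *req)
 *	{
 *		struct foo_ctx *ctx = crypto_ahash_ctx(
 *				crypto_ahash_reqtfm(req));
 *
 *		return crypto_transfer_hash_request_to_engine(
 *						ctx->foo->engine, req);
 *	}
 */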

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number to report back to the request's completion callback
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
                                    struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_cipher_request) {
                        ret = engine->unprepare_cipher_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
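
/*
 * Drivers typically call this from whatever context observes completion,
 * e.g. an interrupt handler or a DMA callback. A minimal, hypothetical
 * sketch (foo_dev, foo_hw_status and the field names are illustrative):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *foo = data;
 *		int err = foo_hw_status(foo) ? -EIO : 0;
 *
 *		crypto_finalize_cipher_request(foo->engine,
 *					       foo->cur_req, err);
 *		return IRQ_HANDLED;
 *	}
 *
 * Besides completing the request, this also re-queues the pump work so the
 * next queued request gets processed.
 */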

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number to report back to the request's completion callback
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == &req->base)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared &&
                    engine->unprepare_hash_request) {
                        ret = engine->unprepare_hash_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while to let the queued requests be processed.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
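
/*
 * A driver would typically stop the engine on the way down, e.g. from its
 * suspend hook, and restart it on resume. A minimal, hypothetical sketch
 * (foo_dev is illustrative):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		return crypto_engine_stop(foo->engine);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		return crypto_engine_start(foo->engine);
 *	}
 */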

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
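
/*
 * Typical probe-time wiring: allocate the engine, install the driver's
 * callbacks on it and start it before registering any algorithms. A
 * minimal, hypothetical sketch (the foo_* callbacks are illustrative):
 *
 *	foo->engine = crypto_engine_alloc_init(dev, true);
 *	if (!foo->engine)
 *		return -ENOMEM;
 *
 *	foo->engine->prepare_cipher_request = foo_prepare_req;
 *	foo->engine->cipher_one_request = foo_cipher_one_request;
 *
 *	ret = crypto_engine_start(foo->engine);
 *	if (ret)
 *		return ret;
 */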

/**
 * crypto_engine_exit - free the resources of the hardware engine when exiting
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
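
/*
 * The matching teardown in a driver's remove path, after unregistering its
 * algorithms (a minimal, hypothetical sketch; foo_ecb_aes_alg is
 * illustrative):
 *
 *	crypto_unregister_alg(&foo_ecb_aes_alg);
 *	crypto_engine_exit(foo->engine);
 *
 * The engine structure itself is devm-allocated against the device passed
 * to crypto_engine_alloc_init(), so it is freed automatically when the
 * device goes away.
 */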

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");