/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

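/*
 * Size of the bounce buffer used for hosts that can only handle a
 * single segment per request (64 KiB at most; further capped by the
 * host's own size limits below).
 */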
#define MMC_QUEUE_BOUNCESZ      65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

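/*
 * Worker thread that pulls requests off the block queue and hands them
 * to the issue function.  It sleeps when both the current and previous
 * request slots are empty, and exits when asked to stop.
 */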
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
                unsigned int cmd_flags = 0;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
                        cmd_flags = req ? req->cmd_flags : 0;
                        mq->issue_fn(mq, req);
                        cond_resched();
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * The current request becomes the previous one,
                         * and vice versa.  A special request has already
                         * been completed at this point, so do not carry
                         * it over into the previous slot.
                         */
                        if (cmd_flags & MMC_REQ_SPECIAL_MASK)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        swap(mq->mqrq_prev, mq->mqrq_cur);
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * A new request arrived while the MMC thread may be
                 * blocked waiting for the previous request to complete,
                 * with no current request fetched.
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

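/*
 * Allocate and initialise a scatterlist with @sg_len entries.  Returns
 * NULL and sets *err to -ENOMEM on allocation failure.
 */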
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

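/*
 * Advertise the card's erase/discard capabilities to the block layer:
 * the maximum discard size, a granularity derived from the card's
 * preferred erase size, whether discarded data reads back as zeroes,
 * and secure erase support.
 */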
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

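/*
 * Hosts limited to a single segment can use a contiguous bounce buffer
 * (capped at MMC_QUEUE_BOUNCESZ and the host's own size limits) so that
 * multi-segment requests can still be issued as a single transfer.
 */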
#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warn("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        } else {
                                mqrq_prev->bounce_buf =
                                                kmalloc(bouncesz, GFP_KERNEL);
                                if (!mqrq_prev->bounce_buf) {
                                        pr_warn("%s: unable to allocate bounce prev buffer\n",
                                                mmc_card_name(card));
                                        kfree(mqrq_cur->bounce_buf);
                                        mqrq_cur->bounce_buf = NULL;
                                }
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

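        /*
         * Without bounce buffers, expose the host controller's real
         * segment count and size limits directly to the block layer.
         */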
        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}

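/**
 * mmc_cleanup_queue - shut down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, drain
 * any remaining requests, and free the scatterlists and bounce buffers
 * of both request slots.
 */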
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

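/*
 * Allocate the packed command structures used to merge several requests
 * into a single packed read/write transfer.
 */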
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}

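/*
 * Free the packed command structures allocated by mmc_packed_init().
 */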
void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

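/*
 * Map a packed command onto a scatterlist.  For packed writes, the
 * packed command header is mapped first, followed by the data of each
 * request on the packed list.
 */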
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        sg_unmark_end(__sg++);
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                sg_unmark_end(__sg++);
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}