/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}
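
/*
 * Request slot tracking: mq->qslots is a bitmap of busy slots, and each
 * mmc_queue_req's task_id doubles as its bit index, so ffz() finds the
 * first free slot directly. mmc_queue_req_find()/mmc_queue_req_free()
 * are symmetric set/clear operations, with mq->qcnt counting requests
 * currently in flight.
 */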
struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
					 struct request *req)
{
	struct mmc_queue_req *mqrq;
	int i = ffz(mq->qslots);

	if (i >= mq->qdepth)
		return NULL;

	mqrq = &mq->mqrq[i];
	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
		test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = req;
	mq->qcnt += 1;
	__set_bit(mqrq->task_id, &mq->qslots);

	return mqrq;
}

void mmc_queue_req_free(struct mmc_queue *mq,
			struct mmc_queue_req *mqrq)
{
	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
		!test_bit(mqrq->task_id, &mq->qslots));
	mqrq->req = NULL;
	mq->qcnt -= 1;
	__clear_bit(mqrq->task_id, &mq->qslots);
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			mq->asleep = true;
			cntx->is_waiting_last_req = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
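
/*
 * Note that mmcqd holds thread_sem whenever it may touch the queue and
 * only drops it around schedule() while idle. That is what allows
 * mmc_queue_suspend() to use down(&mq->thread_sem) to wait for the
 * thread to quiesce; see mmc_queue_suspend()/mmc_queue_resume() below.
 */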

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}
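
/*
 * The !mq path above is taken once mmc_cleanup_queue() has cleared
 * q->queuedata: any request that still arrives is failed quietly with
 * BLK_STS_IOERR instead of dereferencing a queue that is going away.
 */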

static struct scatterlist *mmc_alloc_sg(int sg_len)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;

	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
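
/*
 * Unit note for the discard setup above: card->pref_erase is in 512-byte
 * sectors while discard_granularity is in bytes, hence the << 9. For
 * example, pref_erase == 1024 sectors gives a granularity of 512 KiB.
 */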

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
{
	int i;

	for (i = 0; i < qdepth; i++)
		mmc_queue_req_free_bufs(&mqrq[i]);
}

static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
{
	mmc_queue_reqs_free_bufs(mqrq, qdepth);
	kfree(mqrq);
}

static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
{
	struct mmc_queue_req *mqrq;
	int i;

	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
	if (mqrq) {
		for (i = 0; i < qdepth; i++)
			mqrq[i].task_id = i;
	}

	return mqrq;
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
				       unsigned int bouncesz)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mqrq[i].bounce_buf)
			return -ENOMEM;

		mqrq[i].sg = mmc_alloc_sg(1);
		if (!mqrq[i].sg)
			return -ENOMEM;

		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
		if (!mqrq[i].bounce_sg)
			return -ENOMEM;
	}

	return 0;
}

static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
				   unsigned int bouncesz)
{
	int ret;

	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
	if (ret)
		mmc_queue_reqs_free_bufs(mqrq, qdepth);

	return !ret;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (host->max_segs != 1)
		return 0;

	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;
	if (bouncesz > host->max_seg_size)
		bouncesz = host->max_seg_size;
	if (bouncesz > host->max_blk_count * 512)
		bouncesz = host->max_blk_count * 512;

	if (bouncesz <= 512)
		return 0;

	return bouncesz;
}
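
/*
 * Worked example: a host with max_segs == 1, max_req_size == 128 KiB,
 * max_seg_size == 32 KiB and max_blk_count == 256 starts at
 * MMC_QUEUE_BOUNCESZ (64 KiB), is clamped to 32 KiB by max_seg_size and
 * stays there, since max_blk_count * 512 == 128 KiB. Hosts that can do
 * scatter-gather (max_segs != 1) never use a bounce buffer.
 */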

#else
static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
					  int qdepth, unsigned int bouncesz)
{
	return false;
}

static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
			       int max_segs)
{
	int i;

	for (i = 0; i < qdepth; i++) {
		mqrq[i].sg = mmc_alloc_sg(max_segs);
		if (!mqrq[i].sg)
			return -ENOMEM;
	}

	return 0;
}

void mmc_queue_free_shared_queue(struct mmc_card *card)
{
	if (card->mqrq) {
		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
		card->mqrq = NULL;
		card->qdepth = 0;
	}
}

static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
{
	struct mmc_host *host = card->host;
	struct mmc_queue_req *mqrq;
	unsigned int bouncesz;
	int ret = 0;

	if (card->mqrq)
		return -EINVAL;

	mqrq = mmc_queue_alloc_mqrqs(qdepth);
	if (!mqrq)
		return -ENOMEM;

	card->mqrq = mqrq;
	card->qdepth = qdepth;

	bouncesz = mmc_queue_calc_bouncesz(host);

	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
		bouncesz = 0;
		pr_warn("%s: unable to allocate bounce buffers\n",
			mmc_card_name(card));
	}

	card->bouncesz = bouncesz;

	if (!bouncesz) {
		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
		if (ret)
			goto out_err;
	}

	return ret;

out_err:
	mmc_queue_free_shared_queue(card);
	return ret;
}

int mmc_queue_alloc_shared_queue(struct mmc_card *card)
{
	return __mmc_queue_alloc_shared_queue(card, 2);
}
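
/*
 * A queue depth of 2 matches the driver's double-buffered request
 * handling: one slot for the request being issued, one for the next
 * request being prepared while the first is in flight.
 */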

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq = card->mqrq;
	mq->qdepth = card->qdepth;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (card->bouncesz) {
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
		blk_queue_max_segment_size(mq->queue, card->bouncesz);
	} else {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
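
/*
 * Rough call sequence (a sketch; "md" is illustrative, the real caller
 * is the mmc block driver):
 *
 *	ret = mmc_queue_alloc_shared_queue(card);	(once per card)
 *	if (ret)
 *		return ret;
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 */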

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
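
/*
 * Restarting the queue above with q->queuedata == NULL may look odd,
 * but it is what lets mmc_request_fn()'s !mq branch run and fail any
 * requests still sitting in the dispatch queue once the thread is gone.
 */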

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
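
/*
 * Resume releases thread_sem before restarting the queue, so mmcqd can
 * start fetching as soon as blk_start_queue() makes requests visible.
 */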

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
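
/*
 * In the bounce case the host driver thus sees exactly one segment:
 * mqrq->sg[0], spanning the bounce buffer and sized to the sum of the
 * request's real segment lengths computed above.
 */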

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}
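
/*
 * Per-request bounce lifecycle, for reference:
 *
 *	sg_len = mmc_queue_map_sg(mq, mqrq);	(one sg entry if bouncing)
 *	mmc_queue_bounce_pre(mqrq);		(copy payload in, writes only)
 *	... host driver performs the transfer ...
 *	mmc_queue_bounce_post(mqrq);		(copy payload out, reads only)
 */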