/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}
/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
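
/*
 * Usage sketch (not part of the original file): a caller that wants to keep
 * an io_context alive across a deferred operation pins it with
 * get_io_context() and drops it with put_io_context().  For %current,
 * current->io_context can be read directly since only the task itself
 * installs or clears it.  The struct and function names below are
 * hypothetical.
 */
struct example_deferred_io {
	struct io_context *ioc;
};

static void __maybe_unused example_pin_current_ioc(struct example_deferred_io *dio)
{
	struct io_context *ioc = current->io_context;

	dio->ioc = NULL;
	if (ioc) {
		get_io_context(ioc);	/* take our own reference */
		dio->ioc = ioc;
	}
}

static void __maybe_unused example_unpin_ioc(struct example_deferred_io *dio)
{
	put_io_context(dio->ioc);	/* put_io_context() tolerates NULL */
	dio->ioc = NULL;
}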
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
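
/*
 * Usage sketch (not part of the original file): ioc_clear_queue() expects
 * q->queue_lock to be held, so a teardown path wraps it roughly like this.
 * The function name is hypothetical.
 */
static void __maybe_unused example_clear_queue_icqs(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);
}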
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
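
/*
 * Usage sketch (not part of the original file): fetch a task's io_context,
 * creating it on first use, and drop the reference when done.  GFP_NOIO and
 * NUMA_NO_NODE are typical arguments here; the function name is
 * hypothetical.
 */
static void __maybe_unused example_with_task_ioc(struct task_struct *task)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
	if (!ioc)
		return;		/* no ioc existed and allocation failed */

	/* ... e.g. inspect ioc->ioprio or associate an icq with it ... */

	put_io_context(ioc);	/* drop the reference taken above */
}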
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
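
/*
 * Usage sketch (not part of the original file): ioc_lookup_icq() must run
 * under q->queue_lock, and the returned icq is only guaranteed to stay
 * valid while that lock is held.  The function name is hypothetical.
 */
static void __maybe_unused example_touch_icq(struct io_context *ioc,
					     struct request_queue *q)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	if (icq) {
		/* safe to dereference icq here, under q->queue_lock */
	}
	spin_unlock_irq(q->queue_lock);
}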
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If the icq doesn't exist,
 * it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
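
/*
 * Usage sketch (not part of the original file): the usual pattern is to try
 * ioc_lookup_icq() first and only fall back to ioc_create_icq(), which
 * re-does the lookup internally if it loses the linking race.  The caller
 * must hold a reference on @ioc and keep @q alive, as noted above.  The
 * function name is hypothetical.
 */
static __maybe_unused struct io_cq *example_find_or_create_icq(struct io_context *ioc,
							       struct request_queue *q)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(ioc, q, GFP_NOIO);	/* may sleep */
	return icq;
}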
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);