/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

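/*
 * For illustration, BTRFS_WORK_HELPER(worker_helper) expands to:
 *
 *	void btrfs_worker_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg,
 *					struct btrfs_work, normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * One such helper is stamped out per work type below, so that each
 * type gets a distinct work-function symbol in stack traces.
 */
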
struct btrfs_fs_info *
btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs support for that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
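
/*
 * Illustrative sketch (not part of this file): a producer can poll the
 * congestion test above to throttle itself instead of piling more
 * items onto an already backlogged queue.  The queue chosen here is
 * only an example.
 */
#if 0
static void example_throttle(struct btrfs_fs_info *fs_info)
{
	/* Back off while pending work exceeds twice the threshold. */
	while (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		cond_resched();
}
#endif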

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For a low threshold, disabling thresholding is the better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For a thresholdable wq, let its concurrency grow on
		 * demand.  Use a minimal max_active at alloc time to
		 * reduce resource usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
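
/*
 * Illustrative sketch (assumption, mirrors typical mount-time setup;
 * flags and max_active as picked by the caller): passing WQ_HIGHPRI
 * makes btrfs_alloc_workqueue() create both a normal and a
 * high-priority queue, while thresh selects between a fixed
 * max_active (thresh < DFT_THRESHOLD) and on-demand scaling.
 *
 *	fs_info->workers =
 *		btrfs_alloc_workqueue(fs_info, "worker",
 *				      flags | WQ_HIGHPRI, max_active, 16);
 */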

/*
 * Hook for the threshold check, called from btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold check, called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change under us, but that is fine: it does not
	 * need to be perfectly accurate to compute new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
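
/*
 * Worked example of the scaling above, assuming thresh == 32 and
 * limit_active == 8: wq->count wraps modulo thresh / 4 == 8, and the
 * call on which it wraps to zero skips the update entirely.  On the
 * remaining calls, current_active is raised by one when more than 32
 * items are pending, lowered by one when fewer than 16 are pending,
 * and always clamped to the range [1, 8] before being applied via
 * workqueue_set_max_active().
 */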

static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		void *wtag;

		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list. */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with
		 * the lock held, though.  Save the work pointer as a tag
		 * for the trace event, because the callback could free
		 * the structure.
		 */
		wtag = work;
		work->ordered_free(work);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
	}
	spin_unlock_irqrestore(lock, flags);
}
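
/*
 * Example of the barrier above: if works A, B and C were queued in
 * that order and B and C finish work->func() first, their
 * ordered_func() calls still wait.  A sits unfinished at the head of
 * ordered_list, so the !WORK_DONE_BIT test stops the loop until A
 * completes, after which A, B and C are retired in queue order.
 */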

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
	 * We must not touch anything inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed
	 *    by another thread almost instantly.
	 * So we save everything we need here, up front.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
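
/*
 * Illustrative sketch (assumption; the example_* names are
 * hypothetical): a submitter pairs btrfs_init_work() with
 * btrfs_queue_work(), picking one of the helpers stamped out by
 * BTRFS_WORK_HELPER() above as uniq_func.
 */
#if 0
static void example_func(struct btrfs_work *work)
{
	/* Do the actual work here. */
}

static void example_submit(struct btrfs_fs_info *fs_info,
			   struct btrfs_work *work)
{
	/* No ordered_func/ordered_free: this work runs unordered. */
	btrfs_init_work(work, btrfs_worker_helper, example_func,
			NULL, NULL);
	btrfs_queue_work(fs_info->workers, work);
}
#endif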

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
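
/*
 * Illustrative sketch (assumption): the high-priority bit must be set
 * before queuing, since btrfs_queue_work() reads WORK_HIGH_PRIO_BIT to
 * choose between wq->high and wq->normal.
 *
 *	btrfs_set_work_high_priority(&async->work);
 *	btrfs_queue_work(fs_info->workers, &async->work);
 */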