/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"


/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
        u64 qgroupid;

        /*
         * state
         */
        u64 rfer;       /* referenced */
        u64 rfer_cmpr;  /* referenced compressed */
        u64 excl;       /* exclusive */
        u64 excl_cmpr;  /* exclusive compressed */

        /*
         * limits
         */
        u64 lim_flags;  /* which limits are set */
        u64 max_rfer;
        u64 max_excl;
        u64 rsv_rfer;
        u64 rsv_excl;

        /*
         * reservation tracking
         */
        u64 reserved;

        /*
         * lists
         */
        struct list_head groups;  /* groups this group is member of */
        struct list_head members; /* groups that are members of this group */
        struct list_head dirty;   /* dirty groups */
        struct rb_node node;      /* tree of qgroups */

        /*
         * temp variables for accounting operations
         * Refer to qgroup_shared_accounting() for details.
         */
        u64 old_refcnt;
        u64 new_refcnt;
};

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->old_refcnt < seq)
                qg->old_refcnt = seq;
        qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
                                           int mod)
{
        if (qg->new_refcnt < seq)
                qg->new_refcnt = seq;
        qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->old_refcnt < seq)
                return 0;
        return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
        if (qg->new_refcnt < seq)
                return 0;
        return qg->new_refcnt - seq;
}
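
/*
 * Worked example of the seq-based refcnts above: accounting code bumps a
 * global sequence number before each pass instead of clearing
 * old_refcnt/new_refcnt on every qgroup.  A refcnt that is still below
 * the current seq is just stale data from an earlier pass and reads
 * back as zero:
 *
 *      seq = 1000, stale qg->old_refcnt = 7
 *      btrfs_qgroup_update_old_refcnt(qg, seq, 1)  -> old_refcnt = 1001
 *      btrfs_qgroup_get_old_refcnt(qg, seq)        -> 1
 *
 * so no per-qgroup reset is needed between accounting operations.
 */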

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
        struct list_head next_group;
        struct list_head next_member;
        struct btrfs_qgroup *group;
        struct btrfs_qgroup *member;
};
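
/*
 * Example: making qgroup 0/257 a member of qgroup 1/100 allocates one
 * btrfs_qgroup_list whose ->member points at 0/257 and ->group at
 * 1/100; next_group is linked into 0/257's ->groups list and
 * next_member into 1/100's ->members list, so the relation can be
 * walked from either side (see add_relation_rb() below).
 */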

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
        return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
                   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
                                           u64 qgroupid)
{
        struct rb_node *n = fs_info->qgroup_tree.rb_node;
        struct btrfs_qgroup *qgroup;

        while (n) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                if (qgroup->qgroupid < qgroupid)
                        n = n->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        n = n->rb_right;
                else
                        return qgroup;
        }
        return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
                                          u64 qgroupid)
{
        struct rb_node **p = &fs_info->qgroup_tree.rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_qgroup *qgroup;

        while (*p) {
                parent = *p;
                qgroup = rb_entry(parent, struct btrfs_qgroup, node);

                if (qgroup->qgroupid < qgroupid)
                        p = &(*p)->rb_left;
                else if (qgroup->qgroupid > qgroupid)
                        p = &(*p)->rb_right;
                else
                        return qgroup;
        }

        qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
        if (!qgroup)
                return ERR_PTR(-ENOMEM);

        qgroup->qgroupid = qgroupid;
        INIT_LIST_HEAD(&qgroup->groups);
        INIT_LIST_HEAD(&qgroup->members);
        INIT_LIST_HEAD(&qgroup->dirty);

        rb_link_node(&qgroup->node, parent, p);
        rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

        return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
        struct btrfs_qgroup_list *list;

        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }

        while (!list_empty(&qgroup->members)) {
                list = list_first_entry(&qgroup->members,
                                        struct btrfs_qgroup_list, next_member);
                list_del(&list->next_group);
                list_del(&list->next_member);
                kfree(list);
        }
        kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

        if (!qgroup)
                return -ENOENT;

        rb_erase(&qgroup->node, &fs_info->qgroup_tree);
        __del_qgroup_rb(qgroup);
        return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list = kzalloc(sizeof(*list), GFP_ATOMIC);
        if (!list)
                return -ENOMEM;

        list->group = parent;
        list->member = member;
        list_add_tail(&list->next_group, &member->groups);
        list_add_tail(&list->next_member, &parent->members);

        return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
                           u64 memberid, u64 parentid)
{
        struct btrfs_qgroup *member;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup_list *list;

        member = find_qgroup_rb(fs_info, memberid);
        parent = find_qgroup_rb(fs_info, parentid);
        if (!member || !parent)
                return -ENOENT;

        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        list_del(&list->next_group);
                        list_del(&list->next_member);
                        kfree(list);
                        return 0;
                }
        }
        return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
                               u64 rfer, u64 excl)
{
        struct btrfs_qgroup *qgroup;

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup)
                return -EINVAL;
        if (qgroup->rfer != rfer || qgroup->excl != excl)
                return -EINVAL;
        return 0;
}
#endif

/*
 * The full config is read in one go; this is only called from
 * open_ctree(). It doesn't use any locking, as at this point we're
 * still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *quota_root = fs_info->quota_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *l;
        int slot;
        int ret = 0;
        u64 flags = 0;
        u64 rescan_progress = 0;

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /* default this to quota off, in case no status key is found */
        fs_info->qgroup_flags = 0;

        /*
         * pass 1: read status, all qgroup infos and limits
         */
        key.objectid = 0;
        key.type = 0;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
        if (ret)
                goto out;

        while (1) {
                struct btrfs_qgroup *qgroup;

                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
                        struct btrfs_qgroup_status_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_status_item);

                        if (btrfs_qgroup_status_version(l, ptr) !=
                            BTRFS_QGROUP_STATUS_VERSION) {
                                btrfs_err(fs_info,
                                 "old qgroup version, quota disabled");
                                goto out;
                        }
                        if (btrfs_qgroup_status_generation(l, ptr) !=
                            fs_info->generation) {
                                flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                                btrfs_err(fs_info,
                                        "qgroup generation mismatch, marked as inconsistent");
                        }
                        fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
                                                                          ptr);
                        rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
                        goto next1;
                }

                if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
                    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
                        goto next1;

                qgroup = find_qgroup_rb(fs_info, found_key.offset);
                if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
                    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
                        btrfs_err(fs_info, "inconsistent qgroup config");
                        flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                }
                if (!qgroup) {
                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out;
                        }
                }
                switch (found_key.type) {
                case BTRFS_QGROUP_INFO_KEY: {
                        struct btrfs_qgroup_info_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_info_item);
                        qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
                        qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
                        qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
                        qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
                        /* generation currently unused */
                        break;
                }
                case BTRFS_QGROUP_LIMIT_KEY: {
                        struct btrfs_qgroup_limit_item *ptr;

                        ptr = btrfs_item_ptr(l, slot,
                                             struct btrfs_qgroup_limit_item);
                        qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
                        qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
                        qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
                        qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
                        qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
                        break;
                }
                }
next1:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        btrfs_release_path(path);

        /*
         * pass 2: read all qgroup relations
         */
        key.objectid = 0;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = 0;
        ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
        if (ret)
                goto out;
        while (1) {
                slot = path->slots[0];
                l = path->nodes[0];
                btrfs_item_key_to_cpu(l, &found_key, slot);

                if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
                        goto next2;

                if (found_key.objectid > found_key.offset) {
                        /* parent <- member, not needed to build config */
                        /* FIXME should we omit the key completely? */
                        goto next2;
                }

                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
                if (ret == -ENOENT) {
                        btrfs_warn(fs_info,
                                "orphan qgroup relation 0x%llx->0x%llx",
                                found_key.objectid, found_key.offset);
                        ret = 0;        /* ignore the error */
                }
                if (ret)
                        goto out;
next2:
                ret = btrfs_next_item(quota_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
out:
        fs_info->qgroup_flags |= flags;
        if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
                clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
                 ret >= 0)
                ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
        btrfs_free_path(path);

        if (ret < 0) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        }

        return ret < 0 ? ret : 0;
}
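
/*
 * For reference, the quota tree items read by the two passes above:
 *
 *      (0, BTRFS_QGROUP_STATUS_KEY, 0)        global status item
 *      (0, BTRFS_QGROUP_INFO_KEY, qgroupid)   usage counters
 *      (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid)  limits
 *      (src, BTRFS_QGROUP_RELATION_KEY, dst)  relation, stored in both
 *                                             directions
 *
 * Pass 1 walks from key (0, 0, 0) and picks up status/info/limit items;
 * pass 2 only needs the member->parent half of each relation pair
 * (objectid < offset), as the skip above shows.
 */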

/*
 * This is called from close_ctree() or open_ctree() or
 * btrfs_quota_disable(); the first two are single-threaded paths. For
 * the third, quota_root has already been set to NULL with qgroup_lock
 * held, so it is safe to clean up the in-memory structures without
 * taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
        struct rb_node *n;
        struct btrfs_qgroup *qgroup;

        while ((n = rb_first(&fs_info->qgroup_tree))) {
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() both when unmounting the
         * filesystem and when disabling quota, so set qgroup_ulist to
         * NULL here to avoid a double free.
         */
        ulist_free(fs_info->qgroup_ulist);
        fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *quota_root,
                                    u64 src, u64 dst)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = src;
        key.type = BTRFS_QGROUP_RELATION_KEY;
        key.offset = dst;

        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
out:
        btrfs_free_path(path);
        return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_qgroup_info_item *qgroup_info;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        if (btrfs_is_testing(quota_root->fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;

        /*
         * Avoid a transaction abort by catching -EEXIST here. In that
         * case, we proceed by re-initializing the existing structure
         * on disk.
         */

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_info));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
                                 struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
        btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

        btrfs_mark_buffer_dirty(leaf);

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*qgroup_limit));
        if (ret && ret != -EEXIST)
                goto out;

        leaf = path->nodes[0];
        qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
                                  struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
        btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
                           struct btrfs_root *quota_root, u64 qgroupid)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroupid;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);
        if (ret)
                goto out;

        btrfs_release_path(path);

        key.type = BTRFS_QGROUP_LIMIT_KEY;
        ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, quota_root, path);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_limit_item *qgroup_limit;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_LIMIT_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
        btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
        btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
        btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
        btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
        btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_qgroup *qgroup)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_info_item *qgroup_info;
        int ret;
        int slot;

        if (btrfs_is_testing(root->fs_info))
                return 0;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_INFO_KEY;
        key.offset = qgroup->qgroupid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
        btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
        btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
        btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
        btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
        btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_qgroup_status_item *ptr;
        int ret;
        int slot;

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;

        if (ret)
                goto out;

        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
        btrfs_set_qgroup_status_rescan(l, ptr,
                                fs_info->qgroup_rescan_progress.objectid);

        btrfs_mark_buffer_dirty(l);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *leaf = NULL;
        int ret;
        int nr = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                nr = btrfs_header_nritems(leaf);
                if (!nr)
                        break;
                /*
                 * Delete leaves one by one, since the whole tree is
                 * going to be deleted anyway.
                 */
                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }
        ret = 0;
out:
        set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
        btrfs_free_path(path);
        return ret;
}

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *quota_root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path = NULL;
        struct btrfs_qgroup_status_item *ptr;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_qgroup *qgroup = NULL;
        int ret = 0;
        int slot;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (fs_info->quota_root) {
                set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
                goto out;
        }

        fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
        if (!fs_info->qgroup_ulist) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * initially create the quota tree
         */
        quota_root = btrfs_create_tree(trans, fs_info,
                                       BTRFS_QUOTA_TREE_OBJECTID);
        if (IS_ERR(quota_root)) {
                ret = PTR_ERR(quota_root);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
        key.offset = 0;

        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
                goto out_free_path;

        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
                                 struct btrfs_qgroup_status_item);
        btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
        btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
        fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
                                BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
        btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

        btrfs_mark_buffer_dirty(leaf);

        key.objectid = 0;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = 0;

        btrfs_release_path(path);
        ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
        if (ret > 0)
                goto out_add_root;
        if (ret < 0)
                goto out_free_path;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.type == BTRFS_ROOT_REF_KEY) {
                        ret = add_qgroup_item(trans, quota_root,
                                              found_key.offset);
                        if (ret)
                                goto out_free_path;

                        qgroup = add_qgroup_rb(fs_info, found_key.offset);
                        if (IS_ERR(qgroup)) {
                                ret = PTR_ERR(qgroup);
                                goto out_free_path;
                        }
                }
                ret = btrfs_next_item(tree_root, path);
                if (ret < 0)
                        goto out_free_path;
                if (ret)
                        break;
        }

out_add_root:
        btrfs_release_path(path);
        ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
        if (ret)
                goto out_free_path;

        qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
        if (IS_ERR(qgroup)) {
                ret = PTR_ERR(qgroup);
                goto out_free_path;
        }
        spin_lock(&fs_info->qgroup_lock);
        fs_info->quota_root = quota_root;
        set_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags);
        spin_unlock(&fs_info->qgroup_lock);
out_free_path:
        btrfs_free_path(path);
out_free_root:
        if (ret) {
                free_extent_buffer(quota_root->node);
                free_extent_buffer(quota_root->commit_root);
                kfree(quota_root);
        }
out:
        if (ret) {
                ulist_free(fs_info->qgroup_ulist);
                fs_info->qgroup_ulist = NULL;
        }
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *quota_root;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
                goto out;
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
        spin_unlock(&fs_info->qgroup_lock);

        btrfs_free_qgroup_config(fs_info);

        ret = btrfs_clean_quota_tree(trans, quota_root);
        if (ret)
                goto out;

        ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
        if (ret)
                goto out;

        list_del(&quota_root->dirty_list);

        btrfs_tree_lock(quota_root->node);
        clean_tree_block(fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

        free_extent_buffer(quota_root->node);
        free_extent_buffer(quota_root->commit_root);
        kfree(quota_root);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
                         struct btrfs_qgroup *qgroup)
{
        if (list_empty(&qgroup->dirty))
                list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

static void report_reserved_underflow(struct btrfs_fs_info *fs_info,
                                      struct btrfs_qgroup *qgroup,
                                      u64 num_bytes)
{
#ifdef CONFIG_BTRFS_DEBUG
        WARN_ON(qgroup->reserved < num_bytes);
        btrfs_debug(fs_info,
                "qgroup %llu reserved space underflow, have: %llu, to free: %llu",
                qgroup->qgroupid, qgroup->reserved, num_bytes);
#endif
        qgroup->reserved = 0;
}

/*
 * The easy accounting: if we are adding/removing the only ref for an
 * extent then this qgroup and all of the parent qgroups get their
 * reference and exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                    struct ulist *tmp, u64 ref_root,
                                    u64 num_bytes, int sign)
{
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *glist;
        struct ulist_node *unode;
        struct ulist_iterator uiter;
        int ret = 0;

        qgroup = find_qgroup_rb(fs_info, ref_root);
        if (!qgroup)
                goto out;

        qgroup->rfer += sign * num_bytes;
        qgroup->rfer_cmpr += sign * num_bytes;

        WARN_ON(sign < 0 && qgroup->excl < num_bytes);
        qgroup->excl += sign * num_bytes;
        qgroup->excl_cmpr += sign * num_bytes;
        if (sign > 0) {
                if (qgroup->reserved < num_bytes)
                        report_reserved_underflow(fs_info, qgroup, num_bytes);
                else
                        qgroup->reserved -= num_bytes;
        }

        qgroup_dirty(fs_info, qgroup);

        /* Get all of the parent groups that contain this qgroup */
        list_for_each_entry(glist, &qgroup->groups, next_group) {
                ret = ulist_add(tmp, glist->group->qgroupid,
                                qgroup_to_aux(glist->group), GFP_ATOMIC);
                if (ret < 0)
                        goto out;
        }

        /* Iterate all of the parents and adjust their reference counts */
        ULIST_ITER_INIT(&uiter);
        while ((unode = ulist_next(tmp, &uiter))) {
                qgroup = unode_aux_to_qgroup(unode);
                qgroup->rfer += sign * num_bytes;
                qgroup->rfer_cmpr += sign * num_bytes;
                WARN_ON(sign < 0 && qgroup->excl < num_bytes);
                qgroup->excl += sign * num_bytes;
                if (sign > 0) {
                        if (qgroup->reserved < num_bytes)
                                report_reserved_underflow(fs_info, qgroup,
                                                          num_bytes);
                        else
                                qgroup->reserved -= num_bytes;
                }
                qgroup->excl_cmpr += sign * num_bytes;
                qgroup_dirty(fs_info, qgroup);

                /* Add any parents of the parents */
                list_for_each_entry(glist, &qgroup->groups, next_group) {
                        ret = ulist_add(tmp, glist->group->qgroupid,
                                        qgroup_to_aux(glist->group), GFP_ATOMIC);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;
out:
        return ret;
}
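
/*
 * Example of the exclusive fast path above: assume qgroup 0/257 is a
 * member of 1/100 and a 16KiB extent referenced only by subvolume 257
 * is freed.  __qgroup_excl_accounting(fs_info, tmp, 257, 16384, -1)
 * subtracts 16KiB from both rfer and excl of 0/257, walks the ->groups
 * list to collect 1/100 (and transitively its parents) into @tmp, and
 * applies the same adjustment there - no backref walk is needed,
 * because the extent had exactly one reference.
 */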

/*
 * Quick path for updating qgroup accounting when the source qgroup has
 * only exclusive refs.
 *
 * In that case updating all parents is enough; otherwise a full rescan
 * is needed.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, >0 when a full rescan is needed (the
 * INCONSISTENT flag is set in that case), and <0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
                                   struct ulist *tmp, u64 src, u64 dst,
                                   int sign)
{
        struct btrfs_qgroup *qgroup;
        int ret = 1;
        int err = 0;

        qgroup = find_qgroup_rb(fs_info, src);
        if (!qgroup)
                goto out;
        if (qgroup->excl == qgroup->rfer) {
                ret = 0;
                err = __qgroup_excl_accounting(fs_info, tmp, dst,
                                               qgroup->excl, sign);
                if (err < 0) {
                        ret = err;
                        goto out;
                }
        }
out:
        if (ret)
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
        return ret;
}
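
/*
 * Example: if qgroup 0/257 has excl == rfer (every byte it references
 * is exclusive to it), then assigning it to 1/100 can be accounted by
 * simply adding 0/257's excl to 1/100 via __qgroup_excl_accounting()
 * with sign == 1; removing the relation is the same with sign == -1.
 * If excl != rfer, some data is shared and only a rescan can tell how
 * much of it the parent already counts, hence the >0 return and the
 * INCONSISTENT flag.
 */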

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;

        /* Check the level of src and dst first */
        if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
                return -EINVAL;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        ret = add_qgroup_relation_item(trans, quota_root, src, dst);
        if (ret)
                goto out;

        ret = add_qgroup_relation_item(trans, quota_root, dst, src);
        if (ret) {
                del_qgroup_relation_item(trans, quota_root, src, dst);
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        ret = add_relation_rb(fs_info, src, dst);
        if (ret < 0) {
                spin_unlock(&fs_info->qgroup_lock);
                goto out;
        }
        ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        ulist_free(tmp);
        return ret;
}
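
/*
 * Qgroup ids encode their level in the upper 16 bits (the "1" in
 * "1/100"), which is what the btrfs_qgroup_level() check above
 * enforces: a member must sit on a lower level than its parent.  A
 * userspace sketch of creating this relation via the assign ioctl
 * (error handling omitted, fd is an open file descriptor on the
 * filesystem):
 *
 *      struct btrfs_ioctl_qgroup_assign_args args = {
 *              .assign = 1,                    1 = add, 0 = remove
 *              .src = 257,                     qgroup 0/257
 *              .dst = (1ULL << 48) | 100,      qgroup 1/100
 *      };
 *      ioctl(fd, BTRFS_IOC_QGROUP_ASSIGN, &args);
 */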

static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *parent;
        struct btrfs_qgroup *member;
        struct btrfs_qgroup_list *list;
        struct ulist *tmp;
        int ret = 0;
        int err;

        tmp = ulist_alloc(GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        member = find_qgroup_rb(fs_info, src);
        parent = find_qgroup_rb(fs_info, dst);
        if (!member || !parent) {
                ret = -EINVAL;
                goto out;
        }

        /* first check whether such a qgroup relation already exists */
        list_for_each_entry(list, &member->groups, next_group) {
                if (list->group == parent)
                        goto exist;
        }
        ret = -ENOENT;
        goto out;
exist:
        ret = del_qgroup_relation_item(trans, quota_root, src, dst);
        err = del_qgroup_relation_item(trans, quota_root, dst, src);
        if (err && !ret)
                ret = err;

        spin_lock(&fs_info->qgroup_lock);
        del_relation_rb(fs_info, src, dst);
        ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
        spin_unlock(&fs_info->qgroup_lock);
out:
        ulist_free(tmp);
        return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        ret = __del_qgroup_relation(trans, fs_info, src, dst);
        mutex_unlock(&fs_info->qgroup_ioctl_lock);

        return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }
        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (qgroup) {
                ret = -EEXIST;
                goto out;
        }

        ret = add_qgroup_item(trans, quota_root, qgroupid);
        if (ret)
                goto out;

        spin_lock(&fs_info->qgroup_lock);
        qgroup = add_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);

        if (IS_ERR(qgroup))
                ret = PTR_ERR(qgroup);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
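
/*
 * Userspace sketch of the matching create/remove ioctl (error handling
 * omitted); .create selects create (1) vs. remove (0):
 *
 *      struct btrfs_ioctl_qgroup_create_args args = {
 *              .create = 1,
 *              .qgroupid = (1ULL << 48) | 100,    create qgroup 1/100
 *      };
 *      ioctl(fd, BTRFS_IOC_QGROUP_CREATE, &args);
 */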

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        struct btrfs_qgroup_list *list;
        int ret = 0;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        } else {
                /* check if there are no children of this qgroup */
                if (!list_empty(&qgroup->members)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        ret = del_qgroup_item(trans, quota_root, qgroupid);

        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
                                        struct btrfs_qgroup_list, next_group);
                ret = __del_qgroup_relation(trans, fs_info,
                                           qgroupid,
                                           list->group->qgroupid);
                if (ret)
                        goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 qgroupid,
                       struct btrfs_qgroup_limit *limit)
{
        struct btrfs_root *quota_root;
        struct btrfs_qgroup *qgroup;
        int ret = 0;
        /*
         * Sometimes we would like to clear the limit on this qgroup.
         * To meet this requirement, we treat -1 as a special value
         * which tells the kernel to clear the limit on this qgroup.
         */
        const u64 CLEAR_VALUE = -1;

        mutex_lock(&fs_info->qgroup_ioctl_lock);
        quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
        }

        qgroup = find_qgroup_rb(fs_info, qgroupid);
        if (!qgroup) {
                ret = -ENOENT;
                goto out;
        }

        spin_lock(&fs_info->qgroup_lock);
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
                if (limit->max_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
                        qgroup->max_rfer = 0;
                } else {
                        qgroup->max_rfer = limit->max_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
                if (limit->max_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
                        qgroup->max_excl = 0;
                } else {
                        qgroup->max_excl = limit->max_excl;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
                if (limit->rsv_rfer == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
                        qgroup->rsv_rfer = 0;
                } else {
                        qgroup->rsv_rfer = limit->rsv_rfer;
                }
        }
        if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
                if (limit->rsv_excl == CLEAR_VALUE) {
                        qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
                        qgroup->rsv_excl = 0;
                } else {
                        qgroup->rsv_excl = limit->rsv_excl;
                }
        }
        qgroup->lim_flags |= limit->flags;

        spin_unlock(&fs_info->qgroup_lock);

        ret = update_qgroup_limit_item(trans, quota_root, qgroup);
        if (ret) {
                fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
                btrfs_info(fs_info, "unable to update quota limit for %llu",
                       qgroupid);
        }

out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
}
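
/*
 * Example of the CLEAR_VALUE convention above, seen from userspace
 * (sketch, error handling omitted): passing -1 for a field whose flag
 * is set clears that limit instead of setting it to 2^64 - 1:
 *
 *      struct btrfs_ioctl_qgroup_limit_args args = {
 *              .qgroupid = 257,
 *              .lim = {
 *                      .flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
 *                      .max_rfer = (__u64)-1,     clear the rfer limit
 *              },
 *      };
 *      ioctl(fd, BTRFS_IOC_QGROUP_LIMIT, &args);
 */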

int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_qgroup_extent_record *record;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        u64 qgroup_to_skip;
        int ret = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        qgroup_to_skip = delayed_refs->qgroup_to_skip;

        /*
         * No need to take any lock, since this function will only be
         * called from btrfs_commit_transaction().
         */
        node = rb_first(&delayed_refs->dirty_extent_root);
        while (node) {
                record = rb_entry(node, struct btrfs_qgroup_extent_record,
                                  node);
                if (WARN_ON(!record->old_roots))
                        ret = btrfs_find_all_roots(NULL, fs_info,
                                        record->bytenr, 0, &record->old_roots);
                if (ret < 0)
                        break;
                if (qgroup_to_skip)
                        ulist_del(record->old_roots, qgroup_to_skip, 0);
                node = rb_next(node);
        }
        return ret;
}

int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
                                struct btrfs_delayed_ref_root *delayed_refs,
                                struct btrfs_qgroup_extent_record *record)
{
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_qgroup_extent_record *entry;
        u64 bytenr = record->bytenr;

        assert_spin_locked(&delayed_refs->lock);
        trace_btrfs_qgroup_trace_extent(fs_info, record);

        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
                                 node);
                if (bytenr < entry->bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
                        return 1;
        }

        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
        return 0;
}

int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
                                   struct btrfs_qgroup_extent_record *qrecord)
{
        struct ulist *old_root;
        u64 bytenr = qrecord->bytenr;
        int ret;

        ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root);
        if (ret < 0)
                return ret;

        /*
         * Here we don't need to take the lock of
         * trans->transaction->delayed_refs, since the inserted qrecord
         * won't be deleted; only qrecord->node may be modified (by a new
         * qrecord insert).
         *
         * So modifying qrecord->old_roots is safe here.
         */
        qrecord->old_roots = old_root;
        return 0;
}
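
/*
 * The usual call sequence for the two halves above: callers insert the
 * record with btrfs_qgroup_trace_extent_nolock() while holding
 * delayed_refs->lock, then call btrfs_qgroup_trace_extent_post() after
 * dropping it, so that the potentially expensive btrfs_find_all_roots()
 * walk runs without the lock held.  btrfs_qgroup_trace_extent() below
 * wraps both steps.
 */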
1532
1533 int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
1534                 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
1535                 gfp_t gfp_flag)
1536 {
1537         struct btrfs_qgroup_extent_record *record;
1538         struct btrfs_delayed_ref_root *delayed_refs;
1539         int ret;
1540
1541         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
1542             bytenr == 0 || num_bytes == 0)
1543                 return 0;
1544         if (WARN_ON(trans == NULL))
1545                 return -EINVAL;
1546         record = kmalloc(sizeof(*record), gfp_flag);
1547         if (!record)
1548                 return -ENOMEM;
1549
1550         delayed_refs = &trans->transaction->delayed_refs;
1551         record->bytenr = bytenr;
1552         record->num_bytes = num_bytes;
1553         record->old_roots = NULL;
1554
1555         spin_lock(&delayed_refs->lock);
1556         ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
1557         spin_unlock(&delayed_refs->lock);
1558         if (ret > 0) {
1559                 kfree(record);
1560                 return 0;
1561         }
1562         return btrfs_qgroup_trace_extent_post(fs_info, record);
1563 }
1564
1565 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
1566                                   struct btrfs_fs_info *fs_info,
1567                                   struct extent_buffer *eb)
1568 {
1569         int nr = btrfs_header_nritems(eb);
1570         int i, extent_type, ret;
1571         struct btrfs_key key;
1572         struct btrfs_file_extent_item *fi;
1573         u64 bytenr, num_bytes;
1574
1575         /* We can be called directly from walk_up_proc() */
1576         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1577                 return 0;
1578
1579         for (i = 0; i < nr; i++) {
1580                 btrfs_item_key_to_cpu(eb, &key, i);
1581
1582                 if (key.type != BTRFS_EXTENT_DATA_KEY)
1583                         continue;
1584
1585                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1586                 /* Filter out non qgroup-accountable extents */
1587                 extent_type = btrfs_file_extent_type(eb, fi);
1588
1589                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1590                         continue;
1591
1592                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1593                 if (!bytenr)
1594                         continue;
1595
1596                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1597
1598                 ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
1599                                                 num_bytes, GFP_NOFS);
1600                 if (ret)
1601                         return ret;
1602         }
1603         return 0;
1604 }
1605
1606 /*
1607  * Walk up the tree from the bottom, freeing leaves and any interior
1608  * nodes which have had all slots visited. If a node (leaf or
1609  * interior) is freed, the node above it will have its slot
1610  * incremented. The root node will never be freed.
1611  *
1612  * At the end of this function, we should have a path which has all
1613  * slots incremented to the next position for a search. If we need to
1614  * read a new node it will be NULL and the node above it will have the
1615  * correct slot selected for a later read.
1616  *
1617  * If we increment the root node's slot counter past the number of
1618  * elements, 1 is returned to signal completion of the search.
1619  */
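/*
 * Worked example (illustrative numbers, not from the original source):
 * take root_level = 2, path->slots = {2, 0, 1}, a leaf with nr = 3
 * items and a level-1 node with nr = 1:
 *
 *   level 0: slot 2 -> 3 (and level == 0) -> free the leaf, clear slot
 *   level 1: slot 0 -> 1, 1 >= nr          -> free the node, clear slot
 *   level 2: slot 1 -> 2, 2 < nr(root)     -> stop, walk back down
 *
 * Had the root slot also run past nritems(root), the check after the
 * loop would return 1 and the caller would end its search.
 */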
1620 static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
1621 {
1622         int level = 0;
1623         int nr, slot;
1624         struct extent_buffer *eb;
1625
1626         if (root_level == 0)
1627                 return 1;
1628
1629         while (level <= root_level) {
1630                 eb = path->nodes[level];
1631                 nr = btrfs_header_nritems(eb);
1632                 path->slots[level]++;
1633                 slot = path->slots[level];
1634                 if (slot >= nr || level == 0) {
1635                         /*
1636                          * Don't free the root - we will detect this
1637                          * condition after our loop and return a
1638                          * positive value for caller to stop walking the tree.
1639                          */
1640                         if (level != root_level) {
1641                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
1642                                 path->locks[level] = 0;
1643
1644                                 free_extent_buffer(eb);
1645                                 path->nodes[level] = NULL;
1646                                 path->slots[level] = 0;
1647                         }
1648                 } else {
1649                         /*
1650                          * We have a valid slot to walk back down
1651                          * from. Stop here so caller can process these
1652                          * new nodes.
1653                          */
1654                         break;
1655                 }
1656
1657                 level++;
1658         }
1659
1660         eb = path->nodes[root_level];
1661         if (path->slots[root_level] >= btrfs_header_nritems(eb))
1662                 return 1;
1663
1664         return 0;
1665 }
1666
1667 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
1668                                struct btrfs_root *root,
1669                                struct extent_buffer *root_eb,
1670                                u64 root_gen, int root_level)
1671 {
1672         struct btrfs_fs_info *fs_info = root->fs_info;
1673         int ret = 0;
1674         int level;
1675         struct extent_buffer *eb = root_eb;
1676         struct btrfs_path *path = NULL;
1677
1678         BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
1679         BUG_ON(root_eb == NULL);
1680
1681         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1682                 return 0;
1683
1684         if (!extent_buffer_uptodate(root_eb)) {
1685                 ret = btrfs_read_buffer(root_eb, root_gen);
1686                 if (ret)
1687                         goto out;
1688         }
1689
1690         if (root_level == 0) {
1691                 ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
1692                 goto out;
1693         }
1694
1695         path = btrfs_alloc_path();
1696         if (!path)
1697                 return -ENOMEM;
1698
1699         /*
1700          * Walk down the tree.  Missing extent blocks are filled in as
1701          * we go. Metadata is accounted every time we read a new
1702          * extent block.
1703          *
1704          * When we reach a leaf, we account for file extent items in it,
1705          * walk back up the tree (adjusting slot pointers as we go)
1706          * and restart the search process.
1707          */
1708         extent_buffer_get(root_eb); /* For path */
1709         path->nodes[root_level] = root_eb;
1710         path->slots[root_level] = 0;
1711         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
1712 walk_down:
1713         level = root_level;
1714         while (level >= 0) {
1715                 if (path->nodes[level] == NULL) {
1716                         int parent_slot;
1717                         u64 child_gen;
1718                         u64 child_bytenr;
1719
1720                         /*
1721                          * We need to get child blockptr/gen from parent before
1722                          * we can read it.
1723                          */
1724                         eb = path->nodes[level + 1];
1725                         parent_slot = path->slots[level + 1];
1726                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
1727                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
1728
1729                         eb = read_tree_block(fs_info, child_bytenr, child_gen);
1730                         if (IS_ERR(eb)) {
1731                                 ret = PTR_ERR(eb);
1732                                 goto out;
1733                         } else if (!extent_buffer_uptodate(eb)) {
1734                                 free_extent_buffer(eb);
1735                                 ret = -EIO;
1736                                 goto out;
1737                         }
1738
1739                         path->nodes[level] = eb;
1740                         path->slots[level] = 0;
1741
1742                         btrfs_tree_read_lock(eb);
1743                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1744                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
1745
1746                         ret = btrfs_qgroup_trace_extent(trans, fs_info,
1747                                                         child_bytenr,
1748                                                         fs_info->nodesize,
1749                                                         GFP_NOFS);
1750                         if (ret)
1751                                 goto out;
1752                 }
1753
1754                 if (level == 0) {
1755                         ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
1756                                                             path->nodes[level]);
1757                         if (ret)
1758                                 goto out;
1759
1760                         /* Nonzero return here means we completed our search */
1761                         ret = adjust_slots_upwards(path, root_level);
1762                         if (ret)
1763                                 break;
1764
1765                         /* Restart search with new slots */
1766                         goto walk_down;
1767                 }
1768
1769                 level--;
1770         }
1771
1772         ret = 0;
1773 out:
1774         btrfs_free_path(path);
1775
1776         return ret;
1777 }
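/*
 * Illustrative walk (assuming root_level = 1 and a root node with two
 * child leaves):
 *
 *   1. nodes[0] == NULL -> read leaf 0, trace fs_info->nodesize bytes
 *      for the leaf itself
 *   2. level == 0 -> trace leaf 0's file extent items
 *   3. adjust_slots_upwards() moves the root slot from 0 to 1,
 *      goto walk_down
 *   4. read and trace leaf 1 and its items the same way
 *   5. adjust_slots_upwards() pushes the root slot past nritems and
 *      returns 1 -> done
 */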
1778
1779 #define UPDATE_NEW      0
1780 #define UPDATE_OLD      1
1781 /*
1782  * Walk all of the roots that point to the bytenr and adjust their refcnts.
1783  */
1784 static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
1785                                 struct ulist *roots, struct ulist *tmp,
1786                                 struct ulist *qgroups, u64 seq, int update_old)
1787 {
1788         struct ulist_node *unode;
1789         struct ulist_iterator uiter;
1790         struct ulist_node *tmp_unode;
1791         struct ulist_iterator tmp_uiter;
1792         struct btrfs_qgroup *qg;
1793         int ret = 0;
1794
1795         if (!roots)
1796                 return 0;
1797         ULIST_ITER_INIT(&uiter);
1798         while ((unode = ulist_next(roots, &uiter))) {
1799                 qg = find_qgroup_rb(fs_info, unode->val);
1800                 if (!qg)
1801                         continue;
1802
1803                 ulist_reinit(tmp);
1804                 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
1805                                 GFP_ATOMIC);
1806                 if (ret < 0)
1807                         return ret;
1808                 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
1809                 if (ret < 0)
1810                         return ret;
1811                 ULIST_ITER_INIT(&tmp_uiter);
1812                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1813                         struct btrfs_qgroup_list *glist;
1814
1815                         qg = unode_aux_to_qgroup(tmp_unode);
1816                         if (update_old)
1817                                 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
1818                         else
1819                                 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
1820                         list_for_each_entry(glist, &qg->groups, next_group) {
1821                                 ret = ulist_add(qgroups, glist->group->qgroupid,
1822                                                 qgroup_to_aux(glist->group),
1823                                                 GFP_ATOMIC);
1824                                 if (ret < 0)
1825                                         return ret;
1826                                 ret = ulist_add(tmp, glist->group->qgroupid,
1827                                                 qgroup_to_aux(glist->group),
1828                                                 GFP_ATOMIC);
1829                                 if (ret < 0)
1830                                         return ret;
1831                         }
1832                 }
1833         }
1834         return 0;
1835 }
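/*
 * Propagation example (illustrative ids): if @roots contains subvolume
 * 257 and qgroup 0/257 is a member of 1/100, one iteration of the outer
 * loop above bumps the old (UPDATE_OLD) or new (UPDATE_NEW) refcnt of
 * both 0/257 and 1/100 for this @seq, and both qgroups end up in
 * @qgroups for qgroup_update_counters() to examine later.
 */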
1836
1837 /*
1838  * Update qgroup rfer/excl counters.
1839  * Rfer update is easy, the code explains itself.
1840  *
1841  * Excl update is tricky, the update is split into 2 parts.
1842  * Part 1: Possible exclusive <-> sharing detection:
1843  *      |       A       |       !A      |
1844  *  -------------------------------------
1845  *  B   |       *       |       -       |
1846  *  -------------------------------------
1847  *  !B  |       +       |       **      |
1848  *  -------------------------------------
1849  *
1850  * Conditions:
1851  * A:   cur_old_roots < nr_old_roots    (not exclusive before)
1852  * !A:  cur_old_roots == nr_old_roots   (possibly exclusive before)
1853  * B:   cur_new_roots < nr_new_roots    (not exclusive now)
1854  * !B:  cur_new_roots == nr_new_roots   (possibly exclusive now)
1855  *
1856  * Results:
1857  * +: Possibly sharing -> exclusive     -: Possibly exclusive -> sharing
1858  * *: Definitely not changed.           **: Possibly unchanged.
1859  *
1860  * For the !A and !B conditions, the exception is the cur_old/new_roots == 0
1861  * case.
1862  *
1863  * To make the logic clear, we first use conditions A and B to split the
1864  * combinations into 4 results.
1865  *
1866  * Then, for results "+" and "-", check the old/new_roots == 0 case, as
1867  * there only one variant may be 0.
1868  *
1869  * Lastly, check result **; since both variants may be 0 there, split it
1870  * again (2x2). But this time nothing else needs to be considered, and
1871  * the code and logic are easy to follow.
1872  */
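/*
 * Worked example (illustrative): nr_old_roots = 2, nr_new_roots = 1,
 * num_bytes = 16K, i.e. an extent goes from being shared by two roots
 * to being owned by one.  For the surviving qgroup cur_old_count = 1
 * and cur_new_count = 1, so A && !B holds ("+"): rfer is untouched and
 * excl/excl_cmpr grow by 16K.  For the dropped qgroup cur_new_count =
 * 0 < nr_new_roots, so A && B holds ("*"): it only loses 16K of
 * rfer/rfer_cmpr, having had no exclusive share to begin with.
 */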
1873 static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1874                                   struct ulist *qgroups,
1875                                   u64 nr_old_roots,
1876                                   u64 nr_new_roots,
1877                                   u64 num_bytes, u64 seq)
1878 {
1879         struct ulist_node *unode;
1880         struct ulist_iterator uiter;
1881         struct btrfs_qgroup *qg;
1882         u64 cur_new_count, cur_old_count;
1883
1884         ULIST_ITER_INIT(&uiter);
1885         while ((unode = ulist_next(qgroups, &uiter))) {
1886                 bool dirty = false;
1887
1888                 qg = unode_aux_to_qgroup(unode);
1889                 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
1890                 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
1891
1892                 trace_qgroup_update_counters(fs_info, qg->qgroupid,
1893                                              cur_old_count, cur_new_count);
1894
1895                 /* Rfer update part */
1896                 if (cur_old_count == 0 && cur_new_count > 0) {
1897                         qg->rfer += num_bytes;
1898                         qg->rfer_cmpr += num_bytes;
1899                         dirty = true;
1900                 }
1901                 if (cur_old_count > 0 && cur_new_count == 0) {
1902                         qg->rfer -= num_bytes;
1903                         qg->rfer_cmpr -= num_bytes;
1904                         dirty = true;
1905                 }
1906
1907                 /* Excl update part */
1908                 /* Exclusive/none -> shared case */
1909                 if (cur_old_count == nr_old_roots &&
1910                     cur_new_count < nr_new_roots) {
1911                         /* Exclusive -> shared */
1912                         if (cur_old_count != 0) {
1913                                 qg->excl -= num_bytes;
1914                                 qg->excl_cmpr -= num_bytes;
1915                                 dirty = true;
1916                         }
1917                 }
1918
1919                 /* Shared -> exclusive/none case */
1920                 if (cur_old_count < nr_old_roots &&
1921                     cur_new_count == nr_new_roots) {
1922                         /* Shared->exclusive */
1923                         if (cur_new_count != 0) {
1924                                 qg->excl += num_bytes;
1925                                 qg->excl_cmpr += num_bytes;
1926                                 dirty = true;
1927                         }
1928                 }
1929
1930                 /* Exclusive/none -> exclusive/none case */
1931                 if (cur_old_count == nr_old_roots &&
1932                     cur_new_count == nr_new_roots) {
1933                         if (cur_old_count == 0) {
1934                                 /* None -> exclusive/none */
1935
1936                                 if (cur_new_count != 0) {
1937                                         /* None -> exclusive */
1938                                         qg->excl += num_bytes;
1939                                         qg->excl_cmpr += num_bytes;
1940                                         dirty = true;
1941                                 }
1942                                 /* None -> none, nothing changed */
1943                         } else {
1944                                 /* Exclusive -> exclusive/none */
1945
1946                                 if (cur_new_count == 0) {
1947                                         /* Exclusive -> none */
1948                                         qg->excl -= num_bytes;
1949                                         qg->excl_cmpr -= num_bytes;
1950                                         dirty = true;
1951                                 }
1952                                 /* Exclusive -> exclusive, nothing changed */
1953                         }
1954                 }
1955
1956                 if (dirty)
1957                         qgroup_dirty(fs_info, qg);
1958         }
1959         return 0;
1960 }
1961
1962 int
1963 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
1964                             struct btrfs_fs_info *fs_info,
1965                             u64 bytenr, u64 num_bytes,
1966                             struct ulist *old_roots, struct ulist *new_roots)
1967 {
1968         struct ulist *qgroups = NULL;
1969         struct ulist *tmp = NULL;
1970         u64 seq;
1971         u64 nr_new_roots = 0;
1972         u64 nr_old_roots = 0;
1973         int ret = 0;
1974
1975         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
1976                 return 0;
1977
1978         if (new_roots)
1979                 nr_new_roots = new_roots->nnodes;
1980         if (old_roots)
1981                 nr_old_roots = old_roots->nnodes;
1982
1983         BUG_ON(!fs_info->quota_root);
1984
1985         trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
1986                                           nr_old_roots, nr_new_roots);
1987
1988         qgroups = ulist_alloc(GFP_NOFS);
1989         if (!qgroups) {
1990                 ret = -ENOMEM;
1991                 goto out_free;
1992         }
1993         tmp = ulist_alloc(GFP_NOFS);
1994         if (!tmp) {
1995                 ret = -ENOMEM;
1996                 goto out_free;
1997         }
1998
1999         mutex_lock(&fs_info->qgroup_rescan_lock);
2000         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2001                 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2002                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2003                         ret = 0;
2004                         goto out_free;
2005                 }
2006         }
2007         mutex_unlock(&fs_info->qgroup_rescan_lock);
2008
2009         spin_lock(&fs_info->qgroup_lock);
2010         seq = fs_info->qgroup_seq;
2011
2012         /* Update old refcnts using old_roots */
2013         ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2014                                    UPDATE_OLD);
2015         if (ret < 0)
2016                 goto out;
2017
2018         /* Update new refcnts using new_roots */
2019         ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2020                                    UPDATE_NEW);
2021         if (ret < 0)
2022                 goto out;
2023
2024         qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2025                                num_bytes, seq);
2026
2027         /*
2028          * Bump qgroup_seq past every refcnt set above to avoid seq overlap
2029          */
2030         fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2031 out:
2032         spin_unlock(&fs_info->qgroup_lock);
2033 out_free:
2034         ulist_free(tmp);
2035         ulist_free(qgroups);
2036         ulist_free(old_roots);
2037         ulist_free(new_roots);
2038         return ret;
2039 }
2040
2041 int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
2042                                  struct btrfs_fs_info *fs_info)
2043 {
2044         struct btrfs_qgroup_extent_record *record;
2045         struct btrfs_delayed_ref_root *delayed_refs;
2046         struct ulist *new_roots = NULL;
2047         struct rb_node *node;
2048         u64 qgroup_to_skip;
2049         int ret = 0;
2050
2051         delayed_refs = &trans->transaction->delayed_refs;
2052         qgroup_to_skip = delayed_refs->qgroup_to_skip;
2053         while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2054                 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2055                                   node);
2056
2057                 trace_btrfs_qgroup_account_extents(fs_info, record);
2058
2059                 if (!ret) {
2060                         /*
2061                          * Use (u64)-1 as time_seq to do a special search, which
2062                          * doesn't lock the tree or delayed_refs and searches
2063                          * the current root. It's safe inside commit_transaction().
2064                          */
2065                         ret = btrfs_find_all_roots(trans, fs_info,
2066                                         record->bytenr, (u64)-1, &new_roots);
2067                         if (ret < 0)
2068                                 goto cleanup;
2069                         if (qgroup_to_skip)
2070                                 ulist_del(new_roots, qgroup_to_skip, 0);
2071                         ret = btrfs_qgroup_account_extent(trans, fs_info,
2072                                         record->bytenr, record->num_bytes,
2073                                         record->old_roots, new_roots);
2074                         record->old_roots = NULL;
2075                         new_roots = NULL;
2076                 }
2077 cleanup:
2078                 ulist_free(record->old_roots);
2079                 ulist_free(new_roots);
2080                 new_roots = NULL;
2081                 rb_erase(node, &delayed_refs->dirty_extent_root);
2082                 kfree(record);
2083
2084         }
2085         return ret;
2086 }
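/*
 * Lifecycle note (summary of the flow above): a dirty extent record is
 * inserted at trace time; its old_roots are resolved either right after
 * insertion (btrfs_qgroup_trace_extent_post()) or by the fallback pass
 * that precedes accounting at commit; new_roots is resolved here at
 * commit time; btrfs_qgroup_account_extent() then consumes and frees
 * both ulists.
 */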
2087
2088 /*
2089  * Called from commit_transaction(). Writes all changed qgroups to disk.
2090  */
2091 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2092                       struct btrfs_fs_info *fs_info)
2093 {
2094         struct btrfs_root *quota_root = fs_info->quota_root;
2095         int ret = 0;
2096         int start_rescan_worker = 0;
2097
2098         if (!quota_root)
2099                 goto out;
2100
2101         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
2102             test_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
2103                 start_rescan_worker = 1;
2104
2105         if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
2106                 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2107         if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
2108                 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2109
2110         spin_lock(&fs_info->qgroup_lock);
2111         while (!list_empty(&fs_info->dirty_qgroups)) {
2112                 struct btrfs_qgroup *qgroup;
2113                 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2114                                           struct btrfs_qgroup, dirty);
2115                 list_del_init(&qgroup->dirty);
2116                 spin_unlock(&fs_info->qgroup_lock);
2117                 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2118                 if (ret)
2119                         fs_info->qgroup_flags |=
2120                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2121                 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2122                 if (ret)
2123                         fs_info->qgroup_flags |=
2124                                         BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2125                 spin_lock(&fs_info->qgroup_lock);
2126         }
2127         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2128                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2129         else
2130                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2131         spin_unlock(&fs_info->qgroup_lock);
2132
2133         ret = update_qgroup_status_item(trans, fs_info, quota_root);
2134         if (ret)
2135                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2136
2137         if (!ret && start_rescan_worker) {
2138                 ret = qgroup_rescan_init(fs_info, 0, 1);
2139                 if (!ret) {
2140                         qgroup_rescan_zero_tracking(fs_info);
2141                         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2142                                          &fs_info->qgroup_rescan_work);
2143                 }
2144                 ret = 0;
2145         }
2146
2147 out:
2148
2149         return ret;
2150 }
2151
2152 /*
2153  * Copy the accounting information between qgroups. This is necessary
2154  * when a snapshot or a subvolume is created. Throwing an error will
2155  * cause a transaction abort so we take extra care here to only error
2156  * when a readonly fs is a reasonable outcome.
2157  */
2158 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2159                          struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2160                          struct btrfs_qgroup_inherit *inherit)
2161 {
2162         int ret = 0;
2163         int i;
2164         u64 *i_qgroups;
2165         struct btrfs_root *quota_root = fs_info->quota_root;
2166         struct btrfs_qgroup *srcgroup;
2167         struct btrfs_qgroup *dstgroup;
2168         u32 level_size = 0;
2169         u64 nums;
2170
2171         mutex_lock(&fs_info->qgroup_ioctl_lock);
2172         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2173                 goto out;
2174
2175         if (!quota_root) {
2176                 ret = -EINVAL;
2177                 goto out;
2178         }
2179
2180         if (inherit) {
2181                 i_qgroups = (u64 *)(inherit + 1);
2182                 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2183                        2 * inherit->num_excl_copies;
2184                 for (i = 0; i < nums; ++i) {
2185                         srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2186
2187                         /*
2188                          * Zero out invalid groups so we can ignore
2189                          * them later.
2190                          */
2191                         if (!srcgroup ||
2192                             ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2193                                 *i_qgroups = 0ULL;
2194
2195                         ++i_qgroups;
2196                 }
2197         }
2198
2199         /*
2200          * create a tracking group for the subvol itself
2201          */
2202         ret = add_qgroup_item(trans, quota_root, objectid);
2203         if (ret)
2204                 goto out;
2205
2206         if (srcid) {
2207                 struct btrfs_root *srcroot;
2208                 struct btrfs_key srckey;
2209
2210                 srckey.objectid = srcid;
2211                 srckey.type = BTRFS_ROOT_ITEM_KEY;
2212                 srckey.offset = (u64)-1;
2213                 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2214                 if (IS_ERR(srcroot)) {
2215                         ret = PTR_ERR(srcroot);
2216                         goto out;
2217                 }
2218
2219                 level_size = fs_info->nodesize;
2220         }
2221
2222         /*
2223          * add qgroup to all inherited groups
2224          */
2225         if (inherit) {
2226                 i_qgroups = (u64 *)(inherit + 1);
2227                 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2228                         if (*i_qgroups == 0)
2229                                 continue;
2230                         ret = add_qgroup_relation_item(trans, quota_root,
2231                                                        objectid, *i_qgroups);
2232                         if (ret && ret != -EEXIST)
2233                                 goto out;
2234                         ret = add_qgroup_relation_item(trans, quota_root,
2235                                                        *i_qgroups, objectid);
2236                         if (ret && ret != -EEXIST)
2237                                 goto out;
2238                 }
2239                 ret = 0;
2240         }
2241
2242
2243         spin_lock(&fs_info->qgroup_lock);
2244
2245         dstgroup = add_qgroup_rb(fs_info, objectid);
2246         if (IS_ERR(dstgroup)) {
2247                 ret = PTR_ERR(dstgroup);
2248                 goto unlock;
2249         }
2250
2251         if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2252                 dstgroup->lim_flags = inherit->lim.flags;
2253                 dstgroup->max_rfer = inherit->lim.max_rfer;
2254                 dstgroup->max_excl = inherit->lim.max_excl;
2255                 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2256                 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2257
2258                 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2259                 if (ret) {
2260                         fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2261                         btrfs_info(fs_info,
2262                                    "unable to update quota limit for %llu",
2263                                    dstgroup->qgroupid);
2264                         goto unlock;
2265                 }
2266         }
2267
2268         if (srcid) {
2269                 srcgroup = find_qgroup_rb(fs_info, srcid);
2270                 if (!srcgroup)
2271                         goto unlock;
2272
2273                 /*
2274                  * We call inherit after we clone the root in order to make sure
2275                  * our counts don't go crazy, so at this point the only
2276                  * difference between the two roots should be the root node.
2277                  */
2278                 dstgroup->rfer = srcgroup->rfer;
2279                 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2280                 dstgroup->excl = level_size;
2281                 dstgroup->excl_cmpr = level_size;
2282                 srcgroup->excl = level_size;
2283                 srcgroup->excl_cmpr = level_size;
2284
2285                 /* inherit the limit info */
2286                 dstgroup->lim_flags = srcgroup->lim_flags;
2287                 dstgroup->max_rfer = srcgroup->max_rfer;
2288                 dstgroup->max_excl = srcgroup->max_excl;
2289                 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2290                 dstgroup->rsv_excl = srcgroup->rsv_excl;
2291
2292                 qgroup_dirty(fs_info, dstgroup);
2293                 qgroup_dirty(fs_info, srcgroup);
2294         }
2295
2296         if (!inherit)
2297                 goto unlock;
2298
2299         i_qgroups = (u64 *)(inherit + 1);
2300         for (i = 0; i < inherit->num_qgroups; ++i) {
2301                 if (*i_qgroups) {
2302                         ret = add_relation_rb(fs_info, objectid, *i_qgroups);
2303                         if (ret)
2304                                 goto unlock;
2305                 }
2306                 ++i_qgroups;
2307         }
2308
2309         for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
2310                 struct btrfs_qgroup *src;
2311                 struct btrfs_qgroup *dst;
2312
2313                 if (!i_qgroups[0] || !i_qgroups[1])
2314                         continue;
2315
2316                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2317                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2318
2319                 if (!src || !dst) {
2320                         ret = -EINVAL;
2321                         goto unlock;
2322                 }
2323
2324                 dst->rfer = src->rfer - level_size;
2325                 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2326         }
2327         for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
2328                 struct btrfs_qgroup *src;
2329                 struct btrfs_qgroup *dst;
2330
2331                 if (!i_qgroups[0] || !i_qgroups[1])
2332                         continue;
2333
2334                 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2335                 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2336
2337                 if (!src || !dst) {
2338                         ret = -EINVAL;
2339                         goto unlock;
2340                 }
2341
2342                 dst->excl = src->excl + level_size;
2343                 dst->excl_cmpr = src->excl_cmpr + level_size;
2344         }
2345
2346 unlock:
2347         spin_unlock(&fs_info->qgroup_lock);
2348 out:
2349         mutex_unlock(&fs_info->qgroup_ioctl_lock);
2350         return ret;
2351 }
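/*
 * Layout sketch of the inherit argument consumed above (illustrative,
 * with num_qgroups = 1, num_ref_copies = 1, num_excl_copies = 0):
 *
 *      struct btrfs_qgroup_inherit         <- header: counts + lim
 *      u64 qgroupid_to_join                <- num_qgroups entries
 *      u64 ref_copy_src, ref_copy_dst      <- 2 per ref copy
 *
 * i_qgroups = (u64 *)(inherit + 1) therefore walks
 * num_qgroups + 2 * num_ref_copies + 2 * num_excl_copies ids in total,
 * which is exactly the nums bound computed at the top of the function.
 */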
2352
2353 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
2354 {
2355         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2356             qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer)
2357                 return false;
2358
2359         if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2360             qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl)
2361                 return false;
2362
2363         return true;
2364 }
2365
2366 static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
2367 {
2368         struct btrfs_root *quota_root;
2369         struct btrfs_qgroup *qgroup;
2370         struct btrfs_fs_info *fs_info = root->fs_info;
2371         u64 ref_root = root->root_key.objectid;
2372         int ret = 0;
2373         struct ulist_node *unode;
2374         struct ulist_iterator uiter;
2375
2376         if (!is_fstree(ref_root))
2377                 return 0;
2378
2379         if (num_bytes == 0)
2380                 return 0;
2381
2382         spin_lock(&fs_info->qgroup_lock);
2383         quota_root = fs_info->quota_root;
2384         if (!quota_root)
2385                 goto out;
2386
2387         qgroup = find_qgroup_rb(fs_info, ref_root);
2388         if (!qgroup)
2389                 goto out;
2390
2391         /*
2392          * In a first step, check all affected qgroups to see whether any
2393          * limit would be exceeded.
2394          */
2395         ulist_reinit(fs_info->qgroup_ulist);
2396         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2397                         (uintptr_t)qgroup, GFP_ATOMIC);
2398         if (ret < 0)
2399                 goto out;
2400         ULIST_ITER_INIT(&uiter);
2401         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2402                 struct btrfs_qgroup *qg;
2403                 struct btrfs_qgroup_list *glist;
2404
2405                 qg = unode_aux_to_qgroup(unode);
2406
2407                 if (enforce && !qgroup_check_limits(qg, num_bytes)) {
2408                         ret = -EDQUOT;
2409                         goto out;
2410                 }
2411
2412                 list_for_each_entry(glist, &qg->groups, next_group) {
2413                         ret = ulist_add(fs_info->qgroup_ulist,
2414                                         glist->group->qgroupid,
2415                                         (uintptr_t)glist->group, GFP_ATOMIC);
2416                         if (ret < 0)
2417                                 goto out;
2418                 }
2419         }
2420         ret = 0;
2421         /*
2422          * No limits exceeded, now record the reservation into all qgroups
2423          */
2424         ULIST_ITER_INIT(&uiter);
2425         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2426                 struct btrfs_qgroup *qg;
2427
2428                 qg = unode_aux_to_qgroup(unode);
2429
2430                 qg->reserved += num_bytes;
2431         }
2432
2433 out:
2434         spin_unlock(&fs_info->qgroup_lock);
2435         return ret;
2436 }
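/*
 * Why the reservation is two-pass (illustrative ids): if 0/257 is a
 * member of 1/100 and only 1/100 has max_rfer set, a single combined
 * pass could charge 0/257 first and then hit -EDQUOT on 1/100, leaving
 * a partial reservation behind.  Checking every affected qgroup before
 * touching qg->reserved, all under qgroup_lock, keeps the charge
 * all-or-nothing.
 */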
2437
2438 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
2439                                u64 ref_root, u64 num_bytes)
2440 {
2441         struct btrfs_root *quota_root;
2442         struct btrfs_qgroup *qgroup;
2443         struct ulist_node *unode;
2444         struct ulist_iterator uiter;
2445         int ret = 0;
2446
2447         if (!is_fstree(ref_root))
2448                 return;
2449
2450         if (num_bytes == 0)
2451                 return;
2452
2453         spin_lock(&fs_info->qgroup_lock);
2454
2455         quota_root = fs_info->quota_root;
2456         if (!quota_root)
2457                 goto out;
2458
2459         qgroup = find_qgroup_rb(fs_info, ref_root);
2460         if (!qgroup)
2461                 goto out;
2462
2463         ulist_reinit(fs_info->qgroup_ulist);
2464         ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2465                         (uintptr_t)qgroup, GFP_ATOMIC);
2466         if (ret < 0)
2467                 goto out;
2468         ULIST_ITER_INIT(&uiter);
2469         while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2470                 struct btrfs_qgroup *qg;
2471                 struct btrfs_qgroup_list *glist;
2472
2473                 qg = unode_aux_to_qgroup(unode);
2474
2475                 if (qg->reserved < num_bytes)
2476                         report_reserved_underflow(fs_info, qg, num_bytes);
2477                 else
2478                         qg->reserved -= num_bytes;
2479
2480                 list_for_each_entry(glist, &qg->groups, next_group) {
2481                         ret = ulist_add(fs_info->qgroup_ulist,
2482                                         glist->group->qgroupid,
2483                                         (uintptr_t)glist->group, GFP_ATOMIC);
2484                         if (ret < 0)
2485                                 goto out;
2486                 }
2487         }
2488
2489 out:
2490         spin_unlock(&fs_info->qgroup_lock);
2491 }
2492
2493 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2494 {
2495         if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2496                 return;
2497         btrfs_err(trans->fs_info,
2498                 "qgroups not uptodate in trans handle %p:  list is%s empty, seq is %#x.%x",
2499                 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2500                 (u32)(trans->delayed_ref_elem.seq >> 32),
2501                 (u32)trans->delayed_ref_elem.seq);
2502         BUG();
2503 }
2504
2505 /*
2506  * Returns < 0 on error, 0 when more leaves are to be scanned and
2507  * 1 when done.
2508  */
2509 static int
2510 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2511                    struct btrfs_trans_handle *trans)
2512 {
2513         struct btrfs_key found;
2514         struct extent_buffer *scratch_leaf = NULL;
2515         struct ulist *roots = NULL;
2516         struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2517         u64 num_bytes;
2518         int slot;
2519         int ret;
2520
2521         mutex_lock(&fs_info->qgroup_rescan_lock);
2522         ret = btrfs_search_slot_for_read(fs_info->extent_root,
2523                                          &fs_info->qgroup_rescan_progress,
2524                                          path, 1, 0);
2525
2526         btrfs_debug(fs_info,
2527                 "current progress key (%llu %u %llu), search_slot ret %d",
2528                 fs_info->qgroup_rescan_progress.objectid,
2529                 fs_info->qgroup_rescan_progress.type,
2530                 fs_info->qgroup_rescan_progress.offset, ret);
2531
2532         if (ret) {
2533                 /*
2534                  * The rescan is about to end, we will not be scanning any
2535                  * further blocks. We cannot unset the RESCAN flag here, because
2536                  * we want to commit the transaction if everything went well.
2537                  * To make the live accounting work in this phase, we set our
2538                  * scan progress pointer such that every real extent objectid
2539                  * will be smaller.
2540                  */
2541                 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2542                 btrfs_release_path(path);
2543                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2544                 return ret;
2545         }
2546
2547         btrfs_item_key_to_cpu(path->nodes[0], &found,
2548                               btrfs_header_nritems(path->nodes[0]) - 1);
2549         fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2550
2551         btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2552         scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
2553         if (!scratch_leaf) {
2554                 ret = -ENOMEM;
2555                 mutex_unlock(&fs_info->qgroup_rescan_lock);
2556                 goto out;
2557         }
2558         extent_buffer_get(scratch_leaf);
2559         btrfs_tree_read_lock(scratch_leaf);
2560         btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
2561         slot = path->slots[0];
2562         btrfs_release_path(path);
2563         mutex_unlock(&fs_info->qgroup_rescan_lock);
2564
2565         for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2566                 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2567                 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2568                     found.type != BTRFS_METADATA_ITEM_KEY)
2569                         continue;
2570                 if (found.type == BTRFS_METADATA_ITEM_KEY)
2571                         num_bytes = fs_info->nodesize;
2572                 else
2573                         num_bytes = found.offset;
2574
2575                 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2576                                            &roots);
2577                 if (ret < 0)
2578                         goto out;
2579                 /* For rescan, just pass old_roots as NULL */
2580                 ret = btrfs_qgroup_account_extent(trans, fs_info,
2581                                 found.objectid, num_bytes, NULL, roots);
2582                 if (ret < 0)
2583                         goto out;
2584         }
2585 out:
2586         if (scratch_leaf) {
2587                 btrfs_tree_read_unlock_blocking(scratch_leaf);
2588                 free_extent_buffer(scratch_leaf);
2589         }
2590         btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2591
2592         return ret;
2593 }
2594
2595 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2596 {
2597         struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
2598                                                      qgroup_rescan_work);
2599         struct btrfs_path *path;
2600         struct btrfs_trans_handle *trans = NULL;
2601         int err = -ENOMEM;
2602         int ret = 0;
2603
2604         path = btrfs_alloc_path();
2605         if (!path)
2606                 goto out;
2607
2608         err = 0;
2609         while (!err && !btrfs_fs_closing(fs_info)) {
2610                 trans = btrfs_start_transaction(fs_info->fs_root, 0);
2611                 if (IS_ERR(trans)) {
2612                         err = PTR_ERR(trans);
2613                         break;
2614                 }
2615                 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
2616                         err = -EINTR;
2617                 } else {
2618                         err = qgroup_rescan_leaf(fs_info, path, trans);
2619                 }
2620                 if (err > 0)
2621                         btrfs_commit_transaction(trans);
2622                 else
2623                         btrfs_end_transaction(trans);
2624         }
2625
2626 out:
2627         btrfs_free_path(path);
2628
2629         mutex_lock(&fs_info->qgroup_rescan_lock);
2630         if (!btrfs_fs_closing(fs_info))
2631                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2632
2633         if (err > 0 &&
2634             fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
2635                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2636         } else if (err < 0) {
2637                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2638         }
2639         mutex_unlock(&fs_info->qgroup_rescan_lock);
2640
2641         /*
2642          * only update status, since the previous part has already updated the
2643          * qgroup info.
2644          */
2645         trans = btrfs_start_transaction(fs_info->quota_root, 1);
2646         if (IS_ERR(trans)) {
2647                 err = PTR_ERR(trans);
2648                 btrfs_err(fs_info,
2649                           "fail to start transaction for status update: %d\n",
2650                           err);
2651                 goto done;
2652         }
2653         ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
2654         if (ret < 0) {
2655                 err = ret;
2656                 btrfs_err(fs_info, "fail to update qgroup status: %d", err);
2657         }
2658         btrfs_end_transaction(trans);
2659
2660         if (btrfs_fs_closing(fs_info)) {
2661                 btrfs_info(fs_info, "qgroup scan paused");
2662         } else if (err >= 0) {
2663                 btrfs_info(fs_info, "qgroup scan completed%s",
2664                         err > 0 ? " (inconsistency flag cleared)" : "");
2665         } else {
2666                 btrfs_err(fs_info, "qgroup scan failed with %d", err);
2667         }
2668
2669 done:
2670         mutex_lock(&fs_info->qgroup_rescan_lock);
2671         fs_info->qgroup_rescan_running = false;
2672         mutex_unlock(&fs_info->qgroup_rescan_lock);
2673         complete_all(&fs_info->qgroup_rescan_completion);
2674 }
2675
2676 /*
2677  * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
2678  * memory required for the rescan context.
2679  */
2680 static int
2681 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
2682                    int init_flags)
2683 {
2684         int ret = 0;
2685
2686         if (!init_flags &&
2687             (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
2688              !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
2689                 ret = -EINVAL;
2690                 goto err;
2691         }
2692
2693         mutex_lock(&fs_info->qgroup_rescan_lock);
2694         spin_lock(&fs_info->qgroup_lock);
2695
2696         if (init_flags) {
2697                 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2698                         ret = -EINPROGRESS;
2699                 else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
2700                         ret = -EINVAL;
2701
2702                 if (ret) {
2703                         spin_unlock(&fs_info->qgroup_lock);
2704                         mutex_unlock(&fs_info->qgroup_rescan_lock);
2705                         goto err;
2706                 }
2707                 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2708         }
2709
2710         memset(&fs_info->qgroup_rescan_progress, 0,
2711                 sizeof(fs_info->qgroup_rescan_progress));
2712         fs_info->qgroup_rescan_progress.objectid = progress_objectid;
2713         init_completion(&fs_info->qgroup_rescan_completion);
2714         fs_info->qgroup_rescan_running = true;
2715
2716         spin_unlock(&fs_info->qgroup_lock);
2717         mutex_unlock(&fs_info->qgroup_rescan_lock);
2718
2719         memset(&fs_info->qgroup_rescan_work, 0,
2720                sizeof(fs_info->qgroup_rescan_work));
2721         btrfs_init_work(&fs_info->qgroup_rescan_work,
2722                         btrfs_qgroup_rescan_helper,
2723                         btrfs_qgroup_rescan_worker, NULL, NULL);
2724
2725         if (ret) {
2726 err:
2727                 btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
2728                 return ret;
2729         }
2730
2731         return 0;
2732 }
2733
2734 static void
2735 qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
2736 {
2737         struct rb_node *n;
2738         struct btrfs_qgroup *qgroup;
2739
2740         spin_lock(&fs_info->qgroup_lock);
2741         /* clear all current qgroup tracking information */
2742         for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
2743                 qgroup = rb_entry(n, struct btrfs_qgroup, node);
2744                 qgroup->rfer = 0;
2745                 qgroup->rfer_cmpr = 0;
2746                 qgroup->excl = 0;
2747                 qgroup->excl_cmpr = 0;
2748         }
2749         spin_unlock(&fs_info->qgroup_lock);
2750 }
2751
2752 int
2753 btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2754 {
2755         int ret = 0;
2756         struct btrfs_trans_handle *trans;
2757
2758         ret = qgroup_rescan_init(fs_info, 0, 1);
2759         if (ret)
2760                 return ret;
2761
2762         /*
2763          * We have set the rescan_progress to 0, which means no more
2764          * delayed refs will be accounted by btrfs_qgroup_account_ref.
2765          * However, btrfs_qgroup_account_ref may be running right after its call
2766          * to btrfs_find_all_roots, in which case it would still do the
2767          * accounting.
2768          * To solve this, we're committing the transaction, which will
2769          * ensure we run all delayed refs and only after that, we are
2770          * going to clear all tracking information for a clean start.
2771          */
2772
2773         trans = btrfs_join_transaction(fs_info->fs_root);
2774         if (IS_ERR(trans)) {
2775                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2776                 return PTR_ERR(trans);
2777         }
2778         ret = btrfs_commit_transaction(trans);
2779         if (ret) {
2780                 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
2781                 return ret;
2782         }
2783
2784         qgroup_rescan_zero_tracking(fs_info);
2785
2786         btrfs_queue_work(fs_info->qgroup_rescan_workers,
2787                          &fs_info->qgroup_rescan_work);
2788
2789         return 0;
2790 }
2791
2792 int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2793                                      bool interruptible)
2794 {
2795         int running;
2796         int ret = 0;
2797
2798         mutex_lock(&fs_info->qgroup_rescan_lock);
2799         spin_lock(&fs_info->qgroup_lock);
2800         running = fs_info->qgroup_rescan_running;
2801         spin_unlock(&fs_info->qgroup_lock);
2802         mutex_unlock(&fs_info->qgroup_rescan_lock);
2803
2804         if (!running)
2805                 return 0;
2806
2807         if (interruptible)
2808                 ret = wait_for_completion_interruptible(
2809                                         &fs_info->qgroup_rescan_completion);
2810         else
2811                 wait_for_completion(&fs_info->qgroup_rescan_completion);
2812
2813         return ret;
2814 }
2815
2816 /*
2817  * This is only called from open_ctree() where we're still single-threaded;
2818  * thus locking is omitted here.
2819  */
2820 void
2821 btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
2822 {
2823         if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
2824                 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2825                                  &fs_info->qgroup_rescan_work);
2826 }
2827
2828 /*
2829  * Reserve qgroup space for range [start, start + len).
2830  *
2831  * This function will either reserve space from the related qgroups or do
2832  * nothing if the range is already reserved.
2833  *
2834  * Return 0 for a successful reservation
2835  * Return <0 for error (including -EDQUOT)
2836  *
2837  * NOTE: This function may sleep for memory allocation.
2838  */
2839 int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
2840 {
2841         struct btrfs_root *root = BTRFS_I(inode)->root;
2842         struct extent_changeset changeset;
2843         struct ulist_node *unode;
2844         struct ulist_iterator uiter;
2845         int ret;
2846
2847         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
2848             !is_fstree(root->objectid) || len == 0)
2849                 return 0;
2850
2851         changeset.bytes_changed = 0;
2852         ulist_init(&changeset.range_changed);
2853         ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2854                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2855         trace_btrfs_qgroup_reserve_data(inode, start, len,
2856                                         changeset.bytes_changed,
2857                                         QGROUP_RESERVE);
2858         if (ret < 0)
2859                 goto cleanup;
2860         ret = qgroup_reserve(root, changeset.bytes_changed, true);
2861         if (ret < 0)
2862                 goto cleanup;
2863
2864         ulist_release(&changeset.range_changed);
2865         return ret;
2866
2867 cleanup:
2868         /* cleanup already reserved ranges */
2869         ULIST_ITER_INIT(&uiter);
2870         while ((unode = ulist_next(&changeset.range_changed, &uiter)))
2871                 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
2872                                  unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
2873                                  GFP_NOFS);
2874         ulist_release(&changeset.range_changed);
2875         return ret;
2876 }
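/*
 * Usage sketch for the data reservation API (a minimal sequence under
 * assumed write-path conditions; pos/len stand in for the caller's
 * actual range):
 *
 *      ret = btrfs_qgroup_reserve_data(inode, pos, len);
 *      if (ret < 0)            // e.g. -EDQUOT when a limit would trip
 *              return ret;
 *      ...
 *      // error before the data reaches disk: return the reservation
 *      btrfs_qgroup_free_data(inode, pos, len);
 *      // or, once the FILE_EXTENT item is on disk: keep the charge,
 *      // just clear the EXTENT_QGROUP_RESERVED range
 *      btrfs_qgroup_release_data(inode, pos, len);
 */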
2877
2878 static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
2879                                        int free)
2880 {
2881         struct extent_changeset changeset;
2882         int trace_op = QGROUP_RELEASE;
2883         int ret;
2884
2885         changeset.bytes_changed = 0;
2886         ulist_init(&changeset.range_changed);
2887         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
2888                         start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
2889         if (ret < 0)
2890                 goto out;
2891
2892         if (free) {
2893                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
2894                                 BTRFS_I(inode)->root->objectid,
2895                                 changeset.bytes_changed);
2896                 trace_op = QGROUP_FREE;
2897         }
2898         trace_btrfs_qgroup_release_data(inode, start, len,
2899                                         changeset.bytes_changed, trace_op);
2900 out:
2901         ulist_release(&changeset.range_changed);
2902         return ret;
2903 }
2904
2905 /*
2906  * Free a reserved space range from io_tree and related qgroups
2907  *
2908  * Should be called when a range of pages gets invalidated before reaching
2909  * disk, or for the error cleanup case.
2910  *
2911  * For data written to disk, use btrfs_qgroup_release_data().
2912  *
2913  * NOTE: This function may sleep for memory allocation.
2914  */
2915 int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
2916 {
2917         return __btrfs_qgroup_release_data(inode, start, len, 1);
2918 }
2919
2920 /*
2921  * Release a reserved space range from io_tree only.
2922  *
2923  * Should be called when a range of pages gets written to disk and the
2924  * corresponding FILE_EXTENT item is inserted into the subvolume tree.
2925  *
2926  * Since the new qgroup accounting framework only updates qgroup numbers at
2927  * commit_transaction() time, the reserved space shouldn't be freed from the
2928  * related qgroups.
2929  *
2930  * But we should still release the range from the io_tree, to allow further
2931  * writes to the range to be COWed.
2932  *
2933  * NOTE: This function may sleep for memory allocation.
2934  */
2935 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
2936 {
2937         return __btrfs_qgroup_release_data(inode, start, len, 0);
2938 }
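
/*
 * Hedged sketch: ordered-extent completion.  Once data is safely on disk
 * and the file extent item is inserted, only the io_tree range is
 * released; the reserved bytes are turned into real qgroup usage when the
 * transaction commits.  example_finish_ordered_io() is hypothetical.
 */
#if 0
static void example_finish_ordered_io(struct inode *inode, u64 start, u64 len)
{
        /*
         * Clear EXTENT_QGROUP_RESERVED so the range can be rewritten, but
         * keep the bytes reserved until commit time.
         */
        btrfs_qgroup_release_data(inode, start, len);
}
#endif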
2939
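/*
 * Reserve @num_bytes of metadata space for @root's qgroups and account it
 * in root->qgroup_meta_rsv.  @num_bytes must be a multiple of nodesize.
 *
 * Return 0 on success (or when quotas are disabled), <0 on error.
 */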
2940 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
2941                               bool enforce)
2942 {
2943         struct btrfs_fs_info *fs_info = root->fs_info;
2944         int ret;
2945
2946         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2947             !is_fstree(root->objectid) || num_bytes == 0)
2948                 return 0;
2949
2950         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2951         ret = qgroup_reserve(root, num_bytes, enforce);
2952         if (ret < 0)
2953                 return ret;
2954         atomic64_add(num_bytes, &root->qgroup_meta_rsv);
2955         return ret;
2956 }
2957
2958 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2959 {
2960         struct btrfs_fs_info *fs_info = root->fs_info;
2961         u64 reserved;
2962
2963         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2964             !is_fstree(root->objectid))
2965                 return;
2966
2967         reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
2968         if (reserved == 0)
2969                 return;
2970         btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
2971 }
2972
2973 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2974 {
2975         struct btrfs_fs_info *fs_info = root->fs_info;
2976
2977         if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2978             !is_fstree(root->objectid))
2979                 return;
2980
2981         BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2982         WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
2983         atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
2984         btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
2985 }
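
/*
 * Hedged sketch: pairing the metadata helpers.  Every successful
 * btrfs_qgroup_reserve_meta() should be matched by btrfs_qgroup_free_meta()
 * on failure, or be swept up by btrfs_qgroup_free_meta_all() at transaction
 * end.  example_do_metadata_op() is hypothetical; num_bytes must stay
 * nodesize-aligned, as the BUG_ON checks above enforce.
 */
#if 0
static int example_metadata_update(struct btrfs_root *root)
{
        int num_bytes = root->fs_info->nodesize;        /* one tree block */
        int ret;

        ret = btrfs_qgroup_reserve_meta(root, num_bytes, true);
        if (ret < 0)
                return ret;

        ret = example_do_metadata_op(root);
        if (ret < 0)
                /* Undo the reservation on failure */
                btrfs_qgroup_free_meta(root, num_bytes);
        return ret;
}
#endif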
2986
2987 /*
2988  * Check for leaked qgroup reserved space, normally called at inode
2989  * destruction time.
2990  */
2991 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
2992 {
2993         struct extent_changeset changeset;
2994         struct ulist_node *unode;
2995         struct ulist_iterator iter;
2996         int ret;
2997
2998         changeset.bytes_changed = 0;
2999         ulist_init(&changeset.range_changed);
3000         ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
3001                         EXTENT_QGROUP_RESERVED, &changeset);
3002
3003         WARN_ON(ret < 0);
3004         if (WARN_ON(changeset.bytes_changed)) {
3005                 ULIST_ITER_INIT(&iter);
3006                 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
3007                         btrfs_warn(BTRFS_I(inode)->root->fs_info,
3008                                 "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
3009                                 inode->i_ino, unode->val, unode->aux);
3010                 }
3011                 btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
3012                                 BTRFS_I(inode)->root->objectid,
3013                                 changeset.bytes_changed);
3014
3015         }
3016         ulist_release(&changeset.range_changed);
3017 }
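
/*
 * Hedged sketch: the leak check is intended for inode teardown.  A
 * destroy/evict path would call it once all ordered extents are finished,
 * so any EXTENT_QGROUP_RESERVED bits still set point at a missing free or
 * release.  example_destroy_inode() is hypothetical.
 */
#if 0
static void example_destroy_inode(struct inode *inode)
{
        /* Warn about, and reclaim, any reserved ranges left behind */
        btrfs_qgroup_check_reserved_leak(inode);
}
#endif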