/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>

/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try to increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node, adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no longer
 * contains the desired sub tree.  For this reason this rebalancing is
 * performed on the children of the current node.  This also avoids having
 * a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * for internal nodes, or delete the entry for leaf nodes.
 */

/*
 * Some little utilities for moving node data around.
 */
static void node_shift(struct btree_node *n, int shift)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	if (shift < 0) {
		shift = -shift;
		BUG_ON(shift > nr_entries);
		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
		memmove(key_ptr(n, 0),
			key_ptr(n, shift),
			(nr_entries - shift) * sizeof(__le64));
		memmove(value_ptr(n, 0),
			value_ptr(n, shift),
			(nr_entries - shift) * value_size);
	} else {
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
		memmove(key_ptr(n, shift),
			key_ptr(n, 0),
			nr_entries * sizeof(__le64));
		memmove(value_ptr(n, shift),
			value_ptr(n, 0),
			nr_entries * value_size);
	}
}

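/*
 * Copy entries between sibling nodes: a negative shift copies the first
 * entries of @right onto the end of @left; a positive shift copies the
 * last entries of @left to the front of @right.  The entry counts in the
 * node headers are left for the caller to adjust.
 */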
static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);
	BUG_ON(value_size != le32_to_cpu(right->header.value_size));

	if (shift < 0) {
		shift = -shift;
		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
		memcpy(key_ptr(left, nr_left),
		       key_ptr(right, 0),
		       shift * sizeof(__le64));
		memcpy(value_ptr(left, nr_left),
		       value_ptr(right, 0),
		       shift * value_size);
	} else {
		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
		memcpy(key_ptr(right, 0),
		       key_ptr(left, nr_left - shift),
		       shift * sizeof(__le64));
		memcpy(value_ptr(right, 0),
		       value_ptr(left, nr_left - shift),
		       shift * value_size);
	}
}

/*
 * Delete a specific entry from a leaf node.
 */
static void delete_at(struct btree_node *n, unsigned index)
{
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	BUG_ON(index >= nr_entries);

	if (nr_to_copy) {
		memmove(key_ptr(n, index),
			key_ptr(n, index + 1),
			nr_to_copy * sizeof(__le64));

		memmove(value_ptr(n, index),
			value_ptr(n, index + 1),
			nr_to_copy * value_size);
	}

	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}

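/*
 * Siblings whose combined entry count falls below a small multiple of
 * this threshold are merged rather than redistributed.
 */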
static unsigned merge_threshold(struct btree_node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}

struct child {
	unsigned index;
	struct dm_block *block;
	struct btree_node *n;
};

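/*
 * Shadow the child at @index of @parent, incrementing the reference
 * counts of its children if a copy was made, and update the parent to
 * point at the child's new location.
 */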
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
		      struct btree_node *parent,
		      unsigned index, struct child *result)
{
	int r, inc;
	dm_block_t root;

	result->index = index;
	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	if (r)
		return r;

	result->n = dm_block_data(result->block);

	if (inc)
		inc_children(info->tm, result->n, vt);

	*((__le64 *) value_ptr(parent, index)) =
		cpu_to_le64(dm_block_location(result->block));

	return 0;
}

static int exit_child(struct dm_btree_info *info, struct child *c)
{
	return dm_tm_unlock(info->tm, c->block);
}

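/*
 * Move @count entries between two siblings: a positive count moves
 * entries from the end of @left to the front of @right, a negative count
 * moves them the other way.  Both entry counts are updated.
 */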
static void shift(struct btree_node *left, struct btree_node *right, int count)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

	BUG_ON(max_entries != r_max_entries);
	BUG_ON(nr_left - count > max_entries);
	BUG_ON(nr_right + count > max_entries);

	if (!count)
		return;

	if (count > 0) {
		node_shift(right, count);
		node_copy(left, right, count);
	} else {
		node_copy(left, right, count);
		node_shift(right, count);
	}

	left->header.nr_entries = cpu_to_le32(nr_left - count);
	right->header.nr_entries = cpu_to_le32(nr_right + count);
}

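/*
 * Rebalance two siblings: if their combined entry count is small enough
 * the right node is folded into the left and freed, otherwise the
 * entries are shared out evenly between the two and the parent's
 * separating key is updated.
 */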
static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned threshold = 2 * merge_threshold(left) + 1;

	if (nr_left + nr_right < threshold) {
		/*
		 * Merge
		 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);

		/*
		 * We need to decrement the right block, but not its
		 * children, since they're still referenced by left.
		 */
		dm_tm_dec(info->tm, dm_block_location(r->block));
	} else {
		/*
		 * Rebalance.
		 */
		unsigned target_left = (nr_left + nr_right) / 2;
		shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
	}
}

static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent;
	struct child left, right;

	parent = dm_block_data(shadow_current(s));

	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &right);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	__rebalance2(info, parent, &left, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	return exit_child(info, &right);
}

/*
 * We dump as many entries from center as possible into left, then the
 * rest into right, then rebalance2.  This wastes some CPU, but I want
 * something simple for now.
 */
static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
			       struct child *l, struct child *c, struct child *r,
			       struct btree_node *left, struct btree_node *center, struct btree_node *right,
			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned shift = min(max_entries - nr_left, nr_center);

	BUG_ON(nr_left + shift > max_entries);
	node_copy(left, center, -shift);
	left->header.nr_entries = cpu_to_le32(nr_left + shift);

	if (shift != nr_center) {
		shift = nr_center - shift;
		BUG_ON((nr_right + shift) > max_entries);
		node_shift(right, shift);
		node_copy(center, right, shift);
		right->header.nr_entries = cpu_to_le32(nr_right + shift);
	}
	*key_ptr(parent, r->index) = right->keys[0];

	delete_at(parent, c->index);
	r->index--;

	dm_tm_dec(info->tm, dm_block_location(c->block));
	__rebalance2(info, parent, l, r);
}

/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
			  struct child *l, struct child *c, struct child *r,
			  struct btree_node *left, struct btree_node *center, struct btree_node *right,
			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	int s;
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned total = nr_left + nr_center + nr_right;
	unsigned target_right = total / 3;
	unsigned remainder = (target_right * 3) != total;
	unsigned target_left = target_right + remainder;

	BUG_ON(target_left > max_entries);
	BUG_ON(target_right > max_entries);

	if (nr_left < nr_right) {
		s = nr_left - target_left;

		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
			shift(left, center, -nr_center);
			s += nr_center;
			shift(left, right, s);
			nr_right += s;
		} else
			shift(left, center, s);

		shift(center, right, target_right - nr_right);

	} else {
		s = target_right - nr_right;
		if (s > 0 && nr_center < s) {
			/* not enough in central node */
			shift(center, right, nr_center);
			s -= nr_center;
			shift(left, right, s);
			nr_left -= s;
		} else
			shift(center, right, s);

		shift(left, center, nr_left - target_left);
	}

	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
}

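/*
 * Rebalance three siblings: if the combined entry count is small enough
 * the center node is emptied into its neighbours and deleted, otherwise
 * the entries are redistributed across all three.
 */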
static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *c, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *center = c->n;
	struct btree_node *right = r->n;

	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

	unsigned threshold = merge_threshold(left) * 4 + 1;

	BUG_ON(left->header.max_entries != center->header.max_entries);
	BUG_ON(center->header.max_entries != right->header.max_entries);

	if ((nr_left + nr_center + nr_right) < threshold)
		delete_center_node(info, parent, l, c, r, left, center, right,
				   nr_left, nr_center, nr_right);
	else
		redistribute3(info, parent, l, c, r, left, center, right,
			      nr_left, nr_center, nr_right);
}

static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent = dm_block_data(shadow_current(s));
	struct child left, center, right;

	/*
	 * FIXME: fill out an array?
	 */
	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &center);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	r = init_child(info, vt, parent, left_index + 2, &right);
	if (r) {
		exit_child(info, &left);
		exit_child(info, &center);
		return r;
	}

	__rebalance3(info, parent, &left, &center, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &center);
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &center);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &right);
	if (r)
		return r;

	return 0;
}

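/*
 * Ensure the child we are about to step into holds more than the minimum
 * number of entries.  A lone child is copied over its parent; otherwise
 * the relevant child is rebalanced with one or two of its siblings,
 * depending on which neighbours it has.
 */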
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	struct btree_node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
		r = dm_tm_unlock(info->tm, child);
		if (r)
			return r;

		dm_tm_dec(info->tm, dm_block_location(child));
		return 0;
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, vt, i);

	else if (!has_right_sibling)
		r = rebalance2(s, info, vt, i - 1);

	else
		r = rebalance3(s, info, vt, i - 1);

	return r;
}

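/*
 * Look up @key in a leaf node, returning its position via @index, or
 * -ENODATA if the key is not present.
 */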
static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
	int i = lower_bound(n, key);

	if ((i < 0) ||
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;

	*index = i;

	return 0;
}

/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

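/*
 * Remove the entry addressed by @keys (one key per btree level),
 * decrementing the removed value's reference count if the value type
 * defines a dec() method.
 */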
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, dm_block_t *new_root)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = remove_raw(&spine, info,
			       (level == last_level ?
				&info->value_type : &le64_vt),
			       root, keys[level], (unsigned *)&index);
		if (r < 0)
			break;

		n = dm_block_data(shadow_current(&spine));
		if (level != last_level) {
			root = value64(n, index);
			continue;
		}

		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);

/*----------------------------------------------------------------*/

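/*
 * Like remove_raw(), but the key need not be an exact match: on reaching
 * a leaf, *index is set to the result of lower_bound() for @key.
 */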
static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
			  struct dm_btree_value_type *vt, dm_block_t root,
			  uint64_t key, int *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

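/*
 * Remove a single leaf entry whose key lies in [keys[last_level], end_key),
 * advancing keys[last_level] past the removed key.  Returns -ENODATA when
 * no entry in the range remains.
 */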
static int remove_one(struct dm_btree_info *info, dm_block_t root,
		      uint64_t *keys, uint64_t end_key,
		      dm_block_t *new_root, unsigned *nr_removed)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;
	uint64_t k;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < last_level; level++) {
		r = remove_raw(&spine, info, &le64_vt,
			       root, keys[level], (unsigned *) &index);
		if (r < 0)
			goto out;

		n = dm_block_data(shadow_current(&spine));
		root = value64(n, index);
	}

	r = remove_nearest(&spine, info, &info->value_type,
			   root, keys[last_level], &index);
	if (r < 0)
		goto out;

	n = dm_block_data(shadow_current(&spine));

	if (index < 0)
		index = 0;

	if (index >= le32_to_cpu(n->header.nr_entries)) {
		r = -ENODATA;
		goto out;
	}

	k = le64_to_cpu(n->keys[index]);
	if (k >= keys[last_level] && k < end_key) {
		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
		keys[last_level] = k + 1ull;

	} else
		r = -ENODATA;

out:
	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}

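/*
 * Repeatedly remove leaf entries with keys in [*first_key, end_key) from
 * the bottom level of the btree, counting them in *nr_removed.
 */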
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *first_key, uint64_t end_key,
			   dm_block_t *new_root, unsigned *nr_removed)
{
	int r;

	*nr_removed = 0;
	do {
		r = remove_one(info, root, first_key, end_key, &root, nr_removed);
		if (!r)
			(*nr_removed)++;
	} while (!r);

	*new_root = root;
	return r == -ENODATA ? 0 : r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove_leaves);