2 * Copyright (c) 2014 Christoph Hellwig.
5 #include "blocklayout.h"
7 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
/*
 * Convert an rb_node embedded in a pnfs_block_extent back to the
 * containing extent (container_of via rb_entry on the be_node member).
 */
9 static inline struct pnfs_block_extent *
10 ext_node(struct rb_node *node)
12 return rb_entry(node, struct pnfs_block_extent, be_node);
/*
 * Return the first (lowest file-offset) extent in @root, or NULL if the
 * tree is empty.
 */
15 static struct pnfs_block_extent *
16 ext_tree_first(struct rb_root *root)
18 struct rb_node *node = rb_first(root);
19 return node ? ext_node(node) : NULL;
/*
 * Return the in-order predecessor of @be in its tree, or NULL if @be is
 * the first extent.
 */
22 static struct pnfs_block_extent *
23 ext_tree_prev(struct pnfs_block_extent *be)
25 struct rb_node *node = rb_prev(&be->be_node);
26 return node ? ext_node(node) : NULL;
/*
 * Return the in-order successor of @be in its tree, or NULL if @be is
 * the last extent.
 */
29 static struct pnfs_block_extent *
30 ext_tree_next(struct pnfs_block_extent *be)
32 struct rb_node *node = rb_next(&be->be_node);
33 return node ? ext_node(node) : NULL;
/*
 * End of @be in file-offset (logical) sectors, exclusive:
 * [be_f_offset, ext_f_end(be)).
 */
36 static inline sector_t
37 ext_f_end(struct pnfs_block_extent *be)
39 return be->be_f_offset + be->be_length;
/*
 * Find the extent covering sector @start, or failing that the first
 * extent beginning after @start; returns NULL when no such extent exists.
 *
 * NOTE(review): this excerpt is missing lines (the rb_left descent,
 * loop braces and the non-matching return paths are not visible here) —
 * compare against the full source before relying on the exact shape.
 */
42 static struct pnfs_block_extent *
43 __ext_tree_search(struct rb_root *root, sector_t start)
45 struct rb_node *node = root->rb_node;
46 struct pnfs_block_extent *be = NULL;
/* Standard rbtree binary search keyed on the file-offset range. */
50 if (start < be->be_f_offset)
52 else if (start >= ext_f_end(be))
53 node = node->rb_right;
/* @start lies before or after the closest extent found: pick successor. */
59 if (start < be->be_f_offset)
62 if (start >= ext_f_end(be))
63 return ext_tree_next(be);
/*
 * Decide whether @be2 can be merged onto the tail of @be1.  Both must
 * share state and device, be contiguous in file offset, be contiguous
 * in volume offset (unless the state is NONE_DATA, which has no backing
 * storage), and — for INVALID_DATA extents — carry the same tag.
 */
70 ext_can_merge(struct pnfs_block_extent *be1, struct pnfs_block_extent *be2)
72 if (be1->be_state != be2->be_state)
74 if (be1->be_mdev != be2->be_mdev)
/* Must be adjacent in the file's logical sector space. */
77 if (be1->be_f_offset + be1->be_length != be2->be_f_offset)
/* And adjacent on disk, unless there is no backing data at all. */
80 if (be1->be_state != PNFS_BLOCK_NONE_DATA &&
81 (be1->be_v_offset + be1->be_length != be2->be_v_offset))
/* INVALID_DATA extents track a write-back tag that must match too. */
84 if (be1->be_state == PNFS_BLOCK_INVALID_DATA &&
85 be1->be_tag != be2->be_tag)
/*
 * If the predecessor of @be can absorb it, extend the predecessor and
 * remove @be from the tree.  Returns the surviving extent (the excerpt
 * does not show the frees/returns — presumably @left on merge, else @be).
 */
91 static struct pnfs_block_extent *
92 ext_try_to_merge_left(struct rb_root *root, struct pnfs_block_extent *be)
94 struct pnfs_block_extent *left = ext_tree_prev(be);
96 if (left && ext_can_merge(left, be)) {
97 left->be_length += be->be_length;
98 rb_erase(&be->be_node, root);
/*
 * If the successor of @be can be absorbed into it, extend @be and remove
 * the successor from the tree.  Returns the surviving extent (@be).
 */
106 static struct pnfs_block_extent *
107 ext_try_to_merge_right(struct rb_root *root, struct pnfs_block_extent *be)
109 struct pnfs_block_extent *right = ext_tree_next(be);
111 if (right && ext_can_merge(be, right)) {
112 be->be_length += right->be_length;
113 rb_erase(&right->be_node, root);
/*
 * Insert @new into @root, keyed by file offset.  When @merge_ok is true
 * and @new abuts an existing mergeable extent, fold @new into that
 * neighbor (and then try to cascade the merge further) instead of
 * linking a new node; callers pass merge_ok=false when @new must stay a
 * distinct node (see ext_tree_split).
 *
 * NOTE(review): excerpt is missing lines (loop header, descent into
 * rb_left/rb_right, the free of @new after a merge, overlap handling) —
 * confirm against the full source.
 */
121 __ext_tree_insert(struct rb_root *root,
122 struct pnfs_block_extent *new, bool merge_ok)
124 struct rb_node **p = &root->rb_node, *parent = NULL;
125 struct pnfs_block_extent *be;
129 be = ext_node(parent);
131 if (new->be_f_offset < be->be_f_offset) {
/* @new ends where @be starts: extend @be backwards over @new. */
132 if (merge_ok && ext_can_merge(new, be)) {
133 be->be_f_offset = new->be_f_offset;
/* NONE_DATA extents have no meaningful volume offset. */
134 if (be->be_state != PNFS_BLOCK_NONE_DATA)
135 be->be_v_offset = new->be_v_offset;
136 be->be_length += new->be_length;
137 be = ext_try_to_merge_left(root, be);
142 } else if (new->be_f_offset >= ext_f_end(be)) {
/* @new starts where @be ends: extend @be forwards over @new. */
143 if (merge_ok && ext_can_merge(be, new)) {
144 be->be_length += new->be_length;
145 be = ext_try_to_merge_right(root, be);
/* No merge happened: link @new as a fresh node and rebalance. */
155 rb_link_node(&new->be_node, parent, p);
156 rb_insert_color(&new->be_node, root);
/*
 * Remove the sector range [@start, @end) from @root, trimming or
 * splitting any extents that straddle the boundaries and deleting every
 * extent wholly inside the range.
 *
 * len1 = portion of the first overlapping extent kept before @start;
 * len2 = portion kept after @end.  When both are non-zero the extent
 * must be split in two, which requires an allocation (GFP_ATOMIC —
 * presumably because bl_ext_lock is held; confirm against callers).
 *
 * NOTE(review): excerpt is missing lines (NULL/ENOMEM checks, frees,
 * assignment left-hand sides at the "orig_v_offset + orig_len - len2"
 * continuations, closing braces) — confirm against the full source.
 */
160 __ext_tree_remove(struct rb_root *root, sector_t start, sector_t end)
162 struct pnfs_block_extent *be;
163 sector_t len1 = 0, len2 = 0;
164 sector_t orig_f_offset;
165 sector_t orig_v_offset;
/* First extent at or after @start, if any. */
168 be = __ext_tree_search(root, start);
171 if (be->be_f_offset >= end)
/* Snapshot the original geometry before we start trimming. */
174 orig_f_offset = be->be_f_offset;
175 orig_v_offset = be->be_v_offset;
176 orig_len = be->be_length;
178 if (start > be->be_f_offset)
179 len1 = start - be->be_f_offset;
180 if (ext_f_end(be) > end)
181 len2 = ext_f_end(be) - end;
/* Range punches a hole in the middle: split into head + tail. */
185 struct pnfs_block_extent *new;
187 new = kzalloc(sizeof(*new), GFP_ATOMIC);
/* Existing node becomes the head, new node becomes the tail. */
191 be->be_length = len1;
193 new->be_f_offset = end;
194 if (be->be_state != PNFS_BLOCK_NONE_DATA) {
196 orig_v_offset + orig_len - len2;
198 new->be_length = len2;
199 new->be_state = be->be_state;
200 new->be_tag = be->be_tag;
201 new->be_mdev = be->be_mdev;
202 memcpy(&new->be_devid, &be->be_devid,
203 sizeof(struct nfs4_deviceid));
205 __ext_tree_insert(root, new, true);
/* Only a tail survives: advance the extent's start past @end. */
207 be->be_f_offset = end;
208 if (be->be_state != PNFS_BLOCK_NONE_DATA) {
210 orig_v_offset + orig_len - len2;
212 be->be_length = len2;
/* Only a head survives: truncate and move to the next extent. */
216 be->be_length = len1;
217 be = ext_tree_next(be);
/* Drop every extent completely contained in [start, end). */
220 while (be && ext_f_end(be) <= end) {
221 struct pnfs_block_extent *next = ext_tree_next(be);
223 rb_erase(&be->be_node, root);
/* Last extent pokes out past @end: trim its front. */
228 if (be && be->be_f_offset < end) {
229 len1 = ext_f_end(be) - end;
230 be->be_f_offset = end;
231 if (be->be_state != PNFS_BLOCK_NONE_DATA)
232 be->be_v_offset += be->be_length - len1;
233 be->be_length = len1;
/*
 * Insert @new into the layout's read-write or read-only extent tree,
 * chosen by its state.  Overlaps with existing extents are resolved by
 * trimming @new (and, for an extent spanning an existing one, splitting
 * @new in two) so only the non-overlapping pieces are inserted — the
 * data already present in the tree wins.
 *
 * NOTE(review): excerpt is missing lines (break/return statements in the
 * switch, the "else" arms, ENOMEM handling for kmemdup, the retry
 * control flow after trimming) — confirm against the full source.
 */
241 ext_tree_insert(struct pnfs_block_layout *bl, struct pnfs_block_extent *new)
243 struct pnfs_block_extent *be;
244 struct rb_root *root;
/* Pick the tree: RW/INVALID go to bl_ext_rw, READ/NONE to bl_ext_ro. */
247 switch (new->be_state) {
248 case PNFS_BLOCK_READWRITE_DATA:
249 case PNFS_BLOCK_INVALID_DATA:
250 root = &bl->bl_ext_rw;
252 case PNFS_BLOCK_READ_DATA:
253 case PNFS_BLOCK_NONE_DATA:
254 root = &bl->bl_ext_ro;
257 dprintk("invalid extent type\n");
261 spin_lock(&bl->bl_ext_lock);
263 be = __ext_tree_search(root, new->be_f_offset);
264 if (!be || be->be_f_offset >= ext_f_end(new)) {
/* No overlap at all: plain insert (with merging allowed). */
265 __ext_tree_insert(root, new, true);
266 } else if (new->be_f_offset >= be->be_f_offset) {
/* @new starts inside @be; fully covered means nothing to add. */
267 if (ext_f_end(new) <= ext_f_end(be)) {
/* Keep only the part of @new that extends past @be. */
270 sector_t new_len = ext_f_end(new) - ext_f_end(be);
271 sector_t diff = new->be_length - new_len;
273 new->be_f_offset += diff;
274 new->be_v_offset += diff;
275 new->be_length = new_len;
278 } else if (ext_f_end(new) <= ext_f_end(be)) {
/* @new starts before @be and ends inside it: keep the head only. */
279 new->be_length = be->be_f_offset - new->be_f_offset;
280 __ext_tree_insert(root, new, true);
/* @new spans all of @be: insert head now, keep tail for later. */
282 struct pnfs_block_extent *split;
283 sector_t new_len = ext_f_end(new) - ext_f_end(be);
284 sector_t diff = new->be_length - new_len;
286 split = kmemdup(new, sizeof(*new), GFP_ATOMIC);
292 split->be_length = be->be_f_offset - split->be_f_offset;
293 __ext_tree_insert(root, split, true);
295 new->be_f_offset += diff;
296 new->be_v_offset += diff;
297 new->be_length = new_len;
301 spin_unlock(&bl->bl_ext_lock);
/*
 * Look up the extent containing sector @isect in @root.  On a hit the
 * extent is presumably copied out through @ret (the copy-out and the
 * found/not-found return are not visible in this excerpt — confirm).
 */
306 __ext_tree_lookup(struct rb_root *root, sector_t isect,
307 struct pnfs_block_extent *ret)
309 struct rb_node *node;
310 struct pnfs_block_extent *be;
/* Standard rbtree descent keyed on [be_f_offset, ext_f_end). */
312 node = root->rb_node;
315 if (isect < be->be_f_offset)
316 node = node->rb_left;
317 else if (isect >= ext_f_end(be))
318 node = node->rb_right;
/*
 * Locked lookup of the extent covering @isect.  The read-only tree is
 * consulted and the read-write tree as well (the @rw-dependent ordering
 * conditions are not visible in this excerpt — confirm which tree is
 * tried first for each case).  Result is returned through @ret.
 */
329 ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
330 struct pnfs_block_extent *ret, bool rw)
334 spin_lock(&bl->bl_ext_lock);
336 found = __ext_tree_lookup(&bl->bl_ext_ro, isect, ret);
338 found = __ext_tree_lookup(&bl->bl_ext_rw, isect, ret);
339 spin_unlock(&bl->bl_ext_lock);
/*
 * Remove [@start, @end) from the layout's extent trees under the tree
 * lock: always from the read-only tree, and from the read-write tree as
 * well (presumably only when @rw — the condition is not visible in this
 * excerpt).  Returns the first error encountered.
 */
344 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw,
345 sector_t start, sector_t end)
349 spin_lock(&bl->bl_ext_lock);
350 err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
352 err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end);
356 spin_unlock(&bl->bl_ext_lock);
/*
 * Split @be at sector @split into two extents: @be keeps the head
 * [be_f_offset, split), a newly allocated extent takes the tail
 * [split, old end).  All bookkeeping fields (state, tag, device id) are
 * copied to the tail, and it is inserted with merge_ok=false so the two
 * halves are not immediately re-merged.
 *
 * NOTE(review): excerpt is missing lines (the ENOMEM check for kzalloc
 * and the final return) — confirm against the full source.
 */
362 ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
365 struct pnfs_block_extent *new;
366 sector_t orig_len = be->be_length;
368 dprintk("%s: need split for 0x%lx:0x%lx at 0x%lx\n",
369 __func__, be->be_f_offset, ext_f_end(be), split);
371 new = kzalloc(sizeof(*new), GFP_ATOMIC);
/* Truncate the head first so be_length below reflects the head size. */
375 be->be_length = split - be->be_f_offset;
377 new->be_f_offset = split;
/* NONE_DATA extents have no backing storage, so no volume offset. */
378 if (be->be_state != PNFS_BLOCK_NONE_DATA)
379 new->be_v_offset = be->be_v_offset + be->be_length;
380 new->be_length = orig_len - be->be_length;
381 new->be_state = be->be_state;
382 new->be_tag = be->be_tag;
384 new->be_mdev = be->be_mdev;
385 memcpy(&new->be_devid, &be->be_devid, sizeof(struct nfs4_deviceid));
387 dprintk("%s: got 0x%lx:0x%lx!\n",
388 __func__, be->be_f_offset, ext_f_end(be));
389 dprintk("%s: got 0x%lx:0x%lx!\n",
390 __func__, new->be_f_offset, ext_f_end(new));
/* merge_ok=false: the halves must remain distinct nodes. */
392 __ext_tree_insert(root, new, false);
/*
 * Mark [@start, @start + @len) as written: drop any read-only (COW/hole)
 * extents over the range, then tag every untagged INVALID_DATA extent in
 * the RW tree that falls inside the range with EXTENT_WRITTEN, splitting
 * or shifting boundary extents so only the written part is tagged.
 * Boundary adjustment prefers moving sectors into a mergeable neighbor
 * over allocating a split.
 *
 * NOTE(review): excerpt is missing lines (error-exit jumps after
 * __ext_tree_remove/ext_tree_split failures, break/continue statements,
 * closing braces) — confirm against the full source.
 */
397 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
400 struct rb_root *root = &bl->bl_ext_rw;
401 sector_t end = start + len;
402 struct pnfs_block_extent *be;
405 spin_lock(&bl->bl_ext_lock);
407 * First remove all COW extents or holes from written to range.
409 err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
414 * Then mark all invalid extents in the range as written to.
416 for (be = __ext_tree_search(root, start); be; be = ext_tree_next(be)) {
417 if (be->be_f_offset >= end)
/* Only untagged INVALID_DATA extents are candidates. */
420 if (be->be_state != PNFS_BLOCK_INVALID_DATA || be->be_tag)
/* Extent begins before @start: give the head to a mergeable
 * left neighbor, otherwise split at @start. */
423 if (be->be_f_offset < start) {
424 struct pnfs_block_extent *left = ext_tree_prev(be);
426 if (left && ext_can_merge(left, be)) {
427 sector_t diff = start - be->be_f_offset;
429 left->be_length += diff;
431 be->be_f_offset += diff;
432 be->be_v_offset += diff;
433 be->be_length -= diff;
435 err = ext_tree_split(root, be, start);
/* Extent runs past @end: give the tail to a mergeable right
 * neighbor, otherwise split at @end. */
441 if (ext_f_end(be) > end) {
442 struct pnfs_block_extent *right = ext_tree_next(be);
444 if (right && ext_can_merge(be, right)) {
445 sector_t diff = end - be->be_f_offset;
447 be->be_length -= diff;
449 right->be_f_offset -= diff;
450 right->be_v_offset -= diff;
451 right->be_length += diff;
453 err = ext_tree_split(root, be, end);
/* Now wholly inside the range: tag it and try to coalesce. */
459 if (be->be_f_offset >= start && ext_f_end(be) <= end) {
460 be->be_tag = EXTENT_WRITTEN;
461 be = ext_try_to_merge_left(root, be);
462 be = ext_try_to_merge_right(root, be);
466 spin_unlock(&bl->bl_ext_lock);
/*
 * XDR-encode the list of written-but-uncommitted extents for a
 * LAYOUTCOMMIT: every RW-tree extent in state INVALID_DATA with tag
 * EXTENT_WRITTEN is emitted (deviceid, byte offset, byte length, 0,
 * READWRITE_DATA) and retagged EXTENT_COMMITTING.  The 8 bytes reserved
 * up front are backfilled with the encoded byte count and entry count.
 *
 * NOTE(review): excerpt is missing lines (reserve-failure handling,
 * continue statements, the final return) — confirm against the full
 * source.
 */
471 ext_tree_encode_commit(struct pnfs_block_layout *bl, struct xdr_stream *xdr)
473 struct pnfs_block_extent *be;
474 unsigned int count = 0;
475 __be32 *p, *xdr_start;
478 dprintk("%s enter\n", __func__);
/* Placeholder for list length (bytes) and extent count, filled below. */
480 xdr_start = xdr_reserve_space(xdr, 8);
484 spin_lock(&bl->bl_ext_lock);
485 for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
486 if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
487 be->be_tag != EXTENT_WRITTEN)
/* deviceid + offset + length + pattern offset + state. */
490 p = xdr_reserve_space(xdr, 7 * sizeof(__be32) +
491 NFS4_DEVICEID4_SIZE);
493 printk("%s: out of space for extent list\n", __func__);
498 p = xdr_encode_opaque_fixed(p, be->be_devid.data,
499 NFS4_DEVICEID4_SIZE);
/* Sectors are converted to bytes on the wire. */
500 p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT);
501 p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
502 p = xdr_encode_hyper(p, 0LL);
503 *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
/* Mark in-flight so a racing commit/fail can find these again. */
505 be->be_tag = EXTENT_COMMITTING;
508 spin_unlock(&bl->bl_ext_lock);
/* Backfill the header: payload size in bytes, then extent count. */
510 xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
511 xdr_start[1] = cpu_to_be32(count);
513 dprintk("%s found %i ranges\n", __func__, count);
/*
 * Finish a LAYOUTCOMMIT: walk the RW tree and resolve every extent left
 * in state INVALID_DATA with tag EXTENT_COMMITTING.  On failure
 * (@status != 0, per the surviving comment) the tag reverts to
 * EXTENT_WRITTEN so the commit is retried; on success the extent becomes
 * READWRITE_DATA and is coalesced with its neighbors.
 *
 * NOTE(review): excerpt is missing lines (the status branch itself,
 * continue statements, closing braces) — confirm against the full
 * source.
 */
518 ext_tree_mark_committed(struct pnfs_block_layout *bl, int status)
520 struct rb_root *root = &bl->bl_ext_rw;
521 struct pnfs_block_extent *be;
523 dprintk("%s status %d\n", __func__, status);
525 spin_lock(&bl->bl_ext_lock);
526 for (be = ext_tree_first(root); be; be = ext_tree_next(be)) {
527 if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
528 be->be_tag != EXTENT_COMMITTING)
533 * Mark as written and try again.
535 * XXX: some real error handling here wouldn't hurt..
537 be->be_tag = EXTENT_WRITTEN;
/* Commit succeeded: the data is now permanently readable/writable. */
539 be->be_state = PNFS_BLOCK_READWRITE_DATA;
543 be = ext_try_to_merge_left(root, be);
544 be = ext_try_to_merge_right(root, be);
546 spin_unlock(&bl->bl_ext_lock);