/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD
9 static inline struct pnfs_block_extent *
10 ext_node(struct rb_node *node)
12 return rb_entry(node, struct pnfs_block_extent, be_node);
15 static struct pnfs_block_extent *
16 ext_tree_first(struct rb_root *root)
18 struct rb_node *node = rb_first(root);
19 return node ? ext_node(node) : NULL;
22 static struct pnfs_block_extent *
23 ext_tree_prev(struct pnfs_block_extent *be)
25 struct rb_node *node = rb_prev(&be->be_node);
26 return node ? ext_node(node) : NULL;
29 static struct pnfs_block_extent *
30 ext_tree_next(struct pnfs_block_extent *be)
32 struct rb_node *node = rb_next(&be->be_node);
33 return node ? ext_node(node) : NULL;
36 static inline sector_t
37 ext_f_end(struct pnfs_block_extent *be)
39 return be->be_f_offset + be->be_length;
42 static struct pnfs_block_extent *
43 __ext_tree_search(struct rb_root *root, sector_t start)
45 struct rb_node *node = root->rb_node;
46 struct pnfs_block_extent *be = NULL;
50 if (start < be->be_f_offset)
52 else if (start >= ext_f_end(be))
53 node = node->rb_right;
59 if (start < be->be_f_offset)
62 if (start >= ext_f_end(be))
63 return ext_tree_next(be);
70 ext_can_merge(struct pnfs_block_extent *be1, struct pnfs_block_extent *be2)
72 if (be1->be_state != be2->be_state)
74 if (be1->be_device != be2->be_device)
77 if (be1->be_f_offset + be1->be_length != be2->be_f_offset)
80 if (be1->be_state != PNFS_BLOCK_NONE_DATA &&
81 (be1->be_v_offset + be1->be_length != be2->be_v_offset))
84 if (be1->be_state == PNFS_BLOCK_INVALID_DATA &&
85 be1->be_tag != be2->be_tag)
91 static struct pnfs_block_extent *
92 ext_try_to_merge_left(struct rb_root *root, struct pnfs_block_extent *be)
94 struct pnfs_block_extent *left = ext_tree_prev(be);
96 if (left && ext_can_merge(left, be)) {
97 left->be_length += be->be_length;
98 rb_erase(&be->be_node, root);
99 nfs4_put_deviceid_node(be->be_device);
107 static struct pnfs_block_extent *
108 ext_try_to_merge_right(struct rb_root *root, struct pnfs_block_extent *be)
110 struct pnfs_block_extent *right = ext_tree_next(be);
112 if (right && ext_can_merge(be, right)) {
113 be->be_length += right->be_length;
114 rb_erase(&right->be_node, root);
115 nfs4_put_deviceid_node(right->be_device);
123 __ext_tree_insert(struct rb_root *root,
124 struct pnfs_block_extent *new, bool merge_ok)
126 struct rb_node **p = &root->rb_node, *parent = NULL;
127 struct pnfs_block_extent *be;
131 be = ext_node(parent);
133 if (new->be_f_offset < be->be_f_offset) {
134 if (merge_ok && ext_can_merge(new, be)) {
135 be->be_f_offset = new->be_f_offset;
136 if (be->be_state != PNFS_BLOCK_NONE_DATA)
137 be->be_v_offset = new->be_v_offset;
138 be->be_length += new->be_length;
139 be = ext_try_to_merge_left(root, be);
143 } else if (new->be_f_offset >= ext_f_end(be)) {
144 if (merge_ok && ext_can_merge(be, new)) {
145 be->be_length += new->be_length;
146 be = ext_try_to_merge_right(root, be);
155 rb_link_node(&new->be_node, parent, p);
156 rb_insert_color(&new->be_node, root);
159 nfs4_put_deviceid_node(new->be_device);
164 __ext_tree_remove(struct rb_root *root, sector_t start, sector_t end)
166 struct pnfs_block_extent *be;
167 sector_t len1 = 0, len2 = 0;
168 sector_t orig_v_offset;
171 be = __ext_tree_search(root, start);
174 if (be->be_f_offset >= end)
177 orig_v_offset = be->be_v_offset;
178 orig_len = be->be_length;
180 if (start > be->be_f_offset)
181 len1 = start - be->be_f_offset;
182 if (ext_f_end(be) > end)
183 len2 = ext_f_end(be) - end;
187 struct pnfs_block_extent *new;
189 new = kzalloc(sizeof(*new), GFP_ATOMIC);
193 be->be_length = len1;
195 new->be_f_offset = end;
196 if (be->be_state != PNFS_BLOCK_NONE_DATA) {
198 orig_v_offset + orig_len - len2;
200 new->be_length = len2;
201 new->be_state = be->be_state;
202 new->be_tag = be->be_tag;
203 new->be_device = nfs4_get_deviceid(be->be_device);
205 __ext_tree_insert(root, new, true);
207 be->be_f_offset = end;
208 if (be->be_state != PNFS_BLOCK_NONE_DATA) {
210 orig_v_offset + orig_len - len2;
212 be->be_length = len2;
216 be->be_length = len1;
217 be = ext_tree_next(be);
220 while (be && ext_f_end(be) <= end) {
221 struct pnfs_block_extent *next = ext_tree_next(be);
223 rb_erase(&be->be_node, root);
224 nfs4_put_deviceid_node(be->be_device);
229 if (be && be->be_f_offset < end) {
230 len1 = ext_f_end(be) - end;
231 be->be_f_offset = end;
232 if (be->be_state != PNFS_BLOCK_NONE_DATA)
233 be->be_v_offset += be->be_length - len1;
234 be->be_length = len1;
242 ext_tree_insert(struct pnfs_block_layout *bl, struct pnfs_block_extent *new)
244 struct pnfs_block_extent *be;
245 struct rb_root *root;
248 switch (new->be_state) {
249 case PNFS_BLOCK_READWRITE_DATA:
250 case PNFS_BLOCK_INVALID_DATA:
251 root = &bl->bl_ext_rw;
253 case PNFS_BLOCK_READ_DATA:
254 case PNFS_BLOCK_NONE_DATA:
255 root = &bl->bl_ext_ro;
258 dprintk("invalid extent type\n");
262 spin_lock(&bl->bl_ext_lock);
264 be = __ext_tree_search(root, new->be_f_offset);
265 if (!be || be->be_f_offset >= ext_f_end(new)) {
266 __ext_tree_insert(root, new, true);
267 } else if (new->be_f_offset >= be->be_f_offset) {
268 if (ext_f_end(new) <= ext_f_end(be)) {
269 nfs4_put_deviceid_node(new->be_device);
272 sector_t new_len = ext_f_end(new) - ext_f_end(be);
273 sector_t diff = new->be_length - new_len;
275 new->be_f_offset += diff;
276 new->be_v_offset += diff;
277 new->be_length = new_len;
280 } else if (ext_f_end(new) <= ext_f_end(be)) {
281 new->be_length = be->be_f_offset - new->be_f_offset;
282 __ext_tree_insert(root, new, true);
284 struct pnfs_block_extent *split;
285 sector_t new_len = ext_f_end(new) - ext_f_end(be);
286 sector_t diff = new->be_length - new_len;
288 split = kmemdup(new, sizeof(*new), GFP_ATOMIC);
294 split->be_length = be->be_f_offset - split->be_f_offset;
295 split->be_device = nfs4_get_deviceid(new->be_device);
296 __ext_tree_insert(root, split, true);
298 new->be_f_offset += diff;
299 new->be_v_offset += diff;
300 new->be_length = new_len;
304 spin_unlock(&bl->bl_ext_lock);
309 __ext_tree_lookup(struct rb_root *root, sector_t isect,
310 struct pnfs_block_extent *ret)
312 struct rb_node *node;
313 struct pnfs_block_extent *be;
315 node = root->rb_node;
318 if (isect < be->be_f_offset)
319 node = node->rb_left;
320 else if (isect >= ext_f_end(be))
321 node = node->rb_right;
332 ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
333 struct pnfs_block_extent *ret, bool rw)
337 spin_lock(&bl->bl_ext_lock);
339 found = __ext_tree_lookup(&bl->bl_ext_ro, isect, ret);
341 found = __ext_tree_lookup(&bl->bl_ext_rw, isect, ret);
342 spin_unlock(&bl->bl_ext_lock);
347 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw,
348 sector_t start, sector_t end)
352 spin_lock(&bl->bl_ext_lock);
353 err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
355 err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end);
359 spin_unlock(&bl->bl_ext_lock);
365 ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
368 struct pnfs_block_extent *new;
369 sector_t orig_len = be->be_length;
371 new = kzalloc(sizeof(*new), GFP_ATOMIC);
375 be->be_length = split - be->be_f_offset;
377 new->be_f_offset = split;
378 if (be->be_state != PNFS_BLOCK_NONE_DATA)
379 new->be_v_offset = be->be_v_offset + be->be_length;
380 new->be_length = orig_len - be->be_length;
381 new->be_state = be->be_state;
382 new->be_tag = be->be_tag;
383 new->be_device = nfs4_get_deviceid(be->be_device);
385 __ext_tree_insert(root, new, false);
390 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
393 struct rb_root *root = &bl->bl_ext_rw;
394 sector_t end = start + len;
395 struct pnfs_block_extent *be;
398 spin_lock(&bl->bl_ext_lock);
400 * First remove all COW extents or holes from written to range.
402 err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
407 * Then mark all invalid extents in the range as written to.
409 for (be = __ext_tree_search(root, start); be; be = ext_tree_next(be)) {
410 if (be->be_f_offset >= end)
413 if (be->be_state != PNFS_BLOCK_INVALID_DATA || be->be_tag)
416 if (be->be_f_offset < start) {
417 struct pnfs_block_extent *left = ext_tree_prev(be);
419 if (left && ext_can_merge(left, be)) {
420 sector_t diff = start - be->be_f_offset;
422 left->be_length += diff;
424 be->be_f_offset += diff;
425 be->be_v_offset += diff;
426 be->be_length -= diff;
428 err = ext_tree_split(root, be, start);
434 if (ext_f_end(be) > end) {
435 struct pnfs_block_extent *right = ext_tree_next(be);
437 if (right && ext_can_merge(be, right)) {
438 sector_t diff = end - be->be_f_offset;
440 be->be_length -= diff;
442 right->be_f_offset -= diff;
443 right->be_v_offset -= diff;
444 right->be_length += diff;
446 err = ext_tree_split(root, be, end);
452 if (be->be_f_offset >= start && ext_f_end(be) <= end) {
453 be->be_tag = EXTENT_WRITTEN;
454 be = ext_try_to_merge_left(root, be);
455 be = ext_try_to_merge_right(root, be);
459 spin_unlock(&bl->bl_ext_lock);
463 static void ext_tree_free_commitdata(struct nfs4_layoutcommit_args *arg,
466 if (arg->layoutupdate_pages != &arg->layoutupdate_page) {
467 int nr_pages = DIV_ROUND_UP(buffer_size, PAGE_SIZE), i;
469 for (i = 0; i < nr_pages; i++)
470 put_page(arg->layoutupdate_pages[i]);
471 kfree(arg->layoutupdate_pages);
473 put_page(arg->layoutupdate_page);
477 static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
478 size_t buffer_size, size_t *count)
480 struct pnfs_block_extent *be;
483 spin_lock(&bl->bl_ext_lock);
484 for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) {
485 if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
486 be->be_tag != EXTENT_WRITTEN)
490 if (*count * BL_EXTENT_SIZE > buffer_size) {
491 /* keep counting.. */
496 p = xdr_encode_opaque_fixed(p, be->be_device->deviceid.data,
497 NFS4_DEVICEID4_SIZE);
498 p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT);
499 p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);
500 p = xdr_encode_hyper(p, 0LL);
501 *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
503 be->be_tag = EXTENT_COMMITTING;
505 spin_unlock(&bl->bl_ext_lock);
511 ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
513 struct pnfs_block_layout *bl = BLK_LO2EXT(NFS_I(arg->inode)->layout);
514 size_t count = 0, buffer_size = PAGE_SIZE;
518 dprintk("%s enter\n", __func__);
520 arg->layoutupdate_page = alloc_page(GFP_NOFS);
521 if (!arg->layoutupdate_page)
523 start_p = page_address(arg->layoutupdate_page);
524 arg->layoutupdate_pages = &arg->layoutupdate_page;
527 ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
529 ext_tree_free_commitdata(arg, buffer_size);
531 buffer_size = sizeof(__be32) + BL_EXTENT_SIZE * count;
534 arg->layoutupdate_pages =
535 kcalloc(DIV_ROUND_UP(buffer_size, PAGE_SIZE),
536 sizeof(struct page *), GFP_NOFS);
537 if (!arg->layoutupdate_pages)
540 start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL);
542 kfree(arg->layoutupdate_pages);
549 *start_p = cpu_to_be32(count);
550 arg->layoutupdate_len = sizeof(__be32) + BL_EXTENT_SIZE * count;
552 if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
557 p < start_p + arg->layoutupdate_len;
559 arg->layoutupdate_pages[i++] = vmalloc_to_page(p);
563 dprintk("%s found %zu ranges\n", __func__, count);
568 ext_tree_mark_committed(struct nfs4_layoutcommit_args *arg, int status)
570 struct pnfs_block_layout *bl = BLK_LO2EXT(NFS_I(arg->inode)->layout);
571 struct rb_root *root = &bl->bl_ext_rw;
572 struct pnfs_block_extent *be;
574 dprintk("%s status %d\n", __func__, status);
576 ext_tree_free_commitdata(arg, arg->layoutupdate_len);
578 spin_lock(&bl->bl_ext_lock);
579 for (be = ext_tree_first(root); be; be = ext_tree_next(be)) {
580 if (be->be_state != PNFS_BLOCK_INVALID_DATA ||
581 be->be_tag != EXTENT_COMMITTING)
586 * Mark as written and try again.
588 * XXX: some real error handling here wouldn't hurt..
590 be->be_tag = EXTENT_WRITTEN;
592 be->be_state = PNFS_BLOCK_READWRITE_DATA;
596 be = ext_try_to_merge_left(root, be);
597 be = ext_try_to_merge_right(root, be);
599 spin_unlock(&bl->bl_ext_lock);