/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
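/*
 * For example (a hypothetical invocation; the ABI document above is the
 * authoritative reference for the syntax), an image is typically mapped
 * by writing "<mon addr> <options> <pool> <image> [snap]" to the bus:
 *
 *   $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * after which the image appears as /dev/rbd<id>.
 */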
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
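/*
 * Worked example: SECTOR_SIZE is 1ULL << 9 = 512 bytes, so a 4 KiB page
 * covers 4096 >> SECTOR_SHIFT = 8 sectors, and a byte offset is
 * converted to a sector number by shifting right by SECTOR_SHIFT.
 */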
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
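/*
 * A minimal usage sketch, not driver code: the saturating helpers above
 * support get/put-style reference counting that fails instead of
 * wrapping.  rbd uses this same pattern for rbd_dev->parent_ref in
 * rbd_dev_parent_get()/rbd_dev_parent_put() below; "example_ref" here
 * is hypothetical.
 */
static atomic_t example_ref = ATOMIC_INIT(1);

static bool __maybe_unused example_ref_get(void)
{
	/* Returns 0 if the count was 0, -EINVAL if it would overflow */
	return atomic_inc_return_safe(&example_ref) > 0;
}

static void __maybe_unused example_ref_put(void)
{
	if (atomic_dec_return_safe(&example_ref) < 0)
		pr_warn("example_ref underflow\n");
}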
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
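/*
 * Worked example: with 4-byte ints, MAX_INT_FORMAT_WIDTH is
 * (5 * 4) / 2 + 1 = 11, enough for the 10 decimal digits of INT_MAX
 * (2147483647) plus a sign.  The bound holds because each byte
 * contributes fewer than 2.5 decimal digits (log10(256) ~= 2.41), so
 * 5/2 digits per byte is a safe over-estimate.
 */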
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
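/*
 * Illustrative only (not driver code): a typical traversal using the
 * macros above.  rbd_img_request_destroy() below uses the _safe variant
 * instead because it deletes entries while walking the list.
 */
static u32 __maybe_unused example_count_obj_requests(
					struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 count = 0;

	for_each_obj_request(img_request, obj_request)
		count++;

	return count;
}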
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
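/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT of 4, dev_id 2 maps
 * to minor 2 << 4 = 32, and minors 32..47 cover the whole device (rbd2)
 * plus up to 15 partitions (rbd2p1, rbd2p2, ...); the reverse mapping
 * simply discards the partition bits.
 */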
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots do not allow writes */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */

	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
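/*
 * For example (hypothetical; option names as in the token table above),
 * these per-device options ride in the same comma-separated options
 * field as the libceph options when an image is mapped:
 *
 *   $ echo "192.168.0.1 name=admin,queue_depth=128,ro rbd foo" \
 *	> /sys/bus/rbd/add
 *
 * Options libceph does not recognize are handed to
 * parse_rbd_opts_token() below.
 */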
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock to unlink the
 * client from the list, so the caller must not hold that lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
		size_t size;

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
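/*
 * Worked example: with the typical default object order of 22 the
 * object size is 1 << 22 = 4 MiB, so image offset 0x401000 falls in
 * segment 0x401000 >> 22 = 1 at segment offset 0x401000 & 0x3fffff =
 * 0x1000, and a request starting there is clipped by
 * rbd_segment_length() to at most 4 MiB - 4 KiB, the distance to the
 * object boundary.
 */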
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
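/*
 * Illustrative only (not driver code): because bio_src and offset are
 * in-out, successive calls peel consecutive pieces off one source
 * chain; "length" would normally come from rbd_segment_length() so
 * each piece stays within a single object.
 */
static struct bio *__maybe_unused example_clone_next_piece(
					struct bio **bio_src,
					unsigned int *offset,
					unsigned int length)
{
	/* On success, *bio_src/*offset now point past the cloned bytes */
	return bio_chain_clone_range(bio_src, offset, length, GFP_NOIO);
}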
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the later-arriving one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
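/*
 * Worked example: with 4 MiB objects and a parent overlap of 5 MiB,
 * round_up() yields 8 MiB, so the objects at img_offset 0 and 4 MiB may
 * contain parent data (the overlap reaches 1 MiB into the second
 * object) while an object at img_offset 8 MiB cannot.
 */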
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
					unsigned long timeout)
{
	return __rbd_obj_request_wait(obj_request, timeout);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
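/*
 * Illustrative only (not driver code): the lifecycle this function
 * anchors, sketched for a read.  The caller attaches object requests
 * (elided here), submits, and drops its reference on failure;
 * "example_read" is hypothetical.
 */
static int __maybe_unused example_read(struct rbd_device *rbd_dev,
				       u64 offset, u64 length)
{
	struct rbd_img_request *img_request;
	int ret;

	img_request = rbd_img_request_create(rbd_dev, offset, length,
					     OBJ_OP_READ, NULL);
	if (!img_request)
		return -ENOMEM;

	/* ... caller fills in the object requests here ... */

	ret = rbd_img_request_submit(img_request);
	if (ret)
		rbd_img_request_put(img_request);
	return ret;
}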
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
2274 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2276 struct rbd_img_request *img_request;
2277 unsigned int xferred;
2281 rbd_assert(obj_request_img_data_test(obj_request));
2282 img_request = obj_request->img_request;
2284 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2285 xferred = (unsigned int)obj_request->xferred;
2286 result = obj_request->result;
2288 struct rbd_device *rbd_dev = img_request->rbd_dev;
2289 enum obj_operation_type op_type;
2291 if (img_request_discard_test(img_request))
2292 op_type = OBJ_OP_DISCARD;
2293 else if (img_request_write_test(img_request))
2294 op_type = OBJ_OP_WRITE;
2296 op_type = OBJ_OP_READ;
2298 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2299 obj_op_name(op_type), obj_request->length,
2300 obj_request->img_offset, obj_request->offset);
2301 rbd_warn(rbd_dev, " result %d xferred %x",
2303 if (!img_request->result)
2304 img_request->result = result;
2306 * Need to end I/O on the entire obj_request worth of
2307 * bytes in case of error.
2309 xferred = obj_request->length;
2312 /* Image object requests don't own their page array */
2314 if (obj_request->type == OBJ_REQUEST_PAGES) {
2315 obj_request->pages = NULL;
2316 obj_request->page_count = 0;
2319 if (img_request_child_test(img_request)) {
2320 rbd_assert(img_request->obj_request != NULL);
2321 more = obj_request->which < img_request->obj_request_count - 1;
2323 rbd_assert(img_request->rq != NULL);
2325 more = blk_update_request(img_request->rq, result, xferred);
2327 __blk_mq_end_request(img_request->rq, result);
2333 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2335 struct rbd_img_request *img_request;
2336 u32 which = obj_request->which;
2339 rbd_assert(obj_request_img_data_test(obj_request));
2340 img_request = obj_request->img_request;
2342 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2343 rbd_assert(img_request != NULL);
2344 rbd_assert(img_request->obj_request_count > 0);
2345 rbd_assert(which != BAD_WHICH);
2346 rbd_assert(which < img_request->obj_request_count);
2348 spin_lock_irq(&img_request->completion_lock);
2349 if (which != img_request->next_completion)
2352 for_each_obj_request_from(img_request, obj_request) {
2354 rbd_assert(which < img_request->obj_request_count);
2356 if (!obj_request_done_test(obj_request))
2358 more = rbd_img_obj_end_request(obj_request);
2362 rbd_assert(more ^ (which == img_request->obj_request_count));
2363 img_request->next_completion = which;
2365 spin_unlock_irq(&img_request->completion_lock);
2366 rbd_img_request_put(img_request);
2369 rbd_img_request_complete(img_request);
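/*
 * Completion-ordering example (hypothetical numbers): with object
 * requests 0..2, suppose request 2 finishes first. Its callback finds
 * which (2) != next_completion (0) and only records its state. When
 * request 0 later finishes, the loop above ends request 0, finds
 * request 1 not yet done, and leaves next_completion = 1; request 1's
 * completion will then sweep up both 1 and the already-done 2.
 */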
2373 * Add individual osd ops to the given ceph_osd_request and prepare
2374 * them for submission. num_ops is the current number of
2375 * osd operations already attached to the osd request.
2377 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2378 struct ceph_osd_request *osd_request,
2379 enum obj_operation_type op_type,
2380 unsigned int num_ops)
2382 struct rbd_img_request *img_request = obj_request->img_request;
2383 struct rbd_device *rbd_dev = img_request->rbd_dev;
2384 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2385 u64 offset = obj_request->offset;
2386 u64 length = obj_request->length;
2390 if (op_type == OBJ_OP_DISCARD) {
2391 if (!offset && length == object_size &&
2392 (!img_request_layered_test(img_request) ||
2393 !obj_request_overlaps_parent(obj_request))) {
2394 opcode = CEPH_OSD_OP_DELETE;
2395 } else if (offset + length == object_size) {
2396 opcode = CEPH_OSD_OP_TRUNCATE;
2398 down_read(&rbd_dev->header_rwsem);
2399 img_end = rbd_dev->header.image_size;
2400 up_read(&rbd_dev->header_rwsem);
2402 if (obj_request->img_offset + length == img_end)
2403 opcode = CEPH_OSD_OP_TRUNCATE;
2405 opcode = CEPH_OSD_OP_ZERO;
2407 } else if (op_type == OBJ_OP_WRITE) {
2408 if (!offset && length == object_size)
2409 opcode = CEPH_OSD_OP_WRITEFULL;
2411 opcode = CEPH_OSD_OP_WRITE;
2412 osd_req_op_alloc_hint_init(osd_request, num_ops,
2413 object_size, object_size);
2416 opcode = CEPH_OSD_OP_READ;
2419 if (opcode == CEPH_OSD_OP_DELETE)
2420 osd_req_op_init(osd_request, num_ops, opcode, 0);
2422 osd_req_op_extent_init(osd_request, num_ops, opcode,
2423 offset, length, 0, 0);
2425 if (obj_request->type == OBJ_REQUEST_BIO)
2426 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2427 obj_request->bio_list, length);
2428 else if (obj_request->type == OBJ_REQUEST_PAGES)
2429 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2430 obj_request->pages, length,
2431 offset & ~PAGE_MASK, false, false);
2433 /* Discards are also writes */
2434 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2435 rbd_osd_req_format_write(obj_request);
2437 rbd_osd_req_format_read(obj_request);
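/*
 * The discard opcode selection above, restated as a small pure helper
 * (hypothetical, for illustration only; the image-end truncate case
 * handled via img_end above is omitted for brevity): whole objects
 * that share no parent data are deleted, tail ranges are truncated,
 * and everything else is zeroed.
 */
#if 0
static u16 example_discard_opcode(u64 offset, u64 length, u64 object_size,
				  bool shares_parent_data)
{
	if (!offset && length == object_size && !shares_parent_data)
		return CEPH_OSD_OP_DELETE;
	if (offset + length == object_size)
		return CEPH_OSD_OP_TRUNCATE;
	return CEPH_OSD_OP_ZERO;
}
#endif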
2441 * Split up an image request into one or more object requests, each
2442 * to a different object. The "type" parameter indicates whether
2443 * "data_desc" is the pointer to the head of a list of bio
2444 * structures, or the base of a page array. In either case this
2445 * function assumes data_desc describes memory sufficient to hold
2446 * all data described by the image request.
2448 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2449 enum obj_request_type type,
2450 void *data_desc)
2452 struct rbd_device *rbd_dev = img_request->rbd_dev;
2453 struct rbd_obj_request *obj_request = NULL;
2454 struct rbd_obj_request *next_obj_request;
2455 struct bio *bio_list = NULL;
2456 unsigned int bio_offset = 0;
2457 struct page **pages = NULL;
2458 enum obj_operation_type op_type;
2462 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2463 (int)type, data_desc);
2465 img_offset = img_request->offset;
2466 resid = img_request->length;
2467 rbd_assert(resid > 0);
2468 op_type = rbd_img_request_op_type(img_request);
2470 if (type == OBJ_REQUEST_BIO) {
2471 bio_list = data_desc;
2472 rbd_assert(img_offset ==
2473 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2474 } else if (type == OBJ_REQUEST_PAGES) {
2479 struct ceph_osd_request *osd_req;
2480 const char *object_name;
2484 object_name = rbd_segment_name(rbd_dev, img_offset);
2487 offset = rbd_segment_offset(rbd_dev, img_offset);
2488 length = rbd_segment_length(rbd_dev, img_offset, resid);
2489 obj_request = rbd_obj_request_create(object_name,
2490 offset, length, type);
2491 /* object request has its own copy of the object name */
2492 rbd_segment_name_free(object_name);
2497 * set obj_request->img_request before creating the
2498 * osd_request so that it gets the right snapc
2500 rbd_img_obj_request_add(img_request, obj_request);
2502 if (type == OBJ_REQUEST_BIO) {
2503 unsigned int clone_size;
2505 rbd_assert(length <= (u64)UINT_MAX);
2506 clone_size = (unsigned int)length;
2507 obj_request->bio_list =
2508 bio_chain_clone_range(&bio_list,
2512 if (!obj_request->bio_list)
2514 } else if (type == OBJ_REQUEST_PAGES) {
2515 unsigned int page_count;
2517 obj_request->pages = pages;
2518 page_count = (u32)calc_pages_for(offset, length);
2519 obj_request->page_count = page_count;
2520 if ((offset + length) & ~PAGE_MASK)
2521 page_count--; /* more on last page */
2522 pages += page_count;
2525 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2526 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2531 obj_request->osd_req = osd_req;
2532 obj_request->callback = rbd_img_obj_callback;
2533 obj_request->img_offset = img_offset;
2535 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2537 rbd_img_request_get(img_request);
2539 img_offset += length;
2546 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2547 rbd_img_obj_request_del(img_request, obj_request);
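/*
 * A minimal sketch of the segmenting arithmetic used above, assuming
 * power-of-two object sizes (object size is 1 << obj_order). These
 * hypothetical helpers mirror what rbd_segment_offset() and
 * rbd_segment_length() compute: the offset within the containing
 * object, and a length clamped so no object request crosses an
 * object boundary.
 */
#if 0
static u64 example_segment_offset(u64 img_offset, u8 obj_order)
{
	return img_offset & (((u64)1 << obj_order) - 1);
}

static u64 example_segment_length(u64 img_offset, u64 resid, u8 obj_order)
{
	u64 object_size = (u64)1 << obj_order;
	u64 offset = img_offset & (object_size - 1);

	return min(resid, object_size - offset);
}
#endif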
2553 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2555 struct rbd_img_request *img_request;
2556 struct rbd_device *rbd_dev;
2557 struct page **pages;
2560 dout("%s: obj %p\n", __func__, obj_request);
2562 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2563 obj_request->type == OBJ_REQUEST_NODATA);
2564 rbd_assert(obj_request_img_data_test(obj_request));
2565 img_request = obj_request->img_request;
2566 rbd_assert(img_request);
2568 rbd_dev = img_request->rbd_dev;
2569 rbd_assert(rbd_dev);
2571 pages = obj_request->copyup_pages;
2572 rbd_assert(pages != NULL);
2573 obj_request->copyup_pages = NULL;
2574 page_count = obj_request->copyup_page_count;
2575 rbd_assert(page_count);
2576 obj_request->copyup_page_count = 0;
2577 ceph_release_page_vector(pages, page_count);
2580 * We want the transfer count to reflect the size of the
2581 * original write request. There is no such thing as a
2582 * successful short write, so if the request was successful
2583 * we can just set it to the originally-requested length.
2585 if (!obj_request->result)
2586 obj_request->xferred = obj_request->length;
2588 obj_request_done_set(obj_request);
2592 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2594 struct rbd_obj_request *orig_request;
2595 struct ceph_osd_request *osd_req;
2596 struct ceph_osd_client *osdc;
2597 struct rbd_device *rbd_dev;
2598 struct page **pages;
2599 enum obj_operation_type op_type;
2604 rbd_assert(img_request_child_test(img_request));
2606 /* First get what we need from the image request */
2608 pages = img_request->copyup_pages;
2609 rbd_assert(pages != NULL);
2610 img_request->copyup_pages = NULL;
2611 page_count = img_request->copyup_page_count;
2612 rbd_assert(page_count);
2613 img_request->copyup_page_count = 0;
2615 orig_request = img_request->obj_request;
2616 rbd_assert(orig_request != NULL);
2617 rbd_assert(obj_request_type_valid(orig_request->type));
2618 img_result = img_request->result;
2619 parent_length = img_request->length;
2620 rbd_assert(parent_length == img_request->xferred);
2621 rbd_img_request_put(img_request);
2623 rbd_assert(orig_request->img_request);
2624 rbd_dev = orig_request->img_request->rbd_dev;
2625 rbd_assert(rbd_dev);
2628 * If the overlap has become 0 (most likely because the
2629 * image has been flattened) we need to free the pages
2630 * and re-submit the original write request.
2632 if (!rbd_dev->parent_overlap) {
2633 struct ceph_osd_client *osdc;
2635 ceph_release_page_vector(pages, page_count);
2636 osdc = &rbd_dev->rbd_client->client->osdc;
2637 img_result = rbd_obj_request_submit(osdc, orig_request);
2646 * The original osd request is of no use to us any more.
2647 * We need a new one that can hold the three ops in a copyup
2648 * request. Allocate the new copyup osd request for the
2649 * original request, and release the old one.
2651 img_result = -ENOMEM;
2652 osd_req = rbd_osd_req_create_copyup(orig_request);
2655 rbd_osd_req_destroy(orig_request->osd_req);
2656 orig_request->osd_req = osd_req;
2657 orig_request->copyup_pages = pages;
2658 orig_request->copyup_page_count = page_count;
2660 /* Initialize the copyup op */
2662 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2663 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2666 /* Add the other op(s) */
2668 op_type = rbd_img_request_op_type(orig_request->img_request);
2669 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2671 /* All set, send it off. */
2673 osdc = &rbd_dev->rbd_client->client->osdc;
2674 img_result = rbd_obj_request_submit(osdc, orig_request);
2678 /* Record the error code and complete the request */
2680 orig_request->result = img_result;
2681 orig_request->xferred = 0;
2682 obj_request_done_set(orig_request);
2683 rbd_obj_request_complete(orig_request);
2687 * Read from the parent image the range of data that covers the
2688 * entire target of the given object request. This is used for
2689 * satisfying a layered image write request when the target of an
2690 * object request from the image request does not exist.
2692 * A page array big enough to hold the returned data is allocated
2693 * and supplied to rbd_img_request_fill() as the "data descriptor."
2694 * When the read completes, this page array will be transferred to
2695 * the original object request for the copyup operation.
2697 * If an error occurs, record it as the result of the original
2698 * object request and mark it done so it gets completed.
2700 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2702 struct rbd_img_request *img_request = NULL;
2703 struct rbd_img_request *parent_request = NULL;
2704 struct rbd_device *rbd_dev;
2707 struct page **pages = NULL;
2711 rbd_assert(obj_request_img_data_test(obj_request));
2712 rbd_assert(obj_request_type_valid(obj_request->type));
2714 img_request = obj_request->img_request;
2715 rbd_assert(img_request != NULL);
2716 rbd_dev = img_request->rbd_dev;
2717 rbd_assert(rbd_dev->parent != NULL);
2720 * Determine the byte range covered by the object in the
2721 * child image to which the original request was to be sent.
2723 img_offset = obj_request->img_offset - obj_request->offset;
2724 length = (u64)1 << rbd_dev->header.obj_order;
2727 * There is no defined parent data beyond the parent
2728 * overlap, so limit what we read at that boundary if necessary.
2731 if (img_offset + length > rbd_dev->parent_overlap) {
2732 rbd_assert(img_offset < rbd_dev->parent_overlap);
2733 length = rbd_dev->parent_overlap - img_offset;
2737 * Allocate a page array big enough to receive the data read back.
2740 page_count = (u32)calc_pages_for(0, length);
2741 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2742 if (IS_ERR(pages)) {
2743 result = PTR_ERR(pages);
2749 parent_request = rbd_parent_request_create(obj_request,
2750 img_offset, length);
2751 if (!parent_request)
2754 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2757 parent_request->copyup_pages = pages;
2758 parent_request->copyup_page_count = page_count;
2760 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2761 result = rbd_img_request_submit(parent_request);
2765 parent_request->copyup_pages = NULL;
2766 parent_request->copyup_page_count = 0;
2767 parent_request->obj_request = NULL;
2768 rbd_obj_request_put(obj_request);
2771 ceph_release_page_vector(pages, page_count);
2773 rbd_img_request_put(parent_request);
2774 obj_request->result = result;
2775 obj_request->xferred = 0;
2776 obj_request_done_set(obj_request);
2781 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2783 struct rbd_obj_request *orig_request;
2784 struct rbd_device *rbd_dev;
2787 rbd_assert(!obj_request_img_data_test(obj_request));
2790 * All we need from the object request is the original
2791 * request and the result of the STAT op. Grab those, then
2792 * we're done with the request.
2794 orig_request = obj_request->obj_request;
2795 obj_request->obj_request = NULL;
2796 rbd_obj_request_put(orig_request);
2797 rbd_assert(orig_request);
2798 rbd_assert(orig_request->img_request);
2800 result = obj_request->result;
2801 obj_request->result = 0;
2803 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2804 obj_request, orig_request, result,
2805 obj_request->xferred, obj_request->length);
2806 rbd_obj_request_put(obj_request);
2809 * If the overlap has become 0 (most likely because the
2810 * image has been flattened) we need to free the pages
2811 * and re-submit the original write request.
2813 rbd_dev = orig_request->img_request->rbd_dev;
2814 if (!rbd_dev->parent_overlap) {
2815 struct ceph_osd_client *osdc;
2817 osdc = &rbd_dev->rbd_client->client->osdc;
2818 result = rbd_obj_request_submit(osdc, orig_request);
2824 * Our only purpose here is to determine whether the object
2825 * exists, and we don't want to treat the non-existence as
2826 * an error. If something else comes back, transfer the
2827 * error to the original request and complete it now.
2830 obj_request_existence_set(orig_request, true);
2831 } else if (result == -ENOENT) {
2832 obj_request_existence_set(orig_request, false);
2833 } else if (result) {
2834 orig_request->result = result;
2839 * Resubmit the original request now that we have recorded
2840 * whether the target object exists.
2842 orig_request->result = rbd_img_obj_request_submit(orig_request);
2844 if (orig_request->result)
2845 rbd_obj_request_complete(orig_request);
2848 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2850 struct rbd_obj_request *stat_request;
2851 struct rbd_device *rbd_dev;
2852 struct ceph_osd_client *osdc;
2853 struct page **pages = NULL;
2859 * The response data for a STAT call consists of:
2860 *     le64 length;
2861 *     struct {
2862 *         le32 tv_sec;
2863 *         le32 tv_nsec;
2864 *     } mtime;
2866 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2867 page_count = (u32)calc_pages_for(0, size);
2868 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2870 return PTR_ERR(pages);
2873 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2878 rbd_obj_request_get(obj_request);
2879 stat_request->obj_request = obj_request;
2880 stat_request->pages = pages;
2881 stat_request->page_count = page_count;
2883 rbd_assert(obj_request->img_request);
2884 rbd_dev = obj_request->img_request->rbd_dev;
2885 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2887 if (!stat_request->osd_req)
2889 stat_request->callback = rbd_img_obj_exists_callback;
2891 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2892 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2894 rbd_osd_req_format_read(stat_request);
2896 osdc = &rbd_dev->rbd_client->client->osdc;
2897 ret = rbd_obj_request_submit(osdc, stat_request);
2900 rbd_obj_request_put(obj_request);
2905 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2907 struct rbd_img_request *img_request;
2908 struct rbd_device *rbd_dev;
2910 rbd_assert(obj_request_img_data_test(obj_request));
2912 img_request = obj_request->img_request;
2913 rbd_assert(img_request);
2914 rbd_dev = img_request->rbd_dev;
2917 if (!img_request_write_test(img_request) &&
2918 !img_request_discard_test(img_request))
2921 /* Non-layered writes */
2922 if (!img_request_layered_test(img_request))
2926 * Layered writes outside of the parent overlap range don't
2927 * share any data with the parent.
2929 if (!obj_request_overlaps_parent(obj_request))
2933 * Entire-object layered writes - we will overwrite whatever
2934 * parent data there is anyway.
2936 if (!obj_request->offset &&
2937 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2941 * If the object is known to already exist, its parent data has
2942 * already been copied.
2944 if (obj_request_known_test(obj_request) &&
2945 obj_request_exists_test(obj_request))
2951 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2953 if (img_obj_request_simple(obj_request)) {
2954 struct rbd_device *rbd_dev;
2955 struct ceph_osd_client *osdc;
2957 rbd_dev = obj_request->img_request->rbd_dev;
2958 osdc = &rbd_dev->rbd_client->client->osdc;
2960 return rbd_obj_request_submit(osdc, obj_request);
2964 * It's a layered write. The target object might exist but
2965 * we may not know that yet. If we know it doesn't exist,
2966 * start by reading the data for the full target object from
2967 * the parent so we can use it for a copyup to the target.
2969 if (obj_request_known_test(obj_request))
2970 return rbd_img_obj_parent_read_full(obj_request);
2972 /* We don't know whether the target exists. Go find out. */
2974 return rbd_img_obj_exists_submit(obj_request);
2977 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2979 struct rbd_obj_request *obj_request;
2980 struct rbd_obj_request *next_obj_request;
2982 dout("%s: img %p\n", __func__, img_request);
2983 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2986 ret = rbd_img_obj_request_submit(obj_request);
2994 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2996 struct rbd_obj_request *obj_request;
2997 struct rbd_device *rbd_dev;
3002 rbd_assert(img_request_child_test(img_request));
3004 /* First get what we need from the image request and release it */
3006 obj_request = img_request->obj_request;
3007 img_xferred = img_request->xferred;
3008 img_result = img_request->result;
3009 rbd_img_request_put(img_request);
3012 * If the overlap has become 0 (most likely because the
3013 * image has been flattened) we need to re-submit the original request.
3016 rbd_assert(obj_request);
3017 rbd_assert(obj_request->img_request);
3018 rbd_dev = obj_request->img_request->rbd_dev;
3019 if (!rbd_dev->parent_overlap) {
3020 struct ceph_osd_client *osdc;
3022 osdc = &rbd_dev->rbd_client->client->osdc;
3023 img_result = rbd_obj_request_submit(osdc, obj_request);
3028 obj_request->result = img_result;
3029 if (obj_request->result)
3033 * We need to zero anything beyond the parent overlap
3034 * boundary. Since rbd_img_obj_request_read_callback()
3035 * will zero anything beyond the end of a short read, an
3036 * easy way to do this is to pretend the data from the
3037 * parent came up short--ending at the overlap boundary.
3039 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3040 obj_end = obj_request->img_offset + obj_request->length;
3041 if (obj_end > rbd_dev->parent_overlap) {
3044 if (obj_request->img_offset < rbd_dev->parent_overlap)
3045 xferred = rbd_dev->parent_overlap -
3046 obj_request->img_offset;
3048 obj_request->xferred = min(img_xferred, xferred);
3050 obj_request->xferred = img_xferred;
3053 rbd_img_obj_request_read_callback(obj_request);
3054 rbd_obj_request_complete(obj_request);
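/*
 * Worked example of the clamping above (numbers assumed): with
 * parent_overlap = 4096, img_offset = 3072 and length = 2048, obj_end
 * is 5120 > 4096, so xferred = 4096 - 3072 = 1024. Only the first
 * 1024 bytes are taken from the parent read; the remainder of the
 * object request is zero-filled by
 * rbd_img_obj_request_read_callback().
 */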
3057 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3059 struct rbd_img_request *img_request;
3062 rbd_assert(obj_request_img_data_test(obj_request));
3063 rbd_assert(obj_request->img_request != NULL);
3064 rbd_assert(obj_request->result == (s32) -ENOENT);
3065 rbd_assert(obj_request_type_valid(obj_request->type));
3067 /* rbd_read_finish(obj_request, obj_request->length); */
3068 img_request = rbd_parent_request_create(obj_request,
3069 obj_request->img_offset,
3070 obj_request->length);
3075 if (obj_request->type == OBJ_REQUEST_BIO)
3076 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3077 obj_request->bio_list);
3079 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3080 obj_request->pages);
3084 img_request->callback = rbd_img_parent_read_callback;
3085 result = rbd_img_request_submit(img_request);
3092 rbd_img_request_put(img_request);
3093 obj_request->result = result;
3094 obj_request->xferred = 0;
3095 obj_request_done_set(obj_request);
3098 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3100 struct rbd_obj_request *obj_request;
3101 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3104 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3105 OBJ_REQUEST_NODATA);
3110 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3112 if (!obj_request->osd_req)
3115 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3117 rbd_osd_req_format_read(obj_request);
3119 ret = rbd_obj_request_submit(osdc, obj_request);
3122 ret = rbd_obj_request_wait(obj_request);
3124 rbd_obj_request_put(obj_request);
3129 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3131 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3137 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3138 rbd_dev->header_name, (unsigned long long)notify_id,
3139 (unsigned int)opcode);
3142 * Until adequate refresh error handling is in place, there is
3143 * not much we can do here, except warn.
3145 * See http://tracker.ceph.com/issues/5040
3147 ret = rbd_dev_refresh(rbd_dev);
3149 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3151 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3153 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3157 * Send a (un)watch request and wait for the ack. On success, return
3158 * the request with a ref held; on failure, return an ERR_PTR.
3160 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3161 struct rbd_device *rbd_dev,
3162 bool watch)
3164 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3165 struct ceph_options *opts = osdc->client->options;
3166 struct rbd_obj_request *obj_request;
3169 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3170 OBJ_REQUEST_NODATA);
3172 return ERR_PTR(-ENOMEM);
3174 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3176 if (!obj_request->osd_req) {
3181 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3182 rbd_dev->watch_event->cookie, 0, watch);
3183 rbd_osd_req_format_write(obj_request);
3186 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3188 ret = rbd_obj_request_submit(osdc, obj_request);
3192 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
3196 ret = obj_request->result;
3199 rbd_obj_request_end(obj_request);
3206 rbd_obj_request_put(obj_request);
3207 return ERR_PTR(ret);
3211 * Initiate a watch request, synchronously.
3213 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3215 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3216 struct rbd_obj_request *obj_request;
3219 rbd_assert(!rbd_dev->watch_event);
3220 rbd_assert(!rbd_dev->watch_request);
3222 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3223 &rbd_dev->watch_event);
3227 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3228 if (IS_ERR(obj_request)) {
3229 ceph_osdc_cancel_event(rbd_dev->watch_event);
3230 rbd_dev->watch_event = NULL;
3231 return PTR_ERR(obj_request);
3235 * A watch request is set to linger, so the underlying osd
3236 * request won't go away until we unregister it. We retain
3237 * a pointer to the object request during that time (in
3238 * rbd_dev->watch_request), so we'll keep a reference to it.
3239 * We'll drop that reference after we've unregistered it in
3240 * rbd_dev_header_unwatch_sync().
3242 rbd_dev->watch_request = obj_request;
3248 * Tear down a watch request, synchronously.
3250 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3252 struct rbd_obj_request *obj_request;
3254 rbd_assert(rbd_dev->watch_event);
3255 rbd_assert(rbd_dev->watch_request);
3257 rbd_obj_request_end(rbd_dev->watch_request);
3258 rbd_obj_request_put(rbd_dev->watch_request);
3259 rbd_dev->watch_request = NULL;
3261 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3262 if (!IS_ERR(obj_request))
3263 rbd_obj_request_put(obj_request);
3264 else
3265 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3266 PTR_ERR(obj_request));
3268 ceph_osdc_cancel_event(rbd_dev->watch_event);
3269 rbd_dev->watch_event = NULL;
3273 * Synchronous osd object method call. Returns the number of bytes
3274 * returned in the inbound buffer, or a negative error code.
3276 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3277 const char *object_name,
3278 const char *class_name,
3279 const char *method_name,
3280 const void *outbound,
3281 size_t outbound_size,
3282 void *inbound,
3283 size_t inbound_size)
3285 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3286 struct rbd_obj_request *obj_request;
3287 struct page **pages;
3292 * Method calls are ultimately read operations. The result
3293 * should be placed into the inbound buffer provided. They
3294 * also supply outbound data--parameters for the object
3295 * method. Currently if this is present it will be a snapshot id.
3298 page_count = (u32)calc_pages_for(0, inbound_size);
3299 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3301 return PTR_ERR(pages);
3304 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3309 obj_request->pages = pages;
3310 obj_request->page_count = page_count;
3312 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3314 if (!obj_request->osd_req)
3317 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3318 class_name, method_name);
3319 if (outbound_size) {
3320 struct ceph_pagelist *pagelist;
3322 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3326 ceph_pagelist_init(pagelist);
3327 ceph_pagelist_append(pagelist, outbound, outbound_size);
3328 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3331 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3332 obj_request->pages, inbound_size,
3334 rbd_osd_req_format_read(obj_request);
3336 ret = rbd_obj_request_submit(osdc, obj_request);
3339 ret = rbd_obj_request_wait(obj_request);
3343 ret = obj_request->result;
3347 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3348 ret = (int)obj_request->xferred;
3349 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3352 rbd_obj_request_put(obj_request);
3354 ceph_release_page_vector(pages, page_count);
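/*
 * Usage sketch (mirrors rbd_dev_v2_object_prefix() below): a
 * synchronous "rbd" class method call with no outbound parameters,
 * whose reply lands in a caller-supplied inbound buffer.
 */
#if 0
	char reply[RBD_OBJ_PREFIX_LEN_MAX];
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				  "rbd", "get_object_prefix", NULL, 0,
				  reply, RBD_OBJ_PREFIX_LEN_MAX);
	if (ret >= 0)
		dout("reply is %d bytes\n", ret);
#endif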
3359 static void rbd_queue_workfn(struct work_struct *work)
3361 struct request *rq = blk_mq_rq_from_pdu(work);
3362 struct rbd_device *rbd_dev = rq->q->queuedata;
3363 struct rbd_img_request *img_request;
3364 struct ceph_snap_context *snapc = NULL;
3365 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3366 u64 length = blk_rq_bytes(rq);
3367 enum obj_operation_type op_type;
3371 if (rq->cmd_type != REQ_TYPE_FS) {
3372 dout("%s: non-fs request type %d\n", __func__,
3373 (int) rq->cmd_type);
3378 if (rq->cmd_flags & REQ_DISCARD)
3379 op_type = OBJ_OP_DISCARD;
3380 else if (rq->cmd_flags & REQ_WRITE)
3381 op_type = OBJ_OP_WRITE;
3383 op_type = OBJ_OP_READ;
3385 /* Ignore/skip any zero-length requests */
3388 dout("%s: zero-length request\n", __func__);
3393 /* Only reads are allowed to a read-only device */
3395 if (op_type != OBJ_OP_READ) {
3396 if (rbd_dev->mapping.read_only) {
3400 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3404 * Quit early if the mapped snapshot no longer exists. It's
3405 * still possible the snapshot will have disappeared by the
3406 * time our request arrives at the osd, but there's no sense in
3407 * sending it if we already know.
3409 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3410 dout("request for non-existent snapshot");
3411 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3416 if (offset && length > U64_MAX - offset + 1) {
3417 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3420 goto err_rq; /* Shouldn't happen */
3423 blk_mq_start_request(rq);
3425 down_read(&rbd_dev->header_rwsem);
3426 mapping_size = rbd_dev->mapping.size;
3427 if (op_type != OBJ_OP_READ) {
3428 snapc = rbd_dev->header.snapc;
3429 ceph_get_snap_context(snapc);
3431 up_read(&rbd_dev->header_rwsem);
3433 if (offset + length > mapping_size) {
3434 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3435 length, mapping_size);
3440 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3446 img_request->rq = rq;
3448 if (op_type == OBJ_OP_DISCARD)
3449 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3452 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3455 goto err_img_request;
3457 result = rbd_img_request_submit(img_request);
3459 goto err_img_request;
3464 rbd_img_request_put(img_request);
3467 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3468 obj_op_name(op_type), length, offset, result);
3469 ceph_put_snap_context(snapc);
3471 blk_mq_end_request(rq, result);
3474 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3475 const struct blk_mq_queue_data *bd)
3477 struct request *rq = bd->rq;
3478 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3480 queue_work(rbd_wq, work);
3481 return BLK_MQ_RQ_QUEUE_OK;
3484 static void rbd_free_disk(struct rbd_device *rbd_dev)
3486 struct gendisk *disk = rbd_dev->disk;
3491 rbd_dev->disk = NULL;
3492 if (disk->flags & GENHD_FL_UP) {
3495 blk_cleanup_queue(disk->queue);
3496 blk_mq_free_tag_set(&rbd_dev->tag_set);
3501 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3502 const char *object_name,
3503 u64 offset, u64 length, void *buf)
3506 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3507 struct rbd_obj_request *obj_request;
3508 struct page **pages = NULL;
3513 page_count = (u32) calc_pages_for(offset, length);
3514 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3516 return PTR_ERR(pages);
3519 obj_request = rbd_obj_request_create(object_name, offset, length,
3524 obj_request->pages = pages;
3525 obj_request->page_count = page_count;
3527 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3529 if (!obj_request->osd_req)
3532 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3533 offset, length, 0, 0);
3534 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3536 obj_request->length,
3537 obj_request->offset & ~PAGE_MASK,
3539 rbd_osd_req_format_read(obj_request);
3541 ret = rbd_obj_request_submit(osdc, obj_request);
3544 ret = rbd_obj_request_wait(obj_request);
3548 ret = obj_request->result;
3552 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3553 size = (size_t) obj_request->xferred;
3554 ceph_copy_from_page_vector(pages, buf, 0, size);
3555 rbd_assert(size <= (size_t)INT_MAX);
3559 rbd_obj_request_put(obj_request);
3561 ceph_release_page_vector(pages, page_count);
3567 * Read the complete header for the given rbd device. On successful
3568 * return, the rbd_dev->header field will contain up-to-date
3569 * information about the image.
3571 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3573 struct rbd_image_header_ondisk *ondisk = NULL;
3580 * The complete header will include an array of its 64-bit
3581 * snapshot ids, followed by the names of those snapshots as
3582 * a contiguous block of NUL-terminated strings. Note that
3583 * the number of snapshots could change by the time we read
3584 * it in, in which case we re-read it.
3591 size = sizeof (*ondisk);
3592 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3594 ondisk = kmalloc(size, GFP_KERNEL);
3598 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3602 if ((size_t)ret < size) {
3604 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3608 if (!rbd_dev_ondisk_valid(ondisk)) {
3610 rbd_warn(rbd_dev, "invalid header");
3614 names_size = le64_to_cpu(ondisk->snap_names_len);
3615 want_count = snap_count;
3616 snap_count = le32_to_cpu(ondisk->snap_count);
3617 } while (snap_count != want_count);
3619 ret = rbd_header_from_disk(rbd_dev, ondisk);
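/*
 * The do/while above handles a read/size race: the buffer is sized
 * for the snapshot count seen on the previous pass, and if the count
 * read back differs, the whole header is fetched again. Skeleton of
 * the pattern (illustrative only, details elided):
 */
#if 0
	u32 snap_count = 0;	/* first pass sizes for zero snapshots */
	u32 want_count;

	do {
		want_count = snap_count;
		/* allocate a buffer sized for want_count, read header */
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);
#endif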
3627 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3628 * has disappeared from the (just updated) snapshot context.
3630 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3634 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3637 snap_id = rbd_dev->spec->snap_id;
3638 if (snap_id == CEPH_NOSNAP)
3641 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3642 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3645 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3651 * Don't hold the lock while doing disk operations,
3652 * or lock ordering will conflict with the bdev mutex via:
3653 * rbd_add() -> blkdev_get() -> rbd_open()
3655 spin_lock_irq(&rbd_dev->lock);
3656 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3657 spin_unlock_irq(&rbd_dev->lock);
3659 * If the device is being removed, rbd_dev->disk has
3660 * been destroyed, so don't try to update its size.
3662 if (!removing) {
3663 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3664 dout("setting size to %llu sectors", (unsigned long long)size);
3665 set_capacity(rbd_dev->disk, size);
3666 revalidate_disk(rbd_dev->disk);
3670 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3675 down_write(&rbd_dev->header_rwsem);
3676 mapping_size = rbd_dev->mapping.size;
3678 ret = rbd_dev_header_info(rbd_dev);
3683 * If there is a parent, see if it has disappeared due to the
3684 * mapped image getting flattened.
3686 if (rbd_dev->parent) {
3687 ret = rbd_dev_v2_parent_info(rbd_dev);
3692 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3693 rbd_dev->mapping.size = rbd_dev->header.image_size;
3695 /* validate mapped snapshot's EXISTS flag */
3696 rbd_exists_validate(rbd_dev);
3700 up_write(&rbd_dev->header_rwsem);
3701 if (!ret && mapping_size != rbd_dev->mapping.size)
3702 rbd_dev_update_size(rbd_dev);
3707 static int rbd_init_request(void *data, struct request *rq,
3708 unsigned int hctx_idx, unsigned int request_idx,
3709 unsigned int numa_node)
3711 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3713 INIT_WORK(work, rbd_queue_workfn);
3717 static struct blk_mq_ops rbd_mq_ops = {
3718 .queue_rq = rbd_queue_rq,
3719 .map_queue = blk_mq_map_queue,
3720 .init_request = rbd_init_request,
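/*
 * The per-request work item lives in the blk-mq PDU: cmd_size below
 * is sizeof(struct work_struct), rbd_init_request() initializes it,
 * and rbd_queue_rq()/rbd_queue_workfn() convert between the request
 * and its embedded work item. A sketch of that round trip
 * (illustrative only):
 */
#if 0
static void example_round_trip(struct request *rq)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	/* blk_mq_rq_from_pdu() recovers the request from its PDU */
	WARN_ON(blk_mq_rq_from_pdu(work) != rq);
}
#endif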
3723 static int rbd_init_disk(struct rbd_device *rbd_dev)
3725 struct gendisk *disk;
3726 struct request_queue *q;
3730 /* create gendisk info */
3731 disk = alloc_disk(single_major ?
3732 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3733 RBD_MINORS_PER_MAJOR);
3737 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3739 disk->major = rbd_dev->major;
3740 disk->first_minor = rbd_dev->minor;
3742 disk->flags |= GENHD_FL_EXT_DEVT;
3743 disk->fops = &rbd_bd_ops;
3744 disk->private_data = rbd_dev;
3746 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3747 rbd_dev->tag_set.ops = &rbd_mq_ops;
3748 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3749 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3750 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3751 rbd_dev->tag_set.nr_hw_queues = 1;
3752 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3754 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3758 q = blk_mq_init_queue(&rbd_dev->tag_set);
3764 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3765 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3767 /* set io sizes to object size */
3768 segment_size = rbd_obj_bytes(&rbd_dev->header);
3769 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3770 q->limits.max_sectors = queue_max_hw_sectors(q);
3771 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
3772 blk_queue_max_segment_size(q, segment_size);
3773 blk_queue_io_min(q, segment_size);
3774 blk_queue_io_opt(q, segment_size);
3776 /* enable the discard support */
3777 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3778 q->limits.discard_granularity = segment_size;
3779 q->limits.discard_alignment = segment_size;
3780 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
3781 q->limits.discard_zeroes_data = 1;
3785 q->queuedata = rbd_dev;
3787 rbd_dev->disk = disk;
3791 blk_mq_free_tag_set(&rbd_dev->tag_set);
3801 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3803 return container_of(dev, struct rbd_device, dev);
3806 static ssize_t rbd_size_show(struct device *dev,
3807 struct device_attribute *attr, char *buf)
3809 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3811 return sprintf(buf, "%llu\n",
3812 (unsigned long long)rbd_dev->mapping.size);
3816 * Note this shows the features for whatever's mapped, which is not
3817 * necessarily the base image.
3819 static ssize_t rbd_features_show(struct device *dev,
3820 struct device_attribute *attr, char *buf)
3822 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3824 return sprintf(buf, "0x%016llx\n",
3825 (unsigned long long)rbd_dev->mapping.features);
3828 static ssize_t rbd_major_show(struct device *dev,
3829 struct device_attribute *attr, char *buf)
3831 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3834 return sprintf(buf, "%d\n", rbd_dev->major);
3836 return sprintf(buf, "(none)\n");
3839 static ssize_t rbd_minor_show(struct device *dev,
3840 struct device_attribute *attr, char *buf)
3842 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3844 return sprintf(buf, "%d\n", rbd_dev->minor);
3847 static ssize_t rbd_client_id_show(struct device *dev,
3848 struct device_attribute *attr, char *buf)
3850 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3852 return sprintf(buf, "client%lld\n",
3853 ceph_client_id(rbd_dev->rbd_client->client));
3856 static ssize_t rbd_pool_show(struct device *dev,
3857 struct device_attribute *attr, char *buf)
3859 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3861 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3864 static ssize_t rbd_pool_id_show(struct device *dev,
3865 struct device_attribute *attr, char *buf)
3867 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3869 return sprintf(buf, "%llu\n",
3870 (unsigned long long) rbd_dev->spec->pool_id);
3873 static ssize_t rbd_name_show(struct device *dev,
3874 struct device_attribute *attr, char *buf)
3876 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3878 if (rbd_dev->spec->image_name)
3879 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3881 return sprintf(buf, "(unknown)\n");
3884 static ssize_t rbd_image_id_show(struct device *dev,
3885 struct device_attribute *attr, char *buf)
3887 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3889 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3893 * Shows the name of the currently-mapped snapshot (or
3894 * RBD_SNAP_HEAD_NAME for the base image).
3896 static ssize_t rbd_snap_show(struct device *dev,
3897 struct device_attribute *attr,
3900 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3902 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3906 * For a v2 image, shows the chain of parent images, separated by empty
3907 * lines. For v1 images or if there is no parent, shows "(no parent image)".
3910 static ssize_t rbd_parent_show(struct device *dev,
3911 struct device_attribute *attr,
3914 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3917 if (!rbd_dev->parent)
3918 return sprintf(buf, "(no parent image)\n");
3920 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3921 struct rbd_spec *spec = rbd_dev->parent_spec;
3923 count += sprintf(&buf[count], "%s"
3924 "pool_id %llu\npool_name %s\n"
3925 "image_id %s\nimage_name %s\n"
3926 "snap_id %llu\nsnap_name %s\n"
3928 !count ? "" : "\n", /* first? */
3929 spec->pool_id, spec->pool_name,
3930 spec->image_id, spec->image_name ?: "(unknown)",
3931 spec->snap_id, spec->snap_name,
3932 rbd_dev->parent_overlap);
3938 static ssize_t rbd_image_refresh(struct device *dev,
3939 struct device_attribute *attr,
3943 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3946 ret = rbd_dev_refresh(rbd_dev);
3953 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3954 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3955 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3956 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3957 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3958 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3959 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3960 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3961 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3962 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3963 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3964 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3966 static struct attribute *rbd_attrs[] = {
3967 &dev_attr_size.attr,
3968 &dev_attr_features.attr,
3969 &dev_attr_major.attr,
3970 &dev_attr_minor.attr,
3971 &dev_attr_client_id.attr,
3972 &dev_attr_pool.attr,
3973 &dev_attr_pool_id.attr,
3974 &dev_attr_name.attr,
3975 &dev_attr_image_id.attr,
3976 &dev_attr_current_snap.attr,
3977 &dev_attr_parent.attr,
3978 &dev_attr_refresh.attr,
3982 static struct attribute_group rbd_attr_group = {
3986 static const struct attribute_group *rbd_attr_groups[] = {
3991 static void rbd_sysfs_dev_release(struct device *dev)
3995 static struct device_type rbd_device_type = {
3997 .groups = rbd_attr_groups,
3998 .release = rbd_sysfs_dev_release,
4001 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4003 kref_get(&spec->kref);
4008 static void rbd_spec_free(struct kref *kref);
4009 static void rbd_spec_put(struct rbd_spec *spec)
4012 kref_put(&spec->kref, rbd_spec_free);
4015 static struct rbd_spec *rbd_spec_alloc(void)
4017 struct rbd_spec *spec;
4019 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4023 spec->pool_id = CEPH_NOPOOL;
4024 spec->snap_id = CEPH_NOSNAP;
4025 kref_init(&spec->kref);
4030 static void rbd_spec_free(struct kref *kref)
4032 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4034 kfree(spec->pool_name);
4035 kfree(spec->image_id);
4036 kfree(spec->image_name);
4037 kfree(spec->snap_name);
4041 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4042 struct rbd_spec *spec,
4043 struct rbd_options *opts)
4045 struct rbd_device *rbd_dev;
4047 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4051 spin_lock_init(&rbd_dev->lock);
4053 atomic_set(&rbd_dev->parent_ref, 0);
4054 INIT_LIST_HEAD(&rbd_dev->node);
4055 init_rwsem(&rbd_dev->header_rwsem);
4057 rbd_dev->rbd_client = rbdc;
4058 rbd_dev->spec = spec;
4059 rbd_dev->opts = opts;
4061 /* Initialize the layout used for all rbd requests */
4063 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4064 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4065 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4066 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4071 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4073 rbd_put_client(rbd_dev->rbd_client);
4074 rbd_spec_put(rbd_dev->spec);
4075 kfree(rbd_dev->opts);
4080 * Get the size and object order for an image snapshot, or if
4081 * snap_id is CEPH_NOSNAP, get this information for the base image.
4084 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4085 u8 *order, u64 *snap_size)
4087 __le64 snapid = cpu_to_le64(snap_id);
4092 } __attribute__ ((packed)) size_buf = { 0 };
4094 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4096 &snapid, sizeof (snapid),
4097 &size_buf, sizeof (size_buf));
4098 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4101 if (ret < sizeof (size_buf))
4105 *order = size_buf.order;
4106 dout(" order %u", (unsigned int)*order);
4108 *snap_size = le64_to_cpu(size_buf.size);
4110 dout(" snap_id 0x%016llx snap_size = %llu\n",
4111 (unsigned long long)snap_id,
4112 (unsigned long long)*snap_size);
4117 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4119 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4120 &rbd_dev->header.obj_order,
4121 &rbd_dev->header.image_size);
4124 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4130 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4134 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4135 "rbd", "get_object_prefix", NULL, 0,
4136 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4137 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4142 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4143 p + ret, NULL, GFP_NOIO);
4146 if (IS_ERR(rbd_dev->header.object_prefix)) {
4147 ret = PTR_ERR(rbd_dev->header.object_prefix);
4148 rbd_dev->header.object_prefix = NULL;
4150 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4158 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4159 u64 *snap_features)
4161 __le64 snapid = cpu_to_le64(snap_id);
4165 } __attribute__ ((packed)) features_buf = { 0 };
4169 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4170 "rbd", "get_features",
4171 &snapid, sizeof (snapid),
4172 &features_buf, sizeof (features_buf));
4173 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4176 if (ret < sizeof (features_buf))
4179 incompat = le64_to_cpu(features_buf.incompat);
4180 if (incompat & ~RBD_FEATURES_SUPPORTED)
4183 *snap_features = le64_to_cpu(features_buf.features);
4185 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4186 (unsigned long long)snap_id,
4187 (unsigned long long)*snap_features,
4188 (unsigned long long)le64_to_cpu(features_buf.incompat));
4193 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4195 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4196 &rbd_dev->header.features);
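/*
 * Worked example of the incompat check above (on-disk value assumed):
 * RBD_FEATURES_SUPPORTED is LAYERING | STRIPINGV2 = 0x3. An image
 * advertising incompat features 0x5 is refused, because
 *
 *	0x5 & ~0x3 = 0x4 != 0
 *
 * i.e. bit 2 names a feature this client does not implement.
 */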
4199 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4201 struct rbd_spec *parent_spec;
4203 void *reply_buf = NULL;
4213 parent_spec = rbd_spec_alloc();
4217 size = sizeof (__le64) + /* pool_id */
4218 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4219 sizeof (__le64) + /* snap_id */
4220 sizeof (__le64); /* overlap */
4221 reply_buf = kmalloc(size, GFP_KERNEL);
4227 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4228 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4229 "rbd", "get_parent",
4230 &snapid, sizeof (snapid),
4232 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4237 end = reply_buf + ret;
4239 ceph_decode_64_safe(&p, end, pool_id, out_err);
4240 if (pool_id == CEPH_NOPOOL) {
4242 * Either the parent never existed, or we have
4243 * a record of it, but the image got flattened so it no
4244 * longer has a parent. When the parent of a
4245 * layered image disappears we immediately set the
4246 * overlap to 0. The effect of this is that all new
4247 * requests will be treated as if the image had no
4250 if (rbd_dev->parent_overlap) {
4251 rbd_dev->parent_overlap = 0;
4252 rbd_dev_parent_put(rbd_dev);
4253 pr_info("%s: clone image has been flattened\n",
4254 rbd_dev->disk->disk_name);
4257 goto out; /* No parent? No problem. */
4260 /* The ceph file layout needs to fit pool id in 32 bits */
4263 if (pool_id > (u64)U32_MAX) {
4264 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4265 (unsigned long long)pool_id, U32_MAX);
4269 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4270 if (IS_ERR(image_id)) {
4271 ret = PTR_ERR(image_id);
4274 ceph_decode_64_safe(&p, end, snap_id, out_err);
4275 ceph_decode_64_safe(&p, end, overlap, out_err);
4278 * The parent won't change (except when the clone is
4279 * flattened, which is handled above). So we only need to
4280 * record the parent spec if we have not already done so.
4282 if (!rbd_dev->parent_spec) {
4283 parent_spec->pool_id = pool_id;
4284 parent_spec->image_id = image_id;
4285 parent_spec->snap_id = snap_id;
4286 rbd_dev->parent_spec = parent_spec;
4287 parent_spec = NULL; /* rbd_dev now owns this */
4293 * We always update the parent overlap. If it's zero we issue
4294 * a warning, as we will proceed as if there was no parent.
4298 /* refresh, careful to warn just once */
4299 if (rbd_dev->parent_overlap)
4301 "clone now standalone (overlap became 0)");
4304 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4307 rbd_dev->parent_overlap = overlap;
4313 rbd_spec_put(parent_spec);
4318 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4322 __le64 stripe_count;
4323 } __attribute__ ((packed)) striping_info_buf = { 0 };
4324 size_t size = sizeof (striping_info_buf);
4331 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4332 "rbd", "get_stripe_unit_count", NULL, 0,
4333 (char *)&striping_info_buf, size);
4334 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4341 * We don't actually support the "fancy striping" feature
4342 * (STRIPINGV2) yet, but if the striping sizes are the
4343 * defaults the behavior is the same as before. So find
4344 * out, and only fail if the image has non-default values.
4347 obj_size = (u64)1 << rbd_dev->header.obj_order;
4348 p = &striping_info_buf;
4349 stripe_unit = ceph_decode_64(&p);
4350 if (stripe_unit != obj_size) {
4351 rbd_warn(rbd_dev, "unsupported stripe unit "
4352 "(got %llu want %llu)",
4353 stripe_unit, obj_size);
4356 stripe_count = ceph_decode_64(&p);
4357 if (stripe_count != 1) {
4358 rbd_warn(rbd_dev, "unsupported stripe count "
4359 "(got %llu want 1)", stripe_count);
4362 rbd_dev->header.stripe_unit = stripe_unit;
4363 rbd_dev->header.stripe_count = stripe_count;
4368 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4370 size_t image_id_size;
4375 void *reply_buf = NULL;
4377 char *image_name = NULL;
4380 rbd_assert(!rbd_dev->spec->image_name);
4382 len = strlen(rbd_dev->spec->image_id);
4383 image_id_size = sizeof (__le32) + len;
4384 image_id = kmalloc(image_id_size, GFP_KERNEL);
4389 end = image_id + image_id_size;
4390 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4392 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4393 reply_buf = kmalloc(size, GFP_KERNEL);
4397 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4398 "rbd", "dir_get_name",
4399 image_id, image_id_size,
4404 end = reply_buf + ret;
4406 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4407 if (IS_ERR(image_name))
4410 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4418 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4420 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4421 const char *snap_name;
4424 /* Skip over names until we find the one we are looking for */
4426 snap_name = rbd_dev->header.snap_names;
4427 while (which < snapc->num_snaps) {
4428 if (!strcmp(name, snap_name))
4429 return snapc->snaps[which];
4430 snap_name += strlen(snap_name) + 1;
4436 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4438 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4443 for (which = 0; !found && which < snapc->num_snaps; which++) {
4444 const char *snap_name;
4446 snap_id = snapc->snaps[which];
4447 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4448 if (IS_ERR(snap_name)) {
4449 /* ignore no-longer existing snapshots */
4450 if (PTR_ERR(snap_name) == -ENOENT)
4455 found = !strcmp(name, snap_name);
4458 return found ? snap_id : CEPH_NOSNAP;
4462 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4463 * no snapshot by that name is found, or if an error occurs.
4465 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4467 if (rbd_dev->image_format == 1)
4468 return rbd_v1_snap_id_by_name(rbd_dev, name);
4470 return rbd_v2_snap_id_by_name(rbd_dev, name);
4474 * An image being mapped will have everything but the snap id.
4476 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4478 struct rbd_spec *spec = rbd_dev->spec;
4480 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4481 rbd_assert(spec->image_id && spec->image_name);
4482 rbd_assert(spec->snap_name);
4484 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4487 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4488 if (snap_id == CEPH_NOSNAP)
4491 spec->snap_id = snap_id;
4493 spec->snap_id = CEPH_NOSNAP;
4500 * A parent image will have all ids but none of the names.
4502 * All names in an rbd spec are dynamically allocated. It's OK if we
4503 * can't figure out the name for an image id.
4505 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4507 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4508 struct rbd_spec *spec = rbd_dev->spec;
4509 const char *pool_name;
4510 const char *image_name;
4511 const char *snap_name;
4514 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4515 rbd_assert(spec->image_id);
4516 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4518 /* Get the pool name; we have to make our own copy of this */
4520 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4522 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4525 pool_name = kstrdup(pool_name, GFP_KERNEL);
4529 /* Fetch the image name; tolerate failure here */
4531 image_name = rbd_dev_image_name(rbd_dev);
4533 rbd_warn(rbd_dev, "unable to get image name");
4535 /* Fetch the snapshot name */
4537 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4538 if (IS_ERR(snap_name)) {
4539 ret = PTR_ERR(snap_name);
4543 spec->pool_name = pool_name;
4544 spec->image_name = image_name;
4545 spec->snap_name = snap_name;
4555 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4564 struct ceph_snap_context *snapc;
4568 * We'll need room for the seq value (maximum snapshot id),
4569 * snapshot count, and array of that many snapshot ids.
4570 * For now we have a fixed upper limit on the number we're
4571 * prepared to receive.
4573 size = sizeof (__le64) + sizeof (__le32) +
4574 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4575 reply_buf = kzalloc(size, GFP_KERNEL);
4579 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4580 "rbd", "get_snapcontext", NULL, 0,
4582 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4587 end = reply_buf + ret;
4589 ceph_decode_64_safe(&p, end, seq, out);
4590 ceph_decode_32_safe(&p, end, snap_count, out);
4593 * Make sure the reported number of snapshot ids wouldn't go
4594 * beyond the end of our buffer. But before checking that,
4595 * make sure the computed size of the snapshot context we
4596 * allocate is representable in a size_t.
4598 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4603 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4607 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4613 for (i = 0; i < snap_count; i++)
4614 snapc->snaps[i] = ceph_decode_64(&p);
4616 ceph_put_snap_context(rbd_dev->header.snapc);
4617 rbd_dev->header.snapc = snapc;
4619 dout(" snap context seq = %llu, snap_count = %u\n",
4620 (unsigned long long)seq, (unsigned int)snap_count);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}

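/*
 * Example (illustrative, assuming the usual rbd_dev_id_to_minor()
 * definition of dev_id << RBD_SINGLE_MAJOR_PART_SHIFT): in
 * single_major mode, dev id 3 maps to minor 3 << 4 = 48, leaving
 * minors 49-63 of the shared major for partitions of rbd3.
 */
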
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

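/*
 * Example (illustrative): given buf pointing at "  rbd foo\n",
 * next_token(&buf) advances buf to "rbd foo\n" and returns 3;
 * dup_token(&buf, &len) then returns a kmalloc'd "rbd" with
 * len == 3 and leaves buf pointing at " foo\n".
 */
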
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */
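/*
 * Example (illustrative; actual monitor addresses and options depend
 * on the cluster): writing
 *
 *	1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1
 *
 * to /sys/bus/rbd/add maps snapshot "snap1" of image "foo" in pool
 * "rbd" via the monitor at 1.2.3.4:6789; omitting "snap1" maps the
 * image head instead.
 */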
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
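/*
 * Example (illustrative, using the prefix/suffix definitions from
 * rbd_types.h): for an image named "foo", the id object probed below
 * is "rbd_id.foo".  If it exists and yields the (hypothetical) id
 * "abc123", this is a format 2 image whose header object will be
 * "rbd_header.abc123"; if it doesn't exist, "foo" is assumed to be a
 * format 1 image with header object "foo.rbd".
 */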
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
				NULL);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

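/*
 * Example (illustrative): mapping a clone of a clone probes the
 * chain image -> parent -> grandparent with depth 0, 1, 2; the
 * probe fails with -EINVAL once ++depth would exceed
 * RBD_MAX_PARENT_CHAIN_LEN (16).
 */
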
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

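/*
 * Example (illustrative): the first mapped image typically gets dev
 * id 0 and is announced as disk "rbd0" (reachable as /dev/rbd0 once
 * udev creates the node), either with its own dynamically allocated
 * major or, with single_major, as a minor of the shared rbd major.
 */
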
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
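/*
 * In outline: determine the image id and format, derive the header
 * object name, start a watch if this is the image being mapped
 * (depth == 0), fetch the header, fill in the missing spec fields,
 * then recursively probe any parent image.
 */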
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

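/*
 * Example (illustrative): writing a device id to the remove file,
 * e.g.
 *
 *	echo 0 > /sys/bus/rbd/remove
 *
 * unmaps rbd0, provided it is not open (-EBUSY) and not already
 * being removed.
 */
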
/*
 * create control files in sysfs
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");