xref: /linux/drivers/block/rbd.c (revision 026dadad6b44f0469a475efb4cae48269d8848bd)
1 
2 /*
3    rbd.c -- Export ceph rados objects as a Linux block device
4 
5 
6    based on drivers/block/osdblk.c:
7 
8    Copyright 2009 Red Hat, Inc.
9 
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; see the file COPYING.  If not, write to
21    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 
23 
24 
25    For usage instructions, please refer to:
26 
27                  Documentation/ABI/testing/sysfs-bus-rbd
28 
29  */
30 
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
37 
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/fs.h>
42 #include <linux/blkdev.h>
43 #include <linux/slab.h>
44 
45 #include "rbd_types.h"
46 
47 #define RBD_DEBUG	/* Activate rbd_assert() calls */
48 
49 /*
50  * The basic unit of block I/O is a sector.  It is interpreted in a
51  * number of contexts in Linux (blk, bio, genhd), but the default is
52  * universally 512 bytes.  These symbols are just slightly more
53  * meaningful than the bare numbers they represent.
54  */
55 #define	SECTOR_SHIFT	9
56 #define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
57 
58 /*
59  * Increment the given counter and return its updated value.
60  * If the counter is already 0 it will not be incremented.
61  * If the counter is already at its maximum value, -EINVAL is
62  * returned without updating it.
63  */
64 static int atomic_inc_return_safe(atomic_t *v)
65 {
66 	unsigned int counter;
67 
68 	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
69 	if (counter <= (unsigned int)INT_MAX)
70 		return (int)counter;
71 
72 	atomic_dec(v);
73 
74 	return -EINVAL;
75 }
76 
77 /* Decrement the counter.  Return the resulting value, or -EINVAL */
78 static int atomic_dec_return_safe(atomic_t *v)
79 {
80 	int counter;
81 
82 	counter = atomic_dec_return(v);
83 	if (counter >= 0)
84 		return counter;
85 
86 	atomic_inc(v);
87 
88 	return -EINVAL;
89 }
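/*
 * As used in this file, these saturating helpers protect the parent
 * image reference count (rbd_dev->parent_ref); see for example
 * rbd_dev_parent_put() below, which warns about an underflow instead
 * of letting the count wrap below zero.
 */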
90 
91 #define RBD_DRV_NAME "rbd"
92 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
93 
94 #define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */
95 
96 #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
97 #define RBD_MAX_SNAP_NAME_LEN	\
98 			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
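/*
 * With the usual Linux NAME_MAX of 255 and the 5-character "snap_"
 * prefix above, RBD_MAX_SNAP_NAME_LEN works out to 250.
 */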
99 
100 #define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
101 
102 #define RBD_SNAP_HEAD_NAME	"-"
103 
104 #define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */
105 
106 /* This allows a single page to hold an image name sent by OSD */
107 #define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
108 #define RBD_IMAGE_ID_LEN_MAX	64
109 
110 #define RBD_OBJ_PREFIX_LEN_MAX	64
111 
112 /* Feature bits */
113 
114 #define RBD_FEATURE_LAYERING	(1<<0)
115 #define RBD_FEATURE_STRIPINGV2	(1<<1)
116 #define RBD_FEATURES_ALL \
117 	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
118 
119 /* Features supported by this (client software) implementation. */
120 
121 #define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
122 
123 /*
124  * An RBD device name will be "rbd#", where the "rbd" comes from
125  * RBD_DRV_NAME above, and # is a unique integer identifier.
126  * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
127  * enough to hold all possible device names.
128  */
129 #define DEV_NAME_LEN		32
130 #define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
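/*
 * Each byte of an integer needs at most log10(256) (about 2.41) decimal
 * digits, so 5/2 digits per byte always suffices; the "+ 1" leaves room
 * for a sign.  For a 4-byte int this evaluates to 11, enough for the 10
 * digits of the largest 32-bit value plus a leading '-'.
 */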
131 
132 /*
133  * block device image metadata (in-memory version)
134  */
135 struct rbd_image_header {
136 	/* These six fields never change for a given rbd image */
137 	char *object_prefix;
138 	__u8 obj_order;
139 	__u8 crypt_type;
140 	__u8 comp_type;
141 	u64 stripe_unit;
142 	u64 stripe_count;
143 	u64 features;		/* Might be changeable someday? */
144 
145 	/* The remaining fields need to be updated occasionally */
146 	u64 image_size;
147 	struct ceph_snap_context *snapc;
148 	char *snap_names;	/* format 1 only */
149 	u64 *snap_sizes;	/* format 1 only */
150 };
151 
152 /*
153  * An rbd image specification.
154  *
155  * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
156  * identify an image.  Each rbd_dev structure includes a pointer to
157  * an rbd_spec structure that encapsulates this identity.
158  *
159  * Each of the ids in an rbd_spec has an associated name.  For a
160  * user-mapped image, the names are supplied and the ids associated
161  * with them are looked up.  For a layered image, a parent image is
162  * defined by the tuple, and the names are looked up.
163  *
164  * An rbd_dev structure contains a parent_spec pointer which is
165  * non-null if the image it represents is a child in a layered
166  * image.  This pointer will refer to the rbd_spec structure used
167  * by the parent rbd_dev for its own identity (i.e., the structure
168  * is shared between the parent and child).
169  *
170  * Since these structures are populated once, during the discovery
171  * phase of image construction, they are effectively immutable so
172  * we make no effort to synchronize access to them.
173  *
174  * Note that code herein does not assume the image name is known (it
175  * could be a null pointer).
176  */
177 struct rbd_spec {
178 	u64		pool_id;
179 	const char	*pool_name;
180 
181 	const char	*image_id;
182 	const char	*image_name;
183 
184 	u64		snap_id;
185 	const char	*snap_name;
186 
187 	struct kref	kref;
188 };
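/*
 * For illustration: a mapping of the image head (no snapshot) is
 * identified by snap_id == CEPH_NOSNAP, which rbd_snap_name() below
 * reports as RBD_SNAP_HEAD_NAME ("-"); a snapshot mapping carries that
 * snapshot's real id and name instead.
 */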
189 
190 /*
191  * an instance of the client.  multiple devices may share an rbd client.
192  */
193 struct rbd_client {
194 	struct ceph_client	*client;
195 	struct kref		kref;
196 	struct list_head	node;
197 };
198 
199 struct rbd_img_request;
200 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
201 
202 #define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */
203 
204 struct rbd_obj_request;
205 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
206 
207 enum obj_request_type {
208 	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
209 };
210 
211 enum obj_req_flags {
212 	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
213 	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
214 	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
215 	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
216 };
217 
218 struct rbd_obj_request {
219 	const char		*object_name;
220 	u64			offset;		/* object start byte */
221 	u64			length;		/* bytes from offset */
222 	unsigned long		flags;
223 
224 	/*
225 	 * An object request associated with an image will have its
226 	 * img_data flag set; a standalone object request will not.
227 	 *
228 	 * A standalone object request will have which == BAD_WHICH
229 	 * and a null obj_request pointer.
230 	 *
231 	 * An object request initiated in support of a layered image
232 	 * object (to check for its existence before a write) will
233 	 * have which == BAD_WHICH and a non-null obj_request pointer.
234 	 *
235 	 * Finally, an object request for rbd image data will have
236 	 * which != BAD_WHICH, and will have a non-null img_request
237 	 * pointer.  The value of which will be in the range
238 	 * 0..(img_request->obj_request_count-1).
239 	 */
240 	union {
241 		struct rbd_obj_request	*obj_request;	/* STAT op */
242 		struct {
243 			struct rbd_img_request	*img_request;
244 			u64			img_offset;
245 			/* links for img_request->obj_requests list */
246 			struct list_head	links;
247 		};
248 	};
249 	u32			which;		/* posn image request list */
250 
251 	enum obj_request_type	type;
252 	union {
253 		struct bio	*bio_list;
254 		struct {
255 			struct page	**pages;
256 			u32		page_count;
257 		};
258 	};
259 	struct page		**copyup_pages;
260 	u32			copyup_page_count;
261 
262 	struct ceph_osd_request	*osd_req;
263 
264 	u64			xferred;	/* bytes transferred */
265 	int			result;
266 
267 	rbd_obj_callback_t	callback;
268 	struct completion	completion;
269 
270 	struct kref		kref;
271 };
272 
273 enum img_req_flags {
274 	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
275 	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
276 	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
277 };
278 
279 struct rbd_img_request {
280 	struct rbd_device	*rbd_dev;
281 	u64			offset;	/* starting image byte offset */
282 	u64			length;	/* byte count from offset */
283 	unsigned long		flags;
284 	union {
285 		u64			snap_id;	/* for reads */
286 		struct ceph_snap_context *snapc;	/* for writes */
287 	};
288 	union {
289 		struct request		*rq;		/* block request */
290 		struct rbd_obj_request	*obj_request;	/* obj req initiator */
291 	};
292 	struct page		**copyup_pages;
293 	u32			copyup_page_count;
294 	spinlock_t		completion_lock;/* protects next_completion */
295 	u32			next_completion;
296 	rbd_img_callback_t	callback;
297 	u64			xferred;/* aggregate bytes transferred */
298 	int			result;	/* first nonzero obj_request result */
299 
300 	u32			obj_request_count;
301 	struct list_head	obj_requests;	/* rbd_obj_request structs */
302 
303 	struct kref		kref;
304 };
305 
306 #define for_each_obj_request(ireq, oreq) \
307 	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
308 #define for_each_obj_request_from(ireq, oreq) \
309 	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
310 #define for_each_obj_request_safe(ireq, oreq, n) \
311 	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
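/*
 * Typical use of these iterators, as in rbd_img_request_complete()
 * later in this file:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 */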
312 
313 struct rbd_mapping {
314 	u64                     size;
315 	u64                     features;
316 	bool			read_only;
317 };
318 
319 /*
320  * a single device
321  */
322 struct rbd_device {
323 	int			dev_id;		/* blkdev unique id */
324 
325 	int			major;		/* blkdev assigned major */
326 	struct gendisk		*disk;		/* blkdev's gendisk and rq */
327 
328 	u32			image_format;	/* Either 1 or 2 */
329 	struct rbd_client	*rbd_client;
330 
331 	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
332 
333 	spinlock_t		lock;		/* queue, flags, open_count */
334 
335 	struct rbd_image_header	header;
336 	unsigned long		flags;		/* possibly lock protected */
337 	struct rbd_spec		*spec;
338 
339 	char			*header_name;
340 
341 	struct ceph_file_layout	layout;
342 
343 	struct ceph_osd_event   *watch_event;
344 	struct rbd_obj_request	*watch_request;
345 
346 	struct rbd_spec		*parent_spec;
347 	u64			parent_overlap;
348 	atomic_t		parent_ref;
349 	struct rbd_device	*parent;
350 
351 	/* protects updating the header */
352 	struct rw_semaphore     header_rwsem;
353 
354 	struct rbd_mapping	mapping;
355 
356 	struct list_head	node;
357 
358 	/* sysfs related */
359 	struct device		dev;
360 	unsigned long		open_count;	/* protected by lock */
361 };
362 
363 /*
364  * Flag bits for rbd_dev->flags.  If atomicity is required,
365  * rbd_dev->lock is used to protect access.
366  *
367  * Currently, only the "removing" flag (which is coupled with the
368  * "open_count" field) requires atomic access.
369  */
370 enum rbd_dev_flags {
371 	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
372 	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
373 };
374 
375 static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */
376 
377 static LIST_HEAD(rbd_dev_list);    /* devices */
378 static DEFINE_SPINLOCK(rbd_dev_list_lock);
379 
380 static LIST_HEAD(rbd_client_list);		/* clients */
381 static DEFINE_SPINLOCK(rbd_client_list_lock);
382 
383 /* Slab caches for frequently-allocated structures */
384 
385 static struct kmem_cache	*rbd_img_request_cache;
386 static struct kmem_cache	*rbd_obj_request_cache;
387 static struct kmem_cache	*rbd_segment_name_cache;
388 
389 static int rbd_img_request_submit(struct rbd_img_request *img_request);
390 
391 static void rbd_dev_device_release(struct device *dev);
392 
393 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
394 		       size_t count);
395 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
396 			  size_t count);
397 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
398 static void rbd_spec_put(struct rbd_spec *spec);
399 
400 static struct bus_attribute rbd_bus_attrs[] = {
401 	__ATTR(add, S_IWUSR, NULL, rbd_add),
402 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
403 	__ATTR_NULL
404 };
405 
406 static struct bus_type rbd_bus_type = {
407 	.name		= "rbd",
408 	.bus_attrs	= rbd_bus_attrs,
409 };
410 
411 static void rbd_root_dev_release(struct device *dev)
412 {
413 }
414 
415 static struct device rbd_root_dev = {
416 	.init_name =    "rbd",
417 	.release =      rbd_root_dev_release,
418 };
419 
420 static __printf(2, 3)
421 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
422 {
423 	struct va_format vaf;
424 	va_list args;
425 
426 	va_start(args, fmt);
427 	vaf.fmt = fmt;
428 	vaf.va = &args;
429 
430 	if (!rbd_dev)
431 		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
432 	else if (rbd_dev->disk)
433 		printk(KERN_WARNING "%s: %s: %pV\n",
434 			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
435 	else if (rbd_dev->spec && rbd_dev->spec->image_name)
436 		printk(KERN_WARNING "%s: image %s: %pV\n",
437 			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
438 	else if (rbd_dev->spec && rbd_dev->spec->image_id)
439 		printk(KERN_WARNING "%s: id %s: %pV\n",
440 			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
441 	else	/* punt */
442 		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
443 			RBD_DRV_NAME, rbd_dev, &vaf);
444 	va_end(args);
445 }
446 
447 #ifdef RBD_DEBUG
448 #define rbd_assert(expr)						\
449 		if (unlikely(!(expr))) {				\
450 			printk(KERN_ERR "\nAssertion failure in %s() "	\
451 						"at line %d:\n\n"	\
452 					"\trbd_assert(%s);\n\n",	\
453 					__func__, __LINE__, #expr);	\
454 			BUG();						\
455 		}
456 #else /* !RBD_DEBUG */
457 #  define rbd_assert(expr)	((void) 0)
458 #endif /* !RBD_DEBUG */
459 
460 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
461 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
462 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
463 
464 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
465 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
466 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
467 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
468 					u64 snap_id);
469 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
470 				u8 *order, u64 *snap_size);
471 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
472 		u64 *snap_features);
473 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
474 
475 static int rbd_open(struct block_device *bdev, fmode_t mode)
476 {
477 	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
478 	bool removing = false;
479 
480 	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
481 		return -EROFS;
482 
483 	spin_lock_irq(&rbd_dev->lock);
484 	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
485 		removing = true;
486 	else
487 		rbd_dev->open_count++;
488 	spin_unlock_irq(&rbd_dev->lock);
489 	if (removing)
490 		return -ENOENT;
491 
492 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
493 	(void) get_device(&rbd_dev->dev);
494 	set_device_ro(bdev, rbd_dev->mapping.read_only);
495 	mutex_unlock(&ctl_mutex);
496 
497 	return 0;
498 }
499 
500 static void rbd_release(struct gendisk *disk, fmode_t mode)
501 {
502 	struct rbd_device *rbd_dev = disk->private_data;
503 	unsigned long open_count_before;
504 
505 	spin_lock_irq(&rbd_dev->lock);
506 	open_count_before = rbd_dev->open_count--;
507 	spin_unlock_irq(&rbd_dev->lock);
508 	rbd_assert(open_count_before > 0);
509 
510 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
511 	put_device(&rbd_dev->dev);
512 	mutex_unlock(&ctl_mutex);
513 }
514 
515 static const struct block_device_operations rbd_bd_ops = {
516 	.owner			= THIS_MODULE,
517 	.open			= rbd_open,
518 	.release		= rbd_release,
519 };
520 
521 /*
522  * Initialize an rbd client instance.  Success or not, this function
523  * consumes ceph_opts.
524  */
525 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
526 {
527 	struct rbd_client *rbdc;
528 	int ret = -ENOMEM;
529 
530 	dout("%s:\n", __func__);
531 	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
532 	if (!rbdc)
533 		goto out_opt;
534 
535 	kref_init(&rbdc->kref);
536 	INIT_LIST_HEAD(&rbdc->node);
537 
538 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
539 
540 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
541 	if (IS_ERR(rbdc->client))
542 		goto out_mutex;
543 	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
544 
545 	ret = ceph_open_session(rbdc->client);
546 	if (ret < 0)
547 		goto out_err;
548 
549 	spin_lock(&rbd_client_list_lock);
550 	list_add_tail(&rbdc->node, &rbd_client_list);
551 	spin_unlock(&rbd_client_list_lock);
552 
553 	mutex_unlock(&ctl_mutex);
554 	dout("%s: rbdc %p\n", __func__, rbdc);
555 
556 	return rbdc;
557 
558 out_err:
559 	ceph_destroy_client(rbdc->client);
560 out_mutex:
561 	mutex_unlock(&ctl_mutex);
562 	kfree(rbdc);
563 out_opt:
564 	if (ceph_opts)
565 		ceph_destroy_options(ceph_opts);
566 	dout("%s: error %d\n", __func__, ret);
567 
568 	return ERR_PTR(ret);
569 }
570 
571 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
572 {
573 	kref_get(&rbdc->kref);
574 
575 	return rbdc;
576 }
577 
578 /*
579  * Find a ceph client with a specific addr and configuration.  If
580  * found, bump its reference count.
581  */
582 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
583 {
584 	struct rbd_client *client_node;
585 	bool found = false;
586 
587 	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
588 		return NULL;
589 
590 	spin_lock(&rbd_client_list_lock);
591 	list_for_each_entry(client_node, &rbd_client_list, node) {
592 		if (!ceph_compare_options(ceph_opts, client_node->client)) {
593 			__rbd_get_client(client_node);
594 
595 			found = true;
596 			break;
597 		}
598 	}
599 	spin_unlock(&rbd_client_list_lock);
600 
601 	return found ? client_node : NULL;
602 }
603 
604 /*
605  * mount options
606  */
607 enum {
608 	Opt_last_int,
609 	/* int args above */
610 	Opt_last_string,
611 	/* string args above */
612 	Opt_read_only,
613 	Opt_read_write,
614 	/* Boolean args above */
615 	Opt_last_bool,
616 };
617 
618 static match_table_t rbd_opts_tokens = {
619 	/* int args above */
620 	/* string args above */
621 	{Opt_read_only, "read_only"},
622 	{Opt_read_only, "ro"},		/* Alternate spelling */
623 	{Opt_read_write, "read_write"},
624 	{Opt_read_write, "rw"},		/* Alternate spelling */
625 	/* Boolean args above */
626 	{-1, NULL}
627 };
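/*
 * These tokens appear in the options field of the string written to
 * /sys/bus/rbd/add (see Documentation/ABI/testing/sysfs-bus-rbd).  An
 * illustrative command mapping an image read-only might look like:
 *
 *	echo "192.168.0.1 name=admin,ro rbd myimage" > /sys/bus/rbd/add
 *
 * where "rbd" is the pool and "myimage" the image; the authoritative
 * format is defined by the add parsing code, not by this table.
 */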
628 
629 struct rbd_options {
630 	bool	read_only;
631 };
632 
633 #define RBD_READ_ONLY_DEFAULT	false
634 
635 static int parse_rbd_opts_token(char *c, void *private)
636 {
637 	struct rbd_options *rbd_opts = private;
638 	substring_t argstr[MAX_OPT_ARGS];
639 	int token, intval, ret;
640 
641 	token = match_token(c, rbd_opts_tokens, argstr);
642 	if (token < 0)
643 		return -EINVAL;
644 
645 	if (token < Opt_last_int) {
646 		ret = match_int(&argstr[0], &intval);
647 		if (ret < 0) {
648 			pr_err("bad mount option arg (not int) "
649 			       "at '%s'\n", c);
650 			return ret;
651 		}
652 		dout("got int token %d val %d\n", token, intval);
653 	} else if (token > Opt_last_int && token < Opt_last_string) {
654 		dout("got string token %d val %s\n", token,
655 		     argstr[0].from);
656 	} else if (token > Opt_last_string && token < Opt_last_bool) {
657 		dout("got Boolean token %d\n", token);
658 	} else {
659 		dout("got token %d\n", token);
660 	}
661 
662 	switch (token) {
663 	case Opt_read_only:
664 		rbd_opts->read_only = true;
665 		break;
666 	case Opt_read_write:
667 		rbd_opts->read_only = false;
668 		break;
669 	default:
670 		rbd_assert(false);
671 		break;
672 	}
673 	return 0;
674 }
675 
676 /*
677  * Get a ceph client with a specific addr and configuration; if one
678  * does not exist, create it.  Either way, ceph_opts is consumed by
679  * this function.
680  */
681 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
682 {
683 	struct rbd_client *rbdc;
684 
685 	rbdc = rbd_client_find(ceph_opts);
686 	if (rbdc)	/* using an existing client */
687 		ceph_destroy_options(ceph_opts);
688 	else
689 		rbdc = rbd_client_create(ceph_opts);
690 
691 	return rbdc;
692 }
693 
694 /*
695  * Destroy ceph client
696  *
697  * The rbd_client_list_lock is taken here; the caller must not hold it.
698  */
699 static void rbd_client_release(struct kref *kref)
700 {
701 	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
702 
703 	dout("%s: rbdc %p\n", __func__, rbdc);
704 	spin_lock(&rbd_client_list_lock);
705 	list_del(&rbdc->node);
706 	spin_unlock(&rbd_client_list_lock);
707 
708 	ceph_destroy_client(rbdc->client);
709 	kfree(rbdc);
710 }
711 
712 /*
713  * Drop reference to ceph client node. If it's not referenced anymore, release
714  * it.
715  */
716 static void rbd_put_client(struct rbd_client *rbdc)
717 {
718 	if (rbdc)
719 		kref_put(&rbdc->kref, rbd_client_release);
720 }
721 
722 static bool rbd_image_format_valid(u32 image_format)
723 {
724 	return image_format == 1 || image_format == 2;
725 }
726 
727 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
728 {
729 	size_t size;
730 	u32 snap_count;
731 
732 	/* The header has to start with the magic rbd header text */
733 	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
734 		return false;
735 
736 	/* The bio layer requires at least sector-sized I/O */
737 
738 	if (ondisk->options.order < SECTOR_SHIFT)
739 		return false;
740 
741 	/* If we use u64 in a few spots we may be able to loosen this */
742 
743 	if (ondisk->options.order > 8 * sizeof (int) - 1)
744 		return false;
745 
746 	/*
747 	 * The size of a snapshot header has to fit in a size_t, and
748 	 * that limits the number of snapshots.
749 	 */
750 	snap_count = le32_to_cpu(ondisk->snap_count);
751 	size = SIZE_MAX - sizeof (struct ceph_snap_context);
752 	if (snap_count > size / sizeof (__le64))
753 		return false;
754 
755 	/*
756 	 * Not only that, but the size of the entire snapshot
757 	 * header must also be representable in a size_t.
758 	 */
759 	size -= snap_count * sizeof (__le64);
760 	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
761 		return false;
762 
763 	return true;
764 }
765 
766 /*
767  * Fill an rbd image header with information from the given format 1
768  * on-disk header.
769  */
770 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
771 				 struct rbd_image_header_ondisk *ondisk)
772 {
773 	struct rbd_image_header *header = &rbd_dev->header;
774 	bool first_time = header->object_prefix == NULL;
775 	struct ceph_snap_context *snapc;
776 	char *object_prefix = NULL;
777 	char *snap_names = NULL;
778 	u64 *snap_sizes = NULL;
779 	u32 snap_count;
780 	size_t size;
781 	int ret = -ENOMEM;
782 	u32 i;
783 
784 	/* Allocate this now to avoid having to handle failure below */
785 
786 	if (first_time) {
787 		size_t len;
788 
789 		len = strnlen(ondisk->object_prefix,
790 				sizeof (ondisk->object_prefix));
791 		object_prefix = kmalloc(len + 1, GFP_KERNEL);
792 		if (!object_prefix)
793 			return -ENOMEM;
794 		memcpy(object_prefix, ondisk->object_prefix, len);
795 		object_prefix[len] = '\0';
796 	}
797 
798 	/* Allocate the snapshot context and fill it in */
799 
800 	snap_count = le32_to_cpu(ondisk->snap_count);
801 	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
802 	if (!snapc)
803 		goto out_err;
804 	snapc->seq = le64_to_cpu(ondisk->snap_seq);
805 	if (snap_count) {
806 		struct rbd_image_snap_ondisk *snaps;
807 		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
808 
809 		/* We'll keep a copy of the snapshot names... */
810 
811 		if (snap_names_len > (u64)SIZE_MAX)
812 			goto out_2big;
813 		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
814 		if (!snap_names)
815 			goto out_err;
816 
817 		/* ...as well as the array of their sizes. */
818 
819 		size = snap_count * sizeof (*header->snap_sizes);
820 		snap_sizes = kmalloc(size, GFP_KERNEL);
821 		if (!snap_sizes)
822 			goto out_err;
823 
824 		/*
825 		 * Copy the names, and fill in each snapshot's id
826 		 * and size.
827 		 *
828 		 * Note that rbd_dev_v1_header_info() guarantees the
829 		 * ondisk buffer we're working with has
830 		 * snap_names_len bytes beyond the end of the
831 		 * snapshot id array, so this memcpy() is safe.
832 		 */
833 		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
834 		snaps = ondisk->snaps;
835 		for (i = 0; i < snap_count; i++) {
836 			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
837 			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
838 		}
839 	}
840 
841 	/* We won't fail any more, fill in the header */
842 
843 	down_write(&rbd_dev->header_rwsem);
844 	if (first_time) {
845 		header->object_prefix = object_prefix;
846 		header->obj_order = ondisk->options.order;
847 		header->crypt_type = ondisk->options.crypt_type;
848 		header->comp_type = ondisk->options.comp_type;
849 		/* The rest aren't used for format 1 images */
850 		header->stripe_unit = 0;
851 		header->stripe_count = 0;
852 		header->features = 0;
853 	} else {
854 		ceph_put_snap_context(header->snapc);
855 		kfree(header->snap_names);
856 		kfree(header->snap_sizes);
857 	}
858 
859 	/* The remaining fields always get updated (when we refresh) */
860 
861 	header->image_size = le64_to_cpu(ondisk->image_size);
862 	header->snapc = snapc;
863 	header->snap_names = snap_names;
864 	header->snap_sizes = snap_sizes;
865 
866 	/* Make sure mapping size is consistent with header info */
867 
868 	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
869 		if (rbd_dev->mapping.size != header->image_size)
870 			rbd_dev->mapping.size = header->image_size;
871 
872 	up_write(&rbd_dev->header_rwsem);
873 
874 	return 0;
875 out_2big:
876 	ret = -EIO;
877 out_err:
878 	kfree(snap_sizes);
879 	kfree(snap_names);
880 	ceph_put_snap_context(snapc);
881 	kfree(object_prefix);
882 
883 	return ret;
884 }
885 
886 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
887 {
888 	const char *snap_name;
889 
890 	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
891 
892 	/* Skip over names until we find the one we are looking for */
893 
894 	snap_name = rbd_dev->header.snap_names;
895 	while (which--)
896 		snap_name += strlen(snap_name) + 1;
897 
898 	return kstrdup(snap_name, GFP_KERNEL);
899 }
900 
901 /*
902  * Snapshot id comparison function for use with qsort()/bsearch().
903  * Note that result is for snapshots in *descending* order.
904  */
905 static int snapid_compare_reverse(const void *s1, const void *s2)
906 {
907 	u64 snap_id1 = *(u64 *)s1;
908 	u64 snap_id2 = *(u64 *)s2;
909 
910 	if (snap_id1 < snap_id2)
911 		return 1;
912 	return snap_id1 == snap_id2 ? 0 : -1;
913 }
914 
915 /*
916  * Search a snapshot context to see if the given snapshot id is
917  * present.
918  *
919  * Returns the position of the snapshot id in the array if it's found,
920  * or BAD_SNAP_INDEX otherwise.
921  *
922  * Note: The snapshot array is kept sorted (by the osd) in
923  * reverse order, highest snapshot id first.
924  */
925 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
926 {
927 	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
928 	u64 *found;
929 
930 	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
931 				sizeof (snap_id), snapid_compare_reverse);
932 
933 	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
934 }
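/*
 * For example, with snapc->snaps holding { 12, 7, 3 } (newest first),
 * looking up snap_id 7 yields index 1, while looking up an id that was
 * never a snapshot of this image (say 5) yields BAD_SNAP_INDEX.
 */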
935 
936 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
937 					u64 snap_id)
938 {
939 	u32 which;
940 
941 	which = rbd_dev_snap_index(rbd_dev, snap_id);
942 	if (which == BAD_SNAP_INDEX)
943 		return NULL;
944 
945 	return _rbd_dev_v1_snap_name(rbd_dev, which);
946 }
947 
948 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
949 {
950 	if (snap_id == CEPH_NOSNAP)
951 		return RBD_SNAP_HEAD_NAME;
952 
953 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
954 	if (rbd_dev->image_format == 1)
955 		return rbd_dev_v1_snap_name(rbd_dev, snap_id);
956 
957 	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
958 }
959 
960 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
961 				u64 *snap_size)
962 {
963 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
964 	if (snap_id == CEPH_NOSNAP) {
965 		*snap_size = rbd_dev->header.image_size;
966 	} else if (rbd_dev->image_format == 1) {
967 		u32 which;
968 
969 		which = rbd_dev_snap_index(rbd_dev, snap_id);
970 		if (which == BAD_SNAP_INDEX)
971 			return -ENOENT;
972 
973 		*snap_size = rbd_dev->header.snap_sizes[which];
974 	} else {
975 		u64 size = 0;
976 		int ret;
977 
978 		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
979 		if (ret)
980 			return ret;
981 
982 		*snap_size = size;
983 	}
984 	return 0;
985 }
986 
987 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
988 			u64 *snap_features)
989 {
990 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
991 	if (snap_id == CEPH_NOSNAP) {
992 		*snap_features = rbd_dev->header.features;
993 	} else if (rbd_dev->image_format == 1) {
994 		*snap_features = 0;	/* No features for format 1 */
995 	} else {
996 		u64 features = 0;
997 		int ret;
998 
999 		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1000 		if (ret)
1001 			return ret;
1002 
1003 		*snap_features = features;
1004 	}
1005 	return 0;
1006 }
1007 
1008 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1009 {
1010 	u64 snap_id = rbd_dev->spec->snap_id;
1011 	u64 size = 0;
1012 	u64 features = 0;
1013 	int ret;
1014 
1015 	ret = rbd_snap_size(rbd_dev, snap_id, &size);
1016 	if (ret)
1017 		return ret;
1018 	ret = rbd_snap_features(rbd_dev, snap_id, &features);
1019 	if (ret)
1020 		return ret;
1021 
1022 	rbd_dev->mapping.size = size;
1023 	rbd_dev->mapping.features = features;
1024 
1025 	return 0;
1026 }
1027 
1028 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1029 {
1030 	rbd_dev->mapping.size = 0;
1031 	rbd_dev->mapping.features = 0;
1032 }
1033 
1034 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1035 {
1036 	char *name;
1037 	u64 segment;
1038 	int ret;
1039 	char *name_format;
1040 
1041 	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1042 	if (!name)
1043 		return NULL;
1044 	segment = offset >> rbd_dev->header.obj_order;
1045 	name_format = "%s.%012llx";
1046 	if (rbd_dev->image_format == 2)
1047 		name_format = "%s.%016llx";
1048 	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
1049 			rbd_dev->header.object_prefix, segment);
1050 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
1051 		pr_err("error formatting segment name for #%llu (%d)\n",
1052 			segment, ret);
1053 		kfree(name);
1054 		name = NULL;
1055 	}
1056 
1057 	return name;
1058 }
1059 
1060 static void rbd_segment_name_free(const char *name)
1061 {
1062 	/* The explicit cast here is needed to drop the const qualifier */
1063 
1064 	kmem_cache_free(rbd_segment_name_cache, (void *)name);
1065 }
1066 
1067 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1068 {
1069 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1070 
1071 	return offset & (segment_size - 1);
1072 }
1073 
1074 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1075 				u64 offset, u64 length)
1076 {
1077 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1078 
1079 	offset &= segment_size - 1;
1080 
1081 	rbd_assert(length <= U64_MAX - offset);
1082 	if (offset + length > segment_size)
1083 		length = segment_size - offset;
1084 
1085 	return length;
1086 }
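/*
 * Worked example, assuming the common default obj_order of 22 (4 MiB
 * objects): an image offset of 5 MiB falls in segment 1 at segment
 * offset 1 MiB, so rbd_segment_name() yields "<prefix>.000000000001"
 * for a format 1 image ("<prefix>.0000000000000001" for format 2), and
 * a 4 MiB request starting there is clipped by rbd_segment_length() to
 * the 3 MiB remaining in that object.
 */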
1087 
1088 /*
1089  * returns the size of an object in the image
1090  */
1091 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1092 {
1093 	return 1 << header->obj_order;
1094 }
1095 
1096 /*
1097  * bio helpers
1098  */
1099 
1100 static void bio_chain_put(struct bio *chain)
1101 {
1102 	struct bio *tmp;
1103 
1104 	while (chain) {
1105 		tmp = chain;
1106 		chain = chain->bi_next;
1107 		bio_put(tmp);
1108 	}
1109 }
1110 
1111 /*
1112  * zeros a bio chain, starting at specific offset
1113  */
1114 static void zero_bio_chain(struct bio *chain, int start_ofs)
1115 {
1116 	struct bio_vec *bv;
1117 	unsigned long flags;
1118 	void *buf;
1119 	int i;
1120 	int pos = 0;
1121 
1122 	while (chain) {
1123 		bio_for_each_segment(bv, chain, i) {
1124 			if (pos + bv->bv_len > start_ofs) {
1125 				int remainder = max(start_ofs - pos, 0);
1126 				buf = bvec_kmap_irq(bv, &flags);
1127 				memset(buf + remainder, 0,
1128 				       bv->bv_len - remainder);
1129 				bvec_kunmap_irq(buf, &flags);
1130 			}
1131 			pos += bv->bv_len;
1132 		}
1133 
1134 		chain = chain->bi_next;
1135 	}
1136 }
1137 
1138 /*
1139  * similar to zero_bio_chain(), zeros data defined by a page array,
1140  * starting at the given byte offset from the start of the array and
1141  * continuing up to the given end offset.  The pages array is
1142  * assumed to be big enough to hold all bytes up to the end.
1143  */
1144 static void zero_pages(struct page **pages, u64 offset, u64 end)
1145 {
1146 	struct page **page = &pages[offset >> PAGE_SHIFT];
1147 
1148 	rbd_assert(end > offset);
1149 	rbd_assert(end - offset <= (u64)SIZE_MAX);
1150 	while (offset < end) {
1151 		size_t page_offset;
1152 		size_t length;
1153 		unsigned long flags;
1154 		void *kaddr;
1155 
1156 		page_offset = (size_t)(offset & ~PAGE_MASK);
1157 		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
1158 		local_irq_save(flags);
1159 		kaddr = kmap_atomic(*page);
1160 		memset(kaddr + page_offset, 0, length);
1161 		kunmap_atomic(kaddr);
1162 		local_irq_restore(flags);
1163 
1164 		offset += length;
1165 		page++;
1166 	}
1167 }
1168 
1169 /*
1170  * Clone a portion of a bio, starting at the given byte offset
1171  * and continuing for the number of bytes indicated.
1172  */
1173 static struct bio *bio_clone_range(struct bio *bio_src,
1174 					unsigned int offset,
1175 					unsigned int len,
1176 					gfp_t gfpmask)
1177 {
1178 	struct bio_vec *bv;
1179 	unsigned int resid;
1180 	unsigned short idx;
1181 	unsigned int voff;
1182 	unsigned short end_idx;
1183 	unsigned short vcnt;
1184 	struct bio *bio;
1185 
1186 	/* Handle the easy case for the caller */
1187 
1188 	if (!offset && len == bio_src->bi_size)
1189 		return bio_clone(bio_src, gfpmask);
1190 
1191 	if (WARN_ON_ONCE(!len))
1192 		return NULL;
1193 	if (WARN_ON_ONCE(len > bio_src->bi_size))
1194 		return NULL;
1195 	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1196 		return NULL;
1197 
1198 	/* Find first affected segment... */
1199 
1200 	resid = offset;
1201 	bio_for_each_segment(bv, bio_src, idx) {
1202 		if (resid < bv->bv_len)
1203 			break;
1204 		resid -= bv->bv_len;
1205 	}
1206 	voff = resid;
1207 
1208 	/* ...and the last affected segment */
1209 
1210 	resid += len;
1211 	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
1212 		if (resid <= bv->bv_len)
1213 			break;
1214 		resid -= bv->bv_len;
1215 	}
1216 	vcnt = end_idx - idx + 1;
1217 
1218 	/* Build the clone */
1219 
1220 	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1221 	if (!bio)
1222 		return NULL;	/* ENOMEM */
1223 
1224 	bio->bi_bdev = bio_src->bi_bdev;
1225 	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
1226 	bio->bi_rw = bio_src->bi_rw;
1227 	bio->bi_flags |= 1 << BIO_CLONED;
1228 
1229 	/*
1230 	 * Copy over our part of the bio_vec, then update the first
1231 	 * and last (or only) entries.
1232 	 */
1233 	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1234 			vcnt * sizeof (struct bio_vec));
1235 	bio->bi_io_vec[0].bv_offset += voff;
1236 	if (vcnt > 1) {
1237 		bio->bi_io_vec[0].bv_len -= voff;
1238 		bio->bi_io_vec[vcnt - 1].bv_len = resid;
1239 	} else {
1240 		bio->bi_io_vec[0].bv_len = len;
1241 	}
1242 
1243 	bio->bi_vcnt = vcnt;
1244 	bio->bi_size = len;
1245 	bio->bi_idx = 0;
1246 
1247 	return bio;
1248 }
1249 
1250 /*
1251  * Clone a portion of a bio chain, starting at the given byte offset
1252  * into the first bio in the source chain and continuing for the
1253  * number of bytes indicated.  The result is another bio chain of
1254  * exactly the given length, or a null pointer on error.
1255  *
1256  * The bio_src and offset parameters are both in-out.  On entry they
1257  * refer to the first source bio and the offset into that bio where
1258  * the start of data to be cloned is located.
1259  *
1260  * On return, bio_src is updated to refer to the bio in the source
1261  * chain that contains the first un-cloned byte, and *offset will
1262  * contain the offset of that byte within that bio.
1263  */
1264 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1265 					unsigned int *offset,
1266 					unsigned int len,
1267 					gfp_t gfpmask)
1268 {
1269 	struct bio *bi = *bio_src;
1270 	unsigned int off = *offset;
1271 	struct bio *chain = NULL;
1272 	struct bio **end;
1273 
1274 	/* Build up a chain of clone bios up to the limit */
1275 
1276 	if (!bi || off >= bi->bi_size || !len)
1277 		return NULL;		/* Nothing to clone */
1278 
1279 	end = &chain;
1280 	while (len) {
1281 		unsigned int bi_size;
1282 		struct bio *bio;
1283 
1284 		if (!bi) {
1285 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1286 			goto out_err;	/* EINVAL; ran out of bio's */
1287 		}
1288 		bi_size = min_t(unsigned int, bi->bi_size - off, len);
1289 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
1290 		if (!bio)
1291 			goto out_err;	/* ENOMEM */
1292 
1293 		*end = bio;
1294 		end = &bio->bi_next;
1295 
1296 		off += bi_size;
1297 		if (off == bi->bi_size) {
1298 			bi = bi->bi_next;
1299 			off = 0;
1300 		}
1301 		len -= bi_size;
1302 	}
1303 	*bio_src = bi;
1304 	*offset = off;
1305 
1306 	return chain;
1307 out_err:
1308 	bio_chain_put(chain);
1309 
1310 	return NULL;
1311 }
1312 
1313 /*
1314  * The default/initial value for all object request flags is 0.  For
1315  * each flag, once its value is set to 1 it is never reset to 0
1316  * again.
1317  */
1318 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1319 {
1320 	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1321 		struct rbd_device *rbd_dev;
1322 
1323 		rbd_dev = obj_request->img_request->rbd_dev;
1324 		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
1325 			obj_request);
1326 	}
1327 }
1328 
1329 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1330 {
1331 	smp_mb();
1332 	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1333 }
1334 
1335 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1336 {
1337 	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1338 		struct rbd_device *rbd_dev = NULL;
1339 
1340 		if (obj_request_img_data_test(obj_request))
1341 			rbd_dev = obj_request->img_request->rbd_dev;
1342 		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
1343 			obj_request);
1344 	}
1345 }
1346 
1347 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1348 {
1349 	smp_mb();
1350 	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1351 }
1352 
1353 /*
1354  * This sets the KNOWN flag after (possibly) setting the EXISTS
1355  * flag.  The latter is set based on the "exists" value provided.
1356  *
1357  * Note that for our purposes once an object exists it never goes
1358  * away again.  It's possible that the responses from two existence
1359  * checks are separated by the creation of the target object, and
1360  * the first ("doesn't exist") response arrives *after* the second
1361  * ("does exist").  In that case we ignore the second one.
1362  */
1363 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1364 				bool exists)
1365 {
1366 	if (exists)
1367 		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1368 	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1369 	smp_mb();
1370 }
1371 
1372 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1373 {
1374 	smp_mb();
1375 	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1376 }
1377 
1378 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1379 {
1380 	smp_mb();
1381 	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1382 }
1383 
1384 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1385 {
1386 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1387 		atomic_read(&obj_request->kref.refcount));
1388 	kref_get(&obj_request->kref);
1389 }
1390 
1391 static void rbd_obj_request_destroy(struct kref *kref);
1392 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1393 {
1394 	rbd_assert(obj_request != NULL);
1395 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1396 		atomic_read(&obj_request->kref.refcount));
1397 	kref_put(&obj_request->kref, rbd_obj_request_destroy);
1398 }
1399 
1400 static bool img_request_child_test(struct rbd_img_request *img_request);
1401 static void rbd_parent_request_destroy(struct kref *kref);
1402 static void rbd_img_request_destroy(struct kref *kref);
1403 static void rbd_img_request_put(struct rbd_img_request *img_request)
1404 {
1405 	rbd_assert(img_request != NULL);
1406 	dout("%s: img %p (was %d)\n", __func__, img_request,
1407 		atomic_read(&img_request->kref.refcount));
1408 	if (img_request_child_test(img_request))
1409 		kref_put(&img_request->kref, rbd_parent_request_destroy);
1410 	else
1411 		kref_put(&img_request->kref, rbd_img_request_destroy);
1412 }
1413 
1414 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1415 					struct rbd_obj_request *obj_request)
1416 {
1417 	rbd_assert(obj_request->img_request == NULL);
1418 
1419 	/* Image request now owns object's original reference */
1420 	obj_request->img_request = img_request;
1421 	obj_request->which = img_request->obj_request_count;
1422 	rbd_assert(!obj_request_img_data_test(obj_request));
1423 	obj_request_img_data_set(obj_request);
1424 	rbd_assert(obj_request->which != BAD_WHICH);
1425 	img_request->obj_request_count++;
1426 	list_add_tail(&obj_request->links, &img_request->obj_requests);
1427 	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1428 		obj_request->which);
1429 }
1430 
1431 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1432 					struct rbd_obj_request *obj_request)
1433 {
1434 	rbd_assert(obj_request->which != BAD_WHICH);
1435 
1436 	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1437 		obj_request->which);
1438 	list_del(&obj_request->links);
1439 	rbd_assert(img_request->obj_request_count > 0);
1440 	img_request->obj_request_count--;
1441 	rbd_assert(obj_request->which == img_request->obj_request_count);
1442 	obj_request->which = BAD_WHICH;
1443 	rbd_assert(obj_request_img_data_test(obj_request));
1444 	rbd_assert(obj_request->img_request == img_request);
1445 	obj_request->img_request = NULL;
1446 	obj_request->callback = NULL;
1447 	rbd_obj_request_put(obj_request);
1448 }
1449 
1450 static bool obj_request_type_valid(enum obj_request_type type)
1451 {
1452 	switch (type) {
1453 	case OBJ_REQUEST_NODATA:
1454 	case OBJ_REQUEST_BIO:
1455 	case OBJ_REQUEST_PAGES:
1456 		return true;
1457 	default:
1458 		return false;
1459 	}
1460 }
1461 
1462 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1463 				struct rbd_obj_request *obj_request)
1464 {
1465 	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1466 
1467 	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1468 }
1469 
1470 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1471 {
1472 
1473 	dout("%s: img %p\n", __func__, img_request);
1474 
1475 	/*
1476 	 * If no error occurred, compute the aggregate transfer
1477 	 * count for the image request.  We could instead use
1478 	 * atomic64_cmpxchg() to update it as each object request
1479 	 * completes; not clear which way is better off hand.
1480 	 */
1481 	if (!img_request->result) {
1482 		struct rbd_obj_request *obj_request;
1483 		u64 xferred = 0;
1484 
1485 		for_each_obj_request(img_request, obj_request)
1486 			xferred += obj_request->xferred;
1487 		img_request->xferred = xferred;
1488 	}
1489 
1490 	if (img_request->callback)
1491 		img_request->callback(img_request);
1492 	else
1493 		rbd_img_request_put(img_request);
1494 }
1495 
1496 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1497 
1498 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1499 {
1500 	dout("%s: obj %p\n", __func__, obj_request);
1501 
1502 	return wait_for_completion_interruptible(&obj_request->completion);
1503 }
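/*
 * A sketch of the standalone (non-image) request lifecycle built from
 * the helpers in this file; the step that attaches the osd op and its
 * data to osd_req is omitted here and depends on the caller:
 *
 *	obj_request = rbd_obj_request_create(name, off, len, type);
 *	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
 *						  obj_request);
 *	... set up the single osd op and format the request ...
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (!ret)
 *		ret = rbd_obj_request_wait(obj_request);
 *	rbd_obj_request_put(obj_request);
 */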
1504 
1505 /*
1506  * The default/initial value for all image request flags is 0.  Each
1507  * is conditionally set to 1 at image request initialization time
1508  * and currently never changes thereafter.
1509  */
1510 static void img_request_write_set(struct rbd_img_request *img_request)
1511 {
1512 	set_bit(IMG_REQ_WRITE, &img_request->flags);
1513 	smp_mb();
1514 }
1515 
1516 static bool img_request_write_test(struct rbd_img_request *img_request)
1517 {
1518 	smp_mb();
1519 	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1520 }
1521 
1522 static void img_request_child_set(struct rbd_img_request *img_request)
1523 {
1524 	set_bit(IMG_REQ_CHILD, &img_request->flags);
1525 	smp_mb();
1526 }
1527 
1528 static void img_request_child_clear(struct rbd_img_request *img_request)
1529 {
1530 	clear_bit(IMG_REQ_CHILD, &img_request->flags);
1531 	smp_mb();
1532 }
1533 
1534 static bool img_request_child_test(struct rbd_img_request *img_request)
1535 {
1536 	smp_mb();
1537 	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1538 }
1539 
1540 static void img_request_layered_set(struct rbd_img_request *img_request)
1541 {
1542 	set_bit(IMG_REQ_LAYERED, &img_request->flags);
1543 	smp_mb();
1544 }
1545 
1546 static void img_request_layered_clear(struct rbd_img_request *img_request)
1547 {
1548 	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1549 	smp_mb();
1550 }
1551 
1552 static bool img_request_layered_test(struct rbd_img_request *img_request)
1553 {
1554 	smp_mb();
1555 	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1556 }
1557 
1558 static void
1559 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1560 {
1561 	u64 xferred = obj_request->xferred;
1562 	u64 length = obj_request->length;
1563 
1564 	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1565 		obj_request, obj_request->img_request, obj_request->result,
1566 		xferred, length);
1567 	/*
1568 	 * ENOENT means a hole in the image.  We zero-fill the
1569 	 * entire length of the request.  A short read also implies
1570 	 * zero-fill to the end of the request.  Either way we
1571 	 * update the xferred count to indicate the whole request
1572 	 * was satisfied.
1573 	 */
1574 	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1575 	if (obj_request->result == -ENOENT) {
1576 		if (obj_request->type == OBJ_REQUEST_BIO)
1577 			zero_bio_chain(obj_request->bio_list, 0);
1578 		else
1579 			zero_pages(obj_request->pages, 0, length);
1580 		obj_request->result = 0;
1581 		obj_request->xferred = length;
1582 	} else if (xferred < length && !obj_request->result) {
1583 		if (obj_request->type == OBJ_REQUEST_BIO)
1584 			zero_bio_chain(obj_request->bio_list, xferred);
1585 		else
1586 			zero_pages(obj_request->pages, xferred, length);
1587 		obj_request->xferred = length;
1588 	}
1589 	obj_request_done_set(obj_request);
1590 }
1591 
1592 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1593 {
1594 	dout("%s: obj %p cb %p\n", __func__, obj_request,
1595 		obj_request->callback);
1596 	if (obj_request->callback)
1597 		obj_request->callback(obj_request);
1598 	else
1599 		complete_all(&obj_request->completion);
1600 }
1601 
1602 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1603 {
1604 	dout("%s: obj %p\n", __func__, obj_request);
1605 	obj_request_done_set(obj_request);
1606 }
1607 
1608 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1609 {
1610 	struct rbd_img_request *img_request = NULL;
1611 	struct rbd_device *rbd_dev = NULL;
1612 	bool layered = false;
1613 
1614 	if (obj_request_img_data_test(obj_request)) {
1615 		img_request = obj_request->img_request;
1616 		layered = img_request && img_request_layered_test(img_request);
1617 		rbd_dev = img_request->rbd_dev;
1618 	}
1619 
1620 	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1621 		obj_request, img_request, obj_request->result,
1622 		obj_request->xferred, obj_request->length);
1623 	if (layered && obj_request->result == -ENOENT &&
1624 			obj_request->img_offset < rbd_dev->parent_overlap)
1625 		rbd_img_parent_read(obj_request);
1626 	else if (img_request)
1627 		rbd_img_obj_request_read_callback(obj_request);
1628 	else
1629 		obj_request_done_set(obj_request);
1630 }
1631 
1632 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1633 {
1634 	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1635 		obj_request->result, obj_request->length);
1636 	/*
1637 	 * There is no such thing as a successful short write.  Set
1638 	 * it to our originally-requested length.
1639 	 */
1640 	obj_request->xferred = obj_request->length;
1641 	obj_request_done_set(obj_request);
1642 }
1643 
1644 /*
1645  * For a simple stat call there's nothing to do.  We'll do more if
1646  * this is part of a write sequence for a layered image.
1647  */
1648 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1649 {
1650 	dout("%s: obj %p\n", __func__, obj_request);
1651 	obj_request_done_set(obj_request);
1652 }
1653 
1654 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1655 				struct ceph_msg *msg)
1656 {
1657 	struct rbd_obj_request *obj_request = osd_req->r_priv;
1658 	u16 opcode;
1659 
1660 	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1661 	rbd_assert(osd_req == obj_request->osd_req);
1662 	if (obj_request_img_data_test(obj_request)) {
1663 		rbd_assert(obj_request->img_request);
1664 		rbd_assert(obj_request->which != BAD_WHICH);
1665 	} else {
1666 		rbd_assert(obj_request->which == BAD_WHICH);
1667 	}
1668 
1669 	if (osd_req->r_result < 0)
1670 		obj_request->result = osd_req->r_result;
1671 
1672 	BUG_ON(osd_req->r_num_ops > 2);
1673 
1674 	/*
1675 	 * We support a 64-bit length, but ultimately it has to be
1676 	 * passed to blk_end_request(), which takes an unsigned int.
1677 	 */
1678 	obj_request->xferred = osd_req->r_reply_op_len[0];
1679 	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1680 	opcode = osd_req->r_ops[0].op;
1681 	switch (opcode) {
1682 	case CEPH_OSD_OP_READ:
1683 		rbd_osd_read_callback(obj_request);
1684 		break;
1685 	case CEPH_OSD_OP_WRITE:
1686 		rbd_osd_write_callback(obj_request);
1687 		break;
1688 	case CEPH_OSD_OP_STAT:
1689 		rbd_osd_stat_callback(obj_request);
1690 		break;
1691 	case CEPH_OSD_OP_CALL:
1692 	case CEPH_OSD_OP_NOTIFY_ACK:
1693 	case CEPH_OSD_OP_WATCH:
1694 		rbd_osd_trivial_callback(obj_request);
1695 		break;
1696 	default:
1697 		rbd_warn(NULL, "%s: unsupported op %hu\n",
1698 			obj_request->object_name, (unsigned short) opcode);
1699 		break;
1700 	}
1701 
1702 	if (obj_request_done_test(obj_request))
1703 		rbd_obj_request_complete(obj_request);
1704 }
1705 
1706 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1707 {
1708 	struct rbd_img_request *img_request = obj_request->img_request;
1709 	struct ceph_osd_request *osd_req = obj_request->osd_req;
1710 	u64 snap_id;
1711 
1712 	rbd_assert(osd_req != NULL);
1713 
1714 	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1715 	ceph_osdc_build_request(osd_req, obj_request->offset,
1716 			NULL, snap_id, NULL);
1717 }
1718 
1719 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1720 {
1721 	struct rbd_img_request *img_request = obj_request->img_request;
1722 	struct ceph_osd_request *osd_req = obj_request->osd_req;
1723 	struct ceph_snap_context *snapc;
1724 	struct timespec mtime = CURRENT_TIME;
1725 
1726 	rbd_assert(osd_req != NULL);
1727 
1728 	snapc = img_request ? img_request->snapc : NULL;
1729 	ceph_osdc_build_request(osd_req, obj_request->offset,
1730 			snapc, CEPH_NOSNAP, &mtime);
1731 }
1732 
1733 static struct ceph_osd_request *rbd_osd_req_create(
1734 					struct rbd_device *rbd_dev,
1735 					bool write_request,
1736 					struct rbd_obj_request *obj_request)
1737 {
1738 	struct ceph_snap_context *snapc = NULL;
1739 	struct ceph_osd_client *osdc;
1740 	struct ceph_osd_request *osd_req;
1741 
1742 	if (obj_request_img_data_test(obj_request)) {
1743 		struct rbd_img_request *img_request = obj_request->img_request;
1744 
1745 		rbd_assert(write_request ==
1746 				img_request_write_test(img_request));
1747 		if (write_request)
1748 			snapc = img_request->snapc;
1749 	}
1750 
1751 	/* Allocate and initialize the request, for the single op */
1752 
1753 	osdc = &rbd_dev->rbd_client->client->osdc;
1754 	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1755 	if (!osd_req)
1756 		return NULL;	/* ENOMEM */
1757 
1758 	if (write_request)
1759 		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1760 	else
1761 		osd_req->r_flags = CEPH_OSD_FLAG_READ;
1762 
1763 	osd_req->r_callback = rbd_osd_req_callback;
1764 	osd_req->r_priv = obj_request;
1765 
1766 	osd_req->r_oid_len = strlen(obj_request->object_name);
1767 	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1768 	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1769 
1770 	osd_req->r_file_layout = rbd_dev->layout;	/* struct */
1771 
1772 	return osd_req;
1773 }
1774 
1775 /*
1776  * Create a copyup osd request based on the information in the
1777  * object request supplied.  A copyup request has two osd ops:
1778  * a copyup method call and a "normal" write request.
1779  */
1780 static struct ceph_osd_request *
1781 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1782 {
1783 	struct rbd_img_request *img_request;
1784 	struct ceph_snap_context *snapc;
1785 	struct rbd_device *rbd_dev;
1786 	struct ceph_osd_client *osdc;
1787 	struct ceph_osd_request *osd_req;
1788 
1789 	rbd_assert(obj_request_img_data_test(obj_request));
1790 	img_request = obj_request->img_request;
1791 	rbd_assert(img_request);
1792 	rbd_assert(img_request_write_test(img_request));
1793 
1794 	/* Allocate and initialize the request, for the two ops */
1795 
1796 	snapc = img_request->snapc;
1797 	rbd_dev = img_request->rbd_dev;
1798 	osdc = &rbd_dev->rbd_client->client->osdc;
1799 	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1800 	if (!osd_req)
1801 		return NULL;	/* ENOMEM */
1802 
1803 	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1804 	osd_req->r_callback = rbd_osd_req_callback;
1805 	osd_req->r_priv = obj_request;
1806 
1807 	osd_req->r_oid_len = strlen(obj_request->object_name);
1808 	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1809 	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1810 
1811 	osd_req->r_file_layout = rbd_dev->layout;	/* struct */
1812 
1813 	return osd_req;
1814 }
1815 
1816 
1817 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1818 {
1819 	ceph_osdc_put_request(osd_req);
1820 }
1821 
1822 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1823 
1824 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1825 						u64 offset, u64 length,
1826 						enum obj_request_type type)
1827 {
1828 	struct rbd_obj_request *obj_request;
1829 	size_t size;
1830 	char *name;
1831 
1832 	rbd_assert(obj_request_type_valid(type));
1833 
1834 	size = strlen(object_name) + 1;
1835 	name = kmalloc(size, GFP_KERNEL);
1836 	if (!name)
1837 		return NULL;
1838 
1839 	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1840 	if (!obj_request) {
1841 		kfree(name);
1842 		return NULL;
1843 	}
1844 
1845 	obj_request->object_name = memcpy(name, object_name, size);
1846 	obj_request->offset = offset;
1847 	obj_request->length = length;
1848 	obj_request->flags = 0;
1849 	obj_request->which = BAD_WHICH;
1850 	obj_request->type = type;
1851 	INIT_LIST_HEAD(&obj_request->links);
1852 	init_completion(&obj_request->completion);
1853 	kref_init(&obj_request->kref);
1854 
1855 	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1856 		offset, length, (int)type, obj_request);
1857 
1858 	return obj_request;
1859 }
1860 
1861 static void rbd_obj_request_destroy(struct kref *kref)
1862 {
1863 	struct rbd_obj_request *obj_request;
1864 
1865 	obj_request = container_of(kref, struct rbd_obj_request, kref);
1866 
1867 	dout("%s: obj %p\n", __func__, obj_request);
1868 
1869 	rbd_assert(obj_request->img_request == NULL);
1870 	rbd_assert(obj_request->which == BAD_WHICH);
1871 
1872 	if (obj_request->osd_req)
1873 		rbd_osd_req_destroy(obj_request->osd_req);
1874 
1875 	rbd_assert(obj_request_type_valid(obj_request->type));
1876 	switch (obj_request->type) {
1877 	case OBJ_REQUEST_NODATA:
1878 		break;		/* Nothing to do */
1879 	case OBJ_REQUEST_BIO:
1880 		if (obj_request->bio_list)
1881 			bio_chain_put(obj_request->bio_list);
1882 		break;
1883 	case OBJ_REQUEST_PAGES:
1884 		if (obj_request->pages)
1885 			ceph_release_page_vector(obj_request->pages,
1886 						obj_request->page_count);
1887 		break;
1888 	}
1889 
1890 	kfree(obj_request->object_name);
1891 	obj_request->object_name = NULL;
1892 	kmem_cache_free(rbd_obj_request_cache, obj_request);
1893 }
1894 
1895 /* It's OK to call this for a device with no parent */
1896 
1897 static void rbd_spec_put(struct rbd_spec *spec);
1898 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1899 {
1900 	rbd_dev_remove_parent(rbd_dev);
1901 	rbd_spec_put(rbd_dev->parent_spec);
1902 	rbd_dev->parent_spec = NULL;
1903 	rbd_dev->parent_overlap = 0;
1904 }
1905 
1906 /*
1907  * Parent image reference counting is used to determine when an
1908  * image's parent fields can be safely torn down--after there are no
1909  * more in-flight requests to the parent image.  When the last
1910  * reference is dropped, cleaning them up is safe.
1911  */
1912 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1913 {
1914 	int counter;
1915 
1916 	if (!rbd_dev->parent_spec)
1917 		return;
1918 
1919 	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1920 	if (counter > 0)
1921 		return;
1922 
1923 	/* Last reference; clean up parent data structures */
1924 
1925 	if (!counter)
1926 		rbd_dev_unparent(rbd_dev);
1927 	else
1928 		rbd_warn(rbd_dev, "parent reference underflow\n");
1929 }
1930 
1931 /*
1932  * If an image has a non-zero parent overlap, get a reference to its
1933  * parent.
1934  *
1935  * We must get the reference before checking for the overlap to
1936  * coordinate properly with zeroing the parent overlap in
1937  * rbd_dev_v2_parent_info() when an image gets flattened.  We
1938  * drop it again if there is no overlap.
1939  *
1940  * Returns true if the rbd device has a parent with a non-zero
1941  * overlap and a reference for it was successfully taken, or
1942  * false otherwise.
1943  */
1944 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1945 {
1946 	int counter;
1947 
1948 	if (!rbd_dev->parent_spec)
1949 		return false;
1950 
1951 	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1952 	if (counter > 0 && rbd_dev->parent_overlap)
1953 		return true;
1954 
1955 	/* Image was flattened, but parent is not yet torn down */
1956 
1957 	if (counter < 0)
1958 		rbd_warn(rbd_dev, "parent reference overflow\n");
1959 
1960 	return false;
1961 }
1962 
1963 /*
1964  * Caller is responsible for filling in the list of object requests
1965  * that comprises the image request, and the Linux request pointer
1966  * (if there is one).
1967  */
1968 static struct rbd_img_request *rbd_img_request_create(
1969 					struct rbd_device *rbd_dev,
1970 					u64 offset, u64 length,
1971 					bool write_request)
1972 {
1973 	struct rbd_img_request *img_request;
1974 
1975 	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
1976 	if (!img_request)
1977 		return NULL;
1978 
1979 	if (write_request) {
1980 		down_read(&rbd_dev->header_rwsem);
1981 		ceph_get_snap_context(rbd_dev->header.snapc);
1982 		up_read(&rbd_dev->header_rwsem);
1983 	}
1984 
1985 	img_request->rq = NULL;
1986 	img_request->rbd_dev = rbd_dev;
1987 	img_request->offset = offset;
1988 	img_request->length = length;
1989 	img_request->flags = 0;
1990 	if (write_request) {
1991 		img_request_write_set(img_request);
1992 		img_request->snapc = rbd_dev->header.snapc;
1993 	} else {
1994 		img_request->snap_id = rbd_dev->spec->snap_id;
1995 	}
1996 	if (rbd_dev_parent_get(rbd_dev))
1997 		img_request_layered_set(img_request);
1998 	spin_lock_init(&img_request->completion_lock);
1999 	img_request->next_completion = 0;
2000 	img_request->callback = NULL;
2001 	img_request->result = 0;
2002 	img_request->obj_request_count = 0;
2003 	INIT_LIST_HEAD(&img_request->obj_requests);
2004 	kref_init(&img_request->kref);
2005 
2006 	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2007 		write_request ? "write" : "read", offset, length,
2008 		img_request);
2009 
2010 	return img_request;
2011 }
2012 
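/*
 * Final teardown of an image request, called when its last kref
 * reference is dropped.  All of its object requests are removed,
 * the parent reference (if layered) is released, and the snapshot
 * context (if it was a write) is dropped.
 */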
2013 static void rbd_img_request_destroy(struct kref *kref)
2014 {
2015 	struct rbd_img_request *img_request;
2016 	struct rbd_obj_request *obj_request;
2017 	struct rbd_obj_request *next_obj_request;
2018 
2019 	img_request = container_of(kref, struct rbd_img_request, kref);
2020 
2021 	dout("%s: img %p\n", __func__, img_request);
2022 
2023 	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2024 		rbd_img_obj_request_del(img_request, obj_request);
2025 	rbd_assert(img_request->obj_request_count == 0);
2026 
2027 	if (img_request_layered_test(img_request)) {
2028 		img_request_layered_clear(img_request);
2029 		rbd_dev_parent_put(img_request->rbd_dev);
2030 	}
2031 
2032 	if (img_request_write_test(img_request))
2033 		ceph_put_snap_context(img_request->snapc);
2034 
2035 	kmem_cache_free(rbd_img_request_cache, img_request);
2036 }
2037 
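/*
 * Create an image request directed at the parent image, covering
 * the given range of the parent.  The new request is marked as a
 * child request and takes a reference to the object request that
 * triggered it.
 */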
2038 static struct rbd_img_request *rbd_parent_request_create(
2039 					struct rbd_obj_request *obj_request,
2040 					u64 img_offset, u64 length)
2041 {
2042 	struct rbd_img_request *parent_request;
2043 	struct rbd_device *rbd_dev;
2044 
2045 	rbd_assert(obj_request->img_request);
2046 	rbd_dev = obj_request->img_request->rbd_dev;
2047 
2048 	parent_request = rbd_img_request_create(rbd_dev->parent,
2049 						img_offset, length, false);
2050 	if (!parent_request)
2051 		return NULL;
2052 
2053 	img_request_child_set(parent_request);
2054 	rbd_obj_request_get(obj_request);
2055 	parent_request->obj_request = obj_request;
2056 
2057 	return parent_request;
2058 }
2059 
2060 static void rbd_parent_request_destroy(struct kref *kref)
2061 {
2062 	struct rbd_img_request *parent_request;
2063 	struct rbd_obj_request *orig_request;
2064 
2065 	parent_request = container_of(kref, struct rbd_img_request, kref);
2066 	orig_request = parent_request->obj_request;
2067 
2068 	parent_request->obj_request = NULL;
2069 	rbd_obj_request_put(orig_request);
2070 	img_request_child_clear(parent_request);
2071 
2072 	rbd_img_request_destroy(kref);
2073 }
2074 
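/*
 * Finish one object request belonging to an image request: record
 * its result and transfer count, and indicate (via the block layer
 * for a top-level request, or the slot count for a child request)
 * whether more of the image request remains to be completed.
 */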
2075 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2076 {
2077 	struct rbd_img_request *img_request;
2078 	unsigned int xferred;
2079 	int result;
2080 	bool more;
2081 
2082 	rbd_assert(obj_request_img_data_test(obj_request));
2083 	img_request = obj_request->img_request;
2084 
2085 	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2086 	xferred = (unsigned int)obj_request->xferred;
2087 	result = obj_request->result;
2088 	if (result) {
2089 		struct rbd_device *rbd_dev = img_request->rbd_dev;
2090 
2091 		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2092 			img_request_write_test(img_request) ? "write" : "read",
2093 			obj_request->length, obj_request->img_offset,
2094 			obj_request->offset);
2095 		rbd_warn(rbd_dev, "  result %d xferred %x\n",
2096 			result, xferred);
2097 		if (!img_request->result)
2098 			img_request->result = result;
2099 	}
2100 
2101 	/* Image object requests don't own their page array */
2102 
2103 	if (obj_request->type == OBJ_REQUEST_PAGES) {
2104 		obj_request->pages = NULL;
2105 		obj_request->page_count = 0;
2106 	}
2107 
2108 	if (img_request_child_test(img_request)) {
2109 		rbd_assert(img_request->obj_request != NULL);
2110 		more = obj_request->which < img_request->obj_request_count - 1;
2111 	} else {
2112 		rbd_assert(img_request->rq != NULL);
2113 		more = blk_end_request(img_request->rq, result, xferred);
2114 	}
2115 
2116 	return more;
2117 }
2118 
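/*
 * Completion callback for object requests that are part of an image
 * request.  Object requests may complete in any order, but the image
 * request is ended strictly in order: completion is deferred until
 * all object requests in earlier "which" slots have completed.
 */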
2119 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2120 {
2121 	struct rbd_img_request *img_request;
2122 	u32 which = obj_request->which;
2123 	bool more = true;
2124 
2125 	rbd_assert(obj_request_img_data_test(obj_request));
2126 	img_request = obj_request->img_request;
2127 
2128 	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2129 	rbd_assert(img_request != NULL);
2130 	rbd_assert(img_request->obj_request_count > 0);
2131 	rbd_assert(which != BAD_WHICH);
2132 	rbd_assert(which < img_request->obj_request_count);
2133 	rbd_assert(which >= img_request->next_completion);
2134 
2135 	spin_lock_irq(&img_request->completion_lock);
2136 	if (which != img_request->next_completion)
2137 		goto out;
2138 
2139 	for_each_obj_request_from(img_request, obj_request) {
2140 		rbd_assert(more);
2141 		rbd_assert(which < img_request->obj_request_count);
2142 
2143 		if (!obj_request_done_test(obj_request))
2144 			break;
2145 		more = rbd_img_obj_end_request(obj_request);
2146 		which++;
2147 	}
2148 
2149 	rbd_assert(more ^ (which == img_request->obj_request_count));
2150 	img_request->next_completion = which;
2151 out:
2152 	spin_unlock_irq(&img_request->completion_lock);
2153 
2154 	if (!more)
2155 		rbd_img_request_complete(img_request);
2156 }
2157 
2158 /*
2159  * Split up an image request into one or more object requests, each
2160  * to a different object.  The "type" parameter indicates whether
2161  * "data_desc" is the pointer to the head of a list of bio
2162  * structures, or the base of a page array.  In either case this
2163  * function assumes data_desc describes memory sufficient to hold
2164  * all data described by the image request.
2165  */
2166 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2167 					enum obj_request_type type,
2168 					void *data_desc)
2169 {
2170 	struct rbd_device *rbd_dev = img_request->rbd_dev;
2171 	struct rbd_obj_request *obj_request = NULL;
2172 	struct rbd_obj_request *next_obj_request;
2173 	bool write_request = img_request_write_test(img_request);
2174 	struct bio *bio_list;
2175 	unsigned int bio_offset = 0;
2176 	struct page **pages;
2177 	u64 img_offset;
2178 	u64 resid;
2179 	u16 opcode;
2180 
2181 	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2182 		(int)type, data_desc);
2183 
2184 	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2185 	img_offset = img_request->offset;
2186 	resid = img_request->length;
2187 	rbd_assert(resid > 0);
2188 
2189 	if (type == OBJ_REQUEST_BIO) {
2190 		bio_list = data_desc;
2191 		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2192 	} else {
2193 		rbd_assert(type == OBJ_REQUEST_PAGES);
2194 		pages = data_desc;
2195 	}
2196 
2197 	while (resid) {
2198 		struct ceph_osd_request *osd_req;
2199 		const char *object_name;
2200 		u64 offset;
2201 		u64 length;
2202 
2203 		object_name = rbd_segment_name(rbd_dev, img_offset);
2204 		if (!object_name)
2205 			goto out_unwind;
2206 		offset = rbd_segment_offset(rbd_dev, img_offset);
2207 		length = rbd_segment_length(rbd_dev, img_offset, resid);
2208 		obj_request = rbd_obj_request_create(object_name,
2209 						offset, length, type);
2210 		/* object request has its own copy of the object name */
2211 		rbd_segment_name_free(object_name);
2212 		if (!obj_request)
2213 			goto out_unwind;
2214 
2215 		if (type == OBJ_REQUEST_BIO) {
2216 			unsigned int clone_size;
2217 
2218 			rbd_assert(length <= (u64)UINT_MAX);
2219 			clone_size = (unsigned int)length;
2220 			obj_request->bio_list =
2221 					bio_chain_clone_range(&bio_list,
2222 								&bio_offset,
2223 								clone_size,
2224 								GFP_ATOMIC);
2225 			if (!obj_request->bio_list)
2226 				goto out_partial;
2227 		} else {
2228 			unsigned int page_count;
2229 
2230 			obj_request->pages = pages;
2231 			page_count = (u32)calc_pages_for(offset, length);
2232 			obj_request->page_count = page_count;
2233 			if ((offset + length) & ~PAGE_MASK)
2234 				page_count--;	/* more on last page */
2235 			pages += page_count;
2236 		}
2237 
2238 		osd_req = rbd_osd_req_create(rbd_dev, write_request,
2239 						obj_request);
2240 		if (!osd_req)
2241 			goto out_partial;
2242 		obj_request->osd_req = osd_req;
2243 		obj_request->callback = rbd_img_obj_callback;
2244 
2245 		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2246 						0, 0);
2247 		if (type == OBJ_REQUEST_BIO)
2248 			osd_req_op_extent_osd_data_bio(osd_req, 0,
2249 					obj_request->bio_list, length);
2250 		else
2251 			osd_req_op_extent_osd_data_pages(osd_req, 0,
2252 					obj_request->pages, length,
2253 					offset & ~PAGE_MASK, false, false);
2254 
2255 		/*
2256 		 * set obj_request->img_request before formatting
2257 		 * the osd_request so that it gets the right snapc
2258 		 */
2259 		rbd_img_obj_request_add(img_request, obj_request);
2260 		if (write_request)
2261 			rbd_osd_req_format_write(obj_request);
2262 		else
2263 			rbd_osd_req_format_read(obj_request);
2264 
2265 		obj_request->img_offset = img_offset;
2266 
2267 		img_offset += length;
2268 		resid -= length;
2269 	}
2270 
2271 	return 0;
2272 
2273 out_partial:
2274 	rbd_obj_request_put(obj_request);
2275 out_unwind:
2276 	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2277 		rbd_obj_request_put(obj_request);
2278 
2279 	return -ENOMEM;
2280 }
2281 
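/*
 * Completion callback for a copyup request.  The copyup pages are
 * released, and on success the transfer count is set to the
 * originally requested length (there is no such thing as a short
 * write).  The normal image object callback then finishes the
 * request.
 */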
2282 static void
2283 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2284 {
2285 	struct rbd_img_request *img_request;
2286 	struct rbd_device *rbd_dev;
2287 	struct page **pages;
2288 	u32 page_count;
2289 
2290 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2291 	rbd_assert(obj_request_img_data_test(obj_request));
2292 	img_request = obj_request->img_request;
2293 	rbd_assert(img_request);
2294 
2295 	rbd_dev = img_request->rbd_dev;
2296 	rbd_assert(rbd_dev);
2297 
2298 	pages = obj_request->copyup_pages;
2299 	rbd_assert(pages != NULL);
2300 	obj_request->copyup_pages = NULL;
2301 	page_count = obj_request->copyup_page_count;
2302 	rbd_assert(page_count);
2303 	obj_request->copyup_page_count = 0;
2304 	ceph_release_page_vector(pages, page_count);
2305 
2306 	/*
2307 	 * We want the transfer count to reflect the size of the
2308 	 * original write request.  There is no such thing as a
2309 	 * successful short write, so if the request was successful
2310 	 * we can just set it to the originally-requested length.
2311 	 */
2312 	if (!obj_request->result)
2313 		obj_request->xferred = obj_request->length;
2314 
2315 	/* Finish up with the normal image object callback */
2316 
2317 	rbd_img_obj_callback(obj_request);
2318 }
2319 
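/*
 * Completion callback for the parent read issued by
 * rbd_img_obj_parent_read_full().  The data read from the parent is
 * used to build a two-op copyup request (copyup method call plus the
 * original write), which replaces the original osd request and is
 * then submitted.  If the image was flattened in the meantime the
 * original request is simply resubmitted.
 */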
2320 static void
2321 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2322 {
2323 	struct rbd_obj_request *orig_request;
2324 	struct ceph_osd_request *osd_req;
2325 	struct ceph_osd_client *osdc;
2326 	struct rbd_device *rbd_dev;
2327 	struct page **pages;
2328 	u32 page_count;
2329 	int img_result;
2330 	u64 parent_length;
2331 	u64 offset;
2332 	u64 length;
2333 
2334 	rbd_assert(img_request_child_test(img_request));
2335 
2336 	/* First get what we need from the image request */
2337 
2338 	pages = img_request->copyup_pages;
2339 	rbd_assert(pages != NULL);
2340 	img_request->copyup_pages = NULL;
2341 	page_count = img_request->copyup_page_count;
2342 	rbd_assert(page_count);
2343 	img_request->copyup_page_count = 0;
2344 
2345 	orig_request = img_request->obj_request;
2346 	rbd_assert(orig_request != NULL);
2347 	rbd_assert(obj_request_type_valid(orig_request->type));
2348 	img_result = img_request->result;
2349 	parent_length = img_request->length;
2350 	rbd_assert(parent_length == img_request->xferred);
2351 	rbd_img_request_put(img_request);
2352 
2353 	rbd_assert(orig_request->img_request);
2354 	rbd_dev = orig_request->img_request->rbd_dev;
2355 	rbd_assert(rbd_dev);
2356 
2357 	/*
2358 	 * If the overlap has become 0 (most likely because the
2359 	 * image has been flattened) we need to free the pages
2360 	 * and re-submit the original write request.
2361 	 */
2362 	if (!rbd_dev->parent_overlap) {
2363 		struct ceph_osd_client *osdc;
2364 
2365 		ceph_release_page_vector(pages, page_count);
2366 		osdc = &rbd_dev->rbd_client->client->osdc;
2367 		img_result = rbd_obj_request_submit(osdc, orig_request);
2368 		if (!img_result)
2369 			return;
2370 	}
2371 
2372 	if (img_result)
2373 		goto out_err;
2374 
2375 	/*
2376 	 * The original osd request is of no use to us any more.
2377 	 * We need a new one that can hold the two ops in a copyup
2378 	 * request.  Allocate the new copyup osd request for the
2379 	 * original request, and release the old one.
2380 	 */
2381 	img_result = -ENOMEM;
2382 	osd_req = rbd_osd_req_create_copyup(orig_request);
2383 	if (!osd_req)
2384 		goto out_err;
2385 	rbd_osd_req_destroy(orig_request->osd_req);
2386 	orig_request->osd_req = osd_req;
2387 	orig_request->copyup_pages = pages;
2388 	orig_request->copyup_page_count = page_count;
2389 
2390 	/* Initialize the copyup op */
2391 
2392 	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2393 	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2394 						false, false);
2395 
2396 	/* Then the original write request op */
2397 
2398 	offset = orig_request->offset;
2399 	length = orig_request->length;
2400 	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2401 					offset, length, 0, 0);
2402 	if (orig_request->type == OBJ_REQUEST_BIO)
2403 		osd_req_op_extent_osd_data_bio(osd_req, 1,
2404 					orig_request->bio_list, length);
2405 	else
2406 		osd_req_op_extent_osd_data_pages(osd_req, 1,
2407 					orig_request->pages, length,
2408 					offset & ~PAGE_MASK, false, false);
2409 
2410 	rbd_osd_req_format_write(orig_request);
2411 
2412 	/* All set, send it off. */
2413 
2414 	orig_request->callback = rbd_img_obj_copyup_callback;
2415 	osdc = &rbd_dev->rbd_client->client->osdc;
2416 	img_result = rbd_obj_request_submit(osdc, orig_request);
2417 	if (!img_result)
2418 		return;
2419 out_err:
2420 	/* Record the error code and complete the request */
2421 
2422 	orig_request->result = img_result;
2423 	orig_request->xferred = 0;
2424 	obj_request_done_set(orig_request);
2425 	rbd_obj_request_complete(orig_request);
2426 }
2427 
2428 /*
2429  * Read from the parent image the range of data that covers the
2430  * entire target of the given object request.  This is used for
2431  * satisfying a layered image write request when the target of an
2432  * object request from the image request does not exist.
2433  *
2434  * A page array big enough to hold the returned data is allocated
2435  * and supplied to rbd_img_request_fill() as the "data descriptor."
2436  * When the read completes, this page array will be transferred to
2437  * the original object request for the copyup operation.
2438  *
2439  * If an error occurs, record it as the result of the original
2440  * object request and mark it done so it gets completed.
2441  */
2442 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2443 {
2444 	struct rbd_img_request *img_request = NULL;
2445 	struct rbd_img_request *parent_request = NULL;
2446 	struct rbd_device *rbd_dev;
2447 	u64 img_offset;
2448 	u64 length;
2449 	struct page **pages = NULL;
2450 	u32 page_count;
2451 	int result;
2452 
2453 	rbd_assert(obj_request_img_data_test(obj_request));
2454 	rbd_assert(obj_request_type_valid(obj_request->type));
2455 
2456 	img_request = obj_request->img_request;
2457 	rbd_assert(img_request != NULL);
2458 	rbd_dev = img_request->rbd_dev;
2459 	rbd_assert(rbd_dev->parent != NULL);
2460 
2461 	/*
2462 	 * Determine the byte range covered by the object in the
2463 	 * child image to which the original request was to be sent.
2464 	 */
2465 	img_offset = obj_request->img_offset - obj_request->offset;
2466 	length = (u64)1 << rbd_dev->header.obj_order;
2467 
2468 	/*
2469 	 * There is no defined parent data beyond the parent
2470 	 * overlap, so limit what we read at that boundary if
2471 	 * necessary.
2472 	 */
2473 	if (img_offset + length > rbd_dev->parent_overlap) {
2474 		rbd_assert(img_offset < rbd_dev->parent_overlap);
2475 		length = rbd_dev->parent_overlap - img_offset;
2476 	}
2477 
2478 	/*
2479 	 * Allocate a page array big enough to receive the data read
2480 	 * from the parent.
2481 	 */
2482 	page_count = (u32)calc_pages_for(0, length);
2483 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2484 	if (IS_ERR(pages)) {
2485 		result = PTR_ERR(pages);
2486 		pages = NULL;
2487 		goto out_err;
2488 	}
2489 
2490 	result = -ENOMEM;
2491 	parent_request = rbd_parent_request_create(obj_request,
2492 						img_offset, length);
2493 	if (!parent_request)
2494 		goto out_err;
2495 
2496 	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2497 	if (result)
2498 		goto out_err;
2499 	parent_request->copyup_pages = pages;
2500 	parent_request->copyup_page_count = page_count;
2501 
2502 	parent_request->callback = rbd_img_obj_parent_read_full_callback;
2503 	result = rbd_img_request_submit(parent_request);
2504 	if (!result)
2505 		return 0;
2506 
2507 	parent_request->copyup_pages = NULL;
2508 	parent_request->copyup_page_count = 0;
2509 	parent_request->obj_request = NULL;
2510 	rbd_obj_request_put(obj_request);
2511 out_err:
2512 	if (pages)
2513 		ceph_release_page_vector(pages, page_count);
2514 	if (parent_request)
2515 		rbd_img_request_put(parent_request);
2516 	obj_request->result = result;
2517 	obj_request->xferred = 0;
2518 	obj_request_done_set(obj_request);
2519 
2520 	return result;
2521 }
2522 
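/*
 * Completion callback for the STAT request issued by
 * rbd_img_obj_exists_submit().  Records whether the target object
 * exists and resubmits the original object request, or completes it
 * with an error if the STAT failed for any reason other than
 * -ENOENT.
 */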
2523 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2524 {
2525 	struct rbd_obj_request *orig_request;
2526 	struct rbd_device *rbd_dev;
2527 	int result;
2528 
2529 	rbd_assert(!obj_request_img_data_test(obj_request));
2530 
2531 	/*
2532 	 * All we need from the object request is the original
2533 	 * request and the result of the STAT op.  Grab those, then
2534 	 * we're done with the request.
2535 	 */
2536 	orig_request = obj_request->obj_request;
2537 	obj_request->obj_request = NULL;
2538 	rbd_assert(orig_request);
2539 	rbd_assert(orig_request->img_request);
2540 
2541 	result = obj_request->result;
2542 	obj_request->result = 0;
2543 
2544 	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2545 		obj_request, orig_request, result,
2546 		obj_request->xferred, obj_request->length);
2547 	rbd_obj_request_put(obj_request);
2548 
2549 	/*
2550 	 * If the overlap has become 0 (most likely because the
2551 	 * image has been flattened) we need to re-submit the
2552 	 * original write request.
2553 	 */
2554 	rbd_dev = orig_request->img_request->rbd_dev;
2555 	if (!rbd_dev->parent_overlap) {
2556 		struct ceph_osd_client *osdc;
2557 
2558 		rbd_obj_request_put(orig_request);
2559 		osdc = &rbd_dev->rbd_client->client->osdc;
2560 		result = rbd_obj_request_submit(osdc, orig_request);
2561 		if (!result)
2562 			return;
2563 	}
2564 
2565 	/*
2566 	 * Our only purpose here is to determine whether the object
2567 	 * exists, and we don't want to treat the non-existence as
2568 	 * an error.  If something else comes back, transfer the
2569 	 * error to the original request and complete it now.
2570 	 */
2571 	if (!result) {
2572 		obj_request_existence_set(orig_request, true);
2573 	} else if (result == -ENOENT) {
2574 		obj_request_existence_set(orig_request, false);
2575 	} else if (result) {
2576 		orig_request->result = result;
2577 		goto out;
2578 	}
2579 
2580 	/*
2581 	 * Resubmit the original request now that we have recorded
2582 	 * whether the target object exists.
2583 	 */
2584 	orig_request->result = rbd_img_obj_request_submit(orig_request);
2585 out:
2586 	if (orig_request->result)
2587 		rbd_obj_request_complete(orig_request);
2588 	rbd_obj_request_put(orig_request);
2589 }
2590 
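/*
 * Issue a STAT operation against the target object of a layered
 * write to find out whether it already exists.  The original object
 * request is resubmitted from the callback once the answer is known.
 */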
2591 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2592 {
2593 	struct rbd_obj_request *stat_request;
2594 	struct rbd_device *rbd_dev;
2595 	struct ceph_osd_client *osdc;
2596 	struct page **pages = NULL;
2597 	u32 page_count;
2598 	size_t size;
2599 	int ret;
2600 
2601 	/*
2602 	 * The response data for a STAT call consists of:
2603 	 *     le64 length;
2604 	 *     struct {
2605 	 *         le32 tv_sec;
2606 	 *         le32 tv_nsec;
2607 	 *     } mtime;
2608 	 */
2609 	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2610 	page_count = (u32)calc_pages_for(0, size);
2611 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2612 	if (IS_ERR(pages))
2613 		return PTR_ERR(pages);
2614 
2615 	ret = -ENOMEM;
2616 	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2617 							OBJ_REQUEST_PAGES);
2618 	if (!stat_request)
2619 		goto out;
2620 
2621 	rbd_obj_request_get(obj_request);
2622 	stat_request->obj_request = obj_request;
2623 	stat_request->pages = pages;
2624 	stat_request->page_count = page_count;
2625 
2626 	rbd_assert(obj_request->img_request);
2627 	rbd_dev = obj_request->img_request->rbd_dev;
2628 	stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2629 						stat_request);
2630 	if (!stat_request->osd_req)
2631 		goto out;
2632 	stat_request->callback = rbd_img_obj_exists_callback;
2633 
2634 	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2635 	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2636 					false, false);
2637 	rbd_osd_req_format_read(stat_request);
2638 
2639 	osdc = &rbd_dev->rbd_client->client->osdc;
2640 	ret = rbd_obj_request_submit(osdc, stat_request);
2641 out:
2642 	if (ret)
2643 		rbd_obj_request_put(obj_request);
2644 
2645 	return ret;
2646 }
2647 
2648 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2649 {
2650 	struct rbd_img_request *img_request;
2651 	struct rbd_device *rbd_dev;
2652 	bool known;
2653 
2654 	rbd_assert(obj_request_img_data_test(obj_request));
2655 
2656 	img_request = obj_request->img_request;
2657 	rbd_assert(img_request);
2658 	rbd_dev = img_request->rbd_dev;
2659 
2660 	/*
2661 	 * Only writes to layered images need special handling.
2662 	 * Reads and non-layered writes are simple object requests.
2663 	 * Layered writes that start beyond the end of the overlap
2664 	 * with the parent have no parent data, so they too are
2665 	 * simple object requests.  Finally, if the target object is
2666 	 * known to already exist, its parent data has already been
2667 	 * copied, so a write to the object can also be handled as a
2668 	 * simple object request.
2669 	 */
2670 	if (!img_request_write_test(img_request) ||
2671 		!img_request_layered_test(img_request) ||
2672 		rbd_dev->parent_overlap <= obj_request->img_offset ||
2673 		((known = obj_request_known_test(obj_request)) &&
2674 			obj_request_exists_test(obj_request))) {
2675 
2676 		struct rbd_device *rbd_dev;
2677 		struct ceph_osd_client *osdc;
2678 
2679 		rbd_dev = obj_request->img_request->rbd_dev;
2680 		osdc = &rbd_dev->rbd_client->client->osdc;
2681 
2682 		return rbd_obj_request_submit(osdc, obj_request);
2683 	}
2684 
2685 	/*
2686 	 * It's a layered write.  The target object might exist but
2687 	 * we may not know that yet.  If we know it doesn't exist,
2688 	 * start by reading the data for the full target object from
2689 	 * the parent so we can use it for a copyup to the target.
2690 	 */
2691 	if (known)
2692 		return rbd_img_obj_parent_read_full(obj_request);
2693 
2694 	/* We don't know whether the target exists.  Go find out. */
2695 
2696 	return rbd_img_obj_exists_submit(obj_request);
2697 }
2698 
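/* Submit all of the object requests that make up an image request */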
2699 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2700 {
2701 	struct rbd_obj_request *obj_request;
2702 	struct rbd_obj_request *next_obj_request;
2703 
2704 	dout("%s: img %p\n", __func__, img_request);
2705 	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2706 		int ret;
2707 
2708 		ret = rbd_img_obj_request_submit(obj_request);
2709 		if (ret)
2710 			return ret;
2711 	}
2712 
2713 	return 0;
2714 }
2715 
2716 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2717 {
2718 	struct rbd_obj_request *obj_request;
2719 	struct rbd_device *rbd_dev;
2720 	u64 obj_end;
2721 	u64 img_xferred;
2722 	int img_result;
2723 
2724 	rbd_assert(img_request_child_test(img_request));
2725 
2726 	/* First get what we need from the image request and release it */
2727 
2728 	obj_request = img_request->obj_request;
2729 	img_xferred = img_request->xferred;
2730 	img_result = img_request->result;
2731 	rbd_img_request_put(img_request);
2732 
2733 	/*
2734 	 * If the overlap has become 0 (most likely because the
2735 	 * image has been flattened) we need to re-submit the
2736 	 * original request.
2737 	 */
2738 	rbd_assert(obj_request);
2739 	rbd_assert(obj_request->img_request);
2740 	rbd_dev = obj_request->img_request->rbd_dev;
2741 	if (!rbd_dev->parent_overlap) {
2742 		struct ceph_osd_client *osdc;
2743 
2744 		osdc = &rbd_dev->rbd_client->client->osdc;
2745 		img_result = rbd_obj_request_submit(osdc, obj_request);
2746 		if (!img_result)
2747 			return;
2748 	}
2749 
2750 	obj_request->result = img_result;
2751 	if (obj_request->result)
2752 		goto out;
2753 
2754 	/*
2755 	 * We need to zero anything beyond the parent overlap
2756 	 * boundary.  Since rbd_img_obj_request_read_callback()
2757 	 * will zero anything beyond the end of a short read, an
2758 	 * easy way to do this is to pretend the data from the
2759 	 * parent came up short--ending at the overlap boundary.
2760 	 */
2761 	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2762 	obj_end = obj_request->img_offset + obj_request->length;
2763 	if (obj_end > rbd_dev->parent_overlap) {
2764 		u64 xferred = 0;
2765 
2766 		if (obj_request->img_offset < rbd_dev->parent_overlap)
2767 			xferred = rbd_dev->parent_overlap -
2768 					obj_request->img_offset;
2769 
2770 		obj_request->xferred = min(img_xferred, xferred);
2771 	} else {
2772 		obj_request->xferred = img_xferred;
2773 	}
2774 out:
2775 	rbd_img_obj_request_read_callback(obj_request);
2776 	rbd_obj_request_complete(obj_request);
2777 }
2778 
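/*
 * Satisfy a read from a layered image whose target object does not
 * exist (-ENOENT) by reading the corresponding range from the parent
 * image instead.
 */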
2779 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2780 {
2781 	struct rbd_img_request *img_request;
2782 	int result;
2783 
2784 	rbd_assert(obj_request_img_data_test(obj_request));
2785 	rbd_assert(obj_request->img_request != NULL);
2786 	rbd_assert(obj_request->result == (s32) -ENOENT);
2787 	rbd_assert(obj_request_type_valid(obj_request->type));
2788 
2789 	/* rbd_read_finish(obj_request, obj_request->length); */
2790 	img_request = rbd_parent_request_create(obj_request,
2791 						obj_request->img_offset,
2792 						obj_request->length);
2793 	result = -ENOMEM;
2794 	if (!img_request)
2795 		goto out_err;
2796 
2797 	if (obj_request->type == OBJ_REQUEST_BIO)
2798 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2799 						obj_request->bio_list);
2800 	else
2801 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2802 						obj_request->pages);
2803 	if (result)
2804 		goto out_err;
2805 
2806 	img_request->callback = rbd_img_parent_read_callback;
2807 	result = rbd_img_request_submit(img_request);
2808 	if (result)
2809 		goto out_err;
2810 
2811 	return;
2812 out_err:
2813 	if (img_request)
2814 		rbd_img_request_put(img_request);
2815 	obj_request->result = result;
2816 	obj_request->xferred = 0;
2817 	obj_request_done_set(obj_request);
2818 }
2819 
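/* Acknowledge a notification received on the header object */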
2820 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2821 {
2822 	struct rbd_obj_request *obj_request;
2823 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2824 	int ret;
2825 
2826 	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2827 							OBJ_REQUEST_NODATA);
2828 	if (!obj_request)
2829 		return -ENOMEM;
2830 
2831 	ret = -ENOMEM;
2832 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2833 	if (!obj_request->osd_req)
2834 		goto out;
2835 	obj_request->callback = rbd_obj_request_put;
2836 
2837 	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2838 					notify_id, 0, 0);
2839 	rbd_osd_req_format_read(obj_request);
2840 
2841 	ret = rbd_obj_request_submit(osdc, obj_request);
2842 out:
2843 	if (ret)
2844 		rbd_obj_request_put(obj_request);
2845 
2846 	return ret;
2847 }
2848 
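/*
 * Callback invoked when a notification arrives on the header object
 * watch: refresh the image header and acknowledge the notification.
 */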
2849 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2850 {
2851 	struct rbd_device *rbd_dev = (struct rbd_device *)data;
2852 	int ret;
2853 
2854 	if (!rbd_dev)
2855 		return;
2856 
2857 	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2858 		rbd_dev->header_name, (unsigned long long)notify_id,
2859 		(unsigned int)opcode);
2860 	ret = rbd_dev_refresh(rbd_dev);
2861 	if (ret)
2862 		rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2863 
2864 	rbd_obj_notify_ack(rbd_dev, notify_id);
2865 }
2866 
2867 /*
2868  * Request sync osd watch/unwatch.  The value of "start" determines
2869  * whether a watch request is being initiated or torn down.
2870  */
2871 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2872 {
2873 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2874 	struct rbd_obj_request *obj_request;
2875 	int ret;
2876 
2877 	rbd_assert(start ^ !!rbd_dev->watch_event);
2878 	rbd_assert(start ^ !!rbd_dev->watch_request);
2879 
2880 	if (start) {
2881 		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2882 						&rbd_dev->watch_event);
2883 		if (ret < 0)
2884 			return ret;
2885 		rbd_assert(rbd_dev->watch_event != NULL);
2886 	}
2887 
2888 	ret = -ENOMEM;
2889 	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2890 							OBJ_REQUEST_NODATA);
2891 	if (!obj_request)
2892 		goto out_cancel;
2893 
2894 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2895 	if (!obj_request->osd_req)
2896 		goto out_cancel;
2897 
2898 	if (start)
2899 		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2900 	else
2901 		ceph_osdc_unregister_linger_request(osdc,
2902 					rbd_dev->watch_request->osd_req);
2903 
2904 	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2905 				rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2906 	rbd_osd_req_format_write(obj_request);
2907 
2908 	ret = rbd_obj_request_submit(osdc, obj_request);
2909 	if (ret)
2910 		goto out_cancel;
2911 	ret = rbd_obj_request_wait(obj_request);
2912 	if (ret)
2913 		goto out_cancel;
2914 	ret = obj_request->result;
2915 	if (ret)
2916 		goto out_cancel;
2917 
2918 	/*
2919 	 * A watch request is set to linger, so the underlying osd
2920 	 * request won't go away until we unregister it.  We retain
2921 	 * a pointer to the object request during that time (in
2922 	 * rbd_dev->watch_request), so we'll keep a reference to
2923 	 * it.  We'll drop that reference (below) after we've
2924 	 * unregistered it.
2925 	 */
2926 	if (start) {
2927 		rbd_dev->watch_request = obj_request;
2928 
2929 		return 0;
2930 	}
2931 
2932 	/* We have successfully torn down the watch request */
2933 
2934 	rbd_obj_request_put(rbd_dev->watch_request);
2935 	rbd_dev->watch_request = NULL;
2936 out_cancel:
2937 	/* Cancel the event if we're tearing down, or on error */
2938 	ceph_osdc_cancel_event(rbd_dev->watch_event);
2939 	rbd_dev->watch_event = NULL;
2940 	if (obj_request)
2941 		rbd_obj_request_put(obj_request);
2942 
2943 	return ret;
2944 }
2945 
2946 /*
2947  * Synchronous osd object method call.  Returns the number of bytes
2948  * placed into the inbound buffer, or a negative error code.
2949  */
2950 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2951 			     const char *object_name,
2952 			     const char *class_name,
2953 			     const char *method_name,
2954 			     const void *outbound,
2955 			     size_t outbound_size,
2956 			     void *inbound,
2957 			     size_t inbound_size)
2958 {
2959 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2960 	struct rbd_obj_request *obj_request;
2961 	struct page **pages;
2962 	u32 page_count;
2963 	int ret;
2964 
2965 	/*
2966 	 * Method calls are ultimately read operations.  The result
2967 	 * should be placed into the inbound buffer provided.  They
2968 	 * also supply outbound data--parameters for the object
2969 	 * method.  Currently if this is present it will be a
2970 	 * snapshot id.
2971 	 */
2972 	page_count = (u32)calc_pages_for(0, inbound_size);
2973 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2974 	if (IS_ERR(pages))
2975 		return PTR_ERR(pages);
2976 
2977 	ret = -ENOMEM;
2978 	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2979 							OBJ_REQUEST_PAGES);
2980 	if (!obj_request)
2981 		goto out;
2982 
2983 	obj_request->pages = pages;
2984 	obj_request->page_count = page_count;
2985 
2986 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2987 	if (!obj_request->osd_req)
2988 		goto out;
2989 
2990 	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2991 					class_name, method_name);
2992 	if (outbound_size) {
2993 		struct ceph_pagelist *pagelist;
2994 
2995 		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2996 		if (!pagelist)
2997 			goto out;
2998 
2999 		ceph_pagelist_init(pagelist);
3000 		ceph_pagelist_append(pagelist, outbound, outbound_size);
3001 		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3002 						pagelist);
3003 	}
3004 	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3005 					obj_request->pages, inbound_size,
3006 					0, false, false);
3007 	rbd_osd_req_format_read(obj_request);
3008 
3009 	ret = rbd_obj_request_submit(osdc, obj_request);
3010 	if (ret)
3011 		goto out;
3012 	ret = rbd_obj_request_wait(obj_request);
3013 	if (ret)
3014 		goto out;
3015 
3016 	ret = obj_request->result;
3017 	if (ret < 0)
3018 		goto out;
3019 
3020 	rbd_assert(obj_request->xferred < (u64)INT_MAX);
3021 	ret = (int)obj_request->xferred;
3022 	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3023 out:
3024 	if (obj_request)
3025 		rbd_obj_request_put(obj_request);
3026 	else
3027 		ceph_release_page_vector(pages, page_count);
3028 
3029 	return ret;
3030 }
3031 
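/*
 * Block layer request function.  Each filesystem request is turned
 * into an image request, filled from the request's bio chain, and
 * submitted; requests that are invalid (write to a read-only
 * mapping, vanished snapshot, out-of-range offset) are ended
 * immediately with an error.
 */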
3032 static void rbd_request_fn(struct request_queue *q)
3033 		__releases(q->queue_lock) __acquires(q->queue_lock)
3034 {
3035 	struct rbd_device *rbd_dev = q->queuedata;
3036 	bool read_only = rbd_dev->mapping.read_only;
3037 	struct request *rq;
3038 	int result;
3039 
3040 	while ((rq = blk_fetch_request(q))) {
3041 		bool write_request = rq_data_dir(rq) == WRITE;
3042 		struct rbd_img_request *img_request;
3043 		u64 offset;
3044 		u64 length;
3045 
3046 		/* Ignore any non-FS requests that filter through. */
3047 
3048 		if (rq->cmd_type != REQ_TYPE_FS) {
3049 			dout("%s: non-fs request type %d\n", __func__,
3050 				(int) rq->cmd_type);
3051 			__blk_end_request_all(rq, 0);
3052 			continue;
3053 		}
3054 
3055 		/* Ignore/skip any zero-length requests */
3056 
3057 		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3058 		length = (u64) blk_rq_bytes(rq);
3059 
3060 		if (!length) {
3061 			dout("%s: zero-length request\n", __func__);
3062 			__blk_end_request_all(rq, 0);
3063 			continue;
3064 		}
3065 
3066 		spin_unlock_irq(q->queue_lock);
3067 
3068 		/* Disallow writes to a read-only device */
3069 
3070 		if (write_request) {
3071 			result = -EROFS;
3072 			if (read_only)
3073 				goto end_request;
3074 			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3075 		}
3076 
3077 		/*
3078 		 * Quit early if the mapped snapshot no longer
3079 		 * exists.  It's still possible the snapshot will
3080 		 * have disappeared by the time our request arrives
3081 		 * at the osd, but there's no sense in sending it if
3082 		 * we already know.
3083 		 */
3084 		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3085 			dout("request for non-existent snapshot");
3086 			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3087 			result = -ENXIO;
3088 			goto end_request;
3089 		}
3090 
3091 		result = -EINVAL;
3092 		if (offset && length > U64_MAX - offset + 1) {
3093 			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3094 				offset, length);
3095 			goto end_request;	/* Shouldn't happen */
3096 		}
3097 
3098 		result = -EIO;
3099 		if (offset + length > rbd_dev->mapping.size) {
3100 			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3101 				offset, length, rbd_dev->mapping.size);
3102 			goto end_request;
3103 		}
3104 
3105 		result = -ENOMEM;
3106 		img_request = rbd_img_request_create(rbd_dev, offset, length,
3107 							write_request);
3108 		if (!img_request)
3109 			goto end_request;
3110 
3111 		img_request->rq = rq;
3112 
3113 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3114 						rq->bio);
3115 		if (!result)
3116 			result = rbd_img_request_submit(img_request);
3117 		if (result)
3118 			rbd_img_request_put(img_request);
3119 end_request:
3120 		spin_lock_irq(q->queue_lock);
3121 		if (result < 0) {
3122 			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3123 				write_request ? "write" : "read",
3124 				length, offset, result);
3125 
3126 			__blk_end_request_all(rq, result);
3127 		}
3128 	}
3129 }
3130 
3131 /*
3132  * a queue callback. Makes sure that we don't create a bio that spans across
3133  * multiple osd objects.  One exception would be a single-page bio,
3134  * which we handle later at bio_chain_clone_range()
3135  */
3136 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3137 			  struct bio_vec *bvec)
3138 {
3139 	struct rbd_device *rbd_dev = q->queuedata;
3140 	sector_t sector_offset;
3141 	sector_t sectors_per_obj;
3142 	sector_t obj_sector_offset;
3143 	int ret;
3144 
3145 	/*
3146 	 * Find how far into its rbd object the bio's start sector
3147 	 * falls.  The partition-relative start sector is first made
3148 	 * relative to the enclosing device.
3149 	 */
3150 	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3151 	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3152 	obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3153 
3154 	/*
3155 	 * Compute the number of bytes from that offset to the end
3156 	 * of the object.  Account for what's already used by the bio.
3157 	 */
3158 	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3159 	if (ret > bmd->bi_size)
3160 		ret -= bmd->bi_size;
3161 	else
3162 		ret = 0;
3163 
3164 	/*
3165 	 * Don't send back more than was asked for.  And if the bio
3166 	 * was empty, let the whole thing through because:  "Note
3167 	 * that a block device *must* allow a single page to be
3168 	 * added to an empty bio."
3169 	 */
3170 	rbd_assert(bvec->bv_len <= PAGE_SIZE);
3171 	if (ret > (int) bvec->bv_len || !bmd->bi_size)
3172 		ret = (int) bvec->bv_len;
3173 
3174 	return ret;
3175 }
3176 
3177 static void rbd_free_disk(struct rbd_device *rbd_dev)
3178 {
3179 	struct gendisk *disk = rbd_dev->disk;
3180 
3181 	if (!disk)
3182 		return;
3183 
3184 	rbd_dev->disk = NULL;
3185 	if (disk->flags & GENHD_FL_UP) {
3186 		del_gendisk(disk);
3187 		if (disk->queue)
3188 			blk_cleanup_queue(disk->queue);
3189 	}
3190 	put_disk(disk);
3191 }
3192 
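/*
 * Synchronously read a range of the named object into the supplied
 * buffer.  Returns the number of bytes read, or a negative error
 * code.
 */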
3193 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3194 				const char *object_name,
3195 				u64 offset, u64 length, void *buf)
3196 
3197 {
3198 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3199 	struct rbd_obj_request *obj_request;
3200 	struct page **pages = NULL;
3201 	u32 page_count;
3202 	size_t size;
3203 	int ret;
3204 
3205 	page_count = (u32) calc_pages_for(offset, length);
3206 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3207 	if (IS_ERR(pages))
3208 		return PTR_ERR(pages);
3209 
3210 	ret = -ENOMEM;
3211 	obj_request = rbd_obj_request_create(object_name, offset, length,
3212 							OBJ_REQUEST_PAGES);
3213 	if (!obj_request)
3214 		goto out;
3215 
3216 	obj_request->pages = pages;
3217 	obj_request->page_count = page_count;
3218 
3219 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3220 	if (!obj_request->osd_req)
3221 		goto out;
3222 
3223 	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3224 					offset, length, 0, 0);
3225 	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3226 					obj_request->pages,
3227 					obj_request->length,
3228 					obj_request->offset & ~PAGE_MASK,
3229 					false, false);
3230 	rbd_osd_req_format_read(obj_request);
3231 
3232 	ret = rbd_obj_request_submit(osdc, obj_request);
3233 	if (ret)
3234 		goto out;
3235 	ret = rbd_obj_request_wait(obj_request);
3236 	if (ret)
3237 		goto out;
3238 
3239 	ret = obj_request->result;
3240 	if (ret < 0)
3241 		goto out;
3242 
3243 	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3244 	size = (size_t) obj_request->xferred;
3245 	ceph_copy_from_page_vector(pages, buf, 0, size);
3246 	rbd_assert(size <= (size_t)INT_MAX);
3247 	ret = (int)size;
3248 out:
3249 	if (obj_request)
3250 		rbd_obj_request_put(obj_request);
3251 	else
3252 		ceph_release_page_vector(pages, page_count);
3253 
3254 	return ret;
3255 }
3256 
3257 /*
3258  * Read the complete header for the given rbd device.  On successful
3259  * return, the rbd_dev->header field will contain up-to-date
3260  * information about the image.
3261  */
3262 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3263 {
3264 	struct rbd_image_header_ondisk *ondisk = NULL;
3265 	u32 snap_count = 0;
3266 	u64 names_size = 0;
3267 	u32 want_count;
3268 	int ret;
3269 
3270 	/*
3271 	 * The complete header will include an array of its 64-bit
3272 	 * snapshot ids, followed by the names of those snapshots as
3273 	 * a contiguous block of NUL-terminated strings.  Note that
3274 	 * the number of snapshots could change by the time we read
3275 	 * it in, in which case we re-read it.
3276 	 */
3277 	do {
3278 		size_t size;
3279 
3280 		kfree(ondisk);
3281 
3282 		size = sizeof (*ondisk);
3283 		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3284 		size += names_size;
3285 		ondisk = kmalloc(size, GFP_KERNEL);
3286 		if (!ondisk)
3287 			return -ENOMEM;
3288 
3289 		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3290 				       0, size, ondisk);
3291 		if (ret < 0)
3292 			goto out;
3293 		if ((size_t)ret < size) {
3294 			ret = -ENXIO;
3295 			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3296 				size, ret);
3297 			goto out;
3298 		}
3299 		if (!rbd_dev_ondisk_valid(ondisk)) {
3300 			ret = -ENXIO;
3301 			rbd_warn(rbd_dev, "invalid header");
3302 			goto out;
3303 		}
3304 
3305 		names_size = le64_to_cpu(ondisk->snap_names_len);
3306 		want_count = snap_count;
3307 		snap_count = le32_to_cpu(ondisk->snap_count);
3308 	} while (snap_count != want_count);
3309 
3310 	ret = rbd_header_from_disk(rbd_dev, ondisk);
3311 out:
3312 	kfree(ondisk);
3313 
3314 	return ret;
3315 }
3316 
3317 /*
3318  * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3319  * has disappeared from the (just updated) snapshot context.
3320  */
3321 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3322 {
3323 	u64 snap_id;
3324 
3325 	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3326 		return;
3327 
3328 	snap_id = rbd_dev->spec->snap_id;
3329 	if (snap_id == CEPH_NOSNAP)
3330 		return;
3331 
3332 	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3333 		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3334 }
3335 
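/*
 * Re-read the image header (format 1 or 2), revalidate the EXISTS
 * flag for a mapped snapshot, and update the block device capacity
 * if the mapping size changed.
 */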
3336 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3337 {
3338 	u64 mapping_size;
3339 	int ret;
3340 
3341 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3342 	mapping_size = rbd_dev->mapping.size;
3343 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3344 	if (rbd_dev->image_format == 1)
3345 		ret = rbd_dev_v1_header_info(rbd_dev);
3346 	else
3347 		ret = rbd_dev_v2_header_info(rbd_dev);
3348 
3349 	/* If it's a mapped snapshot, validate its EXISTS flag */
3350 
3351 	rbd_exists_validate(rbd_dev);
3352 	mutex_unlock(&ctl_mutex);
3353 	if (mapping_size != rbd_dev->mapping.size) {
3354 		sector_t size;
3355 
3356 		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3357 		dout("setting size to %llu sectors", (unsigned long long)size);
3358 		set_capacity(rbd_dev->disk, size);
3359 		revalidate_disk(rbd_dev->disk);
3360 	}
3361 
3362 	return ret;
3363 }
3364 
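/*
 * Allocate the gendisk and request queue for the mapped image and
 * set the queue limits so that I/O lines up with rbd object
 * boundaries.
 */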
3365 static int rbd_init_disk(struct rbd_device *rbd_dev)
3366 {
3367 	struct gendisk *disk;
3368 	struct request_queue *q;
3369 	u64 segment_size;
3370 
3371 	/* create gendisk info */
3372 	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3373 	if (!disk)
3374 		return -ENOMEM;
3375 
3376 	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3377 		 rbd_dev->dev_id);
3378 	disk->major = rbd_dev->major;
3379 	disk->first_minor = 0;
3380 	disk->fops = &rbd_bd_ops;
3381 	disk->private_data = rbd_dev;
3382 
3383 	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3384 	if (!q)
3385 		goto out_disk;
3386 
3387 	/* We use the default size, but let's be explicit about it. */
3388 	blk_queue_physical_block_size(q, SECTOR_SIZE);
3389 
3390 	/* set io sizes to object size */
3391 	segment_size = rbd_obj_bytes(&rbd_dev->header);
3392 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3393 	blk_queue_max_segment_size(q, segment_size);
3394 	blk_queue_io_min(q, segment_size);
3395 	blk_queue_io_opt(q, segment_size);
3396 
3397 	blk_queue_merge_bvec(q, rbd_merge_bvec);
3398 	disk->queue = q;
3399 
3400 	q->queuedata = rbd_dev;
3401 
3402 	rbd_dev->disk = disk;
3403 
3404 	return 0;
3405 out_disk:
3406 	put_disk(disk);
3407 
3408 	return -ENOMEM;
3409 }
3410 
3411 /*
3412   sysfs
3413 */
3414 
3415 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3416 {
3417 	return container_of(dev, struct rbd_device, dev);
3418 }
3419 
3420 static ssize_t rbd_size_show(struct device *dev,
3421 			     struct device_attribute *attr, char *buf)
3422 {
3423 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3424 
3425 	return sprintf(buf, "%llu\n",
3426 		(unsigned long long)rbd_dev->mapping.size);
3427 }
3428 
3429 /*
3430  * Note this shows the features for whatever's mapped, which is not
3431  * necessarily the base image.
3432  */
3433 static ssize_t rbd_features_show(struct device *dev,
3434 			     struct device_attribute *attr, char *buf)
3435 {
3436 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3437 
3438 	return sprintf(buf, "0x%016llx\n",
3439 			(unsigned long long)rbd_dev->mapping.features);
3440 }
3441 
3442 static ssize_t rbd_major_show(struct device *dev,
3443 			      struct device_attribute *attr, char *buf)
3444 {
3445 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3446 
3447 	if (rbd_dev->major)
3448 		return sprintf(buf, "%d\n", rbd_dev->major);
3449 
3450 	return sprintf(buf, "(none)\n");
3451 
3452 }
3453 
3454 static ssize_t rbd_client_id_show(struct device *dev,
3455 				  struct device_attribute *attr, char *buf)
3456 {
3457 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3458 
3459 	return sprintf(buf, "client%lld\n",
3460 			ceph_client_id(rbd_dev->rbd_client->client));
3461 }
3462 
3463 static ssize_t rbd_pool_show(struct device *dev,
3464 			     struct device_attribute *attr, char *buf)
3465 {
3466 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3467 
3468 	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3469 }
3470 
3471 static ssize_t rbd_pool_id_show(struct device *dev,
3472 			     struct device_attribute *attr, char *buf)
3473 {
3474 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3475 
3476 	return sprintf(buf, "%llu\n",
3477 			(unsigned long long) rbd_dev->spec->pool_id);
3478 }
3479 
3480 static ssize_t rbd_name_show(struct device *dev,
3481 			     struct device_attribute *attr, char *buf)
3482 {
3483 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3484 
3485 	if (rbd_dev->spec->image_name)
3486 		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3487 
3488 	return sprintf(buf, "(unknown)\n");
3489 }
3490 
3491 static ssize_t rbd_image_id_show(struct device *dev,
3492 			     struct device_attribute *attr, char *buf)
3493 {
3494 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3495 
3496 	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3497 }
3498 
3499 /*
3500  * Shows the name of the currently-mapped snapshot (or
3501  * RBD_SNAP_HEAD_NAME for the base image).
3502  */
3503 static ssize_t rbd_snap_show(struct device *dev,
3504 			     struct device_attribute *attr,
3505 			     char *buf)
3506 {
3507 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3508 
3509 	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3510 }
3511 
3512 /*
3513  * For an rbd v2 image, shows the pool id, image id, and snapshot id
3514  * for the parent image.  If there is no parent, simply shows
3515  * "(no parent image)".
3516  */
3517 static ssize_t rbd_parent_show(struct device *dev,
3518 			     struct device_attribute *attr,
3519 			     char *buf)
3520 {
3521 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3522 	struct rbd_spec *spec = rbd_dev->parent_spec;
3523 	int count;
3524 	char *bufp = buf;
3525 
3526 	if (!spec)
3527 		return sprintf(buf, "(no parent image)\n");
3528 
3529 	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3530 			(unsigned long long) spec->pool_id, spec->pool_name);
3531 	if (count < 0)
3532 		return count;
3533 	bufp += count;
3534 
3535 	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3536 			spec->image_name ? spec->image_name : "(unknown)");
3537 	if (count < 0)
3538 		return count;
3539 	bufp += count;
3540 
3541 	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3542 			(unsigned long long) spec->snap_id, spec->snap_name);
3543 	if (count < 0)
3544 		return count;
3545 	bufp += count;
3546 
3547 	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3548 	if (count < 0)
3549 		return count;
3550 	bufp += count;
3551 
3552 	return (ssize_t) (bufp - buf);
3553 }
3554 
3555 static ssize_t rbd_image_refresh(struct device *dev,
3556 				 struct device_attribute *attr,
3557 				 const char *buf,
3558 				 size_t size)
3559 {
3560 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3561 	int ret;
3562 
3563 	ret = rbd_dev_refresh(rbd_dev);
3564 	if (ret)
3565 		rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3566 
3567 	return ret < 0 ? ret : size;
3568 }
3569 
3570 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3571 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3572 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3573 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3574 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3575 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3576 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3577 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3578 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3579 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3580 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3581 
3582 static struct attribute *rbd_attrs[] = {
3583 	&dev_attr_size.attr,
3584 	&dev_attr_features.attr,
3585 	&dev_attr_major.attr,
3586 	&dev_attr_client_id.attr,
3587 	&dev_attr_pool.attr,
3588 	&dev_attr_pool_id.attr,
3589 	&dev_attr_name.attr,
3590 	&dev_attr_image_id.attr,
3591 	&dev_attr_current_snap.attr,
3592 	&dev_attr_parent.attr,
3593 	&dev_attr_refresh.attr,
3594 	NULL
3595 };
3596 
3597 static struct attribute_group rbd_attr_group = {
3598 	.attrs = rbd_attrs,
3599 };
3600 
3601 static const struct attribute_group *rbd_attr_groups[] = {
3602 	&rbd_attr_group,
3603 	NULL
3604 };
3605 
3606 static void rbd_sysfs_dev_release(struct device *dev)
3607 {
3608 }
3609 
3610 static struct device_type rbd_device_type = {
3611 	.name		= "rbd",
3612 	.groups		= rbd_attr_groups,
3613 	.release	= rbd_sysfs_dev_release,
3614 };
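
/*
 * How the attributes above reach user space (a brief sketch, not
 * normative): each mapped image gets a directory under
 * /sys/bus/rbd/devices/<id>/, where <id> is the numeric id assigned
 * by rbd_dev_id_get() below, with one file per attribute (all
 * read-only except "refresh").  For example:
 *
 *	cat /sys/bus/rbd/devices/<id>/size          # mapped size, in bytes
 *	cat /sys/bus/rbd/devices/<id>/current_snap  # "-" for the base image
 *	echo 1 > /sys/bus/rbd/devices/<id>/refresh  # force a header re-read
 */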
3615 
3616 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3617 {
3618 	kref_get(&spec->kref);
3619 
3620 	return spec;
3621 }
3622 
3623 static void rbd_spec_free(struct kref *kref);
3624 static void rbd_spec_put(struct rbd_spec *spec)
3625 {
3626 	if (spec)
3627 		kref_put(&spec->kref, rbd_spec_free);
3628 }
3629 
3630 static struct rbd_spec *rbd_spec_alloc(void)
3631 {
3632 	struct rbd_spec *spec;
3633 
3634 	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3635 	if (!spec)
3636 		return NULL;
3637 	kref_init(&spec->kref);
3638 
3639 	return spec;
3640 }
3641 
3642 static void rbd_spec_free(struct kref *kref)
3643 {
3644 	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3645 
3646 	kfree(spec->pool_name);
3647 	kfree(spec->image_id);
3648 	kfree(spec->image_name);
3649 	kfree(spec->snap_name);
3650 	kfree(spec);
3651 }
3652 
3653 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3654 				struct rbd_spec *spec)
3655 {
3656 	struct rbd_device *rbd_dev;
3657 
3658 	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3659 	if (!rbd_dev)
3660 		return NULL;
3661 
3662 	spin_lock_init(&rbd_dev->lock);
3663 	rbd_dev->flags = 0;
3664 	atomic_set(&rbd_dev->parent_ref, 0);
3665 	INIT_LIST_HEAD(&rbd_dev->node);
3666 	init_rwsem(&rbd_dev->header_rwsem);
3667 
3668 	rbd_dev->spec = spec;
3669 	rbd_dev->rbd_client = rbdc;
3670 
3671 	/* Initialize the layout used for all rbd requests */
3672 
3673 	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3674 	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3675 	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3676 	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3677 
3678 	return rbd_dev;
3679 }
3680 
3681 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3682 {
3683 	rbd_put_client(rbd_dev->rbd_client);
3684 	rbd_spec_put(rbd_dev->spec);
3685 	kfree(rbd_dev);
3686 }
3687 
3688 /*
3689  * Get the size and object order for an image snapshot, or if
3690  * snap_id is CEPH_NOSNAP, get this information for the base
3691  * image.
3692  */
3693 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3694 				u8 *order, u64 *snap_size)
3695 {
3696 	__le64 snapid = cpu_to_le64(snap_id);
3697 	int ret;
3698 	struct {
3699 		u8 order;
3700 		__le64 size;
3701 	} __attribute__ ((packed)) size_buf = { 0 };
3702 
3703 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3704 				"rbd", "get_size",
3705 				&snapid, sizeof (snapid),
3706 				&size_buf, sizeof (size_buf));
3707 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3708 	if (ret < 0)
3709 		return ret;
3710 	if (ret < sizeof (size_buf))
3711 		return -ERANGE;
3712 
3713 	if (order)
3714 		*order = size_buf.order;
3715 	*snap_size = le64_to_cpu(size_buf.size);
3716 
3717 	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
3718 		(unsigned long long)snap_id, order ? (unsigned int)*order : 0,
3719 		(unsigned long long)*snap_size);
3720 
3721 	return 0;
3722 }
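
/*
 * Note for readers: the "order" decoded above is the log2 of the
 * image's backing object size, i.e. object_size = 1ULL << order.  The
 * usual default, order 22, corresponds to 4 MiB objects.
 */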
3723 
3724 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3725 {
3726 	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3727 					&rbd_dev->header.obj_order,
3728 					&rbd_dev->header.image_size);
3729 }
3730 
3731 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3732 {
3733 	void *reply_buf;
3734 	int ret;
3735 	void *p;
3736 
3737 	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3738 	if (!reply_buf)
3739 		return -ENOMEM;
3740 
3741 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3742 				"rbd", "get_object_prefix", NULL, 0,
3743 				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3744 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3745 	if (ret < 0)
3746 		goto out;
3747 
3748 	p = reply_buf;
3749 	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3750 						p + ret, NULL, GFP_NOIO);
3751 	ret = 0;
3752 
3753 	if (IS_ERR(rbd_dev->header.object_prefix)) {
3754 		ret = PTR_ERR(rbd_dev->header.object_prefix);
3755 		rbd_dev->header.object_prefix = NULL;
3756 	} else {
3757 		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3758 	}
3759 out:
3760 	kfree(reply_buf);
3761 
3762 	return ret;
3763 }
3764 
3765 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3766 		u64 *snap_features)
3767 {
3768 	__le64 snapid = cpu_to_le64(snap_id);
3769 	struct {
3770 		__le64 features;
3771 		__le64 incompat;
3772 	} __attribute__ ((packed)) features_buf = { 0 };
3773 	u64 incompat;
3774 	int ret;
3775 
3776 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3777 				"rbd", "get_features",
3778 				&snapid, sizeof (snapid),
3779 				&features_buf, sizeof (features_buf));
3780 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3781 	if (ret < 0)
3782 		return ret;
3783 	if (ret < sizeof (features_buf))
3784 		return -ERANGE;
3785 
3786 	incompat = le64_to_cpu(features_buf.incompat);
3787 	if (incompat & ~RBD_FEATURES_SUPPORTED)
3788 		return -ENXIO;
3789 
3790 	*snap_features = le64_to_cpu(features_buf.features);
3791 
3792 	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3793 		(unsigned long long)snap_id,
3794 		(unsigned long long)*snap_features,
3795 		(unsigned long long)le64_to_cpu(features_buf.incompat));
3796 
3797 	return 0;
3798 }
3799 
3800 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3801 {
3802 	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3803 						&rbd_dev->header.features);
3804 }
3805 
3806 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3807 {
3808 	struct rbd_spec *parent_spec;
3809 	size_t size;
3810 	void *reply_buf = NULL;
3811 	__le64 snapid;
3812 	void *p;
3813 	void *end;
3814 	u64 pool_id;
3815 	char *image_id;
3816 	u64 overlap;
3817 	int ret;
3818 
3819 	parent_spec = rbd_spec_alloc();
3820 	if (!parent_spec)
3821 		return -ENOMEM;
3822 
3823 	size = sizeof (__le64) +				/* pool_id */
3824 		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
3825 		sizeof (__le64) +				/* snap_id */
3826 		sizeof (__le64);				/* overlap */
3827 	reply_buf = kmalloc(size, GFP_KERNEL);
3828 	if (!reply_buf) {
3829 		ret = -ENOMEM;
3830 		goto out_err;
3831 	}
3832 
3833 	snapid = cpu_to_le64(CEPH_NOSNAP);
3834 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3835 				"rbd", "get_parent",
3836 				&snapid, sizeof (snapid),
3837 				reply_buf, size);
3838 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3839 	if (ret < 0)
3840 		goto out_err;
3841 
3842 	p = reply_buf;
3843 	end = reply_buf + ret;
3844 	ret = -ERANGE;
3845 	ceph_decode_64_safe(&p, end, pool_id, out_err);
3846 	if (pool_id == CEPH_NOPOOL) {
3847 		/*
3848 		 * Either the parent never existed, or we have a
3849 		 * record of it but the image got flattened so it no
3850 		 * longer has a parent.  When the parent of a
3851 		 * layered image disappears we immediately set the
3852 		 * overlap to 0.  The effect of this is that all new
3853 		 * requests will be treated as if the image had no
3854 		 * parent.
3855 		 */
3856 		if (rbd_dev->parent_overlap) {
3857 			rbd_dev->parent_overlap = 0;
3858 			smp_mb();
3859 			rbd_dev_parent_put(rbd_dev);
3860 			pr_info("%s: clone image has been flattened\n",
3861 				rbd_dev->disk->disk_name);
3862 		}
3863 
3864 		goto out;	/* No parent?  No problem. */
3865 	}
3866 
3867 	/* The ceph file layout needs to fit pool id in 32 bits */
3868 
3869 	ret = -EIO;
3870 	if (pool_id > (u64)U32_MAX) {
3871 		rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3872 			(unsigned long long)pool_id, U32_MAX);
3873 		goto out_err;
3874 	}
3875 	parent_spec->pool_id = pool_id;
3876 
3877 	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3878 	if (IS_ERR(image_id)) {
3879 		ret = PTR_ERR(image_id);
3880 		goto out_err;
3881 	}
3882 	parent_spec->image_id = image_id;
3883 	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3884 	ceph_decode_64_safe(&p, end, overlap, out_err);
3885 
3886 	if (overlap) {
3887 		rbd_spec_put(rbd_dev->parent_spec);
3888 		rbd_dev->parent_spec = parent_spec;
3889 		parent_spec = NULL;	/* rbd_dev now owns this */
3890 		rbd_dev->parent_overlap = overlap;
3891 	} else {
3892 		rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3893 	}
3894 out:
3895 	ret = 0;
3896 out_err:
3897 	kfree(reply_buf);
3898 	rbd_spec_put(parent_spec);
3899 
3900 	return ret;
3901 }
3902 
3903 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3904 {
3905 	struct {
3906 		__le64 stripe_unit;
3907 		__le64 stripe_count;
3908 	} __attribute__ ((packed)) striping_info_buf = { 0 };
3909 	size_t size = sizeof (striping_info_buf);
3910 	void *p;
3911 	u64 obj_size;
3912 	u64 stripe_unit;
3913 	u64 stripe_count;
3914 	int ret;
3915 
3916 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3917 				"rbd", "get_stripe_unit_count", NULL, 0,
3918 				(char *)&striping_info_buf, size);
3919 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3920 	if (ret < 0)
3921 		return ret;
3922 	if (ret < size)
3923 		return -ERANGE;
3924 
3925 	/*
3926 	 * We don't actually support the "fancy striping" feature
3927 	 * (STRIPINGV2) yet, but if the striping sizes are the
3928 	 * defaults the behavior is the same as before.  So find
3929 	 * out, and only fail if the image has non-default values.
3930 	 */
3931 	ret = -EINVAL;
3932 	obj_size = (u64)1 << rbd_dev->header.obj_order;
3933 	p = &striping_info_buf;
3934 	stripe_unit = ceph_decode_64(&p);
3935 	if (stripe_unit != obj_size) {
3936 		rbd_warn(rbd_dev, "unsupported stripe unit "
3937 				"(got %llu want %llu)",
3938 				stripe_unit, obj_size);
3939 		return -EINVAL;
3940 	}
3941 	stripe_count = ceph_decode_64(&p);
3942 	if (stripe_count != 1) {
3943 		rbd_warn(rbd_dev, "unsupported stripe count "
3944 				"(got %llu want 1)", stripe_count);
3945 		return -EINVAL;
3946 	}
3947 	rbd_dev->header.stripe_unit = stripe_unit;
3948 	rbd_dev->header.stripe_count = stripe_count;
3949 
3950 	return 0;
3951 }
3952 
3953 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3954 {
3955 	size_t image_id_size;
3956 	char *image_id;
3957 	void *p;
3958 	void *end;
3959 	size_t size;
3960 	void *reply_buf = NULL;
3961 	size_t len = 0;
3962 	char *image_name = NULL;
3963 	int ret;
3964 
3965 	rbd_assert(!rbd_dev->spec->image_name);
3966 
3967 	len = strlen(rbd_dev->spec->image_id);
3968 	image_id_size = sizeof (__le32) + len;
3969 	image_id = kmalloc(image_id_size, GFP_KERNEL);
3970 	if (!image_id)
3971 		return NULL;
3972 
3973 	p = image_id;
3974 	end = image_id + image_id_size;
3975 	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3976 
3977 	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3978 	reply_buf = kmalloc(size, GFP_KERNEL);
3979 	if (!reply_buf)
3980 		goto out;
3981 
3982 	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3983 				"rbd", "dir_get_name",
3984 				image_id, image_id_size,
3985 				reply_buf, size);
3986 	if (ret < 0)
3987 		goto out;
3988 	p = reply_buf;
3989 	end = reply_buf + ret;
3990 
3991 	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3992 	if (IS_ERR(image_name))
3993 		image_name = NULL;
3994 	else
3995 		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3996 out:
3997 	kfree(reply_buf);
3998 	kfree(image_id);
3999 
4000 	return image_name;
4001 }
4002 
4003 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4004 {
4005 	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4006 	const char *snap_name;
4007 	u32 which = 0;
4008 
4009 	/* Skip over names until we find the one we are looking for */
4010 
4011 	snap_name = rbd_dev->header.snap_names;
4012 	while (which < snapc->num_snaps) {
4013 		if (!strcmp(name, snap_name))
4014 			return snapc->snaps[which];
4015 		snap_name += strlen(snap_name) + 1;
4016 		which++;
4017 	}
4018 	return CEPH_NOSNAP;
4019 }
4020 
4021 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4022 {
4023 	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4024 	u32 which;
4025 	bool found = false;
4026 	u64 snap_id;
4027 
4028 	for (which = 0; !found && which < snapc->num_snaps; which++) {
4029 		const char *snap_name;
4030 
4031 		snap_id = snapc->snaps[which];
4032 		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4033 		if (IS_ERR(snap_name))
4034 			break;
4035 		found = !strcmp(name, snap_name);
4036 		kfree(snap_name);
4037 	}
4038 	return found ? snap_id : CEPH_NOSNAP;
4039 }
4040 
4041 /*
4042  * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4043  * no snapshot by that name is found, or if an error occurs.
4044  */
4045 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4046 {
4047 	if (rbd_dev->image_format == 1)
4048 		return rbd_v1_snap_id_by_name(rbd_dev, name);
4049 
4050 	return rbd_v2_snap_id_by_name(rbd_dev, name);
4051 }
4052 
4053 /*
4054  * When an rbd image has a parent image, it is identified by the
4055  * pool, image, and snapshot ids (not names).  This function fills
4056  * in the names for those ids.  (It's OK if we can't figure out the
4057  * name for an image id, but the pool and snapshot ids should always
4058  * exist and have names.)  All names in an rbd spec are dynamically
4059  * allocated.
4060  *
4061  * When an image being mapped (not a parent) is probed, we have the
4062  * pool name and pool id, image name and image id, and the snapshot
4063  * name.  The only thing we're missing is the snapshot id.
4064  */
4065 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4066 {
4067 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4068 	struct rbd_spec *spec = rbd_dev->spec;
4069 	const char *pool_name;
4070 	const char *image_name;
4071 	const char *snap_name;
4072 	int ret;
4073 
4074 	/*
4075 	 * An image being mapped will have the pool name (etc.), but
4076 	 * we need to look up the snapshot id.
4077 	 */
4078 	if (spec->pool_name) {
4079 		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4080 			u64 snap_id;
4081 
4082 			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4083 			if (snap_id == CEPH_NOSNAP)
4084 				return -ENOENT;
4085 			spec->snap_id = snap_id;
4086 		} else {
4087 			spec->snap_id = CEPH_NOSNAP;
4088 		}
4089 
4090 		return 0;
4091 	}
4092 
4093 	/* Get the pool name; we have to make our own copy of this */
4094 
4095 	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4096 	if (!pool_name) {
4097 		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4098 		return -EIO;
4099 	}
4100 	pool_name = kstrdup(pool_name, GFP_KERNEL);
4101 	if (!pool_name)
4102 		return -ENOMEM;
4103 
4104 	/* Fetch the image name; tolerate failure here */
4105 
4106 	image_name = rbd_dev_image_name(rbd_dev);
4107 	if (!image_name)
4108 		rbd_warn(rbd_dev, "unable to get image name");
4109 
4110 	/* Look up the snapshot name, and make a copy */
4111 
4112 	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4113 	if (!snap_name) {
4114 		ret = -ENOMEM;
4115 		goto out_err;
4116 	}
4117 
4118 	spec->pool_name = pool_name;
4119 	spec->image_name = image_name;
4120 	spec->snap_name = snap_name;
4121 
4122 	return 0;
4123 out_err:
4124 	kfree(image_name);
4125 	kfree(pool_name);
4126 
4127 	return ret;
4128 }
4129 
4130 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4131 {
4132 	size_t size;
4133 	int ret;
4134 	void *reply_buf;
4135 	void *p;
4136 	void *end;
4137 	u64 seq;
4138 	u32 snap_count;
4139 	struct ceph_snap_context *snapc;
4140 	u32 i;
4141 
4142 	/*
4143 	 * We'll need room for the seq value (maximum snapshot id),
4144 	 * snapshot count, and array of that many snapshot ids.
4145 	 * For now we have a fixed upper limit on the number we're
4146 	 * prepared to receive.
4147 	 */
4148 	size = sizeof (__le64) + sizeof (__le32) +
4149 			RBD_MAX_SNAP_COUNT * sizeof (__le64);
4150 	reply_buf = kzalloc(size, GFP_KERNEL);
4151 	if (!reply_buf)
4152 		return -ENOMEM;
4153 
4154 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4155 				"rbd", "get_snapcontext", NULL, 0,
4156 				reply_buf, size);
4157 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4158 	if (ret < 0)
4159 		goto out;
4160 
4161 	p = reply_buf;
4162 	end = reply_buf + ret;
4163 	ret = -ERANGE;
4164 	ceph_decode_64_safe(&p, end, seq, out);
4165 	ceph_decode_32_safe(&p, end, snap_count, out);
4166 
4167 	/*
4168 	 * Make sure the reported number of snapshot ids wouldn't go
4169 	 * beyond the end of our buffer.  But before checking that,
4170 	 * make sure the computed size of the snapshot context we
4171 	 * allocate is representable in a size_t.
4172 	 */
4173 	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4174 				 / sizeof (u64)) {
4175 		ret = -EINVAL;
4176 		goto out;
4177 	}
4178 	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4179 		goto out;
4180 	ret = 0;
4181 
4182 	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4183 	if (!snapc) {
4184 		ret = -ENOMEM;
4185 		goto out;
4186 	}
4187 	snapc->seq = seq;
4188 	for (i = 0; i < snap_count; i++)
4189 		snapc->snaps[i] = ceph_decode_64(&p);
4190 
4191 	ceph_put_snap_context(rbd_dev->header.snapc);
4192 	rbd_dev->header.snapc = snapc;
4193 
4194 	dout("  snap context seq = %llu, snap_count = %u\n",
4195 		(unsigned long long)seq, (unsigned int)snap_count);
4196 out:
4197 	kfree(reply_buf);
4198 
4199 	return ret;
4200 }
4201 
4202 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4203 					u64 snap_id)
4204 {
4205 	size_t size;
4206 	void *reply_buf;
4207 	__le64 snapid;
4208 	int ret;
4209 	void *p;
4210 	void *end;
4211 	char *snap_name;
4212 
4213 	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4214 	reply_buf = kmalloc(size, GFP_KERNEL);
4215 	if (!reply_buf)
4216 		return ERR_PTR(-ENOMEM);
4217 
4218 	snapid = cpu_to_le64(snap_id);
4219 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4220 				"rbd", "get_snapshot_name",
4221 				&snapid, sizeof (snapid),
4222 				reply_buf, size);
4223 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4224 	if (ret < 0) {
4225 		snap_name = ERR_PTR(ret);
4226 		goto out;
4227 	}
4228 
4229 	p = reply_buf;
4230 	end = reply_buf + ret;
4231 	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4232 	if (IS_ERR(snap_name))
4233 		goto out;
4234 
4235 	dout("  snap_id 0x%016llx snap_name = %s\n",
4236 		(unsigned long long)snap_id, snap_name);
4237 out:
4238 	kfree(reply_buf);
4239 
4240 	return snap_name;
4241 }
4242 
4243 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4244 {
4245 	bool first_time = rbd_dev->header.object_prefix == NULL;
4246 	int ret;
4247 
4248 	down_write(&rbd_dev->header_rwsem);
4249 
4250 	ret = rbd_dev_v2_image_size(rbd_dev);
4251 	if (ret)
4252 		goto out;
4253 
4254 	if (first_time) {
4255 		ret = rbd_dev_v2_header_onetime(rbd_dev);
4256 		if (ret)
4257 			goto out;
4258 	}
4259 
4260 	/*
4261 	 * If the image supports layering, get the parent info.  We
4262 	 * need to probe the first time regardless.  Thereafter we
4263 	 * only need to if there's a parent, to see if it has
4264 	 * disappeared due to the mapped image getting flattened.
4265 	 */
4266 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4267 			(first_time || rbd_dev->parent_spec)) {
4268 		bool warn;
4269 
4270 		ret = rbd_dev_v2_parent_info(rbd_dev);
4271 		if (ret)
4272 			goto out;
4273 
4274 		/*
4275 		 * Print a warning if this is the initial probe and
4276 		 * the image has a parent.  Don't print it if the
4277 		 * image now being probed is itself a parent.  We
4278 		 * can tell at this point because we won't know its
4279 		 * pool name yet (just its pool id).
4280 		 */
4281 		warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4282 		if (first_time && warn)
4283 			rbd_warn(rbd_dev, "WARNING: kernel layering "
4284 					"is EXPERIMENTAL!");
4285 	}
4286 
4287 	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4288 		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4289 			rbd_dev->mapping.size = rbd_dev->header.image_size;
4290 
4291 	ret = rbd_dev_v2_snap_context(rbd_dev);
4292 	dout("rbd_dev_v2_snap_context returned %d\n", ret);
4293 out:
4294 	up_write(&rbd_dev->header_rwsem);
4295 
4296 	return ret;
4297 }
4298 
4299 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4300 {
4301 	struct device *dev;
4302 	int ret;
4303 
4304 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4305 
4306 	dev = &rbd_dev->dev;
4307 	dev->bus = &rbd_bus_type;
4308 	dev->type = &rbd_device_type;
4309 	dev->parent = &rbd_root_dev;
4310 	dev->release = rbd_dev_device_release;
4311 	dev_set_name(dev, "%d", rbd_dev->dev_id);
4312 	ret = device_register(dev);
4313 
4314 	mutex_unlock(&ctl_mutex);
4315 
4316 	return ret;
4317 }
4318 
4319 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4320 {
4321 	device_unregister(&rbd_dev->dev);
4322 }
4323 
4324 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4325 
4326 /*
4327  * Get a unique rbd identifier for the given new rbd_dev, and add
4328  * the rbd_dev to the global list.  The minimum rbd id is 1.
4329  */
4330 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4331 {
4332 	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4333 
4334 	spin_lock(&rbd_dev_list_lock);
4335 	list_add_tail(&rbd_dev->node, &rbd_dev_list);
4336 	spin_unlock(&rbd_dev_list_lock);
4337 	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4338 		(unsigned long long) rbd_dev->dev_id);
4339 }
4340 
4341 /*
4342  * Remove an rbd_dev from the global list, and record that its
4343  * identifier is no longer in use.
4344  */
4345 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4346 {
4347 	struct list_head *tmp;
4348 	int rbd_id = rbd_dev->dev_id;
4349 	int max_id;
4350 
4351 	rbd_assert(rbd_id > 0);
4352 
4353 	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4354 		(unsigned long long) rbd_dev->dev_id);
4355 	spin_lock(&rbd_dev_list_lock);
4356 	list_del_init(&rbd_dev->node);
4357 
4358 	/*
4359 	 * If the id being "put" is not the current maximum, there
4360 	 * is nothing special we need to do.
4361 	 */
4362 	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4363 		spin_unlock(&rbd_dev_list_lock);
4364 		return;
4365 	}
4366 
4367 	/*
4368 	 * We need to update the current maximum id.  Search the
4369 	 * list to find out what it is.  We're more likely to find
4370 	 * the maximum at the end, so search the list backward.
4371 	 */
4372 	max_id = 0;
4373 	list_for_each_prev(tmp, &rbd_dev_list) {
4374 		struct rbd_device *rbd_dev;
4375 
4376 		rbd_dev = list_entry(tmp, struct rbd_device, node);
4377 		if (rbd_dev->dev_id > max_id)
4378 			max_id = rbd_dev->dev_id;
4379 	}
4380 	spin_unlock(&rbd_dev_list_lock);
4381 
4382 	/*
4383 	 * The max id could have been updated by rbd_dev_id_get(), in
4384 	 * which case it now accurately reflects the new maximum.
4385 	 * Be careful not to overwrite the maximum value in that
4386 	 * case.
4387 	 */
4388 	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4389 	dout("  max dev id has been reset\n");
4390 }
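
/*
 * Net effect of the two id helpers above: device ids only ever grow,
 * and an id becomes reusable only when the device holding the current
 * maximum id is removed; removing any other device leaves the counter
 * untouched.
 */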
4391 
4392 /*
4393  * Skips over white space at *buf, and updates *buf to point to the
4394  * first found non-space character (if any). Returns the length of
4395  * the token (string of non-white space characters) found.  Note
4396  * that *buf must be terminated with '\0'.
4397  */
4398 static inline size_t next_token(const char **buf)
4399 {
4400 	/*
4401 	 * These are the characters that produce nonzero for
4402 	 * isspace() in the "C" and "POSIX" locales.
4403 	 */
4404 	const char *spaces = " \f\n\r\t\v";
4405 
4406 	*buf += strspn(*buf, spaces);	/* Find start of token */
4407 
4408 	return strcspn(*buf, spaces);	/* Return token length */
4409 }
4410 
4411 /*
4412  * Finds the next token in *buf, and if the provided token buffer is
4413  * big enough, copies the found token into it.  The result, if
4414  * copied, is guaranteed to be terminated with '\0'.  Note that *buf
4415  * must be terminated with '\0' on entry.
4416  *
4417  * Returns the length of the token found (not including the '\0').
4418  * Return value will be 0 if no token is found, and it will be >=
4419  * token_size if the token would not fit.
4420  *
4421  * The *buf pointer will be updated to point beyond the end of the
4422  * found token.  Note that this occurs even if the token buffer is
4423  * too small to hold it.
4424  */
4425 static inline size_t copy_token(const char **buf,
4426 				char *token,
4427 				size_t token_size)
4428 {
4429 	size_t len;
4430 
4431 	len = next_token(buf);
4432 	if (len < token_size) {
4433 		memcpy(token, *buf, len);
4434 		*(token + len) = '\0';
4435 	}
4436 	*buf += len;
4437 
4438 	return len;
4439 }
4440 
4441 /*
4442  * Finds the next token in *buf, dynamically allocates a buffer big
4443  * enough to hold a copy of it, and copies the token into the new
4444  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
4445  * that a duplicate buffer is created even for a zero-length token.
4446  *
4447  * Returns a pointer to the newly-allocated duplicate, or a null
4448  * pointer if memory for the duplicate was not available.  If
4449  * the lenp argument is a non-null pointer, the length of the token
4450  * (not including the '\0') is returned in *lenp.
4451  *
4452  * If successful, the *buf pointer will be updated to point beyond
4453  * the end of the found token.
4454  *
4455  * Note: uses GFP_KERNEL for allocation.
4456  */
4457 static inline char *dup_token(const char **buf, size_t *lenp)
4458 {
4459 	char *dup;
4460 	size_t len;
4461 
4462 	len = next_token(buf);
4463 	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4464 	if (!dup)
4465 		return NULL;
4466 	*(dup + len) = '\0';
4467 	*buf += len;
4468 
4469 	if (lenp)
4470 		*lenp = len;
4471 
4472 	return dup;
4473 }
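
/*
 * Worked example for the token helpers above (illustrative only):
 * given a pointer to the string "  pool image", next_token() skips
 * the leading spaces, leaves it pointing at "pool image", and returns
 * 4; dup_token() would then return a newly allocated copy of "pool"
 * and advance the pointer to " image".
 */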
4474 
4475 /*
4476  * Parse the options provided for an "rbd add" (i.e., rbd image
4477  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4478  * and the data written is passed here via a NUL-terminated buffer.
4479  * Returns 0 if successful or an error code otherwise.
4480  *
4481  * The information extracted from these options is recorded in
4482  * the other parameters which return dynamically-allocated
4483  * structures:
4484  *  ceph_opts
4485  *      The address of a pointer that will refer to a ceph options
4486  *      structure.  Caller must release the returned pointer using
4487  *      ceph_destroy_options() when it is no longer needed.
4488  *  rbd_opts
4489  *	Address of an rbd options pointer.  Fully initialized by
4490  *	this function; caller must release with kfree().
4491  *  spec
4492  *	Address of an rbd image specification pointer.  Fully
4493  *	initialized by this function based on parsed options.
4494  *	Caller must release with rbd_spec_put().
4495  *
4496  * The options passed take this form:
4497  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4498  * where:
4499  *  <mon_addrs>
4500  *      A comma-separated list of one or more monitor addresses.
4501  *      A monitor address is an ip address, optionally followed
4502  *      by a port number (separated by a colon).
4503  *        I.e.:  ip1[:port1][,ip2[:port2]...]
4504  *  <options>
4505  *      A comma-separated list of ceph and/or rbd options.
4506  *  <pool_name>
4507  *      The name of the rados pool containing the rbd image.
4508  *  <image_name>
4509  *      The name of the image in that pool to map.
4510  *  <snap_name>
4511  *      An optional snapshot name.  If provided, the mapping will
4512  *      present data from the image at the time that snapshot was
4513  *      created.  The image head is used if no snapshot name is
4514  *      provided.  Snapshot mappings are always read-only.
4515  */
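
/*
 * Illustrative example only (the option names are defined by libceph
 * and parse_rbd_opts_token(); addresses and names below are made up):
 *
 *	echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage mysnap" \
 *		> /sys/bus/rbd/add
 *
 * maps snapshot "mysnap" of image "myimage" in pool "rbd", read-only.
 */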
4516 static int rbd_add_parse_args(const char *buf,
4517 				struct ceph_options **ceph_opts,
4518 				struct rbd_options **opts,
4519 				struct rbd_spec **rbd_spec)
4520 {
4521 	size_t len;
4522 	char *options;
4523 	const char *mon_addrs;
4524 	char *snap_name;
4525 	size_t mon_addrs_size;
4526 	struct rbd_spec *spec = NULL;
4527 	struct rbd_options *rbd_opts = NULL;
4528 	struct ceph_options *copts;
4529 	int ret;
4530 
4531 	/* The first four tokens are required */
4532 
4533 	len = next_token(&buf);
4534 	if (!len) {
4535 		rbd_warn(NULL, "no monitor address(es) provided");
4536 		return -EINVAL;
4537 	}
4538 	mon_addrs = buf;
4539 	mon_addrs_size = len + 1;
4540 	buf += len;
4541 
4542 	ret = -EINVAL;
4543 	options = dup_token(&buf, NULL);
4544 	if (!options)
4545 		return -ENOMEM;
4546 	if (!*options) {
4547 		rbd_warn(NULL, "no options provided");
4548 		goto out_err;
4549 	}
4550 
4551 	spec = rbd_spec_alloc();
4552 	if (!spec)
4553 		goto out_mem;
4554 
4555 	spec->pool_name = dup_token(&buf, NULL);
4556 	if (!spec->pool_name)
4557 		goto out_mem;
4558 	if (!*spec->pool_name) {
4559 		rbd_warn(NULL, "no pool name provided");
4560 		goto out_err;
4561 	}
4562 
4563 	spec->image_name = dup_token(&buf, NULL);
4564 	if (!spec->image_name)
4565 		goto out_mem;
4566 	if (!*spec->image_name) {
4567 		rbd_warn(NULL, "no image name provided");
4568 		goto out_err;
4569 	}
4570 
4571 	/*
4572 	 * Snapshot name is optional; default is to use "-"
4573 	 * (indicating the head/no snapshot).
4574 	 */
4575 	len = next_token(&buf);
4576 	if (!len) {
4577 		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4578 		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4579 	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
4580 		ret = -ENAMETOOLONG;
4581 		goto out_err;
4582 	}
4583 	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4584 	if (!snap_name)
4585 		goto out_mem;
4586 	*(snap_name + len) = '\0';
4587 	spec->snap_name = snap_name;
4588 
4589 	/* Initialize all rbd options to the defaults */
4590 
4591 	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4592 	if (!rbd_opts)
4593 		goto out_mem;
4594 
4595 	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4596 
4597 	copts = ceph_parse_options(options, mon_addrs,
4598 					mon_addrs + mon_addrs_size - 1,
4599 					parse_rbd_opts_token, rbd_opts);
4600 	if (IS_ERR(copts)) {
4601 		ret = PTR_ERR(copts);
4602 		goto out_err;
4603 	}
4604 	kfree(options);
4605 
4606 	*ceph_opts = copts;
4607 	*opts = rbd_opts;
4608 	*rbd_spec = spec;
4609 
4610 	return 0;
4611 out_mem:
4612 	ret = -ENOMEM;
4613 out_err:
4614 	kfree(rbd_opts);
4615 	rbd_spec_put(spec);
4616 	kfree(options);
4617 
4618 	return ret;
4619 }
4620 
4621 /*
4622  * An rbd format 2 image has a unique identifier, distinct from the
4623  * name given to it by the user.  Internally, that identifier is
4624  * what's used to specify the names of objects related to the image.
4625  *
4626  * A special "rbd id" object is used to map an rbd image name to its
4627  * id.  If that object doesn't exist, then there is no v2 rbd image
4628  * with the supplied name.
4629  *
4630  * This function will record the given rbd_dev's image_id field if
4631  * it can be determined, and in that case will return 0.  If any
4632  * errors occur a negative errno will be returned and the rbd_dev's
4633  * image_id field will be unchanged (and should be NULL).
4634  */
4635 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4636 {
4637 	int ret;
4638 	size_t size;
4639 	char *object_name;
4640 	void *response;
4641 	char *image_id;
4642 
4643 	/*
4644 	 * When probing a parent image, the image id is already
4645 	 * known (and the image name likely is not).  There's no
4646 	 * need to fetch the image id again in this case.  We
4647 	 * do still need to set the image format though.
4648 	 */
4649 	if (rbd_dev->spec->image_id) {
4650 		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4651 
4652 		return 0;
4653 	}
4654 
4655 	/*
4656 	 * First, see if the format 2 image id file exists, and if
4657 	 * so, get the image's persistent id from it.
4658 	 */
4659 	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4660 	object_name = kmalloc(size, GFP_NOIO);
4661 	if (!object_name)
4662 		return -ENOMEM;
4663 	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4664 	dout("rbd id object name is %s\n", object_name);
4665 
4666 	/* Response will be an encoded string, which includes a length */
4667 
4668 	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4669 	response = kzalloc(size, GFP_NOIO);
4670 	if (!response) {
4671 		ret = -ENOMEM;
4672 		goto out;
4673 	}
4674 
4675 	/* If it doesn't exist we'll assume it's a format 1 image */
4676 
4677 	ret = rbd_obj_method_sync(rbd_dev, object_name,
4678 				"rbd", "get_id", NULL, 0,
4679 				response, RBD_IMAGE_ID_LEN_MAX);
4680 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4681 	if (ret == -ENOENT) {
4682 		image_id = kstrdup("", GFP_KERNEL);
4683 		ret = image_id ? 0 : -ENOMEM;
4684 		if (!ret)
4685 			rbd_dev->image_format = 1;
4686 	} else if (ret > (int) sizeof (__le32)) {	/* don't let ret < 0 match */
4687 		void *p = response;
4688 
4689 		image_id = ceph_extract_encoded_string(&p, p + ret,
4690 						NULL, GFP_NOIO);
4691 		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4692 		if (!ret)
4693 			rbd_dev->image_format = 2;
4694 	} else {
4695 		ret = ret < 0 ? ret : -EINVAL;	/* preserve a real error */
4696 	}
4697 
4698 	if (!ret) {
4699 		rbd_dev->spec->image_id = image_id;
4700 		dout("image_id is %s\n", image_id);
4701 	}
4702 out:
4703 	kfree(response);
4704 	kfree(object_name);
4705 
4706 	return ret;
4707 }
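
/*
 * To illustrate the above (assuming the RBD_ID_PREFIX definition in
 * rbd_types.h): a format 2 image named "foo" has an id object named
 * "rbd_id.foo" whose "get_id" method returns the persistent image id.
 * A format 1 image has no such object, which is why an empty image_id
 * string is used here to mean "format 1".
 */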
4708 
4709 /*
4710  * Undo whatever state changes are made by the v1 or v2 header info
4711  * routines.
4712  */
4713 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4714 {
4715 	struct rbd_image_header	*header;
4716 
4717 	/* Drop parent reference unless it's already been done (or none) */
4718 
4719 	if (rbd_dev->parent_overlap)
4720 		rbd_dev_parent_put(rbd_dev);
4721 
4722 	/* Free dynamic fields from the header, then zero it out */
4723 
4724 	header = &rbd_dev->header;
4725 	ceph_put_snap_context(header->snapc);
4726 	kfree(header->snap_sizes);
4727 	kfree(header->snap_names);
4728 	kfree(header->object_prefix);
4729 	memset(header, 0, sizeof (*header));
4730 }
4731 
4732 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4733 {
4734 	int ret;
4735 
4736 	ret = rbd_dev_v2_object_prefix(rbd_dev);
4737 	if (ret)
4738 		goto out_err;
4739 
4740 	/*
4741 	 * Get and check the features for the image.  Currently the
4742 	 * features are assumed to never change.
4743 	 */
4744 	ret = rbd_dev_v2_features(rbd_dev);
4745 	if (ret)
4746 		goto out_err;
4747 
4748 	/* If the image supports fancy striping, get its parameters */
4749 
4750 	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4751 		ret = rbd_dev_v2_striping_info(rbd_dev);
4752 		if (ret < 0)
4753 			goto out_err;
4754 	}
4755 	/* No support for crypto and compression type format 2 images */
4756 
4757 	return 0;
4758 out_err:
4759 	rbd_dev->header.features = 0;
4760 	kfree(rbd_dev->header.object_prefix);
4761 	rbd_dev->header.object_prefix = NULL;
4762 
4763 	return ret;
4764 }
4765 
4766 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4767 {
4768 	struct rbd_device *parent = NULL;
4769 	struct rbd_spec *parent_spec;
4770 	struct rbd_client *rbdc;
4771 	int ret;
4772 
4773 	if (!rbd_dev->parent_spec)
4774 		return 0;
4775 	/*
4776 	 * We need to pass a reference to the client and the parent
4777 	 * spec when creating the parent rbd_dev.  Images related by
4778 	 * parent/child relationships always share both.
4779 	 */
4780 	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4781 	rbdc = __rbd_get_client(rbd_dev->rbd_client);
4782 
4783 	ret = -ENOMEM;
4784 	parent = rbd_dev_create(rbdc, parent_spec);
4785 	if (!parent)
4786 		goto out_err;
4787 
4788 	ret = rbd_dev_image_probe(parent, false);
4789 	if (ret < 0)
4790 		goto out_err;
4791 	rbd_dev->parent = parent;
4792 	atomic_set(&rbd_dev->parent_ref, 1);
4793 
4794 	return 0;
4795 out_err:
4796 	if (parent) {
4797 		rbd_dev_unparent(rbd_dev);
4798 		/* header_name is freed by rbd_dev_image_probe()'s error path */
4799 		rbd_dev_destroy(parent);
4800 	} else {
4801 		rbd_put_client(rbdc);
4802 		rbd_spec_put(parent_spec);
4803 	}
4804 
4805 	return ret;
4806 }
4807 
4808 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4809 {
4810 	int ret;
4811 
4812 	/* generate unique id: find highest unique id, add one */
4813 	rbd_dev_id_get(rbd_dev);
4814 
4815 	/* Fill in the device name, now that we have its id. */
4816 	BUILD_BUG_ON(DEV_NAME_LEN
4817 			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4818 	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4819 
4820 	/* Get our block major device number. */
4821 
4822 	ret = register_blkdev(0, rbd_dev->name);
4823 	if (ret < 0)
4824 		goto err_out_id;
4825 	rbd_dev->major = ret;
4826 
4827 	/* Set up the blkdev mapping. */
4828 
4829 	ret = rbd_init_disk(rbd_dev);
4830 	if (ret)
4831 		goto err_out_blkdev;
4832 
4833 	ret = rbd_dev_mapping_set(rbd_dev);
4834 	if (ret)
4835 		goto err_out_disk;
4836 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4837 
4838 	ret = rbd_bus_add_dev(rbd_dev);
4839 	if (ret)
4840 		goto err_out_mapping;
4841 
4842 	/* Everything's ready.  Announce the disk to the world. */
4843 
4844 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4845 	add_disk(rbd_dev->disk);
4846 
4847 	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4848 		(unsigned long long) rbd_dev->mapping.size);
4849 
4850 	return ret;
4851 
4852 err_out_mapping:
4853 	rbd_dev_mapping_clear(rbd_dev);
4854 err_out_disk:
4855 	rbd_free_disk(rbd_dev);
4856 err_out_blkdev:
4857 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
4858 err_out_id:
4859 	rbd_dev_id_put(rbd_dev);
4860 	rbd_dev_mapping_clear(rbd_dev);
4861 
4862 	return ret;
4863 }
4864 
4865 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4866 {
4867 	struct rbd_spec *spec = rbd_dev->spec;
4868 	size_t size;
4869 
4870 	/* Record the header object name for this rbd image. */
4871 
4872 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4873 
4874 	if (rbd_dev->image_format == 1)
4875 		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4876 	else
4877 		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4878 
4879 	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4880 	if (!rbd_dev->header_name)
4881 		return -ENOMEM;
4882 
4883 	if (rbd_dev->image_format == 1)
4884 		sprintf(rbd_dev->header_name, "%s%s",
4885 			spec->image_name, RBD_SUFFIX);
4886 	else
4887 		sprintf(rbd_dev->header_name, "%s%s",
4888 			RBD_HEADER_PREFIX, spec->image_id);
4889 	return 0;
4890 }
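
/*
 * For reference (assuming the RBD_SUFFIX and RBD_HEADER_PREFIX
 * definitions in rbd_types.h): a format 1 image named "foo" uses the
 * header object "foo.rbd", while a format 2 image with id "abc123"
 * uses the header object "rbd_header.abc123".
 */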
4891 
4892 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4893 {
4894 	rbd_dev_unprobe(rbd_dev);
4895 	kfree(rbd_dev->header_name);
4896 	rbd_dev->header_name = NULL;
4897 	rbd_dev->image_format = 0;
4898 	kfree(rbd_dev->spec->image_id);
4899 	rbd_dev->spec->image_id = NULL;
4900 
4901 	rbd_dev_destroy(rbd_dev);
4902 }
4903 
4904 /*
4905  * Probe for the existence of the header object for the given rbd
4906  * device.  If this image is the one being mapped (i.e., not a
4907  * parent), initiate a watch on its header object before using that
4908  * object to get detailed information about the rbd image.
4909  */
4910 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4911 {
4912 	int ret;
4913 	int tmp;
4914 
4915 	/*
4916 	 * Get the id from the image id object.  Unless there's an
4917 	 * error, rbd_dev->spec->image_id will be filled in with
4918 	 * a dynamically-allocated string, and rbd_dev->image_format
4919 	 * will be set to either 1 or 2.
4920 	 */
4921 	ret = rbd_dev_image_id(rbd_dev);
4922 	if (ret)
4923 		return ret;
4924 	rbd_assert(rbd_dev->spec->image_id);
4925 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4926 
4927 	ret = rbd_dev_header_name(rbd_dev);
4928 	if (ret)
4929 		goto err_out_format;
4930 
4931 	if (mapping) {
4932 		ret = rbd_dev_header_watch_sync(rbd_dev, true);
4933 		if (ret)
4934 			goto out_header_name;
4935 	}
4936 
4937 	if (rbd_dev->image_format == 1)
4938 		ret = rbd_dev_v1_header_info(rbd_dev);
4939 	else
4940 		ret = rbd_dev_v2_header_info(rbd_dev);
4941 	if (ret)
4942 		goto err_out_watch;
4943 
4944 	ret = rbd_dev_spec_update(rbd_dev);
4945 	if (ret)
4946 		goto err_out_probe;
4947 
4948 	ret = rbd_dev_probe_parent(rbd_dev);
4949 	if (ret)
4950 		goto err_out_probe;
4951 
4952 	dout("discovered format %u image, header name is %s\n",
4953 		rbd_dev->image_format, rbd_dev->header_name);
4954 
4955 	return 0;
4956 err_out_probe:
4957 	rbd_dev_unprobe(rbd_dev);
4958 err_out_watch:
4959 	if (mapping) {
4960 		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4961 		if (tmp)
4962 			rbd_warn(rbd_dev, "unable to tear down "
4963 					"watch request (%d)\n", tmp);
4964 	}
4965 out_header_name:
4966 	kfree(rbd_dev->header_name);
4967 	rbd_dev->header_name = NULL;
4968 err_out_format:
4969 	rbd_dev->image_format = 0;
4970 	kfree(rbd_dev->spec->image_id);
4971 	rbd_dev->spec->image_id = NULL;
4972 
4973 	dout("probe failed, returning %d\n", ret);
4974 
4975 	return ret;
4976 }
4977 
4978 static ssize_t rbd_add(struct bus_type *bus,
4979 		       const char *buf,
4980 		       size_t count)
4981 {
4982 	struct rbd_device *rbd_dev = NULL;
4983 	struct ceph_options *ceph_opts = NULL;
4984 	struct rbd_options *rbd_opts = NULL;
4985 	struct rbd_spec *spec = NULL;
4986 	struct rbd_client *rbdc;
4987 	struct ceph_osd_client *osdc;
4988 	bool read_only;
4989 	int rc = -ENOMEM;
4990 
4991 	if (!try_module_get(THIS_MODULE))
4992 		return -ENODEV;
4993 
4994 	/* parse add command */
4995 	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4996 	if (rc < 0)
4997 		goto err_out_module;
4998 	read_only = rbd_opts->read_only;
4999 	kfree(rbd_opts);
5000 	rbd_opts = NULL;	/* done with this */
5001 
5002 	rbdc = rbd_get_client(ceph_opts);
5003 	if (IS_ERR(rbdc)) {
5004 		rc = PTR_ERR(rbdc);
5005 		goto err_out_args;
5006 	}
5007 
5008 	/* pick the pool */
5009 	osdc = &rbdc->client->osdc;
5010 	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5011 	if (rc < 0)
5012 		goto err_out_client;
5013 	spec->pool_id = (u64)rc;
5014 
5015 	/* The ceph file layout needs to fit pool id in 32 bits */
5016 
5017 	if (spec->pool_id > (u64)U32_MAX) {
5018 		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
5019 				(unsigned long long)spec->pool_id, U32_MAX);
5020 		rc = -EIO;
5021 		goto err_out_client;
5022 	}
5023 
5024 	rbd_dev = rbd_dev_create(rbdc, spec);
5025 	if (!rbd_dev)
5026 		goto err_out_client;
5027 	rbdc = NULL;		/* rbd_dev now owns this */
5028 	spec = NULL;		/* rbd_dev now owns this */
5029 
5030 	rc = rbd_dev_image_probe(rbd_dev, true);
5031 	if (rc < 0)
5032 		goto err_out_rbd_dev;
5033 
5034 	/* If we are mapping a snapshot it must be marked read-only */
5035 
5036 	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5037 		read_only = true;
5038 	rbd_dev->mapping.read_only = read_only;
5039 
5040 	rc = rbd_dev_device_setup(rbd_dev);
5041 	if (rc) {
5042 		rbd_dev_image_release(rbd_dev);
5043 		goto err_out_module;
5044 	}
5045 
5046 	return count;
5047 
5048 err_out_rbd_dev:
5049 	rbd_dev_destroy(rbd_dev);
5050 err_out_client:
5051 	rbd_put_client(rbdc);
5052 err_out_args:
5053 	rbd_spec_put(spec);
5054 err_out_module:
5055 	module_put(THIS_MODULE);
5056 
5057 	dout("Error adding device %s\n", buf);
5058 
5059 	return (ssize_t)rc;
5060 }
5061 
5062 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
5063 {
5064 	struct list_head *tmp;
5065 	struct rbd_device *rbd_dev;
5066 
5067 	spin_lock(&rbd_dev_list_lock);
5068 	list_for_each(tmp, &rbd_dev_list) {
5069 		rbd_dev = list_entry(tmp, struct rbd_device, node);
5070 		if (rbd_dev->dev_id == dev_id) {
5071 			spin_unlock(&rbd_dev_list_lock);
5072 			return rbd_dev;
5073 		}
5074 	}
5075 	spin_unlock(&rbd_dev_list_lock);
5076 	return NULL;
5077 }
5078 
5079 static void rbd_dev_device_release(struct device *dev)
5080 {
5081 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5082 
5083 	rbd_free_disk(rbd_dev);
5084 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5085 	rbd_dev_mapping_clear(rbd_dev);
5086 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
5087 	rbd_dev->major = 0;
5088 	rbd_dev_id_put(rbd_dev);
5089 	rbd_dev_mapping_clear(rbd_dev);
5090 }
5091 
5092 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5093 {
5094 	while (rbd_dev->parent) {
5095 		struct rbd_device *first = rbd_dev;
5096 		struct rbd_device *second = first->parent;
5097 		struct rbd_device *third;
5098 
5099 		/*
5100 		 * Follow to the parent with no grandparent and
5101 		 * remove it.
5102 		 */
5103 		while (second && (third = second->parent)) {
5104 			first = second;
5105 			second = third;
5106 		}
5107 		rbd_assert(second);
5108 		rbd_dev_image_release(second);
5109 		first->parent = NULL;
5110 		first->parent_overlap = 0;
5111 
5112 		rbd_assert(first->parent_spec);
5113 		rbd_spec_put(first->parent_spec);
5114 		first->parent_spec = NULL;
5115 	}
5116 }
5117 
5118 static ssize_t rbd_remove(struct bus_type *bus,
5119 			  const char *buf,
5120 			  size_t count)
5121 {
5122 	struct rbd_device *rbd_dev = NULL;
5123 	int target_id;
5124 	unsigned long ul;
5125 	int ret;
5126 
5127 	ret = strict_strtoul(buf, 10, &ul);
5128 	if (ret)
5129 		return ret;
5130 
5131 	/* convert to int; abort if we lost anything in the conversion */
5132 	target_id = (int) ul;
5133 	if (target_id != ul)
5134 		return -EINVAL;
5135 
5136 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
5137 
5138 	rbd_dev = __rbd_get_dev(target_id);
5139 	if (!rbd_dev) {
5140 		ret = -ENOENT;
5141 		goto done;
5142 	}
5143 
5144 	spin_lock_irq(&rbd_dev->lock);
5145 	if (rbd_dev->open_count)
5146 		ret = -EBUSY;
5147 	else
5148 		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
5149 	spin_unlock_irq(&rbd_dev->lock);
5150 	if (ret < 0)
5151 		goto done;
5152 	rbd_bus_del_dev(rbd_dev);
5153 	ret = rbd_dev_header_watch_sync(rbd_dev, false);
5154 	if (ret)
5155 		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
5156 	rbd_dev_image_release(rbd_dev);
5157 	module_put(THIS_MODULE);
5158 	ret = count;
5159 done:
5160 	mutex_unlock(&ctl_mutex);
5161 
5162 	return ret;
5163 }
5164 
5165 /*
5166  * create control files in sysfs
5167  * /sys/bus/rbd/...
5168  */
5169 static int rbd_sysfs_init(void)
5170 {
5171 	int ret;
5172 
5173 	ret = device_register(&rbd_root_dev);
5174 	if (ret < 0)
5175 		return ret;
5176 
5177 	ret = bus_register(&rbd_bus_type);
5178 	if (ret < 0)
5179 		device_unregister(&rbd_root_dev);
5180 
5181 	return ret;
5182 }
5183 
5184 static void rbd_sysfs_cleanup(void)
5185 {
5186 	bus_unregister(&rbd_bus_type);
5187 	device_unregister(&rbd_root_dev);
5188 }
5189 
5190 static int rbd_slab_init(void)
5191 {
5192 	rbd_assert(!rbd_img_request_cache);
5193 	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5194 					sizeof (struct rbd_img_request),
5195 					__alignof__(struct rbd_img_request),
5196 					0, NULL);
5197 	if (!rbd_img_request_cache)
5198 		return -ENOMEM;
5199 
5200 	rbd_assert(!rbd_obj_request_cache);
5201 	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5202 					sizeof (struct rbd_obj_request),
5203 					__alignof__(struct rbd_obj_request),
5204 					0, NULL);
5205 	if (!rbd_obj_request_cache)
5206 		goto out_err;
5207 
5208 	rbd_assert(!rbd_segment_name_cache);
5209 	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5210 					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
5211 	if (rbd_segment_name_cache)
5212 		return 0;
5213 out_err:
5214 	if (rbd_obj_request_cache) {
5215 		kmem_cache_destroy(rbd_obj_request_cache);
5216 		rbd_obj_request_cache = NULL;
5217 	}
5218 
5219 	kmem_cache_destroy(rbd_img_request_cache);
5220 	rbd_img_request_cache = NULL;
5221 
5222 	return -ENOMEM;
5223 }
5224 
5225 static void rbd_slab_exit(void)
5226 {
5227 	rbd_assert(rbd_segment_name_cache);
5228 	kmem_cache_destroy(rbd_segment_name_cache);
5229 	rbd_segment_name_cache = NULL;
5230 
5231 	rbd_assert(rbd_obj_request_cache);
5232 	kmem_cache_destroy(rbd_obj_request_cache);
5233 	rbd_obj_request_cache = NULL;
5234 
5235 	rbd_assert(rbd_img_request_cache);
5236 	kmem_cache_destroy(rbd_img_request_cache);
5237 	rbd_img_request_cache = NULL;
5238 }
5239 
5240 static int __init rbd_init(void)
5241 {
5242 	int rc;
5243 
5244 	if (!libceph_compatible(NULL)) {
5245 		rbd_warn(NULL, "libceph incompatibility (quitting)");
5246 
5247 		return -EINVAL;
5248 	}
5249 	rc = rbd_slab_init();
5250 	if (rc)
5251 		return rc;
5252 	rc = rbd_sysfs_init();
5253 	if (rc)
5254 		rbd_slab_exit();
5255 	else
5256 		pr_info("loaded " RBD_DRV_NAME_LONG "\n");
5257 
5258 	return rc;
5259 }
5260 
5261 static void __exit rbd_exit(void)
5262 {
5263 	rbd_sysfs_cleanup();
5264 	rbd_slab_exit();
5265 }
5266 
5267 module_init(rbd_init);
5268 module_exit(rbd_exit);
5269 
5270 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5271 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5272 MODULE_DESCRIPTION("rados block device");
5273 
5274 /* following authorship retained from original osdblk.c */
5275 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5276 
5277 MODULE_LICENSE("GPL");
5278