1 /*
2    rbd.c -- Export ceph rados objects as a Linux block device
3 
4 
5    based on drivers/block/osdblk.c:
6 
7    Copyright 2009 Red Hat, Inc.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING.  If not, write to
20    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 
22 
23 
24    For usage instructions, please refer to:
25 
26                  Documentation/ABI/testing/sysfs-bus-rbd
27 
28  */
29 
30 #include <linux/ceph/libceph.h>
31 #include <linux/ceph/osd_client.h>
32 #include <linux/ceph/mon_client.h>
33 #include <linux/ceph/decode.h>
34 #include <linux/parser.h>
35 
36 #include <linux/kernel.h>
37 #include <linux/device.h>
38 #include <linux/module.h>
39 #include <linux/fs.h>
40 #include <linux/blkdev.h>
41 
42 #include "rbd_types.h"
43 
44 #define RBD_DEBUG	/* Activate rbd_assert() calls */
45 
46 /*
47  * The basic unit of block I/O is a sector.  It is interpreted in a
48  * number of contexts in Linux (blk, bio, genhd), but the default is
49  * universally 512 bytes.  These symbols are just slightly more
50  * meaningful than the bare numbers they represent.
51  */
52 #define	SECTOR_SHIFT	9
53 #define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
54 
55 /* It might be useful to have these defined elsewhere */
56 
57 #define	U8_MAX	((u8)	(~0U))
58 #define	U16_MAX	((u16)	(~0U))
59 #define	U32_MAX	((u32)	(~0U))
60 #define	U64_MAX	((u64)	(~0ULL))
61 
62 #define RBD_DRV_NAME "rbd"
63 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
64 
65 #define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */
66 
67 #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
68 #define RBD_MAX_SNAP_NAME_LEN	\
69 			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
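
/*
 * Illustrative arithmetic (assuming the usual Linux NAME_MAX of 255):
 * the "snap_" prefix is 5 characters, so a snapshot name can be at
 * most 255 - 5 = 250 characters and still yield a valid device name.
 */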
70 
71 #define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
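
/*
 * A quick check of that bound: each snapshot id is a __le64, so 510
 * ids occupy 510 * 8 = 4080 bytes, leaving only a small remainder of
 * a 4KB page for the fixed fields that accompany them.
 */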
72 
73 #define RBD_SNAP_HEAD_NAME	"-"
74 
75 /* This allows a single page to hold an image name sent by an OSD */
76 #define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
77 #define RBD_IMAGE_ID_LEN_MAX	64
78 
79 #define RBD_OBJ_PREFIX_LEN_MAX	64
80 
81 /* Feature bits */
82 
83 #define RBD_FEATURE_LAYERING      1
84 
85 /* Features supported by this (client software) implementation. */
86 
87 #define RBD_FEATURES_ALL          (0)
88 
89 /*
90  * An RBD device name will be "rbd#", where the "rbd" comes from
91  * RBD_DRV_NAME above, and # is a unique integer identifier.
92  * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
93  * enough to hold all possible device names.
94  */
95 #define DEV_NAME_LEN		32
96 #define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
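
/*
 * Why (5 * sizeof (int)) / 2 + 1 suffices: each byte contributes at
 * most log10(256) < 2.5 decimal digits, and the +1 covers the sign.
 * With a 4-byte int that gives 11, exactly enough for "-2147483648".
 * A minimal sketch of the intended use (hypothetical buffer):
 *
 *	char buf[DEV_NAME_LEN];
 *
 *	snprintf(buf, sizeof (buf), "%s%d", RBD_DRV_NAME, INT_MIN);
 */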
97 
98 /*
99  * block device image metadata (in-memory version)
100  */
101 struct rbd_image_header {
102 	/* These four fields never change for a given rbd image */
103 	char *object_prefix;
104 	u64 features;
105 	__u8 obj_order;
106 	__u8 crypt_type;
107 	__u8 comp_type;
108 
109 	/* The remaining fields need to be updated occasionally */
110 	u64 image_size;
111 	struct ceph_snap_context *snapc;
112 	char *snap_names;
113 	u64 *snap_sizes;
114 
115 	u64 obj_version;
116 };
117 
118 /*
119  * An rbd image specification.
120  *
121  * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
122  * identify an image.  Each rbd_dev structure includes a pointer to
123  * an rbd_spec structure that encapsulates this identity.
124  *
125  * Each of the ids in an rbd_spec has an associated name.  For a
126  * user-mapped image, the names are supplied and the ids associated
127  * with them are looked up.  For a layered image, a parent image is
128  * defined by the tuple, and the names are looked up.
129  *
130  * An rbd_dev structure contains a parent_spec pointer which is
131  * non-null if the image it represents is a child in a layered
132  * image.  This pointer will refer to the rbd_spec structure used
133  * by the parent rbd_dev for its own identity (i.e., the structure
134  * is shared between the parent and child).
135  *
136  * Since these structures are populated once, during the discovery
137  * phase of image construction, they are effectively immutable so
138  * we make no effort to synchronize access to them.
139  *
140  * Note that code herein does not assume the image name is known (it
141  * could be a null pointer).
142  */
143 struct rbd_spec {
144 	u64		pool_id;
145 	char		*pool_name;
146 
147 	char		*image_id;
148 	char		*image_name;
149 
150 	u64		snap_id;
151 	char		*snap_name;
152 
153 	struct kref	kref;
154 };
155 
156 /*
157  * an instance of the client.  multiple devices may share an rbd client.
158  */
159 struct rbd_client {
160 	struct ceph_client	*client;
161 	struct kref		kref;
162 	struct list_head	node;
163 };
164 
165 struct rbd_img_request;
166 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
167 
168 #define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */
169 
170 struct rbd_obj_request;
171 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
172 
173 enum obj_request_type {
174 	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
175 };
176 
177 struct rbd_obj_request {
178 	const char		*object_name;
179 	u64			offset;		/* object start byte */
180 	u64			length;		/* bytes from offset */
181 
182 	struct rbd_img_request	*img_request;
183 	struct list_head	links;		/* img_request->obj_requests */
184 	u32			which;		/* posn image request list */
185 
186 	enum obj_request_type	type;
187 	union {
188 		struct bio	*bio_list;
189 		struct {
190 			struct page	**pages;
191 			u32		page_count;
192 		};
193 	};
194 
195 	struct ceph_osd_request	*osd_req;
196 
197 	u64			xferred;	/* bytes transferred */
198 	u64			version;
199 	int			result;
200 	atomic_t		done;
201 
202 	rbd_obj_callback_t	callback;
203 	struct completion	completion;
204 
205 	struct kref		kref;
206 };
207 
208 struct rbd_img_request {
209 	struct request		*rq;
210 	struct rbd_device	*rbd_dev;
211 	u64			offset;	/* starting image byte offset */
212 	u64			length;	/* byte count from offset */
213 	bool			write_request;	/* false for read */
214 	union {
215 		struct ceph_snap_context *snapc;	/* for writes */
216 		u64		snap_id;		/* for reads */
217 	};
218 	spinlock_t		completion_lock;/* protects next_completion */
219 	u32			next_completion;
220 	rbd_img_callback_t	callback;
221 
222 	u32			obj_request_count;
223 	struct list_head	obj_requests;	/* rbd_obj_request structs */
224 
225 	struct kref		kref;
226 };
227 
228 #define for_each_obj_request(ireq, oreq) \
229 	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
230 #define for_each_obj_request_from(ireq, oreq) \
231 	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
232 #define for_each_obj_request_safe(ireq, oreq, n) \
233 	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
234 
235 struct rbd_snap {
236 	struct	device		dev;
237 	const char		*name;
238 	u64			size;
239 	struct list_head	node;
240 	u64			id;
241 	u64			features;
242 };
243 
244 struct rbd_mapping {
245 	u64                     size;
246 	u64                     features;
247 	bool			read_only;
248 };
249 
250 /*
251  * a single device
252  */
253 struct rbd_device {
254 	int			dev_id;		/* blkdev unique id */
255 
256 	int			major;		/* blkdev assigned major */
257 	struct gendisk		*disk;		/* blkdev's gendisk and rq */
258 
259 	u32			image_format;	/* Either 1 or 2 */
260 	struct rbd_client	*rbd_client;
261 
262 	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
263 
264 	spinlock_t		lock;		/* queue, flags, open_count */
265 
266 	struct rbd_image_header	header;
267 	unsigned long		flags;		/* possibly lock protected */
268 	struct rbd_spec		*spec;
269 
270 	char			*header_name;
271 
272 	struct ceph_file_layout	layout;
273 
274 	struct ceph_osd_event   *watch_event;
275 	struct rbd_obj_request	*watch_request;
276 
277 	struct rbd_spec		*parent_spec;
278 	u64			parent_overlap;
279 
280 	/* protects updating the header */
281 	struct rw_semaphore     header_rwsem;
282 
283 	struct rbd_mapping	mapping;
284 
285 	struct list_head	node;
286 
287 	/* list of snapshots */
288 	struct list_head	snaps;
289 
290 	/* sysfs related */
291 	struct device		dev;
292 	unsigned long		open_count;	/* protected by lock */
293 };
294 
295 /*
296  * Flag bits for rbd_dev->flags.  If atomicity is required,
297  * rbd_dev->lock is used to protect access.
298  *
299  * Currently, only the "removing" flag (which is coupled with the
300  * "open_count" field) requires atomic access.
301  */
302 enum rbd_dev_flags {
303 	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
304 	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
305 };
306 
307 static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */
308 
309 static LIST_HEAD(rbd_dev_list);    /* devices */
310 static DEFINE_SPINLOCK(rbd_dev_list_lock);
311 
312 static LIST_HEAD(rbd_client_list);		/* clients */
313 static DEFINE_SPINLOCK(rbd_client_list_lock);
314 
315 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
316 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
317 
318 static void rbd_dev_release(struct device *dev);
319 static void rbd_remove_snap_dev(struct rbd_snap *snap);
320 
321 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
322 		       size_t count);
323 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
324 			  size_t count);
325 
326 static struct bus_attribute rbd_bus_attrs[] = {
327 	__ATTR(add, S_IWUSR, NULL, rbd_add),
328 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
329 	__ATTR_NULL
330 };
331 
332 static struct bus_type rbd_bus_type = {
333 	.name		= "rbd",
334 	.bus_attrs	= rbd_bus_attrs,
335 };
336 
337 static void rbd_root_dev_release(struct device *dev)
338 {
339 }
340 
341 static struct device rbd_root_dev = {
342 	.init_name =    "rbd",
343 	.release =      rbd_root_dev_release,
344 };
345 
346 static __printf(2, 3)
347 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
348 {
349 	struct va_format vaf;
350 	va_list args;
351 
352 	va_start(args, fmt);
353 	vaf.fmt = fmt;
354 	vaf.va = &args;
355 
356 	if (!rbd_dev)
357 		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
358 	else if (rbd_dev->disk)
359 		printk(KERN_WARNING "%s: %s: %pV\n",
360 			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
361 	else if (rbd_dev->spec && rbd_dev->spec->image_name)
362 		printk(KERN_WARNING "%s: image %s: %pV\n",
363 			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
364 	else if (rbd_dev->spec && rbd_dev->spec->image_id)
365 		printk(KERN_WARNING "%s: id %s: %pV\n",
366 			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
367 	else	/* punt */
368 		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
369 			RBD_DRV_NAME, rbd_dev, &vaf);
370 	va_end(args);
371 }
372 
373 #ifdef RBD_DEBUG
374 #define rbd_assert(expr)	do {					\
375 		if (unlikely(!(expr))) {				\
376 			printk(KERN_ERR "\nAssertion failure in %s() "	\
377 						"at line %d:\n\n"	\
378 					"\trbd_assert(%s);\n\n",	\
379 					__func__, __LINE__, #expr);	\
380 			BUG();						\
381 		} } while (0)
382 #else /* !RBD_DEBUG */
383 #  define rbd_assert(expr)	((void) 0)
384 #endif /* !RBD_DEBUG */
385 
386 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
387 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
388 
389 static int rbd_open(struct block_device *bdev, fmode_t mode)
390 {
391 	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
392 	bool removing = false;
393 
394 	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
395 		return -EROFS;
396 
397 	spin_lock_irq(&rbd_dev->lock);
398 	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
399 		removing = true;
400 	else
401 		rbd_dev->open_count++;
402 	spin_unlock_irq(&rbd_dev->lock);
403 	if (removing)
404 		return -ENOENT;
405 
406 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
407 	(void) get_device(&rbd_dev->dev);
408 	set_device_ro(bdev, rbd_dev->mapping.read_only);
409 	mutex_unlock(&ctl_mutex);
410 
411 	return 0;
412 }
413 
414 static int rbd_release(struct gendisk *disk, fmode_t mode)
415 {
416 	struct rbd_device *rbd_dev = disk->private_data;
417 	unsigned long open_count_before;
418 
419 	spin_lock_irq(&rbd_dev->lock);
420 	open_count_before = rbd_dev->open_count--;
421 	spin_unlock_irq(&rbd_dev->lock);
422 	rbd_assert(open_count_before > 0);
423 
424 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
425 	put_device(&rbd_dev->dev);
426 	mutex_unlock(&ctl_mutex);
427 
428 	return 0;
429 }
430 
431 static const struct block_device_operations rbd_bd_ops = {
432 	.owner			= THIS_MODULE,
433 	.open			= rbd_open,
434 	.release		= rbd_release,
435 };
436 
437 /*
438  * Initialize an rbd client instance.
439  * We own *ceph_opts.
440  */
441 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
442 {
443 	struct rbd_client *rbdc;
444 	int ret = -ENOMEM;
445 
446 	dout("%s:\n", __func__);
447 	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
448 	if (!rbdc)
449 		goto out_opt;
450 
451 	kref_init(&rbdc->kref);
452 	INIT_LIST_HEAD(&rbdc->node);
453 
454 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
455 
456 	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
457 	if (IS_ERR(rbdc->client))
458 		goto out_mutex;
459 	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
460 
461 	ret = ceph_open_session(rbdc->client);
462 	if (ret < 0)
463 		goto out_err;
464 
465 	spin_lock(&rbd_client_list_lock);
466 	list_add_tail(&rbdc->node, &rbd_client_list);
467 	spin_unlock(&rbd_client_list_lock);
468 
469 	mutex_unlock(&ctl_mutex);
470 	dout("%s: rbdc %p\n", __func__, rbdc);
471 
472 	return rbdc;
473 
474 out_err:
475 	ceph_destroy_client(rbdc->client);
476 out_mutex:
477 	mutex_unlock(&ctl_mutex);
478 	kfree(rbdc);
479 out_opt:
480 	if (ceph_opts)
481 		ceph_destroy_options(ceph_opts);
482 	dout("%s: error %d\n", __func__, ret);
483 
484 	return ERR_PTR(ret);
485 }
486 
487 /*
488  * Find a ceph client with a specific address and configuration.  If
489  * found, bump its reference count.
490  */
491 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
492 {
493 	struct rbd_client *client_node;
494 	bool found = false;
495 
496 	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
497 		return NULL;
498 
499 	spin_lock(&rbd_client_list_lock);
500 	list_for_each_entry(client_node, &rbd_client_list, node) {
501 		if (!ceph_compare_options(ceph_opts, client_node->client)) {
502 			kref_get(&client_node->kref);
503 			found = true;
504 			break;
505 		}
506 	}
507 	spin_unlock(&rbd_client_list_lock);
508 
509 	return found ? client_node : NULL;
510 }
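
/*
 * The effect of this sharing, sketched with the sysfs interface
 * described in Documentation/ABI/testing/sysfs-bus-rbd (addresses and
 * names here are hypothetical):
 *
 *	# echo "1.2.3.4:6789 name=admin rbd foo -" > /sys/bus/rbd/add
 *	# echo "1.2.3.4:6789 name=admin rbd bar -" > /sys/bus/rbd/add
 *
 * The second add finds the first mapping's client on rbd_client_list
 * and just bumps its kref; including "noshare" in the options would
 * force a separate ceph_client for each mapping instead.
 */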
511 
512 /*
513  * mount options
514  */
515 enum {
516 	Opt_last_int,
517 	/* int args above */
518 	Opt_last_string,
519 	/* string args above */
520 	Opt_read_only,
521 	Opt_read_write,
522 	/* Boolean args above */
523 	Opt_last_bool,
524 };
525 
526 static match_table_t rbd_opts_tokens = {
527 	/* int args above */
528 	/* string args above */
529 	{Opt_read_only, "read_only"},
530 	{Opt_read_only, "ro"},		/* Alternate spelling */
531 	{Opt_read_write, "read_write"},
532 	{Opt_read_write, "rw"},		/* Alternate spelling */
533 	/* Boolean args above */
534 	{-1, NULL}
535 };
536 
537 struct rbd_options {
538 	bool	read_only;
539 };
540 
541 #define RBD_READ_ONLY_DEFAULT	false
542 
543 static int parse_rbd_opts_token(char *c, void *private)
544 {
545 	struct rbd_options *rbd_opts = private;
546 	substring_t argstr[MAX_OPT_ARGS];
547 	int token, intval, ret;
548 
549 	token = match_token(c, rbd_opts_tokens, argstr);
550 	if (token < 0)
551 		return -EINVAL;
552 
553 	if (token < Opt_last_int) {
554 		ret = match_int(&argstr[0], &intval);
555 		if (ret < 0) {
556 			pr_err("bad mount option arg (not int) "
557 			       "at '%s'\n", c);
558 			return ret;
559 		}
560 		dout("got int token %d val %d\n", token, intval);
561 	} else if (token > Opt_last_int && token < Opt_last_string) {
562 		dout("got string token %d val %s\n", token,
563 		     argstr[0].from);
564 	} else if (token > Opt_last_string && token < Opt_last_bool) {
565 		dout("got Boolean token %d\n", token);
566 	} else {
567 		dout("got token %d\n", token);
568 	}
569 
570 	switch (token) {
571 	case Opt_read_only:
572 		rbd_opts->read_only = true;
573 		break;
574 	case Opt_read_write:
575 		rbd_opts->read_only = false;
576 		break;
577 	default:
578 		rbd_assert(false);
579 		break;
580 	}
581 	return 0;
582 }
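
/*
 * For example: with an options field of "name=admin,ro", libceph's
 * ceph_parse_options() consumes "name=admin" and hands the token it
 * does not recognize ("ro") to this callback, which matches
 * Opt_read_only above and leaves rbd_opts->read_only set, forcing the
 * mapping read-only.
 */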
583 
584 /*
585  * Get a ceph client with a specific address and configuration; if one
586  * does not exist, create it.
587  */
588 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
589 {
590 	struct rbd_client *rbdc;
591 
592 	rbdc = rbd_client_find(ceph_opts);
593 	if (rbdc)	/* using an existing client */
594 		ceph_destroy_options(ceph_opts);
595 	else
596 		rbdc = rbd_client_create(ceph_opts);
597 
598 	return rbdc;
599 }
600 
601 /*
602  * Destroy ceph client
603  *
604  * rbd_client_list_lock is taken here, so the caller must not hold it.
605  */
606 static void rbd_client_release(struct kref *kref)
607 {
608 	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
609 
610 	dout("%s: rbdc %p\n", __func__, rbdc);
611 	spin_lock(&rbd_client_list_lock);
612 	list_del(&rbdc->node);
613 	spin_unlock(&rbd_client_list_lock);
614 
615 	ceph_destroy_client(rbdc->client);
616 	kfree(rbdc);
617 }
618 
619 /*
620  * Drop reference to ceph client node. If it's not referenced anymore, release
621  * it.
622  */
623 static void rbd_put_client(struct rbd_client *rbdc)
624 {
625 	if (rbdc)
626 		kref_put(&rbdc->kref, rbd_client_release);
627 }
628 
629 static bool rbd_image_format_valid(u32 image_format)
630 {
631 	return image_format == 1 || image_format == 2;
632 }
633 
634 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
635 {
636 	size_t size;
637 	u32 snap_count;
638 
639 	/* The header has to start with the magic rbd header text */
640 	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
641 		return false;
642 
643 	/* The bio layer requires at least sector-sized I/O */
644 
645 	if (ondisk->options.order < SECTOR_SHIFT)
646 		return false;
647 
648 	/* If we use u64 in a few spots we may be able to loosen this */
649 
650 	if (ondisk->options.order > 8 * sizeof (int) - 1)
651 		return false;
652 
653 	/*
654 	 * The size of a snapshot header has to fit in a size_t, and
655 	 * that limits the number of snapshots.
656 	 */
657 	snap_count = le32_to_cpu(ondisk->snap_count);
658 	size = SIZE_MAX - sizeof (struct ceph_snap_context);
659 	if (snap_count > size / sizeof (__le64))
660 		return false;
661 
662 	/*
663 	 * Not only that, but the size of the entire snapshot
664 	 * header must also be representable in a size_t.
665 	 */
666 	size -= snap_count * sizeof (__le64);
667 	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
668 		return false;
669 
670 	return true;
671 }
672 
673 /*
674  * Create a new header structure, translating the header format from
675  * the on-disk version.
676  */
677 static int rbd_header_from_disk(struct rbd_image_header *header,
678 				 struct rbd_image_header_ondisk *ondisk)
679 {
680 	u32 snap_count;
681 	size_t len;
682 	size_t size;
683 	u32 i;
684 
685 	memset(header, 0, sizeof (*header));
686 
687 	snap_count = le32_to_cpu(ondisk->snap_count);
688 
689 	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
690 	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
691 	if (!header->object_prefix)
692 		return -ENOMEM;
693 	memcpy(header->object_prefix, ondisk->object_prefix, len);
694 	header->object_prefix[len] = '\0';
695 
696 	if (snap_count) {
697 		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
698 
699 		/* Save a copy of the snapshot names */
700 
701 		if (snap_names_len > (u64) SIZE_MAX) {
702 			/* Free the prefix rather than leaking it */
703 			kfree(header->object_prefix);
704 			header->object_prefix = NULL;
705 			return -EIO;
706 		}
703 		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
704 		if (!header->snap_names)
705 			goto out_err;
706 		/*
707 		 * Note that rbd_dev_v1_header_read() guarantees
708 		 * the ondisk buffer we're working with has
709 		 * snap_names_len bytes beyond the end of the
710 		 * snapshot id array, so this memcpy() is safe.
711 		 */
712 		memcpy(header->snap_names, &ondisk->snaps[snap_count],
713 			snap_names_len);
714 
715 		/* Record each snapshot's size */
716 
717 		size = snap_count * sizeof (*header->snap_sizes);
718 		header->snap_sizes = kmalloc(size, GFP_KERNEL);
719 		if (!header->snap_sizes)
720 			goto out_err;
721 		for (i = 0; i < snap_count; i++)
722 			header->snap_sizes[i] =
723 				le64_to_cpu(ondisk->snaps[i].image_size);
724 	} else {
725 		WARN_ON(ondisk->snap_names_len);
726 		header->snap_names = NULL;
727 		header->snap_sizes = NULL;
728 	}
729 
730 	header->features = 0;	/* No feature support in v1 images */
731 	header->obj_order = ondisk->options.order;
732 	header->crypt_type = ondisk->options.crypt_type;
733 	header->comp_type = ondisk->options.comp_type;
734 
735 	/* Allocate and fill in the snapshot context */
736 
737 	header->image_size = le64_to_cpu(ondisk->image_size);
738 	size = sizeof (struct ceph_snap_context);
739 	size += snap_count * sizeof (header->snapc->snaps[0]);
740 	header->snapc = kzalloc(size, GFP_KERNEL);
741 	if (!header->snapc)
742 		goto out_err;
743 
744 	atomic_set(&header->snapc->nref, 1);
745 	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
746 	header->snapc->num_snaps = snap_count;
747 	for (i = 0; i < snap_count; i++)
748 		header->snapc->snaps[i] =
749 			le64_to_cpu(ondisk->snaps[i].id);
750 
751 	return 0;
752 
753 out_err:
754 	kfree(header->snap_sizes);
755 	header->snap_sizes = NULL;
756 	kfree(header->snap_names);
757 	header->snap_names = NULL;
758 	kfree(header->object_prefix);
759 	header->object_prefix = NULL;
760 
761 	return -ENOMEM;
762 }
763 
764 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
765 {
766 	struct rbd_snap *snap;
767 
768 	if (snap_id == CEPH_NOSNAP)
769 		return RBD_SNAP_HEAD_NAME;
770 
771 	list_for_each_entry(snap, &rbd_dev->snaps, node)
772 		if (snap_id == snap->id)
773 			return snap->name;
774 
775 	return NULL;
776 }
777 
778 static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
779 {
781 	struct rbd_snap *snap;
782 
783 	list_for_each_entry(snap, &rbd_dev->snaps, node) {
784 		if (!strcmp(snap_name, snap->name)) {
785 			rbd_dev->spec->snap_id = snap->id;
786 			rbd_dev->mapping.size = snap->size;
787 			rbd_dev->mapping.features = snap->features;
788 
789 			return 0;
790 		}
791 	}
792 
793 	return -ENOENT;
794 }
795 
796 static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
797 {
798 	int ret;
799 
800 	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
801 		    sizeof (RBD_SNAP_HEAD_NAME))) {
802 		rbd_dev->spec->snap_id = CEPH_NOSNAP;
803 		rbd_dev->mapping.size = rbd_dev->header.image_size;
804 		rbd_dev->mapping.features = rbd_dev->header.features;
805 		ret = 0;
806 	} else {
807 		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
808 		if (ret < 0)
809 			goto done;
810 		rbd_dev->mapping.read_only = true;
811 	}
812 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
813 
814 done:
815 	return ret;
816 }
817 
818 static void rbd_header_free(struct rbd_image_header *header)
819 {
820 	kfree(header->object_prefix);
821 	header->object_prefix = NULL;
822 	kfree(header->snap_sizes);
823 	header->snap_sizes = NULL;
824 	kfree(header->snap_names);
825 	header->snap_names = NULL;
826 	ceph_put_snap_context(header->snapc);
827 	header->snapc = NULL;
828 }
829 
830 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
831 {
832 	char *name;
833 	u64 segment;
834 	int ret;
835 
836 	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
837 	if (!name)
838 		return NULL;
839 	segment = offset >> rbd_dev->header.obj_order;
840 	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
841 			rbd_dev->header.object_prefix, segment);
842 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
843 		pr_err("error formatting segment name for #%llu (%d)\n",
844 			segment, ret);
845 		kfree(name);
846 		name = NULL;
847 	}
848 
849 	return name;
850 }
851 
852 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
853 {
854 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
855 
856 	return offset & (segment_size - 1);
857 }
858 
859 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
860 				u64 offset, u64 length)
861 {
862 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
863 
864 	offset &= segment_size - 1;
865 
866 	rbd_assert(length <= U64_MAX - offset);
867 	if (offset + length > segment_size)
868 		length = segment_size - offset;
869 
870 	return length;
871 }
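
/*
 * A worked example for the three segment helpers above, assuming an
 * image with obj_order 22 (4MB objects) and a (hypothetical) object
 * prefix of "rb.0.1234", for an I/O at image offset 10MB, length 3MB:
 *
 *	rbd_segment_name()   -> "rb.0.1234.000000000002"  (10MB >> 22)
 *	rbd_segment_offset() -> 2MB	(10MB & (4MB - 1))
 *	rbd_segment_length() -> 2MB	(clipped at the segment boundary;
 *					 the remaining 1MB goes to the
 *					 next segment)
 */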
872 
873 /*
874  * returns the size of an object in the image
875  */
876 static u64 rbd_obj_bytes(struct rbd_image_header *header)
877 {
878 	return 1 << header->obj_order;
879 	return 1ULL << header->obj_order;	/* obj_order can be up to 31 */
880 
881 /*
882  * bio helpers
883  */
884 
885 static void bio_chain_put(struct bio *chain)
886 {
887 	struct bio *tmp;
888 
889 	while (chain) {
890 		tmp = chain;
891 		chain = chain->bi_next;
892 		bio_put(tmp);
893 	}
894 }
895 
896 /*
897  * zeros a bio chain, starting at specific offset
898  */
899 static void zero_bio_chain(struct bio *chain, int start_ofs)
900 {
901 	struct bio_vec *bv;
902 	unsigned long flags;
903 	void *buf;
904 	int i;
905 	int pos = 0;
906 
907 	while (chain) {
908 		bio_for_each_segment(bv, chain, i) {
909 			if (pos + bv->bv_len > start_ofs) {
910 				int remainder = max(start_ofs - pos, 0);
911 				buf = bvec_kmap_irq(bv, &flags);
912 				memset(buf + remainder, 0,
913 				       bv->bv_len - remainder);
914 				bvec_kunmap_irq(buf, &flags);
915 			}
916 			pos += bv->bv_len;
917 		}
918 
919 		chain = chain->bi_next;
920 	}
921 }
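
/*
 * Example of the remainder math above: with 1KB bio_vecs and
 * start_ofs == 1536, the first bvec (chain bytes 0..1023) is left
 * intact, the second is zeroed from remainder 512 onward (chain bytes
 * 1536..2047), and every later bvec is zeroed in full (remainder 0).
 */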
922 
923 /*
924  * Clone a portion of a bio, starting at the given byte offset
925  * and continuing for the number of bytes indicated.
926  */
927 static struct bio *bio_clone_range(struct bio *bio_src,
928 					unsigned int offset,
929 					unsigned int len,
930 					gfp_t gfpmask)
931 {
932 	struct bio_vec *bv;
933 	unsigned int resid;
934 	unsigned short idx;
935 	unsigned int voff;
936 	unsigned short end_idx;
937 	unsigned short vcnt;
938 	struct bio *bio;
939 
940 	/* Handle the easy case for the caller */
941 
942 	if (!offset && len == bio_src->bi_size)
943 		return bio_clone(bio_src, gfpmask);
944 
945 	if (WARN_ON_ONCE(!len))
946 		return NULL;
947 	if (WARN_ON_ONCE(len > bio_src->bi_size))
948 		return NULL;
949 	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
950 		return NULL;
951 
952 	/* Find first affected segment... */
953 
954 	resid = offset;
955 	__bio_for_each_segment(bv, bio_src, idx, 0) {
956 		if (resid < bv->bv_len)
957 			break;
958 		resid -= bv->bv_len;
959 	}
960 	voff = resid;
961 
962 	/* ...and the last affected segment */
963 
964 	resid += len;
965 	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
966 		if (resid <= bv->bv_len)
967 			break;
968 		resid -= bv->bv_len;
969 	}
970 	vcnt = end_idx - idx + 1;
971 
972 	/* Build the clone */
973 
974 	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
975 	if (!bio)
976 		return NULL;	/* ENOMEM */
977 
978 	bio->bi_bdev = bio_src->bi_bdev;
979 	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
980 	bio->bi_rw = bio_src->bi_rw;
981 	bio->bi_flags |= 1 << BIO_CLONED;
982 
983 	/*
984 	 * Copy over our part of the bio_vec, then update the first
985 	 * and last (or only) entries.
986 	 */
987 	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
988 			vcnt * sizeof (struct bio_vec));
989 	bio->bi_io_vec[0].bv_offset += voff;
990 	if (vcnt > 1) {
991 		bio->bi_io_vec[0].bv_len -= voff;
992 		bio->bi_io_vec[vcnt - 1].bv_len = resid;
993 	} else {
994 		bio->bi_io_vec[0].bv_len = len;
995 	}
996 
997 	bio->bi_vcnt = vcnt;
998 	bio->bi_size = len;
999 	bio->bi_idx = 0;
1000 
1001 	return bio;
1002 }
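
/*
 * A worked example of the index/offset math above: four 4KB bio_vecs,
 * offset == 6144 and len == 8192.  The first loop stops at idx 1 with
 * voff == 2048; the second stops at end_idx 3 with resid == 2048, so
 * vcnt == 3.  The clone's first bvec covers the trailing 2048 bytes
 * of source bvec 1, its middle bvec is copied whole, and its last is
 * trimmed to 2048 bytes: 2048 + 4096 + 2048 == len.
 */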
1003 
1004 /*
1005  * Clone a portion of a bio chain, starting at the given byte offset
1006  * into the first bio in the source chain and continuing for the
1007  * number of bytes indicated.  The result is another bio chain of
1008  * exactly the given length, or a null pointer on error.
1009  *
1010  * The bio_src and offset parameters are both in-out.  On entry they
1011  * refer to the first source bio and the offset into that bio where
1012  * the start of data to be cloned is located.
1013  *
1014  * On return, bio_src is updated to refer to the bio in the source
1015  * chain that contains the first un-cloned byte, and *offset will
1016  * contain the offset of that byte within that bio.
1017  */
1018 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1019 					unsigned int *offset,
1020 					unsigned int len,
1021 					gfp_t gfpmask)
1022 {
1023 	struct bio *bi = *bio_src;
1024 	unsigned int off = *offset;
1025 	struct bio *chain = NULL;
1026 	struct bio **end;
1027 
1028 	/* Build up a chain of clone bios up to the limit */
1029 
1030 	if (!bi || off >= bi->bi_size || !len)
1031 		return NULL;		/* Nothing to clone */
1032 
1033 	end = &chain;
1034 	while (len) {
1035 		unsigned int bi_size;
1036 		struct bio *bio;
1037 
1038 		if (!bi) {
1039 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1040 			goto out_err;	/* EINVAL; ran out of bio's */
1041 		}
1042 		bi_size = min_t(unsigned int, bi->bi_size - off, len);
1043 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
1044 		if (!bio)
1045 			goto out_err;	/* ENOMEM */
1046 
1047 		*end = bio;
1048 		end = &bio->bi_next;
1049 
1050 		off += bi_size;
1051 		if (off == bi->bi_size) {
1052 			bi = bi->bi_next;
1053 			off = 0;
1054 		}
1055 		len -= bi_size;
1056 	}
1057 	*bio_src = bi;
1058 	*offset = off;
1059 
1060 	return chain;
1061 out_err:
1062 	bio_chain_put(chain);
1063 
1064 	return NULL;
1065 }
1066 
1067 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1068 {
1069 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1070 		atomic_read(&obj_request->kref.refcount));
1071 	kref_get(&obj_request->kref);
1072 }
1073 
1074 static void rbd_obj_request_destroy(struct kref *kref);
1075 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1076 {
1077 	rbd_assert(obj_request != NULL);
1078 	dout("%s: obj %p (was %d)\n", __func__, obj_request,
1079 		atomic_read(&obj_request->kref.refcount));
1080 	kref_put(&obj_request->kref, rbd_obj_request_destroy);
1081 }
1082 
1083 static void rbd_img_request_get(struct rbd_img_request *img_request)
1084 {
1085 	dout("%s: img %p (was %d)\n", __func__, img_request,
1086 		atomic_read(&img_request->kref.refcount));
1087 	kref_get(&img_request->kref);
1088 }
1089 
1090 static void rbd_img_request_destroy(struct kref *kref);
1091 static void rbd_img_request_put(struct rbd_img_request *img_request)
1092 {
1093 	rbd_assert(img_request != NULL);
1094 	dout("%s: img %p (was %d)\n", __func__, img_request,
1095 		atomic_read(&img_request->kref.refcount));
1096 	kref_put(&img_request->kref, rbd_img_request_destroy);
1097 }
1098 
1099 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1100 					struct rbd_obj_request *obj_request)
1101 {
1102 	rbd_assert(obj_request->img_request == NULL);
1103 
1104 	rbd_obj_request_get(obj_request);
1105 	obj_request->img_request = img_request;
1106 	obj_request->which = img_request->obj_request_count;
1107 	rbd_assert(obj_request->which != BAD_WHICH);
1108 	img_request->obj_request_count++;
1109 	list_add_tail(&obj_request->links, &img_request->obj_requests);
1110 	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1111 		obj_request->which);
1112 }
1113 
1114 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1115 					struct rbd_obj_request *obj_request)
1116 {
1117 	rbd_assert(obj_request->which != BAD_WHICH);
1118 
1119 	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1120 		obj_request->which);
1121 	list_del(&obj_request->links);
1122 	rbd_assert(img_request->obj_request_count > 0);
1123 	img_request->obj_request_count--;
1124 	rbd_assert(obj_request->which == img_request->obj_request_count);
1125 	obj_request->which = BAD_WHICH;
1126 	rbd_assert(obj_request->img_request == img_request);
1127 	obj_request->img_request = NULL;
1128 	obj_request->callback = NULL;
1129 	rbd_obj_request_put(obj_request);
1130 }
1131 
1132 static bool obj_request_type_valid(enum obj_request_type type)
1133 {
1134 	switch (type) {
1135 	case OBJ_REQUEST_NODATA:
1136 	case OBJ_REQUEST_BIO:
1137 	case OBJ_REQUEST_PAGES:
1138 		return true;
1139 	default:
1140 		return false;
1141 	}
1142 }
1143 
1144 static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
1145 {
1146 	struct ceph_osd_req_op *op;
1147 	va_list args;
1148 	size_t size;
1149 
1150 	op = kzalloc(sizeof (*op), GFP_NOIO);
1151 	if (!op)
1152 		return NULL;
1153 	op->op = opcode;
1154 	va_start(args, opcode);
1155 	switch (opcode) {
1156 	case CEPH_OSD_OP_READ:
1157 	case CEPH_OSD_OP_WRITE:
1158 		/* rbd_osd_req_op_create(READ, offset, length) */
1159 		/* rbd_osd_req_op_create(WRITE, offset, length) */
1160 		op->extent.offset = va_arg(args, u64);
1161 		op->extent.length = va_arg(args, u64);
1162 		if (opcode == CEPH_OSD_OP_WRITE)
1163 			op->payload_len = op->extent.length;
1164 		break;
1165 	case CEPH_OSD_OP_STAT:
1166 		break;
1167 	case CEPH_OSD_OP_CALL:
1168 		/* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
1169 		op->cls.class_name = va_arg(args, char *);
1170 		size = strlen(op->cls.class_name);
1171 		rbd_assert(size <= (size_t) U8_MAX);
1172 		op->cls.class_len = size;
1173 		op->payload_len = size;
1174 
1175 		op->cls.method_name = va_arg(args, char *);
1176 		size = strlen(op->cls.method_name);
1177 		rbd_assert(size <= (size_t) U8_MAX);
1178 		op->cls.method_len = size;
1179 		op->payload_len += size;
1180 
1181 		op->cls.argc = 0;
1182 		op->cls.indata = va_arg(args, void *);
1183 		size = va_arg(args, size_t);
1184 		rbd_assert(size <= (size_t) U32_MAX);
1185 		op->cls.indata_len = (u32) size;
1186 		op->payload_len += size;
1187 		break;
1188 	case CEPH_OSD_OP_NOTIFY_ACK:
1189 	case CEPH_OSD_OP_WATCH:
1190 		/* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
1191 		/* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
1192 		op->watch.cookie = va_arg(args, u64);
1193 		op->watch.ver = va_arg(args, u64);
1194 		op->watch.ver = cpu_to_le64(op->watch.ver);
1195 		if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
1196 			op->watch.flag = (u8) 1;
1197 		break;
1198 	default:
1199 		rbd_warn(NULL, "unsupported opcode %hu", opcode);
1200 		kfree(op);
1201 		op = NULL;
1202 		break;
1203 	}
1204 	va_end(args);
1205 
1206 	return op;
1207 }
1208 
1209 static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
1210 {
1211 	kfree(op);
1212 }
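
/*
 * Typical create/destroy pairing, as used by the I/O paths below.
 * The op's contents are copied by rbd_osd_req_create(), so it can be
 * destroyed as soon as the osd request has been built:
 *
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
 *	if (op) {
 *		osd_req = rbd_osd_req_create(rbd_dev, false,
 *						obj_request, op);
 *		rbd_osd_req_op_destroy(op);
 *	}
 */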
1213 
1214 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1215 				struct rbd_obj_request *obj_request)
1216 {
1217 	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1218 
1219 	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1220 }
1221 
1222 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1223 {
1224 	dout("%s: img %p\n", __func__, img_request);
1225 	if (img_request->callback)
1226 		img_request->callback(img_request);
1227 	else
1228 		rbd_img_request_put(img_request);
1229 }
1230 
1231 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1232 
1233 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1234 {
1235 	dout("%s: obj %p\n", __func__, obj_request);
1236 
1237 	return wait_for_completion_interruptible(&obj_request->completion);
1238 }
1239 
1240 static void obj_request_done_init(struct rbd_obj_request *obj_request)
1241 {
1242 	atomic_set(&obj_request->done, 0);
1243 	smp_wmb();
1244 }
1245 
1246 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1247 {
1248 	int done;
1249 
1250 	done = atomic_inc_return(&obj_request->done);
1251 	if (done > 1) {
1252 		struct rbd_img_request *img_request = obj_request->img_request;
1253 		struct rbd_device *rbd_dev;
1254 
1255 		rbd_dev = img_request ? img_request->rbd_dev : NULL;
1256 		rbd_warn(rbd_dev, "obj_request %p was already done",
1257 			obj_request);
1258 	}
1259 }
1260 
1261 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1262 {
1263 	smp_mb();
1264 	return atomic_read(&obj_request->done) != 0;
1265 }
1266 
1267 static void
1268 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1269 {
1270 	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1271 		obj_request, obj_request->img_request, obj_request->result,
1272 		obj_request->xferred, obj_request->length);
1273 	/*
1274 	 * ENOENT means a hole in the image.  We zero-fill the
1275 	 * entire length of the request.  A short read also implies
1276 	 * zero-fill to the end of the request.  Either way we
1277 	 * update the xferred count to indicate the whole request
1278 	 * was satisfied.
1279 	 */
1280 	BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
1281 	if (obj_request->result == -ENOENT) {
1282 		zero_bio_chain(obj_request->bio_list, 0);
1283 		obj_request->result = 0;
1284 		obj_request->xferred = obj_request->length;
1285 	} else if (obj_request->xferred < obj_request->length &&
1286 			!obj_request->result) {
1287 		zero_bio_chain(obj_request->bio_list, obj_request->xferred);
1288 		obj_request->xferred = obj_request->length;
1289 	}
1290 	obj_request_done_set(obj_request);
1291 }
1292 
1293 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1294 {
1295 	dout("%s: obj %p cb %p\n", __func__, obj_request,
1296 		obj_request->callback);
1297 	if (obj_request->callback)
1298 		obj_request->callback(obj_request);
1299 	else
1300 		complete_all(&obj_request->completion);
1301 }
1302 
1303 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1304 {
1305 	dout("%s: obj %p\n", __func__, obj_request);
1306 	obj_request_done_set(obj_request);
1307 }
1308 
1309 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1310 {
1311 	dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
1312 		obj_request->result, obj_request->xferred, obj_request->length);
1313 	if (obj_request->img_request)
1314 		rbd_img_obj_request_read_callback(obj_request);
1315 	else
1316 		obj_request_done_set(obj_request);
1317 }
1318 
1319 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1320 {
1321 	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1322 		obj_request->result, obj_request->length);
1323 	/*
1324 	 * There is no such thing as a successful short write.
1325 	 * Our xferred value is the number of bytes transferred
1326 	 * back.  Set it to our originally-requested length.
1327 	 */
1328 	obj_request->xferred = obj_request->length;
1329 	obj_request_done_set(obj_request);
1330 }
1331 
1332 /*
1333  * For a simple stat call there's nothing to do.  We'll do more if
1334  * this is part of a write sequence for a layered image.
1335  */
1336 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1337 {
1338 	dout("%s: obj %p\n", __func__, obj_request);
1339 	obj_request_done_set(obj_request);
1340 }
1341 
1342 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1343 				struct ceph_msg *msg)
1344 {
1345 	struct rbd_obj_request *obj_request = osd_req->r_priv;
1346 	u16 opcode;
1347 
1348 	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1349 	rbd_assert(osd_req == obj_request->osd_req);
1350 	rbd_assert(!!obj_request->img_request ^
1351 				(obj_request->which == BAD_WHICH));
1352 
1353 	if (osd_req->r_result < 0)
1354 		obj_request->result = osd_req->r_result;
1355 	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
1356 
1357 	WARN_ON(osd_req->r_num_ops != 1);	/* For now */
1358 
1359 	/*
1360 	 * We support a 64-bit length, but ultimately it has to be
1361 	 * passed to blk_end_request(), which takes an unsigned int.
1362 	 */
1363 	obj_request->xferred = osd_req->r_reply_op_len[0];
1364 	rbd_assert(obj_request->xferred < (u64) UINT_MAX);
1365 	opcode = osd_req->r_request_ops[0].op;
1366 	switch (opcode) {
1367 	case CEPH_OSD_OP_READ:
1368 		rbd_osd_read_callback(obj_request);
1369 		break;
1370 	case CEPH_OSD_OP_WRITE:
1371 		rbd_osd_write_callback(obj_request);
1372 		break;
1373 	case CEPH_OSD_OP_STAT:
1374 		rbd_osd_stat_callback(obj_request);
1375 		break;
1376 	case CEPH_OSD_OP_CALL:
1377 	case CEPH_OSD_OP_NOTIFY_ACK:
1378 	case CEPH_OSD_OP_WATCH:
1379 		rbd_osd_trivial_callback(obj_request);
1380 		break;
1381 	default:
1382 		rbd_warn(NULL, "%s: unsupported op %hu",
1383 			obj_request->object_name, (unsigned short) opcode);
1384 		break;
1385 	}
1386 
1387 	if (obj_request_done_test(obj_request))
1388 		rbd_obj_request_complete(obj_request);
1389 }
1390 
1391 static struct ceph_osd_request *rbd_osd_req_create(
1392 					struct rbd_device *rbd_dev,
1393 					bool write_request,
1394 					struct rbd_obj_request *obj_request,
1395 					struct ceph_osd_req_op *op)
1396 {
1397 	struct rbd_img_request *img_request = obj_request->img_request;
1398 	struct ceph_snap_context *snapc = NULL;
1399 	struct ceph_osd_client *osdc;
1400 	struct ceph_osd_request *osd_req;
1401 	struct timespec now;
1402 	struct timespec *mtime;
1403 	u64 snap_id = CEPH_NOSNAP;
1404 	u64 offset = obj_request->offset;
1405 	u64 length = obj_request->length;
1406 
1407 	if (img_request) {
1408 		rbd_assert(img_request->write_request == write_request);
1409 		if (img_request->write_request)
1410 			snapc = img_request->snapc;
1411 		else
1412 			snap_id = img_request->snap_id;
1413 	}
1414 
1415 	/* Allocate and initialize the request for the single op */
1416 
1417 	osdc = &rbd_dev->rbd_client->client->osdc;
1418 	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1419 	if (!osd_req)
1420 		return NULL;	/* ENOMEM */
1421 
1422 	rbd_assert(obj_request_type_valid(obj_request->type));
1423 	switch (obj_request->type) {
1424 	case OBJ_REQUEST_NODATA:
1425 		break;		/* Nothing to do */
1426 	case OBJ_REQUEST_BIO:
1427 		rbd_assert(obj_request->bio_list != NULL);
1428 		osd_req->r_bio = obj_request->bio_list;
1429 		break;
1430 	case OBJ_REQUEST_PAGES:
1431 		osd_req->r_pages = obj_request->pages;
1432 		osd_req->r_num_pages = obj_request->page_count;
1433 		osd_req->r_page_alignment = offset & ~PAGE_MASK;
1434 		break;
1435 	}
1436 
1437 	if (write_request) {
1438 		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1439 		now = CURRENT_TIME;
1440 		mtime = &now;
1441 	} else {
1442 		osd_req->r_flags = CEPH_OSD_FLAG_READ;
1443 		mtime = NULL;	/* not needed for reads */
1444 		offset = 0;	/* These are not used... */
1445 		length = 0;	/* ...for osd read requests */
1446 	}
1447 
1448 	osd_req->r_callback = rbd_osd_req_callback;
1449 	osd_req->r_priv = obj_request;
1450 
1451 	osd_req->r_oid_len = strlen(obj_request->object_name);
1452 	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1453 	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1454 
1455 	osd_req->r_file_layout = rbd_dev->layout;	/* struct */
1456 
1457 	/* osd_req will get its own reference to snapc (if non-null) */
1458 
1459 	ceph_osdc_build_request(osd_req, offset, length, 1, op,
1460 				snapc, snap_id, mtime);
1461 
1462 	return osd_req;
1463 }
1464 
1465 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1466 {
1467 	ceph_osdc_put_request(osd_req);
1468 }
1469 
1470 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1471 
1472 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1473 						u64 offset, u64 length,
1474 						enum obj_request_type type)
1475 {
1476 	struct rbd_obj_request *obj_request;
1477 	size_t size;
1478 	char *name;
1479 
1480 	rbd_assert(obj_request_type_valid(type));
1481 
1482 	size = strlen(object_name) + 1;
1483 	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
1484 	if (!obj_request)
1485 		return NULL;
1486 
1487 	name = (char *)(obj_request + 1);
1488 	obj_request->object_name = memcpy(name, object_name, size);
1489 	obj_request->offset = offset;
1490 	obj_request->length = length;
1491 	obj_request->which = BAD_WHICH;
1492 	obj_request->type = type;
1493 	INIT_LIST_HEAD(&obj_request->links);
1494 	obj_request_done_init(obj_request);
1495 	init_completion(&obj_request->completion);
1496 	kref_init(&obj_request->kref);
1497 
1498 	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1499 		offset, length, (int)type, obj_request);
1500 
1501 	return obj_request;
1502 }
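
/*
 * Note the single-allocation layout used above: the object name lives
 * in the same kzalloc()ed block, immediately after the structure, so
 * rbd_obj_request_destroy() releases both with one kfree():
 *
 *	+------------------------+---------------------------+
 *	| struct rbd_obj_request | object name bytes (+ NUL) |
 *	+------------------------+---------------------------+
 *	^ obj_request		 ^ (char *)(obj_request + 1)
 */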
1503 
1504 static void rbd_obj_request_destroy(struct kref *kref)
1505 {
1506 	struct rbd_obj_request *obj_request;
1507 
1508 	obj_request = container_of(kref, struct rbd_obj_request, kref);
1509 
1510 	dout("%s: obj %p\n", __func__, obj_request);
1511 
1512 	rbd_assert(obj_request->img_request == NULL);
1513 	rbd_assert(obj_request->which == BAD_WHICH);
1514 
1515 	if (obj_request->osd_req)
1516 		rbd_osd_req_destroy(obj_request->osd_req);
1517 
1518 	rbd_assert(obj_request_type_valid(obj_request->type));
1519 	switch (obj_request->type) {
1520 	case OBJ_REQUEST_NODATA:
1521 		break;		/* Nothing to do */
1522 	case OBJ_REQUEST_BIO:
1523 		if (obj_request->bio_list)
1524 			bio_chain_put(obj_request->bio_list);
1525 		break;
1526 	case OBJ_REQUEST_PAGES:
1527 		if (obj_request->pages)
1528 			ceph_release_page_vector(obj_request->pages,
1529 						obj_request->page_count);
1530 		break;
1531 	}
1532 
1533 	kfree(obj_request);
1534 }
1535 
1536 /*
1537  * Caller is responsible for filling in the list of object requests
1538  * that comprises the image request, and the Linux request pointer
1539  * (if there is one).
1540  */
1541 static struct rbd_img_request *rbd_img_request_create(
1542 					struct rbd_device *rbd_dev,
1543 					u64 offset, u64 length,
1544 					bool write_request)
1545 {
1546 	struct rbd_img_request *img_request;
1547 	struct ceph_snap_context *snapc = NULL;
1548 
1549 	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1550 	if (!img_request)
1551 		return NULL;
1552 
1553 	if (write_request) {
1554 		down_read(&rbd_dev->header_rwsem);
1555 		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1556 		up_read(&rbd_dev->header_rwsem);
1557 		if (WARN_ON(!snapc)) {
1558 			kfree(img_request);
1559 			return NULL;	/* Shouldn't happen */
1560 		}
1561 	}
1562 
1563 	img_request->rq = NULL;
1564 	img_request->rbd_dev = rbd_dev;
1565 	img_request->offset = offset;
1566 	img_request->length = length;
1567 	img_request->write_request = write_request;
1568 	if (write_request)
1569 		img_request->snapc = snapc;
1570 	else
1571 		img_request->snap_id = rbd_dev->spec->snap_id;
1572 	spin_lock_init(&img_request->completion_lock);
1573 	img_request->next_completion = 0;
1574 	img_request->callback = NULL;
1575 	img_request->obj_request_count = 0;
1576 	INIT_LIST_HEAD(&img_request->obj_requests);
1577 	kref_init(&img_request->kref);
1578 
1579 	rbd_img_request_get(img_request);	/* Avoid a warning */
1580 	rbd_img_request_put(img_request);	/* TEMPORARY */
1581 
1582 	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1583 		write_request ? "write" : "read", offset, length,
1584 		img_request);
1585 
1586 	return img_request;
1587 }
1588 
1589 static void rbd_img_request_destroy(struct kref *kref)
1590 {
1591 	struct rbd_img_request *img_request;
1592 	struct rbd_obj_request *obj_request;
1593 	struct rbd_obj_request *next_obj_request;
1594 
1595 	img_request = container_of(kref, struct rbd_img_request, kref);
1596 
1597 	dout("%s: img %p\n", __func__, img_request);
1598 
1599 	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1600 		rbd_img_obj_request_del(img_request, obj_request);
1601 	rbd_assert(img_request->obj_request_count == 0);
1602 
1603 	if (img_request->write_request)
1604 		ceph_put_snap_context(img_request->snapc);
1605 
1606 	kfree(img_request);
1607 }
1608 
1609 static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
1610 					struct bio *bio_list)
1611 {
1612 	struct rbd_device *rbd_dev = img_request->rbd_dev;
1613 	struct rbd_obj_request *obj_request = NULL;
1614 	struct rbd_obj_request *next_obj_request;
1615 	unsigned int bio_offset;
1616 	u64 image_offset;
1617 	u64 resid;
1618 	u16 opcode;
1619 
1620 	dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
1621 
1622 	opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
1623 					      : CEPH_OSD_OP_READ;
1624 	bio_offset = 0;
1625 	image_offset = img_request->offset;
1626 	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
1627 	resid = img_request->length;
1628 	rbd_assert(resid > 0);
1629 	while (resid) {
1630 		const char *object_name;
1631 		unsigned int clone_size;
1632 		struct ceph_osd_req_op *op;
1633 		u64 offset;
1634 		u64 length;
1635 
1636 		object_name = rbd_segment_name(rbd_dev, image_offset);
1637 		if (!object_name)
1638 			goto out_unwind;
1639 		offset = rbd_segment_offset(rbd_dev, image_offset);
1640 		length = rbd_segment_length(rbd_dev, image_offset, resid);
1641 		obj_request = rbd_obj_request_create(object_name,
1642 						offset, length,
1643 						OBJ_REQUEST_BIO);
1644 		kfree(object_name);	/* object request has its own copy */
1645 		if (!obj_request)
1646 			goto out_unwind;
1647 
1648 		rbd_assert(length <= (u64) UINT_MAX);
1649 		clone_size = (unsigned int) length;
1650 		obj_request->bio_list = bio_chain_clone_range(&bio_list,
1651 						&bio_offset, clone_size,
1652 						GFP_ATOMIC);
1653 		if (!obj_request->bio_list)
1654 			goto out_partial;
1655 
1656 		/*
1657 		 * Build up the op to use in building the osd
1658 		 * request.  Note that the contents of the op are
1659 		 * copied by rbd_osd_req_create().
1660 		 */
1661 		op = rbd_osd_req_op_create(opcode, offset, length);
1662 		if (!op)
1663 			goto out_partial;
1664 		obj_request->osd_req = rbd_osd_req_create(rbd_dev,
1665 						img_request->write_request,
1666 						obj_request, op);
1667 		rbd_osd_req_op_destroy(op);
1668 		if (!obj_request->osd_req)
1669 			goto out_partial;
1670 		/* status and version are initially zero-filled */
1671 
1672 		rbd_img_obj_request_add(img_request, obj_request);
1673 
1674 		image_offset += length;
1675 		resid -= length;
1676 	}
1677 
1678 	return 0;
1679 
1680 out_partial:
1681 	rbd_obj_request_put(obj_request);
1682 out_unwind:
1683 	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1684 		rbd_obj_request_put(obj_request);
1685 
1686 	return -ENOMEM;
1687 }
1688 
1689 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1690 {
1691 	struct rbd_img_request *img_request;
1692 	u32 which = obj_request->which;
1693 	bool more = true;
1694 
1695 	img_request = obj_request->img_request;
1696 
1697 	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1698 	rbd_assert(img_request != NULL);
1699 	rbd_assert(img_request->rq != NULL);
1700 	rbd_assert(img_request->obj_request_count > 0);
1701 	rbd_assert(which != BAD_WHICH);
1702 	rbd_assert(which < img_request->obj_request_count);
1703 	rbd_assert(which >= img_request->next_completion);
1704 
1705 	spin_lock_irq(&img_request->completion_lock);
1706 	if (which != img_request->next_completion)
1707 		goto out;
1708 
1709 	for_each_obj_request_from(img_request, obj_request) {
1710 		unsigned int xferred;
1711 		int result;
1712 
1713 		rbd_assert(more);
1714 		rbd_assert(which < img_request->obj_request_count);
1715 
1716 		if (!obj_request_done_test(obj_request))
1717 			break;
1718 
1719 		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
1720 		xferred = (unsigned int) obj_request->xferred;
1721 		result = (int) obj_request->result;
1722 		if (result)
1723 			rbd_warn(NULL, "obj_request %s result %d xferred %u",
1724 				img_request->write_request ? "write" : "read",
1725 				result, xferred);
1726 
1727 		more = blk_end_request(img_request->rq, result, xferred);
1728 		which++;
1729 	}
1730 
1731 	rbd_assert(more ^ (which == img_request->obj_request_count));
1732 	img_request->next_completion = which;
1733 out:
1734 	spin_unlock_irq(&img_request->completion_lock);
1735 
1736 	if (!more)
1737 		rbd_img_request_complete(img_request);
1738 }
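
/*
 * Why the next_completion gate above works: object requests may
 * complete in any order, but blk_end_request() must consume the block
 * request's bytes sequentially.  With three object requests, if
 * request 2 finishes first it only marks itself done (2 != 0).  When
 * request 0 later finishes, the loop consumes it and stops at the
 * still-pending request 1; when request 1 finally finishes, the loop
 * consumes both it and the already-done request 2.
 */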
1739 
1740 static int rbd_img_request_submit(struct rbd_img_request *img_request)
1741 {
1742 	struct rbd_device *rbd_dev = img_request->rbd_dev;
1743 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1744 	struct rbd_obj_request *obj_request;
1745 	struct rbd_obj_request *next_obj_request;
1746 
1747 	dout("%s: img %p\n", __func__, img_request);
1748 	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
1749 		int ret;
1750 
1751 		obj_request->callback = rbd_img_obj_callback;
1752 		ret = rbd_obj_request_submit(osdc, obj_request);
1753 		if (ret)
1754 			return ret;
1755 		/*
1756 		 * The image request has its own reference to each
1757 		 * of its object requests, so we can safely drop the
1758 		 * initial one here.
1759 		 */
1760 		rbd_obj_request_put(obj_request);
1761 	}
1762 
1763 	return 0;
1764 }
1765 
1766 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
1767 				   u64 ver, u64 notify_id)
1768 {
1769 	struct rbd_obj_request *obj_request;
1770 	struct ceph_osd_req_op *op;
1771 	struct ceph_osd_client *osdc;
1772 	int ret;
1773 
1774 	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1775 							OBJ_REQUEST_NODATA);
1776 	if (!obj_request)
1777 		return -ENOMEM;
1778 
1779 	ret = -ENOMEM;
1780 	op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
1781 	if (!op)
1782 		goto out;
1783 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
1784 						obj_request, op);
1785 	rbd_osd_req_op_destroy(op);
1786 	if (!obj_request->osd_req)
1787 		goto out;
1788 
1789 	osdc = &rbd_dev->rbd_client->client->osdc;
1790 	obj_request->callback = rbd_obj_request_put;
1791 	ret = rbd_obj_request_submit(osdc, obj_request);
1792 out:
1793 	if (ret)
1794 		rbd_obj_request_put(obj_request);
1795 
1796 	return ret;
1797 }
1798 
1799 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1800 {
1801 	struct rbd_device *rbd_dev = (struct rbd_device *)data;
1802 	u64 hver;
1803 	int rc;
1804 
1805 	if (!rbd_dev)
1806 		return;
1807 
1808 	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
1809 		rbd_dev->header_name, (unsigned long long) notify_id,
1810 		(unsigned int) opcode);
1811 	rc = rbd_dev_refresh(rbd_dev, &hver);
1812 	if (rc)
1813 		rbd_warn(rbd_dev, "got notification but failed to "
1814 			   "update snaps: %d", rc);
1815 
1816 	rbd_obj_notify_ack(rbd_dev, hver, notify_id);
1817 }
1818 
1819 /*
1820  * Request sync osd watch/unwatch.  The value of "start" determines
1821  * whether a watch request is being initiated or torn down.
1822  */
1823 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
1824 {
1825 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1826 	struct rbd_obj_request *obj_request;
1827 	struct ceph_osd_req_op *op;
1828 	int ret;
1829 
1830 	rbd_assert(start ^ !!rbd_dev->watch_event);
1831 	rbd_assert(start ^ !!rbd_dev->watch_request);
1832 
1833 	if (start) {
1834 		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
1835 						&rbd_dev->watch_event);
1836 		if (ret < 0)
1837 			return ret;
1838 		rbd_assert(rbd_dev->watch_event != NULL);
1839 	}
1840 
1841 	ret = -ENOMEM;
1842 	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1843 							OBJ_REQUEST_NODATA);
1844 	if (!obj_request)
1845 		goto out_cancel;
1846 
1847 	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
1848 				rbd_dev->watch_event->cookie,
1849 				rbd_dev->header.obj_version, start);
1850 	if (!op)
1851 		goto out_cancel;
1852 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
1853 							obj_request, op);
1854 	rbd_osd_req_op_destroy(op);
1855 	if (!obj_request->osd_req)
1856 		goto out_cancel;
1857 
1858 	if (start)
1859 		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
1860 	else
1861 		ceph_osdc_unregister_linger_request(osdc,
1862 					rbd_dev->watch_request->osd_req);
1863 	ret = rbd_obj_request_submit(osdc, obj_request);
1864 	if (ret)
1865 		goto out_cancel;
1866 	ret = rbd_obj_request_wait(obj_request);
1867 	if (ret)
1868 		goto out_cancel;
1869 	ret = obj_request->result;
1870 	if (ret)
1871 		goto out_cancel;
1872 
1873 	/*
1874 	 * A watch request is set to linger, so the underlying osd
1875 	 * request won't go away until we unregister it.  We retain
1876 	 * a pointer to the object request during that time (in
1877 	 * rbd_dev->watch_request), so we'll keep a reference to
1878 	 * it.  We'll drop that reference (below) after we've
1879 	 * unregistered it.
1880 	 */
1881 	if (start) {
1882 		rbd_dev->watch_request = obj_request;
1883 
1884 		return 0;
1885 	}
1886 
1887 	/* We have successfully torn down the watch request */
1888 
1889 	rbd_obj_request_put(rbd_dev->watch_request);
1890 	rbd_dev->watch_request = NULL;
1891 out_cancel:
1892 	/* Cancel the event if we're tearing down, or on error */
1893 	ceph_osdc_cancel_event(rbd_dev->watch_event);
1894 	rbd_dev->watch_event = NULL;
1895 	if (obj_request)
1896 		rbd_obj_request_put(obj_request);
1897 
1898 	return ret;
1899 }
1900 
1901 /*
1902  * Synchronous osd object method call
1903  */
1904 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
1905 			     const char *object_name,
1906 			     const char *class_name,
1907 			     const char *method_name,
1908 			     const char *outbound,
1909 			     size_t outbound_size,
1910 			     char *inbound,
1911 			     size_t inbound_size,
1912 			     u64 *version)
1913 {
1914 	struct rbd_obj_request *obj_request;
1915 	struct ceph_osd_client *osdc;
1916 	struct ceph_osd_req_op *op;
1917 	struct page **pages;
1918 	u32 page_count;
1919 	int ret;
1920 
1921 	/*
1922 	 * Method calls are ultimately read operations but they
1923 	 * don't involve object data (so no offset or length).
1924 	 * The result should be placed into the inbound buffer
1925 	 * provided.  They also supply outbound data--parameters for
1926 	 * the object method.  Currently if this is present it will
1927 	 * be a snapshot id.
1928 	 */
1929 	page_count = (u32) calc_pages_for(0, inbound_size);
1930 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
1931 	if (IS_ERR(pages))
1932 		return PTR_ERR(pages);
1933 
1934 	ret = -ENOMEM;
1935 	obj_request = rbd_obj_request_create(object_name, 0, 0,
1936 							OBJ_REQUEST_PAGES);
1937 	if (!obj_request)
1938 		goto out;
1939 
1940 	obj_request->pages = pages;
1941 	obj_request->page_count = page_count;
1942 
1943 	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
1944 					method_name, outbound, outbound_size);
1945 	if (!op)
1946 		goto out;
1947 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
1948 						obj_request, op);
1949 	rbd_osd_req_op_destroy(op);
1950 	if (!obj_request->osd_req)
1951 		goto out;
1952 
1953 	osdc = &rbd_dev->rbd_client->client->osdc;
1954 	ret = rbd_obj_request_submit(osdc, obj_request);
1955 	if (ret)
1956 		goto out;
1957 	ret = rbd_obj_request_wait(obj_request);
1958 	if (ret)
1959 		goto out;
1960 
1961 	ret = obj_request->result;
1962 	if (ret < 0)
1963 		goto out;
1964 	ret = 0;
1965 	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
1966 	if (version)
1967 		*version = obj_request->version;
1968 out:
1969 	if (obj_request)
1970 		rbd_obj_request_put(obj_request);
1971 	else
1972 		ceph_release_page_vector(pages, page_count);
1973 
1974 	return ret;
1975 }
1976 
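     /*
      * The request_fn for the rbd block device's request queue.  Each
      * filesystem request is turned into an image request and
      * submitted; the queue lock is dropped while that (possibly
      * blocking) setup work is done.
      */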
1977 static void rbd_request_fn(struct request_queue *q)
1978 		__releases(q->queue_lock) __acquires(q->queue_lock)
1979 {
1980 	struct rbd_device *rbd_dev = q->queuedata;
1981 	bool read_only = rbd_dev->mapping.read_only;
1982 	struct request *rq;
1983 	int result;
1984 
1985 	while ((rq = blk_fetch_request(q))) {
1986 		bool write_request = rq_data_dir(rq) == WRITE;
1987 		struct rbd_img_request *img_request;
1988 		u64 offset;
1989 		u64 length;
1990 
1991 		/* Ignore any non-FS requests that filter through. */
1992 
1993 		if (rq->cmd_type != REQ_TYPE_FS) {
1994 			dout("%s: non-fs request type %d\n", __func__,
1995 				(int) rq->cmd_type);
1996 			__blk_end_request_all(rq, 0);
1997 			continue;
1998 		}
1999 
2000 		/* Ignore/skip any zero-length requests */
2001 
2002 		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2003 		length = (u64) blk_rq_bytes(rq);
2004 
2005 		if (!length) {
2006 			dout("%s: zero-length request\n", __func__);
2007 			__blk_end_request_all(rq, 0);
2008 			continue;
2009 		}
2010 
2011 		spin_unlock_irq(q->queue_lock);
2012 
2013 		/* Disallow writes to a read-only device */
2014 
2015 		if (write_request) {
2016 			result = -EROFS;
2017 			if (read_only)
2018 				goto end_request;
2019 			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2020 		}
2021 
2022 		/*
2023 		 * Quit early if the mapped snapshot no longer
2024 		 * exists.  It's still possible the snapshot will
2025 		 * have disappeared by the time our request arrives
2026 		 * at the osd, but there's no sense in sending it if
2027 		 * we already know.
2028 		 */
2029 		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2030 		dout("request for non-existent snapshot\n");
2031 			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2032 			result = -ENXIO;
2033 			goto end_request;
2034 		}
2035 
2036 		result = -EINVAL;
2037 		if (WARN_ON(offset && length > U64_MAX - offset + 1))
2038 			goto end_request;	/* Shouldn't happen */
2039 
2040 		result = -ENOMEM;
2041 		img_request = rbd_img_request_create(rbd_dev, offset, length,
2042 							write_request);
2043 		if (!img_request)
2044 			goto end_request;
2045 
2046 		img_request->rq = rq;
2047 
2048 		result = rbd_img_request_fill_bio(img_request, rq->bio);
2049 		if (!result)
2050 			result = rbd_img_request_submit(img_request);
2051 		if (result)
2052 			rbd_img_request_put(img_request);
2053 end_request:
2054 		spin_lock_irq(q->queue_lock);
2055 		if (result < 0) {
2056 			rbd_warn(rbd_dev, "img_request %s result %d\n",
2057 				write_request ? "write" : "read", result);
2058 			__blk_end_request_all(rq, result);
2059 		}
2060 	}
2061 }
2062 
2063 /*
2064  * A queue callback.  Makes sure that we don't create a bio that
2065  * spans multiple osd objects.  One exception is single-page bios,
2066  * which we handle later, in bio_chain_clone_range().
2067  */
2068 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2069 			  struct bio_vec *bvec)
2070 {
2071 	struct rbd_device *rbd_dev = q->queuedata;
2072 	sector_t sector_offset;
2073 	sector_t sectors_per_obj;
2074 	sector_t obj_sector_offset;
2075 	int ret;
2076 
2077 	/*
2078 	 * Convert the partition-relative bio start sector into a sector
2079 	 * offset on the whole device, then find how far into its rbd
2080 	 * object that sector falls.
2081 	 */
2082 	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2083 	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2084 	obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2085 
2086 	/*
2087 	 * Compute the number of bytes from that offset to the end
2088 	 * of the object.  Account for what's already used by the bio.
2089 	 */
2090 	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2091 	if (ret > bmd->bi_size)
2092 		ret -= bmd->bi_size;
2093 	else
2094 		ret = 0;
2095 
2096 	/*
2097 	 * Don't send back more than was asked for.  And if the bio
2098 	 * was empty, let the whole thing through because:  "Note
2099 	 * that a block device *must* allow a single page to be
2100 	 * added to an empty bio."
2101 	 */
2102 	rbd_assert(bvec->bv_len <= PAGE_SIZE);
2103 	if (ret > (int) bvec->bv_len || !bmd->bi_size)
2104 		ret = (int) bvec->bv_len;
2105 
2106 	return ret;
2107 }
2108 
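     /* Tear down a device's gendisk and its request queue */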
2109 static void rbd_free_disk(struct rbd_device *rbd_dev)
2110 {
2111 	struct gendisk *disk = rbd_dev->disk;
2112 
2113 	if (!disk)
2114 		return;
2115 
2116 	if (disk->flags & GENHD_FL_UP)
2117 		del_gendisk(disk);
2118 	if (disk->queue)
2119 		blk_cleanup_queue(disk->queue);
2120 	put_disk(disk);
2121 }
2122 
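     /*
      * Synchronously read up to "length" bytes from an object into
      * "buf".  Returns the number of bytes read, or a negative errno.
      */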
2123 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2124 				const char *object_name,
2125 				u64 offset, u64 length,
2126 				char *buf, u64 *version)
2128 {
2129 	struct ceph_osd_req_op *op;
2130 	struct rbd_obj_request *obj_request;
2131 	struct ceph_osd_client *osdc;
2132 	struct page **pages = NULL;
2133 	u32 page_count;
2134 	size_t size;
2135 	int ret;
2136 
2137 	page_count = (u32) calc_pages_for(offset, length);
2138 	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2139 	if (IS_ERR(pages))
2140 		return PTR_ERR(pages);
2141 
2142 	ret = -ENOMEM;
2143 	obj_request = rbd_obj_request_create(object_name, offset, length,
2144 							OBJ_REQUEST_PAGES);
2145 	if (!obj_request)
2146 		goto out;
2147 
2148 	obj_request->pages = pages;
2149 	obj_request->page_count = page_count;
2150 
2151 	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
2152 	if (!op)
2153 		goto out;
2154 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2155 						obj_request, op);
2156 	rbd_osd_req_op_destroy(op);
2157 	if (!obj_request->osd_req)
2158 		goto out;
2159 
2160 	osdc = &rbd_dev->rbd_client->client->osdc;
2161 	ret = rbd_obj_request_submit(osdc, obj_request);
2162 	if (ret)
2163 		goto out;
2164 	ret = rbd_obj_request_wait(obj_request);
2165 	if (ret)
2166 		goto out;
2167 
2168 	ret = obj_request->result;
2169 	if (ret < 0)
2170 		goto out;
2171 
2172 	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2173 	size = (size_t) obj_request->xferred;
2174 	ceph_copy_from_page_vector(pages, buf, 0, size);
2175 	rbd_assert(size <= (size_t) INT_MAX);
2176 	ret = (int) size;
2177 	if (version)
2178 		*version = obj_request->version;
2179 out:
2180 	if (obj_request)
2181 		rbd_obj_request_put(obj_request);
2182 	else
2183 		ceph_release_page_vector(pages, page_count);
2184 
2185 	return ret;
2186 }
2187 
2188 /*
2189  * Read the complete header for the given rbd device.
2190  *
2191  * Returns a pointer to a dynamically-allocated buffer containing
2192  * the complete and validated header.  Caller can pass the address
2193  * of a variable that will be filled in with the version of the
2194  * header object at the time it was read.  The caller must
      * kfree() the returned buffer when done with it.
2195  *
2196  * Returns a pointer-coded errno if a failure occurs.
2197  */
2198 static struct rbd_image_header_ondisk *
2199 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
2200 {
2201 	struct rbd_image_header_ondisk *ondisk = NULL;
2202 	u32 snap_count = 0;
2203 	u64 names_size = 0;
2204 	u32 want_count;
2205 	int ret;
2206 
2207 	/*
2208 	 * The complete header will include an array of its 64-bit
2209 	 * snapshot ids, followed by the names of those snapshots as
2210 	 * a contiguous block of NUL-terminated strings.  Note that
2211 	 * the number of snapshots could change by the time we read
2212 	 * it in, in which case we re-read it.
2213 	 */
2214 	do {
2215 		size_t size;
2216 
2217 		kfree(ondisk);
2218 
2219 		size = sizeof (*ondisk);
2220 		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2221 		size += names_size;
2222 		ondisk = kmalloc(size, GFP_KERNEL);
2223 		if (!ondisk)
2224 			return ERR_PTR(-ENOMEM);
2225 
2226 		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2227 				       0, size,
2228 				       (char *) ondisk, version);
2229 		if (ret < 0)
2230 			goto out_err;
2231 		if (WARN_ON((size_t) ret < size)) {
2232 			ret = -ENXIO;
2233 			rbd_warn(rbd_dev, "short header read (want %zu got %d)",
2234 				size, ret);
2235 			goto out_err;
2236 		}
2237 		if (!rbd_dev_ondisk_valid(ondisk)) {
2238 			ret = -ENXIO;
2239 			rbd_warn(rbd_dev, "invalid header");
2240 			goto out_err;
2241 		}
2242 
2243 		names_size = le64_to_cpu(ondisk->snap_names_len);
2244 		want_count = snap_count;
2245 		snap_count = le32_to_cpu(ondisk->snap_count);
2246 	} while (snap_count != want_count);
2247 
2248 	return ondisk;
2249 
2250 out_err:
2251 	kfree(ondisk);
2252 
2253 	return ERR_PTR(ret);
2254 }
2255 
2256 /*
2257  * reload the ondisk header
2258  */
2259 static int rbd_read_header(struct rbd_device *rbd_dev,
2260 			   struct rbd_image_header *header)
2261 {
2262 	struct rbd_image_header_ondisk *ondisk;
2263 	u64 ver = 0;
2264 	int ret;
2265 
2266 	ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
2267 	if (IS_ERR(ondisk))
2268 		return PTR_ERR(ondisk);
2269 	ret = rbd_header_from_disk(header, ondisk);
2270 	if (ret >= 0)
2271 		header->obj_version = ver;
2272 	kfree(ondisk);
2273 
2274 	return ret;
2275 }
2276 
2277 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2278 {
2279 	struct rbd_snap *snap;
2280 	struct rbd_snap *next;
2281 
2282 	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
2283 		rbd_remove_snap_dev(snap);
2284 }
2285 
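     /*
      * Propagate a changed base image size to the mapping and to the
      * gendisk capacity.  Snapshot mappings never change size.
      */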
2286 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2287 {
2288 	sector_t size;
2289 
2290 	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2291 		return;
2292 
2293 	size = (sector_t) (rbd_dev->header.image_size / SECTOR_SIZE);
2294 	dout("setting size to %llu sectors", (unsigned long long) size);
2295 	rbd_dev->mapping.size = (u64) size;
2296 	set_capacity(rbd_dev->disk, size);
2297 }
2298 
2299 /*
2300  * Re-read the v1 image header and bring the in-memory header,
      * mapping size, and snapshot list up to date.
2301  */
2302 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
2303 {
2304 	int ret;
2305 	struct rbd_image_header h;
2306 
2307 	ret = rbd_read_header(rbd_dev, &h);
2308 	if (ret < 0)
2309 		return ret;
2310 
2311 	down_write(&rbd_dev->header_rwsem);
2312 
2313 	/* Update image size, and check for resize of mapped image */
2314 	rbd_dev->header.image_size = h.image_size;
2315 	rbd_update_mapping_size(rbd_dev);
2316 
2317 	/* rbd_dev->header.object_prefix shouldn't change */
2318 	kfree(rbd_dev->header.snap_sizes);
2319 	kfree(rbd_dev->header.snap_names);
2320 	/* osd requests may still refer to snapc */
2321 	ceph_put_snap_context(rbd_dev->header.snapc);
2322 
2323 	if (hver)
2324 		*hver = h.obj_version;
2325 	rbd_dev->header.obj_version = h.obj_version;
2326 	rbd_dev->header.image_size = h.image_size;
2327 	rbd_dev->header.snapc = h.snapc;
2328 	rbd_dev->header.snap_names = h.snap_names;
2329 	rbd_dev->header.snap_sizes = h.snap_sizes;
2330 	/* Free the extra copy of the object prefix */
2331 	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
2332 	kfree(h.object_prefix);
2333 
2334 	ret = rbd_dev_snaps_update(rbd_dev);
2335 	if (!ret)
2336 		ret = rbd_dev_snaps_register(rbd_dev);
2337 
2338 	up_write(&rbd_dev->header_rwsem);
2339 
2340 	return ret;
2341 }
2342 
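     /*
      * Re-read the image header, using the method appropriate to the
      * image format.  Serialized with other control operations via
      * ctl_mutex.
      */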
2343 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
2344 {
2345 	int ret;
2346 
2347 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
2348 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2349 	if (rbd_dev->image_format == 1)
2350 		ret = rbd_dev_v1_refresh(rbd_dev, hver);
2351 	else
2352 		ret = rbd_dev_v2_refresh(rbd_dev, hver);
2353 	mutex_unlock(&ctl_mutex);
2354 
2355 	return ret;
2356 }
2357 
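     /*
      * Set up the gendisk and request queue for the mapped image.
      * The queue's I/O limits are sized to the rbd object size.
      */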
2358 static int rbd_init_disk(struct rbd_device *rbd_dev)
2359 {
2360 	struct gendisk *disk;
2361 	struct request_queue *q;
2362 	u64 segment_size;
2363 
2364 	/* create gendisk info */
2365 	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
2366 	if (!disk)
2367 		return -ENOMEM;
2368 
2369 	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
2370 		 rbd_dev->dev_id);
2371 	disk->major = rbd_dev->major;
2372 	disk->first_minor = 0;
2373 	disk->fops = &rbd_bd_ops;
2374 	disk->private_data = rbd_dev;
2375 
2376 	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
2377 	if (!q)
2378 		goto out_disk;
2379 
2380 	/* We use the default size, but let's be explicit about it. */
2381 	blk_queue_physical_block_size(q, SECTOR_SIZE);
2382 
2383 	/* set io sizes to object size */
2384 	segment_size = rbd_obj_bytes(&rbd_dev->header);
2385 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
2386 	blk_queue_max_segment_size(q, segment_size);
2387 	blk_queue_io_min(q, segment_size);
2388 	blk_queue_io_opt(q, segment_size);
2389 
2390 	blk_queue_merge_bvec(q, rbd_merge_bvec);
2391 	disk->queue = q;
2392 
2393 	q->queuedata = rbd_dev;
2394 
2395 	rbd_dev->disk = disk;
2396 
2397 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2398 
2399 	return 0;
2400 out_disk:
2401 	put_disk(disk);
2402 
2403 	return -ENOMEM;
2404 }
2405 
2406 /*
2407   sysfs
2408 */
2409 
2410 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
2411 {
2412 	return container_of(dev, struct rbd_device, dev);
2413 }
2414 
2415 static ssize_t rbd_size_show(struct device *dev,
2416 			     struct device_attribute *attr, char *buf)
2417 {
2418 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2419 	sector_t size;
2420 
2421 	down_read(&rbd_dev->header_rwsem);
2422 	size = get_capacity(rbd_dev->disk);
2423 	up_read(&rbd_dev->header_rwsem);
2424 
2425 	return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
2426 }
2427 
2428 /*
2429  * Note this shows the features for whatever's mapped, which is not
2430  * necessarily the base image.
2431  */
2432 static ssize_t rbd_features_show(struct device *dev,
2433 			     struct device_attribute *attr, char *buf)
2434 {
2435 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2436 
2437 	return sprintf(buf, "0x%016llx\n",
2438 			(unsigned long long) rbd_dev->mapping.features);
2439 }
2440 
2441 static ssize_t rbd_major_show(struct device *dev,
2442 			      struct device_attribute *attr, char *buf)
2443 {
2444 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2445 
2446 	return sprintf(buf, "%d\n", rbd_dev->major);
2447 }
2448 
2449 static ssize_t rbd_client_id_show(struct device *dev,
2450 				  struct device_attribute *attr, char *buf)
2451 {
2452 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2453 
2454 	return sprintf(buf, "client%lld\n",
2455 			ceph_client_id(rbd_dev->rbd_client->client));
2456 }
2457 
2458 static ssize_t rbd_pool_show(struct device *dev,
2459 			     struct device_attribute *attr, char *buf)
2460 {
2461 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2462 
2463 	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
2464 }
2465 
2466 static ssize_t rbd_pool_id_show(struct device *dev,
2467 			     struct device_attribute *attr, char *buf)
2468 {
2469 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2470 
2471 	return sprintf(buf, "%llu\n",
2472 		(unsigned long long) rbd_dev->spec->pool_id);
2473 }
2474 
2475 static ssize_t rbd_name_show(struct device *dev,
2476 			     struct device_attribute *attr, char *buf)
2477 {
2478 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2479 
2480 	if (rbd_dev->spec->image_name)
2481 		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
2482 
2483 	return sprintf(buf, "(unknown)\n");
2484 }
2485 
2486 static ssize_t rbd_image_id_show(struct device *dev,
2487 			     struct device_attribute *attr, char *buf)
2488 {
2489 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2490 
2491 	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
2492 }
2493 
2494 /*
2495  * Shows the name of the currently-mapped snapshot (or
2496  * RBD_SNAP_HEAD_NAME for the base image).
2497  */
2498 static ssize_t rbd_snap_show(struct device *dev,
2499 			     struct device_attribute *attr,
2500 			     char *buf)
2501 {
2502 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2503 
2504 	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
2505 }
2506 
2507 /*
2508  * For an rbd v2 image, shows the pool id, image id, and snapshot id
2509  * for the parent image.  If there is no parent, simply shows
2510  * "(no parent image)".
2511  */
2512 static ssize_t rbd_parent_show(struct device *dev,
2513 			     struct device_attribute *attr,
2514 			     char *buf)
2515 {
2516 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2517 	struct rbd_spec *spec = rbd_dev->parent_spec;
2518 	int count;
2519 	char *bufp = buf;
2520 
2521 	if (!spec)
2522 		return sprintf(buf, "(no parent image)\n");
2523 
2524 	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
2525 			(unsigned long long) spec->pool_id, spec->pool_name);
2526 	if (count < 0)
2527 		return count;
2528 	bufp += count;
2529 
2530 	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
2531 			spec->image_name ? spec->image_name : "(unknown)");
2532 	if (count < 0)
2533 		return count;
2534 	bufp += count;
2535 
2536 	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
2537 			(unsigned long long) spec->snap_id, spec->snap_name);
2538 	if (count < 0)
2539 		return count;
2540 	bufp += count;
2541 
2542 	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
2543 	if (count < 0)
2544 		return count;
2545 	bufp += count;
2546 
2547 	return (ssize_t) (bufp - buf);
2548 }
2549 
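     /*
      * Writing anything to the "refresh" attribute forces a re-read
      * of the image header.
      */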
2550 static ssize_t rbd_image_refresh(struct device *dev,
2551 				 struct device_attribute *attr,
2552 				 const char *buf,
2553 				 size_t size)
2554 {
2555 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2556 	int ret;
2557 
2558 	ret = rbd_dev_refresh(rbd_dev, NULL);
2559 
2560 	return ret < 0 ? ret : size;
2561 }
2562 
2563 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
2564 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
2565 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2566 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2567 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
2568 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
2569 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
2570 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
2571 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2572 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
2573 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
2574 
2575 static struct attribute *rbd_attrs[] = {
2576 	&dev_attr_size.attr,
2577 	&dev_attr_features.attr,
2578 	&dev_attr_major.attr,
2579 	&dev_attr_client_id.attr,
2580 	&dev_attr_pool.attr,
2581 	&dev_attr_pool_id.attr,
2582 	&dev_attr_name.attr,
2583 	&dev_attr_image_id.attr,
2584 	&dev_attr_current_snap.attr,
2585 	&dev_attr_parent.attr,
2586 	&dev_attr_refresh.attr,
2587 	NULL
2588 };
2589 
2590 static struct attribute_group rbd_attr_group = {
2591 	.attrs = rbd_attrs,
2592 };
2593 
2594 static const struct attribute_group *rbd_attr_groups[] = {
2595 	&rbd_attr_group,
2596 	NULL
2597 };
2598 
2599 static void rbd_sysfs_dev_release(struct device *dev)
2600 {
2601 }
2602 
2603 static struct device_type rbd_device_type = {
2604 	.name		= "rbd",
2605 	.groups		= rbd_attr_groups,
2606 	.release	= rbd_sysfs_dev_release,
2607 };
2608 
2609 
2610 /*
2611   sysfs - snapshots
2612 */
2613 
2614 static ssize_t rbd_snap_size_show(struct device *dev,
2615 				  struct device_attribute *attr,
2616 				  char *buf)
2617 {
2618 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2619 
2620 	return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
2621 }
2622 
2623 static ssize_t rbd_snap_id_show(struct device *dev,
2624 				struct device_attribute *attr,
2625 				char *buf)
2626 {
2627 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2628 
2629 	return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
2630 }
2631 
2632 static ssize_t rbd_snap_features_show(struct device *dev,
2633 				struct device_attribute *attr,
2634 				char *buf)
2635 {
2636 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2637 
2638 	return sprintf(buf, "0x%016llx\n",
2639 			(unsigned long long) snap->features);
2640 }
2641 
2642 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2643 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
2644 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
2645 
2646 static struct attribute *rbd_snap_attrs[] = {
2647 	&dev_attr_snap_size.attr,
2648 	&dev_attr_snap_id.attr,
2649 	&dev_attr_snap_features.attr,
2650 	NULL,
2651 };
2652 
2653 static struct attribute_group rbd_snap_attr_group = {
2654 	.attrs = rbd_snap_attrs,
2655 };
2656 
2657 static void rbd_snap_dev_release(struct device *dev)
2658 {
2659 	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2660 	kfree(snap->name);
2661 	kfree(snap);
2662 }
2663 
2664 static const struct attribute_group *rbd_snap_attr_groups[] = {
2665 	&rbd_snap_attr_group,
2666 	NULL
2667 };
2668 
2669 static struct device_type rbd_snap_device_type = {
2670 	.groups		= rbd_snap_attr_groups,
2671 	.release	= rbd_snap_dev_release,
2672 };
2673 
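     /*
      * An rbd_spec is reference counted.  rbd_spec_get() and
      * rbd_spec_put() take and drop references; the spec, and the
      * strings it points to, are freed when the last reference is
      * dropped.
      */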
2674 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2675 {
2676 	kref_get(&spec->kref);
2677 
2678 	return spec;
2679 }
2680 
2681 static void rbd_spec_free(struct kref *kref);
2682 static void rbd_spec_put(struct rbd_spec *spec)
2683 {
2684 	if (spec)
2685 		kref_put(&spec->kref, rbd_spec_free);
2686 }
2687 
2688 static struct rbd_spec *rbd_spec_alloc(void)
2689 {
2690 	struct rbd_spec *spec;
2691 
2692 	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2693 	if (!spec)
2694 		return NULL;
2695 	kref_init(&spec->kref);
2696 
2697 	rbd_spec_put(rbd_spec_get(spec));	/* TEMPORARY */
2698 
2699 	return spec;
2700 }
2701 
2702 static void rbd_spec_free(struct kref *kref)
2703 {
2704 	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2705 
2706 	kfree(spec->pool_name);
2707 	kfree(spec->image_id);
2708 	kfree(spec->image_name);
2709 	kfree(spec->snap_name);
2710 	kfree(spec);
2711 }
2712 
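     /*
      * Allocate and initialize a new rbd_device.  The device takes
      * ownership of the given spec and client references; both are
      * released by rbd_dev_destroy().
      */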
2713 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
2714 				struct rbd_spec *spec)
2715 {
2716 	struct rbd_device *rbd_dev;
2717 
2718 	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2719 	if (!rbd_dev)
2720 		return NULL;
2721 
2722 	spin_lock_init(&rbd_dev->lock);
2723 	rbd_dev->flags = 0;
2724 	INIT_LIST_HEAD(&rbd_dev->node);
2725 	INIT_LIST_HEAD(&rbd_dev->snaps);
2726 	init_rwsem(&rbd_dev->header_rwsem);
2727 
2728 	rbd_dev->spec = spec;
2729 	rbd_dev->rbd_client = rbdc;
2730 
2731 	/* Initialize the layout used for all rbd requests */
2732 
2733 	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2734 	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2735 	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2736 	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2737 
2738 	return rbd_dev;
2739 }
2740 
2741 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2742 {
2743 	rbd_spec_put(rbd_dev->parent_spec);
2744 	kfree(rbd_dev->header_name);
2745 	rbd_put_client(rbd_dev->rbd_client);
2746 	rbd_spec_put(rbd_dev->spec);
2747 	kfree(rbd_dev);
2748 }
2749 
2750 static bool rbd_snap_registered(struct rbd_snap *snap)
2751 {
2752 	bool ret = snap->dev.type == &rbd_snap_device_type;
2753 	bool reg = device_is_registered(&snap->dev);
2754 
2755 	rbd_assert(!ret ^ reg);
2756 
2757 	return ret;
2758 }
2759 
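     /*
      * Remove a snapshot from its device's list, unregistering its
      * sysfs device if it was registered.
      */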
2760 static void rbd_remove_snap_dev(struct rbd_snap *snap)
2761 {
2762 	list_del(&snap->node);
2763 	if (device_is_registered(&snap->dev))
2764 		device_unregister(&snap->dev);
2765 }
2766 
2767 static int rbd_register_snap_dev(struct rbd_snap *snap,
2768 				  struct device *parent)
2769 {
2770 	struct device *dev = &snap->dev;
2771 	int ret;
2772 
2773 	dev->type = &rbd_snap_device_type;
2774 	dev->parent = parent;
2775 	dev->release = rbd_snap_dev_release;
2776 	dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
2777 	dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2778 
2779 	ret = device_register(dev);
2780 
2781 	return ret;
2782 }
2783 
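     /*
      * Allocate and initialize an rbd_snap.  The corresponding sysfs
      * device is registered separately, by rbd_register_snap_dev().
      */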
2784 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2785 						const char *snap_name,
2786 						u64 snap_id, u64 snap_size,
2787 						u64 snap_features)
2788 {
2789 	struct rbd_snap *snap;
2790 	int ret;
2791 
2792 	snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2793 	if (!snap)
2794 		return ERR_PTR(-ENOMEM);
2795 
2796 	ret = -ENOMEM;
2797 	snap->name = kstrdup(snap_name, GFP_KERNEL);
2798 	if (!snap->name)
2799 		goto err;
2800 
2801 	snap->id = snap_id;
2802 	snap->size = snap_size;
2803 	snap->features = snap_features;
2804 
2805 	return snap;
2806 
2807 err:
2808 	kfree(snap->name);
2809 	kfree(snap);
2810 
2811 	return ERR_PTR(ret);
2812 }
2813 
2814 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2815 		u64 *snap_size, u64 *snap_features)
2816 {
2817 	char *snap_name;
2818 
2819 	rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2820 
2821 	*snap_size = rbd_dev->header.snap_sizes[which];
2822 	*snap_features = 0;	/* No features for v1 */
2823 
2824 	/* Skip over names until we find the one we are looking for */
2825 
2826 	snap_name = rbd_dev->header.snap_names;
2827 	while (which--)
2828 		snap_name += strlen(snap_name) + 1;
2829 
2830 	return snap_name;
2831 }
2832 
2833 /*
2834  * Get the size and object order for an image snapshot, or, if
2835  * snap_id is CEPH_NOSNAP, for the base image.
2837  */
2838 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2839 				u8 *order, u64 *snap_size)
2840 {
2841 	__le64 snapid = cpu_to_le64(snap_id);
2842 	int ret;
2843 	struct {
2844 		u8 order;
2845 		__le64 size;
2846 	} __attribute__ ((packed)) size_buf = { 0 };
2847 
2848 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2849 				"rbd", "get_size",
2850 				(char *) &snapid, sizeof (snapid),
2851 				(char *) &size_buf, sizeof (size_buf), NULL);
2852 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2853 	if (ret < 0)
2854 		return ret;
2855 
2856 	*order = size_buf.order;
2857 	*snap_size = le64_to_cpu(size_buf.size);
2858 
2859 	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
2860 		(unsigned long long) snap_id, (unsigned int) *order,
2861 		(unsigned long long) *snap_size);
2862 
2863 	return 0;
2864 }
2865 
2866 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2867 {
2868 	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2869 					&rbd_dev->header.obj_order,
2870 					&rbd_dev->header.image_size);
2871 }
2872 
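     /*
      * Fetch the image's object name prefix using the
      * "get_object_prefix" method of the "rbd" object class.
      */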
2873 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2874 {
2875 	void *reply_buf;
2876 	int ret;
2877 	void *p;
2878 
2879 	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2880 	if (!reply_buf)
2881 		return -ENOMEM;
2882 
2883 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2884 				"rbd", "get_object_prefix",
2885 				NULL, 0,
2886 				reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
2887 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2888 	if (ret < 0)
2889 		goto out;
2890 
2891 	p = reply_buf;
2892 	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2893 						p + RBD_OBJ_PREFIX_LEN_MAX,
2894 						NULL, GFP_NOIO);
2895 
2896 	if (IS_ERR(rbd_dev->header.object_prefix)) {
2897 		ret = PTR_ERR(rbd_dev->header.object_prefix);
2898 		rbd_dev->header.object_prefix = NULL;
2899 	} else {
2900 		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
2901 	}
2902 
2903 out:
2904 	kfree(reply_buf);
2905 
2906 	return ret;
2907 }
2908 
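     /*
      * Get the feature bits for an image snapshot, or for the base
      * image if snap_id is CEPH_NOSNAP.  Fails with -ENXIO if the
      * image has incompatible features this client does not support.
      */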
2909 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2910 		u64 *snap_features)
2911 {
2912 	__le64 snapid = cpu_to_le64(snap_id);
2913 	struct {
2914 		__le64 features;
2915 		__le64 incompat;
2916 	} features_buf = { 0 };
2917 	u64 incompat;
2918 	int ret;
2919 
2920 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2921 				"rbd", "get_features",
2922 				(char *) &snapid, sizeof (snapid),
2923 				(char *) &features_buf, sizeof (features_buf),
2924 				NULL);
2925 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2926 	if (ret < 0)
2927 		return ret;
2928 
2929 	incompat = le64_to_cpu(features_buf.incompat);
2930 	if (incompat & ~RBD_FEATURES_ALL)
2931 		return -ENXIO;
2932 
2933 	*snap_features = le64_to_cpu(features_buf.features);
2934 
2935 	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2936 		(unsigned long long) snap_id,
2937 		(unsigned long long) *snap_features,
2938 		(unsigned long long) le64_to_cpu(features_buf.incompat));
2939 
2940 	return 0;
2941 }
2942 
2943 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2944 {
2945 	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2946 						&rbd_dev->header.features);
2947 }
2948 
2949 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2950 {
2951 	struct rbd_spec *parent_spec;
2952 	size_t size;
2953 	void *reply_buf = NULL;
2954 	__le64 snapid;
2955 	void *p;
2956 	void *end;
2957 	char *image_id;
2958 	u64 overlap;
2959 	int ret;
2960 
2961 	parent_spec = rbd_spec_alloc();
2962 	if (!parent_spec)
2963 		return -ENOMEM;
2964 
2965 	size = sizeof (__le64) +				/* pool_id */
2966 		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
2967 		sizeof (__le64) +				/* snap_id */
2968 		sizeof (__le64);				/* overlap */
2969 	reply_buf = kmalloc(size, GFP_KERNEL);
2970 	if (!reply_buf) {
2971 		ret = -ENOMEM;
2972 		goto out_err;
2973 	}
2974 
2975 	snapid = cpu_to_le64(CEPH_NOSNAP);
2976 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2977 				"rbd", "get_parent",
2978 				(char *) &snapid, sizeof (snapid),
2979 				(char *) reply_buf, size, NULL);
2980 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2981 	if (ret < 0)
2982 		goto out_err;
2983 
2984 	ret = -ERANGE;
2985 	p = reply_buf;
2986 	end = (char *) reply_buf + size;
2987 	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2988 	if (parent_spec->pool_id == CEPH_NOPOOL)
2989 		goto out;	/* No parent?  No problem. */
2990 
2991 	/* The ceph file layout needs to fit pool id in 32 bits */
2992 
2993 	ret = -EIO;
2994 	if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
2995 		goto out_err;
2996 
2997 	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
2998 	if (IS_ERR(image_id)) {
2999 		ret = PTR_ERR(image_id);
3000 		goto out_err;
3001 	}
3002 	parent_spec->image_id = image_id;
3003 	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3004 	ceph_decode_64_safe(&p, end, overlap, out_err);
3005 
3006 	rbd_dev->parent_overlap = overlap;
3007 	rbd_dev->parent_spec = parent_spec;
3008 	parent_spec = NULL;	/* rbd_dev now owns this */
3009 out:
3010 	ret = 0;
3011 out_err:
3012 	kfree(reply_buf);
3013 	rbd_spec_put(parent_spec);
3014 
3015 	return ret;
3016 }
3017 
3018 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3019 {
3020 	size_t image_id_size;
3021 	char *image_id;
3022 	void *p;
3023 	void *end;
3024 	size_t size;
3025 	void *reply_buf = NULL;
3026 	size_t len = 0;
3027 	char *image_name = NULL;
3028 	int ret;
3029 
3030 	rbd_assert(!rbd_dev->spec->image_name);
3031 
3032 	len = strlen(rbd_dev->spec->image_id);
3033 	image_id_size = sizeof (__le32) + len;
3034 	image_id = kmalloc(image_id_size, GFP_KERNEL);
3035 	if (!image_id)
3036 		return NULL;
3037 
3038 	p = image_id;
3039 	end = (char *) image_id + image_id_size;
3040 	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
3041 
3042 	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3043 	reply_buf = kmalloc(size, GFP_KERNEL);
3044 	if (!reply_buf)
3045 		goto out;
3046 
3047 	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3048 				"rbd", "dir_get_name",
3049 				image_id, image_id_size,
3050 				(char *) reply_buf, size, NULL);
3051 	if (ret < 0)
3052 		goto out;
3053 	p = reply_buf;
3054 	end = (char *) reply_buf + size;
3055 	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3056 	if (IS_ERR(image_name))
3057 		image_name = NULL;
3058 	else
3059 		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3060 out:
3061 	kfree(reply_buf);
3062 	kfree(image_id);
3063 
3064 	return image_name;
3065 }
3066 
3067 /*
3068  * When a parent image gets probed, we only have the pool, image,
3069  * and snapshot ids but not the names of any of them.  This call
3070  * is made later to fill in those names.  It has to be done after
3071  * rbd_dev_snaps_update() has completed because some of the
3072  * information (in particular, snapshot name) is not available
3073  * until then.
3074  */
3075 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
3076 {
3077 	struct ceph_osd_client *osdc;
3078 	const char *name;
3079 	void *reply_buf = NULL;
3080 	int ret;
3081 
3082 	if (rbd_dev->spec->pool_name)
3083 		return 0;	/* Already have the names */
3084 
3085 	/* Look up the pool name */
3086 
3087 	osdc = &rbd_dev->rbd_client->client->osdc;
3088 	name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
3089 	if (!name) {
3090 		rbd_warn(rbd_dev, "there is no pool with id %llu",
3091 			rbd_dev->spec->pool_id);	/* Really a BUG() */
3092 		return -EIO;
3093 	}
3094 
3095 	rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3096 	if (!rbd_dev->spec->pool_name)
3097 		return -ENOMEM;
3098 
3099 	/* Fetch the image name; tolerate failure here */
3100 
3101 	name = rbd_dev_image_name(rbd_dev);
3102 	if (name)
3103 		rbd_dev->spec->image_name = (char *) name;
3104 	else
3105 		rbd_warn(rbd_dev, "unable to get image name");
3106 
3107 	/* Look up the snapshot name. */
3108 
3109 	name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3110 	if (!name) {
3111 		rbd_warn(rbd_dev, "no snapshot with id %llu",
3112 			rbd_dev->spec->snap_id);	/* Really a BUG() */
3113 		ret = -EIO;
3114 		goto out_err;
3115 	}
3116 	rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3117 	if (!rbd_dev->spec->snap_name) {
     		ret = -ENOMEM;
3118 		goto out_err;
     	}
3119 
3120 	return 0;
3121 out_err:
3122 	kfree(reply_buf);
3123 	kfree(rbd_dev->spec->pool_name);
3124 	rbd_dev->spec->pool_name = NULL;
3125 
3126 	return ret;
3127 }
3128 
3129 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3130 {
3131 	size_t size;
3132 	int ret;
3133 	void *reply_buf;
3134 	void *p;
3135 	void *end;
3136 	u64 seq;
3137 	u32 snap_count;
3138 	struct ceph_snap_context *snapc;
3139 	u32 i;
3140 
3141 	/*
3142 	 * We'll need room for the seq value (maximum snapshot id),
3143 	 * snapshot count, and array of that many snapshot ids.
3144 	 * For now we have a fixed upper limit on the number we're
3145 	 * prepared to receive.
3146 	 */
3147 	size = sizeof (__le64) + sizeof (__le32) +
3148 			RBD_MAX_SNAP_COUNT * sizeof (__le64);
3149 	reply_buf = kzalloc(size, GFP_KERNEL);
3150 	if (!reply_buf)
3151 		return -ENOMEM;
3152 
3153 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3154 				"rbd", "get_snapcontext",
3155 				NULL, 0,
3156 				reply_buf, size, ver);
3157 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3158 	if (ret < 0)
3159 		goto out;
3160 
3161 	ret = -ERANGE;
3162 	p = reply_buf;
3163 	end = (char *) reply_buf + size;
3164 	ceph_decode_64_safe(&p, end, seq, out);
3165 	ceph_decode_32_safe(&p, end, snap_count, out);
3166 
3167 	/*
3168 	 * Make sure the reported number of snapshot ids wouldn't go
3169 	 * beyond the end of our buffer.  But before checking that,
3170 	 * make sure the computed size of the snapshot context we
3171 	 * allocate is representable in a size_t.
3172 	 */
3173 	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3174 				 / sizeof (u64)) {
3175 		ret = -EINVAL;
3176 		goto out;
3177 	}
3178 	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3179 		goto out;
3180 
3181 	size = sizeof (struct ceph_snap_context) +
3182 				snap_count * sizeof (snapc->snaps[0]);
3183 	snapc = kmalloc(size, GFP_KERNEL);
3184 	if (!snapc) {
3185 		ret = -ENOMEM;
3186 		goto out;
3187 	}
3188 
3189 	atomic_set(&snapc->nref, 1);
3190 	snapc->seq = seq;
3191 	snapc->num_snaps = snap_count;
3192 	for (i = 0; i < snap_count; i++)
3193 		snapc->snaps[i] = ceph_decode_64(&p);
3194 
3195 	rbd_dev->header.snapc = snapc;
3196 
3197 	dout("  snap context seq = %llu, snap_count = %u\n",
3198 		(unsigned long long) seq, (unsigned int) snap_count);
3199 
     	ret = 0;
3200 out:
3201 	kfree(reply_buf);
3202 
3203 	return ret;
3204 }
3205 
3206 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3207 {
3208 	size_t size;
3209 	void *reply_buf;
3210 	__le64 snap_id;
3211 	int ret;
3212 	void *p;
3213 	void *end;
3214 	char *snap_name;
3215 
3216 	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3217 	reply_buf = kmalloc(size, GFP_KERNEL);
3218 	if (!reply_buf)
3219 		return ERR_PTR(-ENOMEM);
3220 
3221 	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3222 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3223 				"rbd", "get_snapshot_name",
3224 				(char *) &snap_id, sizeof (snap_id),
3225 				reply_buf, size, NULL);
3226 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3227 	if (ret < 0)
3228 		goto out;
3229 
3230 	p = reply_buf;
3231 	end = (char *) reply_buf + size;
3232 	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3233 	if (IS_ERR(snap_name)) {
3234 		ret = PTR_ERR(snap_name);
3235 		goto out;
3236 	} else {
3237 		dout("  snap_id 0x%016llx snap_name = %s\n",
3238 			(unsigned long long) le64_to_cpu(snap_id), snap_name);
3239 	}
3240 	kfree(reply_buf);
3241 
3242 	return snap_name;
3243 out:
3244 	kfree(reply_buf);
3245 
3246 	return ERR_PTR(ret);
3247 }
3248 
3249 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3250 		u64 *snap_size, u64 *snap_features)
3251 {
3252 	u64 snap_id;
3253 	u8 order;
3254 	int ret;
3255 
3256 	snap_id = rbd_dev->header.snapc->snaps[which];
3257 	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3258 	if (ret)
3259 		return ERR_PTR(ret);
3260 	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3261 	if (ret)
3262 		return ERR_PTR(ret);
3263 
3264 	return rbd_dev_v2_snap_name(rbd_dev, which);
3265 }
3266 
3267 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3268 		u64 *snap_size, u64 *snap_features)
3269 {
3270 	if (rbd_dev->image_format == 1)
3271 		return rbd_dev_v1_snap_info(rbd_dev, which,
3272 					snap_size, snap_features);
3273 	if (rbd_dev->image_format == 2)
3274 		return rbd_dev_v2_snap_info(rbd_dev, which,
3275 					snap_size, snap_features);
3276 	return ERR_PTR(-EINVAL);
3277 }
3278 
3279 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3280 {
3281 	int ret;
3282 	__u8 obj_order;
3283 
3284 	down_write(&rbd_dev->header_rwsem);
3285 
3286 	/* Grab old order first, to see if it changes */
3287 
3288 	obj_order = rbd_dev->header.obj_order;
3289 	ret = rbd_dev_v2_image_size(rbd_dev);
3290 	if (ret)
3291 		goto out;
3292 	if (rbd_dev->header.obj_order != obj_order) {
3293 		ret = -EIO;
3294 		goto out;
3295 	}
3296 	rbd_update_mapping_size(rbd_dev);
3297 
3298 	ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3299 	dout("rbd_dev_v2_snap_context returned %d\n", ret);
3300 	if (ret)
3301 		goto out;
3302 	ret = rbd_dev_snaps_update(rbd_dev);
3303 	dout("rbd_dev_snaps_update returned %d\n", ret);
3304 	if (ret)
3305 		goto out;
3306 	ret = rbd_dev_snaps_register(rbd_dev);
3307 	dout("rbd_dev_snaps_register returned %d\n", ret);
3308 out:
3309 	up_write(&rbd_dev->header_rwsem);
3310 
3311 	return ret;
3312 }
3313 
3314 /*
3315  * Scan the rbd device's current snapshot list and compare it to the
3316  * newly-received snapshot context.  Remove any existing snapshots
3317  * not present in the new snapshot context.  Add a new snapshot for
3318  * any snapshots in the snapshot context not in the current list.
3319  * And verify there are no changes to snapshots we already know
3320  * about.
3321  *
3322  * Assumes the snapshots in the snapshot context are sorted by
3323  * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
3324  * are also maintained in that order.)
3325  */
3326 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
3327 {
3328 	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3329 	const u32 snap_count = snapc->num_snaps;
3330 	struct list_head *head = &rbd_dev->snaps;
3331 	struct list_head *links = head->next;
3332 	u32 index = 0;
3333 
3334 	dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
3335 	while (index < snap_count || links != head) {
3336 		u64 snap_id;
3337 		struct rbd_snap *snap;
3338 		char *snap_name;
3339 		u64 snap_size = 0;
3340 		u64 snap_features = 0;
3341 
3342 		snap_id = index < snap_count ? snapc->snaps[index]
3343 					     : CEPH_NOSNAP;
3344 		snap = links != head ? list_entry(links, struct rbd_snap, node)
3345 				     : NULL;
3346 		rbd_assert(!snap || snap->id != CEPH_NOSNAP);
3347 
3348 		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3349 			struct list_head *next = links->next;
3350 
3351 			/*
3352 			 * A previously-existing snapshot is not in
3353 			 * the new snap context.
3354 			 *
3355 			 * If the now missing snapshot is the one the
3356 			 * image is mapped to, clear its exists flag
3357 			 * so we can avoid sending any more requests
3358 			 * to it.
3359 			 */
3360 			if (rbd_dev->spec->snap_id == snap->id)
3361 				clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3362 			rbd_remove_snap_dev(snap);
3363 			dout("%ssnap id %llu has been removed\n",
3364 				rbd_dev->spec->snap_id == snap->id ?
3365 							"mapped " : "",
3366 				(unsigned long long) snap->id);
3367 
3368 			/* Done with this list entry; advance */
3369 
3370 			links = next;
3371 			continue;
3372 		}
3373 
3374 		snap_name = rbd_dev_snap_info(rbd_dev, index,
3375 					&snap_size, &snap_features);
3376 		if (IS_ERR(snap_name))
3377 			return PTR_ERR(snap_name);
3378 
3379 		dout("entry %u: snap_id = %llu\n", (unsigned int) index,
3380 			(unsigned long long) snap_id);
3381 		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3382 			struct rbd_snap *new_snap;
3383 
3384 			/* We haven't seen this snapshot before */
3385 
3386 			new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
3387 					snap_id, snap_size, snap_features);
3388 			if (IS_ERR(new_snap)) {
3389 				int err = PTR_ERR(new_snap);
3390 
3391 				dout("  failed to add dev, error %d\n", err);
3392 
3393 				return err;
3394 			}
3395 
3396 			/* New goes before existing, or at end of list */
3397 
3398 			dout("  added dev%s\n", snap ? "" : " at end");
3399 			if (snap)
3400 				list_add_tail(&new_snap->node, &snap->node);
3401 			else
3402 				list_add_tail(&new_snap->node, head);
3403 		} else {
3404 			/* Already have this one */
3405 
3406 			dout("  already present\n");
3407 
3408 			rbd_assert(snap->size == snap_size);
3409 			rbd_assert(!strcmp(snap->name, snap_name));
3410 			rbd_assert(snap->features == snap_features);
3411 
3412 			/* Done with this list entry; advance */
3413 
3414 			links = links->next;
3415 		}
3416 
3417 		/* Advance to the next entry in the snapshot context */
3418 
3419 		index++;
3420 	}
3421 	dout("%s: done\n", __func__);
3422 
3423 	return 0;
3424 }
3425 
3426 /*
3427  * Scan the list of snapshots and register the devices for any that
3428  * have not already been registered.
3429  */
3430 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3431 {
3432 	struct rbd_snap *snap;
3433 	int ret = 0;
3434 
3435 	dout("%s:\n", __func__);
3436 	if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3437 		return -EIO;
3438 
3439 	list_for_each_entry(snap, &rbd_dev->snaps, node) {
3440 		if (!rbd_snap_registered(snap)) {
3441 			ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3442 			if (ret < 0)
3443 				break;
3444 		}
3445 	}
3446 	dout("%s: returning %d\n", __func__, ret);
3447 
3448 	return ret;
3449 }
3450 
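     /*
      * Register the rbd device on the rbd bus, named by its device
      * id and parented by the rbd root device.
      */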
3451 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3452 {
3453 	struct device *dev;
3454 	int ret;
3455 
3456 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3457 
3458 	dev = &rbd_dev->dev;
3459 	dev->bus = &rbd_bus_type;
3460 	dev->type = &rbd_device_type;
3461 	dev->parent = &rbd_root_dev;
3462 	dev->release = rbd_dev_release;
3463 	dev_set_name(dev, "%d", rbd_dev->dev_id);
3464 	ret = device_register(dev);
3465 
3466 	mutex_unlock(&ctl_mutex);
3467 
3468 	return ret;
3469 }
3470 
3471 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3472 {
3473 	device_unregister(&rbd_dev->dev);
3474 }
3475 
3476 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
3477 
3478 /*
3479  * Get a unique rbd identifier for the given new rbd_dev, and add
3480  * the rbd_dev to the global list.  The minimum rbd id is 1.
3481  */
3482 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
3483 {
3484 	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
3485 
3486 	spin_lock(&rbd_dev_list_lock);
3487 	list_add_tail(&rbd_dev->node, &rbd_dev_list);
3488 	spin_unlock(&rbd_dev_list_lock);
3489 	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3490 		(unsigned long long) rbd_dev->dev_id);
3491 }
3492 
3493 /*
3494  * Remove an rbd_dev from the global list, and record that its
3495  * identifier is no longer in use.
3496  */
3497 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
3498 {
3499 	struct list_head *tmp;
3500 	int rbd_id = rbd_dev->dev_id;
3501 	int max_id;
3502 
3503 	rbd_assert(rbd_id > 0);
3504 
3505 	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3506 		(unsigned long long) rbd_dev->dev_id);
3507 	spin_lock(&rbd_dev_list_lock);
3508 	list_del_init(&rbd_dev->node);
3509 
3510 	/*
3511 	 * If the id being "put" is not the current maximum, there
3512 	 * is nothing special we need to do.
3513 	 */
3514 	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
3515 		spin_unlock(&rbd_dev_list_lock);
3516 		return;
3517 	}
3518 
3519 	/*
3520 	 * We need to update the current maximum id.  Search the
3521 	 * list to find out what it is.  We're more likely to find
3522 	 * the maximum at the end, so search the list backward.
3523 	 */
3524 	max_id = 0;
3525 	list_for_each_prev(tmp, &rbd_dev_list) {
3526 		struct rbd_device *rbd_dev;
3527 
3528 		rbd_dev = list_entry(tmp, struct rbd_device, node);
3529 		if (rbd_dev->dev_id > max_id)
3530 			max_id = rbd_dev->dev_id;
3531 	}
3532 	spin_unlock(&rbd_dev_list_lock);
3533 
3534 	/*
3535 	 * The max id could have been updated by rbd_dev_id_get(), in
3536 	 * which case it now accurately reflects the new maximum.
3537 	 * Be careful not to overwrite the maximum value in that
3538 	 * case.
3539 	 */
3540 	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3541 	dout("  max dev id has been reset\n");
3542 }
3543 
3544 /*
3545  * Skips over white space at *buf, and updates *buf to point to the
3546  * first found non-space character (if any). Returns the length of
3547  * the token (string of non-white space characters) found.  Note
3548  * that *buf must be terminated with '\0'.
3549  */
3550 static inline size_t next_token(const char **buf)
3551 {
3552 	/*
3553 	 * These are the characters that produce nonzero for
3554 	 * isspace() in the "C" and "POSIX" locales.
3555 	 */
3556 	const char *spaces = " \f\n\r\t\v";
3557 
3558 	*buf += strspn(*buf, spaces);	/* Find start of token */
3559 
3560 	return strcspn(*buf, spaces);	/* Return token length */
3561 }
3562 
3563 /*
3564  * Finds the next token in *buf, and if the provided token buffer is
3565  * big enough, copies the found token into it.  The result, if
3566  * copied, is guaranteed to be terminated with '\0'.  Note that *buf
3567  * must be terminated with '\0' on entry.
3568  *
3569  * Returns the length of the token found (not including the '\0').
3570  * Return value will be 0 if no token is found, and it will be >=
3571  * token_size if the token would not fit.
3572  *
3573  * The *buf pointer will be updated to point beyond the end of the
3574  * found token.  Note that this occurs even if the token buffer is
3575  * too small to hold it.
3576  */
3577 static inline size_t copy_token(const char **buf,
3578 				char *token,
3579 				size_t token_size)
3580 {
3581 	size_t len;
3582 
3583 	len = next_token(buf);
3584 	if (len < token_size) {
3585 		memcpy(token, *buf, len);
3586 		*(token + len) = '\0';
3587 	}
3588 	*buf += len;
3589 
3590 	return len;
3591 }
3592 
3593 /*
3594  * Finds the next token in *buf, dynamically allocates a buffer big
3595  * enough to hold a copy of it, and copies the token into the new
3596  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
3597  * that a duplicate buffer is created even for a zero-length token.
3598  *
3599  * Returns a pointer to the newly-allocated duplicate, or a null
3600  * pointer if memory for the duplicate was not available.  If
3601  * the lenp argument is a non-null pointer, the length of the token
3602  * (not including the '\0') is returned in *lenp.
3603  *
3604  * If successful, the *buf pointer will be updated to point beyond
3605  * the end of the found token.
3606  *
3607  * Note: uses GFP_KERNEL for allocation.
3608  */
3609 static inline char *dup_token(const char **buf, size_t *lenp)
3610 {
3611 	char *dup;
3612 	size_t len;
3613 
3614 	len = next_token(buf);
3615 	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
3616 	if (!dup)
3617 		return NULL;
3618 	*(dup + len) = '\0';
3619 	*buf += len;
3620 
3621 	if (lenp)
3622 		*lenp = len;
3623 
3624 	return dup;
3625 }
3626 
3627 /*
3628  * Parse the options provided for an "rbd add" (i.e., rbd image
3629  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
3630  * and the data written is passed here via a NUL-terminated buffer.
3631  * Returns 0 if successful or an error code otherwise.
3632  *
3633  * The information extracted from these options is recorded in
3634  * the other parameters which return dynamically-allocated
3635  * structures:
3636  *  ceph_opts
3637  *      The address of a pointer that will refer to a ceph options
3638  *      structure.  Caller must release the returned pointer using
3639  *      ceph_destroy_options() when it is no longer needed.
3640  *  rbd_opts
3641  *	Address of an rbd options pointer.  Fully initialized by
3642  *	this function; caller must release with kfree().
3643  *  spec
3644  *	Address of an rbd image specification pointer.  Fully
3645  *	initialized by this function based on parsed options.
3646  *	Caller must release with rbd_spec_put().
3647  *
3648  * The options passed take this form:
3649  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
3650  * where:
3651  *  <mon_addrs>
3652  *      A comma-separated list of one or more monitor addresses.
3653  *      A monitor address is an ip address, optionally followed
3654  *      by a port number (separated by a colon).
3655  *        I.e.:  ip1[:port1][,ip2[:port2]...]
3656  *  <options>
3657  *      A comma-separated list of ceph and/or rbd options.
3658  *  <pool_name>
3659  *      The name of the rados pool containing the rbd image.
3660  *  <image_name>
3661  *      The name of the image in that pool to map.
3662  *  <snap_name>
3663  *      An optional snapshot name.  If provided, the mapping will
3664  *      present data from the image at the time that snapshot was
3665  *      created.  The image head is used if no snapshot name is
3666  *      provided.  Snapshot mappings are always read-only.
3667  */
3668 static int rbd_add_parse_args(const char *buf,
3669 				struct ceph_options **ceph_opts,
3670 				struct rbd_options **opts,
3671 				struct rbd_spec **rbd_spec)
3672 {
3673 	size_t len;
3674 	char *options;
3675 	const char *mon_addrs;
3676 	size_t mon_addrs_size;
3677 	struct rbd_spec *spec = NULL;
3678 	struct rbd_options *rbd_opts = NULL;
3679 	struct ceph_options *copts;
3680 	int ret;
3681 
3682 	/* The first four tokens are required */
3683 
3684 	len = next_token(&buf);
3685 	if (!len) {
3686 		rbd_warn(NULL, "no monitor address(es) provided");
3687 		return -EINVAL;
3688 	}
3689 	mon_addrs = buf;
3690 	mon_addrs_size = len + 1;
3691 	buf += len;
3692 
3693 	ret = -EINVAL;
3694 	options = dup_token(&buf, NULL);
3695 	if (!options)
3696 		return -ENOMEM;
3697 	if (!*options) {
3698 		rbd_warn(NULL, "no options provided");
3699 		goto out_err;
3700 	}
3701 
3702 	spec = rbd_spec_alloc();
3703 	if (!spec)
3704 		goto out_mem;
3705 
3706 	spec->pool_name = dup_token(&buf, NULL);
3707 	if (!spec->pool_name)
3708 		goto out_mem;
3709 	if (!*spec->pool_name) {
3710 		rbd_warn(NULL, "no pool name provided");
3711 		goto out_err;
3712 	}
3713 
3714 	spec->image_name = dup_token(&buf, NULL);
3715 	if (!spec->image_name)
3716 		goto out_mem;
3717 	if (!*spec->image_name) {
3718 		rbd_warn(NULL, "no image name provided");
3719 		goto out_err;
3720 	}
3721 
3722 	/*
3723 	 * Snapshot name is optional; default is to use "-"
3724 	 * (indicating the head/no snapshot).
3725 	 */
3726 	len = next_token(&buf);
3727 	if (!len) {
3728 		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
3729 		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
3730 	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
3731 		ret = -ENAMETOOLONG;
3732 		goto out_err;
3733 	}
3734 	spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
3735 	if (!spec->snap_name)
3736 		goto out_mem;
3737 	*(spec->snap_name + len) = '\0';
3738 
3739 	/* Initialize all rbd options to the defaults */
3740 
3741 	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
3742 	if (!rbd_opts)
3743 		goto out_mem;
3744 
3745 	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
3746 
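	/*
	 * mon_addrs is not NUL-terminated (it still points into the
	 * caller's buffer), so its end must be passed explicitly:
	 * mon_addrs_size is len + 1, so mon_addrs + mon_addrs_size - 1
	 * points just past the last address character.
	 */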
3747 	copts = ceph_parse_options(options, mon_addrs,
3748 					mon_addrs + mon_addrs_size - 1,
3749 					parse_rbd_opts_token, rbd_opts);
3750 	if (IS_ERR(copts)) {
3751 		ret = PTR_ERR(copts);
3752 		goto out_err;
3753 	}
3754 	kfree(options);
3755 
3756 	*ceph_opts = copts;
3757 	*opts = rbd_opts;
3758 	*rbd_spec = spec;
3759 
3760 	return 0;
3761 out_mem:
3762 	ret = -ENOMEM;
3763 out_err:
3764 	kfree(rbd_opts);
3765 	rbd_spec_put(spec);
3766 	kfree(options);
3767 
3768 	return ret;
3769 }
3770 
3771 /*
3772  * An rbd format 2 image has a unique identifier, distinct from the
3773  * name given to it by the user.  Internally, that identifier is
3774  * what's used to specify the names of objects related to the image.
3775  *
3776  * A special "rbd id" object is used to map an rbd image name to its
3777  * id.  If that object doesn't exist, then there is no v2 rbd image
3778  * with the supplied name.
3779  *
3780  * This function will record the given rbd_dev's image_id field if
3781  * it can be determined, and in that case will return 0.  If any
3782  * errors occur a negative errno will be returned and the rbd_dev's
3783  * image_id field will be unchanged (and should be NULL).
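 *
 * For example, assuming RBD_ID_PREFIX is "rbd_id." (as defined in
 * rbd_types.h), the id object for an image named "foo" would be
 * "rbd_id.foo", and its "get_id" method returns the image id.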
3784  */
3785 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
3786 {
3787 	int ret;
3788 	size_t size;
3789 	char *object_name;
3790 	void *response;
3791 	void *p;
3792 
3793 	/*
3794 	 * When probing a parent image, the image id is already
3795 	 * known (and the image name likely is not).  There's no
3796 	 * need to fetch the image id again in this case.
3797 	 */
3798 	if (rbd_dev->spec->image_id)
3799 		return 0;
3800 
3801 	/*
3802 	 * First, see if the format 2 image id file exists, and if
3803 	 * so, get the image's persistent id from it.
3804 	 */
3805 	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
3806 	object_name = kmalloc(size, GFP_NOIO);
3807 	if (!object_name)
3808 		return -ENOMEM;
3809 	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
3810 	dout("rbd id object name is %s\n", object_name);
3811 
	/*
	 * Response will be a ceph-encoded string: a __le32 length
	 * followed by that many bytes of string data
	 */
3813 
3814 	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
3815 	response = kzalloc(size, GFP_NOIO);
3816 	if (!response) {
3817 		ret = -ENOMEM;
3818 		goto out;
3819 	}
3820 
3821 	ret = rbd_obj_method_sync(rbd_dev, object_name,
3822 				"rbd", "get_id",
3823 				NULL, 0,
3824 				response, RBD_IMAGE_ID_LEN_MAX, NULL);
3825 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3826 	if (ret < 0)
3827 		goto out;
3828 
3829 	p = response;
3830 	rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
3831 						p + RBD_IMAGE_ID_LEN_MAX,
3832 						NULL, GFP_NOIO);
3833 	if (IS_ERR(rbd_dev->spec->image_id)) {
3834 		ret = PTR_ERR(rbd_dev->spec->image_id);
3835 		rbd_dev->spec->image_id = NULL;
3836 	} else {
3837 		dout("image_id is %s\n", rbd_dev->spec->image_id);
3838 	}
3839 out:
3840 	kfree(response);
3841 	kfree(object_name);
3842 
3843 	return ret;
3844 }
3845 
3846 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
3847 {
3848 	int ret;
3849 	size_t size;
3850 
3851 	/* Version 1 images have no id; empty string is used */
3852 
3853 	rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
3854 	if (!rbd_dev->spec->image_id)
3855 		return -ENOMEM;
3856 
3857 	/* Record the header object name for this rbd image. */
3858 
3859 	size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
3860 	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3861 	if (!rbd_dev->header_name) {
3862 		ret = -ENOMEM;
3863 		goto out_err;
3864 	}
3865 	sprintf(rbd_dev->header_name, "%s%s",
3866 		rbd_dev->spec->image_name, RBD_SUFFIX);
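	/*
	 * E.g., assuming RBD_SUFFIX is ".rbd" (from rbd_types.h), an
	 * image named "foo" has header object "foo.rbd".
	 */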
3867 
3868 	/* Populate rbd image metadata */
3869 
3870 	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
3871 	if (ret < 0)
3872 		goto out_err;
3873 
3874 	/* Version 1 images have no parent (no layering) */
3875 
3876 	rbd_dev->parent_spec = NULL;
3877 	rbd_dev->parent_overlap = 0;
3878 
3879 	rbd_dev->image_format = 1;
3880 
3881 	dout("discovered version 1 image, header name is %s\n",
3882 		rbd_dev->header_name);
3883 
3884 	return 0;
3885 
3886 out_err:
3887 	kfree(rbd_dev->header_name);
3888 	rbd_dev->header_name = NULL;
3889 	kfree(rbd_dev->spec->image_id);
3890 	rbd_dev->spec->image_id = NULL;
3891 
3892 	return ret;
3893 }
3894 
3895 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
3896 {
3897 	size_t size;
3898 	int ret;
3899 	u64 ver = 0;
3900 
3901 	/*
3902 	 * Image id was filled in by the caller.  Record the header
3903 	 * object name for this rbd image.
3904 	 */
3905 	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
3906 	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3907 	if (!rbd_dev->header_name)
3908 		return -ENOMEM;
3909 	sprintf(rbd_dev->header_name, "%s%s",
3910 			RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
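	/*
	 * E.g., assuming RBD_HEADER_PREFIX is "rbd_header." (from
	 * rbd_types.h), an image whose id is "abc123" has header
	 * object "rbd_header.abc123".
	 */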
3911 
3912 	/* Get the size and object order for the image */
3913 
3914 	ret = rbd_dev_v2_image_size(rbd_dev);
3915 	if (ret < 0)
3916 		goto out_err;
3917 
3918 	/* Get the object prefix (a.k.a. block_name) for the image */
3919 
3920 	ret = rbd_dev_v2_object_prefix(rbd_dev);
3921 	if (ret < 0)
3922 		goto out_err;
3923 
	/* Get and check the features for the image */
3925 
3926 	ret = rbd_dev_v2_features(rbd_dev);
3927 	if (ret < 0)
3928 		goto out_err;
3929 
3930 	/* If the image supports layering, get the parent info */
3931 
3932 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
3933 		ret = rbd_dev_v2_parent_info(rbd_dev);
3934 		if (ret < 0)
3935 			goto out_err;
3936 	}
3937 
	/* crypt and compression types aren't (yet) supported for v2 images */
3939 
3940 	rbd_dev->header.crypt_type = 0;
3941 	rbd_dev->header.comp_type = 0;
3942 
3943 	/* Get the snapshot context, plus the header version */
3944 
3945 	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
3946 	if (ret)
3947 		goto out_err;
3948 	rbd_dev->header.obj_version = ver;
3949 
3950 	rbd_dev->image_format = 2;
3951 
3952 	dout("discovered version 2 image, header name is %s\n",
3953 		rbd_dev->header_name);
3954 
3955 	return 0;
3956 out_err:
3957 	rbd_dev->parent_overlap = 0;
3958 	rbd_spec_put(rbd_dev->parent_spec);
3959 	rbd_dev->parent_spec = NULL;
3960 	kfree(rbd_dev->header_name);
3961 	rbd_dev->header_name = NULL;
3962 	kfree(rbd_dev->header.object_prefix);
3963 	rbd_dev->header.object_prefix = NULL;
3964 
3965 	return ret;
3966 }
3967 
3968 static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
3969 {
3970 	int ret;
3971 
3972 	/* no need to lock here, as rbd_dev is not registered yet */
3973 	ret = rbd_dev_snaps_update(rbd_dev);
3974 	if (ret)
3975 		return ret;
3976 
3977 	ret = rbd_dev_probe_update_spec(rbd_dev);
3978 	if (ret)
3979 		goto err_out_snaps;
3980 
3981 	ret = rbd_dev_set_mapping(rbd_dev);
3982 	if (ret)
3983 		goto err_out_snaps;
3984 
	/* Generate a unique id: find the highest id in use and add one */
3986 	rbd_dev_id_get(rbd_dev);
3987 
3988 	/* Fill in the device name, now that we have its id. */
3989 	BUILD_BUG_ON(DEV_NAME_LEN
3990 			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3991 	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
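	/* The names produced are "rbd0", "rbd1", and so on */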
3992 
3993 	/* Get our block major device number. */
3994 
3995 	ret = register_blkdev(0, rbd_dev->name);
3996 	if (ret < 0)
3997 		goto err_out_id;
3998 	rbd_dev->major = ret;
3999 
4000 	/* Set up the blkdev mapping. */
4001 
4002 	ret = rbd_init_disk(rbd_dev);
4003 	if (ret)
4004 		goto err_out_blkdev;
4005 
4006 	ret = rbd_bus_add_dev(rbd_dev);
4007 	if (ret)
4008 		goto err_out_disk;
4009 
4010 	/*
4011 	 * At this point cleanup in the event of an error is the job
4012 	 * of the sysfs code (initiated by rbd_bus_del_dev()).
4013 	 */
4014 	down_write(&rbd_dev->header_rwsem);
4015 	ret = rbd_dev_snaps_register(rbd_dev);
4016 	up_write(&rbd_dev->header_rwsem);
4017 	if (ret)
4018 		goto err_out_bus;
4019 
4020 	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
4021 	if (ret)
4022 		goto err_out_bus;
4023 
4024 	/* Everything's ready.  Announce the disk to the world. */
4025 
4026 	add_disk(rbd_dev->disk);
4027 
4028 	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4029 		(unsigned long long) rbd_dev->mapping.size);
4030 
4031 	return ret;
4032 err_out_bus:
	/* this will also clean up the rest of the rbd_dev state */
4034 
4035 	rbd_bus_del_dev(rbd_dev);
4036 
4037 	return ret;
4038 err_out_disk:
4039 	rbd_free_disk(rbd_dev);
4040 err_out_blkdev:
4041 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
4042 err_out_id:
4043 	rbd_dev_id_put(rbd_dev);
4044 err_out_snaps:
4045 	rbd_remove_all_snaps(rbd_dev);
4046 
4047 	return ret;
4048 }
4049 
4050 /*
4051  * Probe for the existence of the header object for the given rbd
4052  * device.  For format 2 images this includes determining the image
4053  * id.
4054  */
4055 static int rbd_dev_probe(struct rbd_device *rbd_dev)
4056 {
4057 	int ret;
4058 
4059 	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image we'll get ENOENT back; on any error here
	 * we fall back and probe for a format 1 image.
4063 	 */
4064 	ret = rbd_dev_image_id(rbd_dev);
4065 	if (ret)
4066 		ret = rbd_dev_v1_probe(rbd_dev);
4067 	else
4068 		ret = rbd_dev_v2_probe(rbd_dev);
4069 	if (ret) {
4070 		dout("probe failed, returning %d\n", ret);
4071 
4072 		return ret;
4073 	}
4074 
4075 	ret = rbd_dev_probe_finish(rbd_dev);
4076 	if (ret)
4077 		rbd_header_free(&rbd_dev->header);
4078 
4079 	return ret;
4080 }
4081 
4082 static ssize_t rbd_add(struct bus_type *bus,
4083 		       const char *buf,
4084 		       size_t count)
4085 {
4086 	struct rbd_device *rbd_dev = NULL;
4087 	struct ceph_options *ceph_opts = NULL;
4088 	struct rbd_options *rbd_opts = NULL;
4089 	struct rbd_spec *spec = NULL;
4090 	struct rbd_client *rbdc;
4091 	struct ceph_osd_client *osdc;
4092 	int rc = -ENOMEM;
4093 
4094 	if (!try_module_get(THIS_MODULE))
4095 		return -ENODEV;
4096 
4097 	/* parse add command */
4098 	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4099 	if (rc < 0)
4100 		goto err_out_module;
4101 
4102 	rbdc = rbd_get_client(ceph_opts);
4103 	if (IS_ERR(rbdc)) {
4104 		rc = PTR_ERR(rbdc);
4105 		goto err_out_args;
4106 	}
4107 	ceph_opts = NULL;	/* rbd_dev client now owns this */
4108 
4109 	/* pick the pool */
4110 	osdc = &rbdc->client->osdc;
4111 	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
4112 	if (rc < 0)
4113 		goto err_out_client;
4114 	spec->pool_id = (u64) rc;
4115 
4116 	/* The ceph file layout needs to fit pool id in 32 bits */
4117 
4118 	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
4119 		rc = -EIO;
4120 		goto err_out_client;
4121 	}
4122 
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;	/* rc still holds the pool id here */
		goto err_out_client;
	}
4126 	rbdc = NULL;		/* rbd_dev now owns this */
4127 	spec = NULL;		/* rbd_dev now owns this */
4128 
4129 	rbd_dev->mapping.read_only = rbd_opts->read_only;
4130 	kfree(rbd_opts);
4131 	rbd_opts = NULL;	/* done with this */
4132 
4133 	rc = rbd_dev_probe(rbd_dev);
4134 	if (rc < 0)
4135 		goto err_out_rbd_dev;
4136 
4137 	return count;
4138 err_out_rbd_dev:
4139 	rbd_dev_destroy(rbd_dev);
4140 err_out_client:
4141 	rbd_put_client(rbdc);
4142 err_out_args:
4143 	if (ceph_opts)
4144 		ceph_destroy_options(ceph_opts);
4145 	kfree(rbd_opts);
4146 	rbd_spec_put(spec);
4147 err_out_module:
4148 	module_put(THIS_MODULE);
4149 
4150 	dout("Error adding device %s\n", buf);
4151 
4152 	return (ssize_t) rc;
4153 }
4154 
4155 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
4156 {
4157 	struct list_head *tmp;
4158 	struct rbd_device *rbd_dev;
4159 
4160 	spin_lock(&rbd_dev_list_lock);
4161 	list_for_each(tmp, &rbd_dev_list) {
4162 		rbd_dev = list_entry(tmp, struct rbd_device, node);
4163 		if (rbd_dev->dev_id == dev_id) {
4164 			spin_unlock(&rbd_dev_list_lock);
4165 			return rbd_dev;
4166 		}
4167 	}
4168 	spin_unlock(&rbd_dev_list_lock);
4169 	return NULL;
4170 }
4171 
4172 static void rbd_dev_release(struct device *dev)
4173 {
4174 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4175 
4176 	if (rbd_dev->watch_event)
4177 		rbd_dev_header_watch_sync(rbd_dev, 0);
4178 
4179 	/* clean up and free blkdev */
4180 	rbd_free_disk(rbd_dev);
4181 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
4182 
4183 	/* release allocated disk header fields */
4184 	rbd_header_free(&rbd_dev->header);
4185 
4186 	/* done with the id, and with the rbd_dev */
4187 	rbd_dev_id_put(rbd_dev);
4188 	rbd_assert(rbd_dev->rbd_client != NULL);
4189 	rbd_dev_destroy(rbd_dev);
4190 
4191 	/* release module ref */
4192 	module_put(THIS_MODULE);
4193 }
4194 
4195 static ssize_t rbd_remove(struct bus_type *bus,
4196 			  const char *buf,
4197 			  size_t count)
4198 {
4199 	struct rbd_device *rbd_dev = NULL;
4200 	int target_id, rc;
4201 	unsigned long ul;
4202 	int ret = count;
4203 
4204 	rc = strict_strtoul(buf, 10, &ul);
4205 	if (rc)
4206 		return rc;
4207 
4208 	/* convert to int; abort if we lost anything in the conversion */
4209 	target_id = (int) ul;
4210 	if (target_id != ul)
4211 		return -EINVAL;
4212 
4213 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4214 
4215 	rbd_dev = __rbd_get_dev(target_id);
4216 	if (!rbd_dev) {
4217 		ret = -ENOENT;
4218 		goto done;
4219 	}
4220 
4221 	spin_lock_irq(&rbd_dev->lock);
4222 	if (rbd_dev->open_count)
4223 		ret = -EBUSY;
4224 	else
4225 		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
4226 	spin_unlock_irq(&rbd_dev->lock);
4227 	if (ret < 0)
4228 		goto done;
4229 
4230 	rbd_remove_all_snaps(rbd_dev);
4231 	rbd_bus_del_dev(rbd_dev);
4232 
4233 done:
4234 	mutex_unlock(&ctl_mutex);
4235 
4236 	return ret;
4237 }
4238 
4239 /*
4240  * create control files in sysfs
4241  * /sys/bus/rbd/...
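 *
 * For illustration (Documentation/ABI/testing/sysfs-bus-rbd is the
 * authoritative reference), a mapped device with id 2 would be
 * removed with:
 *
 *   echo 2 > /sys/bus/rbd/remove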
4242  */
4243 static int rbd_sysfs_init(void)
4244 {
4245 	int ret;
4246 
4247 	ret = device_register(&rbd_root_dev);
4248 	if (ret < 0)
4249 		return ret;
4250 
4251 	ret = bus_register(&rbd_bus_type);
4252 	if (ret < 0)
4253 		device_unregister(&rbd_root_dev);
4254 
4255 	return ret;
4256 }
4257 
4258 static void rbd_sysfs_cleanup(void)
4259 {
4260 	bus_unregister(&rbd_bus_type);
4261 	device_unregister(&rbd_root_dev);
4262 }
4263 
4264 static int __init rbd_init(void)
4265 {
4266 	int rc;
4267 
4268 	if (!libceph_compatible(NULL)) {
4269 		rbd_warn(NULL, "libceph incompatibility (quitting)");
4270 
4271 		return -EINVAL;
4272 	}
4273 	rc = rbd_sysfs_init();
4274 	if (rc)
4275 		return rc;
4276 	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
4277 	return 0;
4278 }
4279 
4280 static void __exit rbd_exit(void)
4281 {
4282 	rbd_sysfs_cleanup();
4283 }
4284 
4285 module_init(rbd_init);
4286 module_exit(rbd_exit);
4287 
4288 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
4289 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
4290 MODULE_DESCRIPTION("rados block device");
4291 
4292 /* following authorship retained from original osdblk.c */
4293 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
4294 
4295 MODULE_LICENSE("GPL");
4296