xref: /linux/block/genhd.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  *  gendisk handling
3  */
4 
5 #include <linux/module.h>
6 #include <linux/fs.h>
7 #include <linux/genhd.h>
8 #include <linux/kdev_t.h>
9 #include <linux/kernel.h>
10 #include <linux/blkdev.h>
11 #include <linux/init.h>
12 #include <linux/spinlock.h>
13 #include <linux/proc_fs.h>
14 #include <linux/seq_file.h>
15 #include <linux/slab.h>
16 #include <linux/kmod.h>
17 #include <linux/kobj_map.h>
18 #include <linux/buffer_head.h>
19 #include <linux/mutex.h>
20 #include <linux/idr.h>
21 #include <linux/log2.h>
22 #include <linux/ctype.h>
23 
24 #include "blk.h"
25 
26 static DEFINE_MUTEX(block_class_lock);
27 struct kobject *block_depr;
28 
29 /* for extended dynamic devt allocation, currently only one major is used */
30 #define MAX_EXT_DEVT		(1 << MINORBITS)
31 
32 /* For extended devt allocation.  ext_devt_mutex prevents look up
33  * results from going away underneath its user.
34  */
35 static DEFINE_MUTEX(ext_devt_mutex);
36 static DEFINE_IDR(ext_devt_idr);
37 
38 static struct device_type disk_type;
39 
40 static void disk_add_events(struct gendisk *disk);
41 static void disk_del_events(struct gendisk *disk);
42 static void disk_release_events(struct gendisk *disk);
43 
44 /**
45  * disk_get_part - get partition
46  * @disk: disk to look the partition up in
47  * @partno: partition number
48  *
49  * Look for partition @partno from @disk.  If found, increment
50  * reference count and return it.
51  *
52  * CONTEXT:
53  * Don't care.
54  *
55  * RETURNS:
56  * Pointer to the found partition on success, NULL if not found.
57  */
58 struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
59 {
60 	struct hd_struct *part = NULL;
61 	struct disk_part_tbl *ptbl;
62 
63 	if (unlikely(partno < 0))
64 		return NULL;
65 
66 	rcu_read_lock();
67 
68 	ptbl = rcu_dereference(disk->part_tbl);
69 	if (likely(partno < ptbl->len)) {
70 		part = rcu_dereference(ptbl->part[partno]);
71 		if (part)
72 			get_device(part_to_dev(part));
73 	}
74 
75 	rcu_read_unlock();
76 
77 	return part;
78 }
79 EXPORT_SYMBOL_GPL(disk_get_part);
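
/*
 * Example (editorial sketch, not part of the original file): a caller that
 * already holds a reference to @disk can pin a partition, use it, and then
 * drop it again with disk_put_part().  "disk" and "partno" are assumed to
 * come from the (hypothetical) caller.
 *
 *	struct hd_struct *part = disk_get_part(disk, partno);
 *
 *	if (part) {
 *		pr_info("%s: partition %d has %llu sectors\n",
 *			disk->disk_name, partno,
 *			(unsigned long long)part->nr_sects);
 *		disk_put_part(part);
 *	}
 */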
80 
81 /**
82  * disk_part_iter_init - initialize partition iterator
83  * @piter: iterator to initialize
84  * @disk: disk to iterate over
85  * @flags: DISK_PITER_* flags
86  *
87  * Initialize @piter so that it iterates over partitions of @disk.
88  *
89  * CONTEXT:
90  * Don't care.
91  */
92 void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
93 			  unsigned int flags)
94 {
95 	struct disk_part_tbl *ptbl;
96 
97 	rcu_read_lock();
98 	ptbl = rcu_dereference(disk->part_tbl);
99 
100 	piter->disk = disk;
101 	piter->part = NULL;
102 
103 	if (flags & DISK_PITER_REVERSE)
104 		piter->idx = ptbl->len - 1;
105 	else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
106 		piter->idx = 0;
107 	else
108 		piter->idx = 1;
109 
110 	piter->flags = flags;
111 
112 	rcu_read_unlock();
113 }
114 EXPORT_SYMBOL_GPL(disk_part_iter_init);
115 
116 /**
117  * disk_part_iter_next - proceed iterator to the next partition and return it
118  * @piter: iterator of interest
119  *
120  * Proceed @piter to the next partition and return it.
121  *
122  * CONTEXT:
123  * Don't care.
124  */
125 struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
126 {
127 	struct disk_part_tbl *ptbl;
128 	int inc, end;
129 
130 	/* put the last partition */
131 	disk_put_part(piter->part);
132 	piter->part = NULL;
133 
134 	/* get part_tbl */
135 	rcu_read_lock();
136 	ptbl = rcu_dereference(piter->disk->part_tbl);
137 
138 	/* determine iteration parameters */
139 	if (piter->flags & DISK_PITER_REVERSE) {
140 		inc = -1;
141 		if (piter->flags & (DISK_PITER_INCL_PART0 |
142 				    DISK_PITER_INCL_EMPTY_PART0))
143 			end = -1;
144 		else
145 			end = 0;
146 	} else {
147 		inc = 1;
148 		end = ptbl->len;
149 	}
150 
151 	/* iterate to the next partition */
152 	for (; piter->idx != end; piter->idx += inc) {
153 		struct hd_struct *part;
154 
155 		part = rcu_dereference(ptbl->part[piter->idx]);
156 		if (!part)
157 			continue;
158 		if (!part->nr_sects &&
159 		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
160 		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
161 		      piter->idx == 0))
162 			continue;
163 
164 		get_device(part_to_dev(part));
165 		piter->part = part;
166 		piter->idx += inc;
167 		break;
168 	}
169 
170 	rcu_read_unlock();
171 
172 	return piter->part;
173 }
174 EXPORT_SYMBOL_GPL(disk_part_iter_next);
175 
176 /**
177  * disk_part_iter_exit - finish up partition iteration
178  * @piter: iter of interest
179  *
180  * Called when iteration is over.  Cleans up @piter.
181  *
182  * CONTEXT:
183  * Don't care.
184  */
185 void disk_part_iter_exit(struct disk_part_iter *piter)
186 {
187 	disk_put_part(piter->part);
188 	piter->part = NULL;
189 }
190 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
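
/*
 * Example (editorial sketch): the three iterator helpers above are meant to
 * be used together.  Assuming a valid "disk" pointer, the loop below visits
 * every partition, including empty ones; each hd_struct returned by
 * disk_part_iter_next() is reference-counted and released either on the
 * next iteration or by disk_part_iter_exit().
 *
 *	struct disk_part_iter piter;
 *	struct hd_struct *part;
 *
 *	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
 *	while ((part = disk_part_iter_next(&piter)))
 *		pr_info("%s: part%d starts at sector %llu\n",
 *			disk->disk_name, part->partno,
 *			(unsigned long long)part->start_sect);
 *	disk_part_iter_exit(&piter);
 */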
191 
192 static inline int sector_in_part(struct hd_struct *part, sector_t sector)
193 {
194 	return part->start_sect <= sector &&
195 		sector < part->start_sect + part->nr_sects;
196 }
197 
198 /**
199  * disk_map_sector_rcu - map sector to partition
200  * @disk: gendisk of interest
201  * @sector: sector to map
202  *
203  * Find out which partition @sector maps to on @disk.  This is
204  * primarily used for stats accounting.
205  *
206  * CONTEXT:
207  * RCU read locked.  The returned partition pointer is valid only
208  * while preemption is disabled.
209  *
210  * RETURNS:
211  * Found partition on success, part0 is returned if no partition matches
212  */
213 struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
214 {
215 	struct disk_part_tbl *ptbl;
216 	struct hd_struct *part;
217 	int i;
218 
219 	ptbl = rcu_dereference(disk->part_tbl);
220 
221 	part = rcu_dereference(ptbl->last_lookup);
222 	if (part && sector_in_part(part, sector))
223 		return part;
224 
225 	for (i = 1; i < ptbl->len; i++) {
226 		part = rcu_dereference(ptbl->part[i]);
227 
228 		if (part && sector_in_part(part, sector)) {
229 			rcu_assign_pointer(ptbl->last_lookup, part);
230 			return part;
231 		}
232 	}
233 	return &disk->part0;
234 }
235 EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
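
/*
 * Example (editorial sketch): per the CONTEXT note, callers must be inside
 * an RCU read-side critical section and may only use the returned pointer
 * while preemption stays disabled.  The part_stat_lock()/part_stat_unlock()
 * pair provides exactly that, so per-partition accounting along the lines
 * of what the block core does could look like:
 *
 *	struct hd_struct *part;
 *	int cpu;
 *
 *	cpu = part_stat_lock();
 *	part = disk_map_sector_rcu(disk, sector);
 *	part_stat_inc(cpu, part, ios[READ]);
 *	part_stat_unlock();
 */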
236 
237 /*
238  * Can be deleted altogether. Later.
239  *
240  */
241 static struct blk_major_name {
242 	struct blk_major_name *next;
243 	int major;
244 	char name[16];
245 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
246 
247 /* index in the above - for now: assume no multimajor ranges */
248 static inline int major_to_index(unsigned major)
249 {
250 	return major % BLKDEV_MAJOR_HASH_SIZE;
251 }
252 
253 #ifdef CONFIG_PROC_FS
254 void blkdev_show(struct seq_file *seqf, off_t offset)
255 {
256 	struct blk_major_name *dp;
257 
258 	if (offset < BLKDEV_MAJOR_HASH_SIZE) {
259 		mutex_lock(&block_class_lock);
260 		for (dp = major_names[offset]; dp; dp = dp->next)
261 			seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
262 		mutex_unlock(&block_class_lock);
263 	}
264 }
265 #endif /* CONFIG_PROC_FS */
266 
267 /**
268  * register_blkdev - register a new block device
269  *
270  * @major: the requested major device number [1..255]. If @major=0, try to
271  *         allocate any unused major number.
272  * @name: the name of the new block device as a zero terminated string
273  *
274  * The @name must be unique within the system.
275  *
276  * The return value depends on the @major input parameter.
277  *  - if a major device number was requested in range [1..255] then the
278  *    function returns zero on success, or a negative error code
279  *  - if any unused major number was requested with @major=0 parameter
280  *    then the return value is the allocated major number in range
281  *    [1..255] or a negative error code otherwise
282  */
283 int register_blkdev(unsigned int major, const char *name)
284 {
285 	struct blk_major_name **n, *p;
286 	int index, ret = 0;
287 
288 	mutex_lock(&block_class_lock);
289 
290 	/* temporary */
291 	if (major == 0) {
292 		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
293 			if (major_names[index] == NULL)
294 				break;
295 		}
296 
297 		if (index == 0) {
298 			printk("register_blkdev: failed to get major for %s\n",
299 			       name);
300 			ret = -EBUSY;
301 			goto out;
302 		}
303 		major = index;
304 		ret = major;
305 	}
306 
307 	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
308 	if (p == NULL) {
309 		ret = -ENOMEM;
310 		goto out;
311 	}
312 
313 	p->major = major;
314 	strlcpy(p->name, name, sizeof(p->name));
315 	p->next = NULL;
316 	index = major_to_index(major);
317 
318 	for (n = &major_names[index]; *n; n = &(*n)->next) {
319 		if ((*n)->major == major)
320 			break;
321 	}
322 	if (!*n)
323 		*n = p;
324 	else
325 		ret = -EBUSY;
326 
327 	if (ret < 0) {
328 		printk("register_blkdev: cannot get major %d for %s\n",
329 		       major, name);
330 		kfree(p);
331 	}
332 out:
333 	mutex_unlock(&block_class_lock);
334 	return ret;
335 }
336 
337 EXPORT_SYMBOL(register_blkdev);
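
/*
 * Example (editorial sketch): a driver that wants a dynamically assigned
 * major passes 0 and uses the return value; "mydrv" is a hypothetical name.
 *
 *	int major = register_blkdev(0, "mydrv");
 *
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_blkdev(major, "mydrv");
 */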
338 
339 void unregister_blkdev(unsigned int major, const char *name)
340 {
341 	struct blk_major_name **n;
342 	struct blk_major_name *p = NULL;
343 	int index = major_to_index(major);
344 
345 	mutex_lock(&block_class_lock);
346 	for (n = &major_names[index]; *n; n = &(*n)->next)
347 		if ((*n)->major == major)
348 			break;
349 	if (!*n || strcmp((*n)->name, name)) {
350 		WARN_ON(1);
351 	} else {
352 		p = *n;
353 		*n = p->next;
354 	}
355 	mutex_unlock(&block_class_lock);
356 	kfree(p);
357 }
358 
359 EXPORT_SYMBOL(unregister_blkdev);
360 
361 static struct kobj_map *bdev_map;
362 
363 /**
364  * blk_mangle_minor - scatter minor numbers apart
365  * @minor: minor number to mangle
366  *
367  * Scatter consecutively allocated @minor numbers apart if MANGLE_DEVT
368  * is enabled.  Mangling twice gives the original value.
369  *
370  * RETURNS:
371  * Mangled value.
372  *
373  * CONTEXT:
374  * Don't care.
375  */
376 static int blk_mangle_minor(int minor)
377 {
378 #ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
379 	int i;
380 
381 	for (i = 0; i < MINORBITS / 2; i++) {
382 		int low = minor & (1 << i);
383 		int high = minor & (1 << (MINORBITS - 1 - i));
384 		int distance = MINORBITS - 1 - 2 * i;
385 
386 		minor ^= low | high;	/* clear both bits */
387 		low <<= distance;	/* swap the positions */
388 		high >>= distance;
389 		minor |= low | high;	/* and set */
390 	}
391 #endif
392 	return minor;
393 }
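
/*
 * Editorial note: the loop above swaps bit i with bit (MINORBITS - 1 - i),
 * mirroring the low half of the minor into the high half and back.  With
 * MINORBITS == 20, for instance:
 *
 *	blk_mangle_minor(1)       == 0x80000	(bit 0 moves to bit 19)
 *	blk_mangle_minor(0x80000) == 1		(mangling twice is a no-op)
 *
 * so consecutive IDR indices end up widely scattered across the extended
 * dev_t space while the mapping stays trivially reversible.
 */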
394 
395 /**
396  * blk_alloc_devt - allocate a dev_t for a partition
397  * @part: partition to allocate dev_t for
398  * @devt: out parameter for resulting dev_t
399  *
400  * Allocate a dev_t for a block device.
401  *
402  * RETURNS:
403  * 0 on success, allocated dev_t is returned in *@devt.  -errno on
404  * failure.
405  *
406  * CONTEXT:
407  * Might sleep.
408  */
409 int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
410 {
411 	struct gendisk *disk = part_to_disk(part);
412 	int idx, rc;
413 
414 	/* in consecutive minor range? */
415 	if (part->partno < disk->minors) {
416 		*devt = MKDEV(disk->major, disk->first_minor + part->partno);
417 		return 0;
418 	}
419 
420 	/* allocate ext devt */
421 	do {
422 		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
423 			return -ENOMEM;
424 		rc = idr_get_new(&ext_devt_idr, part, &idx);
425 	} while (rc == -EAGAIN);
426 
427 	if (rc)
428 		return rc;
429 
430 	if (idx > MAX_EXT_DEVT) {
431 		idr_remove(&ext_devt_idr, idx);
432 		return -EBUSY;
433 	}
434 
435 	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
436 	return 0;
437 }
438 
439 /**
440  * blk_free_devt - free a dev_t
441  * @devt: dev_t to free
442  *
443  * Free @devt which was allocated using blk_alloc_devt().
444  *
445  * CONTEXT:
446  * Might sleep.
447  */
448 void blk_free_devt(dev_t devt)
449 {
450 	might_sleep();
451 
452 	if (devt == MKDEV(0, 0))
453 		return;
454 
455 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
456 		mutex_lock(&ext_devt_mutex);
457 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
458 		mutex_unlock(&ext_devt_mutex);
459 	}
460 }
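
/*
 * Example (editorial sketch): blk_alloc_devt() and blk_free_devt() are used
 * in pairs by the partition handling code.  A (hypothetical) caller that
 * has already linked @part into a gendisk would do roughly:
 *
 *	dev_t devt;
 *	int err;
 *
 *	err = blk_alloc_devt(part, &devt);
 *	if (err)
 *		return err;
 *	...
 *	blk_free_devt(devt);
 */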
461 
462 static char *bdevt_str(dev_t devt, char *buf)
463 {
464 	if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
465 		char tbuf[BDEVT_SIZE];
466 		snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
467 		snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
468 	} else
469 		snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
470 
471 	return buf;
472 }
473 
474 /*
475  * Register device numbers dev..(dev+range-1)
476  * range must be nonzero
477  * The hash chain is sorted on range, so that subranges can override.
478  */
479 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
480 			 struct kobject *(*probe)(dev_t, int *, void *),
481 			 int (*lock)(dev_t, void *), void *data)
482 {
483 	kobj_map(bdev_map, devt, range, module, probe, lock, data);
484 }
485 
486 EXPORT_SYMBOL(blk_register_region);
487 
488 void blk_unregister_region(dev_t devt, unsigned long range)
489 {
490 	kobj_unmap(bdev_map, devt, range);
491 }
492 
493 EXPORT_SYMBOL(blk_unregister_region);
494 
495 static struct kobject *exact_match(dev_t devt, int *partno, void *data)
496 {
497 	struct gendisk *p = data;
498 
499 	return &disk_to_dev(p)->kobj;
500 }
501 
502 static int exact_lock(dev_t devt, void *data)
503 {
504 	struct gendisk *p = data;
505 
506 	if (!get_disk(p))
507 		return -1;
508 	return 0;
509 }
510 
511 void register_disk(struct gendisk *disk)
512 {
513 	struct device *ddev = disk_to_dev(disk);
514 	struct block_device *bdev;
515 	struct disk_part_iter piter;
516 	struct hd_struct *part;
517 	int err;
518 
519 	ddev->parent = disk->driverfs_dev;
520 
521 	dev_set_name(ddev, disk->disk_name);
522 
523 	/* delay uevents, until we scanned partition table */
524 	dev_set_uevent_suppress(ddev, 1);
525 
526 	if (device_add(ddev))
527 		return;
528 	if (!sysfs_deprecated) {
529 		err = sysfs_create_link(block_depr, &ddev->kobj,
530 					kobject_name(&ddev->kobj));
531 		if (err) {
532 			device_del(ddev);
533 			return;
534 		}
535 	}
536 	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
537 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
538 
539 	/* No minors to use for partitions */
540 	if (!disk_part_scan_enabled(disk))
541 		goto exit;
542 
543 	/* No such device (e.g., media were just removed) */
544 	if (!get_capacity(disk))
545 		goto exit;
546 
547 	bdev = bdget_disk(disk, 0);
548 	if (!bdev)
549 		goto exit;
550 
551 	bdev->bd_invalidated = 1;
552 	err = blkdev_get(bdev, FMODE_READ, NULL);
553 	if (err < 0)
554 		goto exit;
555 	blkdev_put(bdev, FMODE_READ);
556 
557 exit:
558 	/* announce disk after possible partitions are created */
559 	dev_set_uevent_suppress(ddev, 0);
560 	kobject_uevent(&ddev->kobj, KOBJ_ADD);
561 
562 	/* announce possible partitions */
563 	disk_part_iter_init(&piter, disk, 0);
564 	while ((part = disk_part_iter_next(&piter)))
565 		kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
566 	disk_part_iter_exit(&piter);
567 }
568 
569 /**
570  * add_disk - add partitioning information to kernel list
571  * @disk: per-device partitioning information
572  *
573  * This function registers the partitioning information in @disk
574  * with the kernel.
575  *
576  * FIXME: error handling
577  */
578 void add_disk(struct gendisk *disk)
579 {
580 	struct backing_dev_info *bdi;
581 	dev_t devt;
582 	int retval;
583 
584 	/* minors == 0 indicates to use ext devt from part0 and should
585 	 * be accompanied by the GENHD_FL_EXT_DEVT flag.  Make sure all
586 	 * parameters make sense.
587 	 */
588 	WARN_ON(disk->minors && !(disk->major || disk->first_minor));
589 	WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
590 
591 	disk->flags |= GENHD_FL_UP;
592 
593 	retval = blk_alloc_devt(&disk->part0, &devt);
594 	if (retval) {
595 		WARN_ON(1);
596 		return;
597 	}
598 	disk_to_dev(disk)->devt = devt;
599 
600 	/* ->major and ->first_minor aren't supposed to be
601 	 * dereferenced from here on, but set them just in case.
602 	 */
603 	disk->major = MAJOR(devt);
604 	disk->first_minor = MINOR(devt);
605 
606 	/* Register BDI before referencing it from bdev */
607 	bdi = &disk->queue->backing_dev_info;
608 	bdi_register_dev(bdi, disk_devt(disk));
609 
610 	blk_register_region(disk_devt(disk), disk->minors, NULL,
611 			    exact_match, exact_lock, disk);
612 	register_disk(disk);
613 	blk_register_queue(disk);
614 
615 	/*
616 	 * Take an extra ref on queue which will be put on disk_release()
617 	 * so that it sticks around as long as @disk is there.
618 	 */
619 	WARN_ON_ONCE(blk_get_queue(disk->queue));
620 
621 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
622 				   "bdi");
623 	WARN_ON(retval);
624 
625 	disk_add_events(disk);
626 }
627 EXPORT_SYMBOL(add_disk);
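
/*
 * Example (editorial sketch): the usual registration sequence in a block
 * driver.  The names mydrv_request, mydrv_lock, mydrv_fops and NSECTORS are
 * hypothetical stand-ins for driver specifics, "major" is assumed to come
 * from register_blkdev(), and error handling is omitted for brevity.
 *
 *	struct request_queue *q = blk_init_queue(mydrv_request, &mydrv_lock);
 *	struct gendisk *disk = alloc_disk(16);
 *
 *	disk->major = major;
 *	disk->first_minor = 0;
 *	disk->fops = &mydrv_fops;
 *	disk->queue = q;
 *	snprintf(disk->disk_name, sizeof(disk->disk_name), "mydrv0");
 *	set_capacity(disk, NSECTORS);
 *	add_disk(disk);
 */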
628 
629 void del_gendisk(struct gendisk *disk)
630 {
631 	struct disk_part_iter piter;
632 	struct hd_struct *part;
633 
634 	disk_del_events(disk);
635 
636 	/* invalidate stuff */
637 	disk_part_iter_init(&piter, disk,
638 			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
639 	while ((part = disk_part_iter_next(&piter))) {
640 		invalidate_partition(disk, part->partno);
641 		delete_partition(disk, part->partno);
642 	}
643 	disk_part_iter_exit(&piter);
644 
645 	invalidate_partition(disk, 0);
646 	blk_free_devt(disk_to_dev(disk)->devt);
647 	set_capacity(disk, 0);
648 	disk->flags &= ~GENHD_FL_UP;
649 
650 	sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
651 	bdi_unregister(&disk->queue->backing_dev_info);
652 	blk_unregister_queue(disk);
653 	blk_unregister_region(disk_devt(disk), disk->minors);
654 
655 	part_stat_set_all(&disk->part0, 0);
656 	disk->part0.stamp = 0;
657 
658 	kobject_put(disk->part0.holder_dir);
659 	kobject_put(disk->slave_dir);
660 	disk->driverfs_dev = NULL;
661 	if (!sysfs_deprecated)
662 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
663 	device_del(disk_to_dev(disk));
664 }
665 EXPORT_SYMBOL(del_gendisk);
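
/*
 * Example (editorial sketch): teardown mirrors add_disk().  A driver's exit
 * path, using the same hypothetical names as above, typically does:
 *
 *	del_gendisk(disk);
 *	put_disk(disk);
 *	blk_cleanup_queue(q);
 *	unregister_blkdev(major, "mydrv");
 */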
666 
667 /**
668  * get_gendisk - get partitioning information for a given device
669  * @devt: device to get partitioning information for
670  * @partno: returned partition index
671  *
672  * This function gets the structure containing partitioning
673  * information for the given device @devt.
674  */
675 struct gendisk *get_gendisk(dev_t devt, int *partno)
676 {
677 	struct gendisk *disk = NULL;
678 
679 	if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
680 		struct kobject *kobj;
681 
682 		kobj = kobj_lookup(bdev_map, devt, partno);
683 		if (kobj)
684 			disk = dev_to_disk(kobj_to_dev(kobj));
685 	} else {
686 		struct hd_struct *part;
687 
688 		mutex_lock(&ext_devt_mutex);
689 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
690 		if (part && get_disk(part_to_disk(part))) {
691 			*partno = part->partno;
692 			disk = part_to_disk(part);
693 		}
694 		mutex_unlock(&ext_devt_mutex);
695 	}
696 
697 	return disk;
698 }
699 EXPORT_SYMBOL(get_gendisk);
700 
701 /**
702  * bdget_disk - do bdget() by gendisk and partition number
703  * @disk: gendisk of interest
704  * @partno: partition number
705  *
706  * Find partition @partno from @disk, do bdget() on it.
707  *
708  * CONTEXT:
709  * Don't care.
710  *
711  * RETURNS:
712  * Resulting block_device on success, NULL on failure.
713  */
714 struct block_device *bdget_disk(struct gendisk *disk, int partno)
715 {
716 	struct hd_struct *part;
717 	struct block_device *bdev = NULL;
718 
719 	part = disk_get_part(disk, partno);
720 	if (part)
721 		bdev = bdget(part_devt(part));
722 	disk_put_part(part);
723 
724 	return bdev;
725 }
726 EXPORT_SYMBOL(bdget_disk);
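
/*
 * Example (editorial sketch): a block_device obtained here must be released
 * with bdput() once the caller is done with it:
 *
 *	struct block_device *bdev = bdget_disk(disk, partno);
 *
 *	if (bdev) {
 *		...
 *		bdput(bdev);
 *	}
 */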
727 
728 /*
729  * print a full list of all partitions - intended for places where the root
730  * filesystem can't be mounted and thus to give the victim some idea of what
731  * went wrong
732  */
733 void __init printk_all_partitions(void)
734 {
735 	struct class_dev_iter iter;
736 	struct device *dev;
737 
738 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
739 	while ((dev = class_dev_iter_next(&iter))) {
740 		struct gendisk *disk = dev_to_disk(dev);
741 		struct disk_part_iter piter;
742 		struct hd_struct *part;
743 		char name_buf[BDEVNAME_SIZE];
744 		char devt_buf[BDEVT_SIZE];
745 		u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
746 
747 		/*
748 		 * Don't show empty devices or things that have been
749 		 * suppressed
750 		 */
751 		if (get_capacity(disk) == 0 ||
752 		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
753 			continue;
754 
755 		/*
756 		 * Note, unlike /proc/partitions, I am showing the
757 		 * numbers in hex - the same format as the root=
758 		 * option takes.
759 		 */
760 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
761 		while ((part = disk_part_iter_next(&piter))) {
762 			bool is_part0 = part == &disk->part0;
763 
764 			uuid[0] = 0;
765 			if (part->info)
766 				part_unpack_uuid(part->info->uuid, uuid);
767 
768 			printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
769 			       bdevt_str(part_devt(part), devt_buf),
770 			       (unsigned long long)part->nr_sects >> 1,
771 			       disk_name(disk, part->partno, name_buf), uuid);
772 			if (is_part0) {
773 				if (disk->driverfs_dev != NULL &&
774 				    disk->driverfs_dev->driver != NULL)
775 					printk(" driver: %s\n",
776 					      disk->driverfs_dev->driver->name);
777 				else
778 					printk(" (driver?)\n");
779 			} else
780 				printk("\n");
781 		}
782 		disk_part_iter_exit(&piter);
783 	}
784 	class_dev_iter_exit(&iter);
785 }
786 
787 #ifdef CONFIG_PROC_FS
788 /* iterator */
789 static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
790 {
791 	loff_t skip = *pos;
792 	struct class_dev_iter *iter;
793 	struct device *dev;
794 
795 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
796 	if (!iter)
797 		return ERR_PTR(-ENOMEM);
798 
799 	seqf->private = iter;
800 	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
801 	do {
802 		dev = class_dev_iter_next(iter);
803 		if (!dev)
804 			return NULL;
805 	} while (skip--);
806 
807 	return dev_to_disk(dev);
808 }
809 
810 static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
811 {
812 	struct device *dev;
813 
814 	(*pos)++;
815 	dev = class_dev_iter_next(seqf->private);
816 	if (dev)
817 		return dev_to_disk(dev);
818 
819 	return NULL;
820 }
821 
822 static void disk_seqf_stop(struct seq_file *seqf, void *v)
823 {
824 	struct class_dev_iter *iter = seqf->private;
825 
826 	/* stop is called even after start failed :-( */
827 	if (iter) {
828 		class_dev_iter_exit(iter);
829 		kfree(iter);
830 	}
831 }
832 
833 static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
834 {
835 	static void *p;
836 
837 	p = disk_seqf_start(seqf, pos);
838 	if (!IS_ERR_OR_NULL(p) && !*pos)
839 		seq_puts(seqf, "major minor  #blocks  name\n\n");
840 	return p;
841 }
842 
843 static int show_partition(struct seq_file *seqf, void *v)
844 {
845 	struct gendisk *sgp = v;
846 	struct disk_part_iter piter;
847 	struct hd_struct *part;
848 	char buf[BDEVNAME_SIZE];
849 
850 	/* Don't show non-partitionable removable devices or empty devices */
851 	if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
852 				   (sgp->flags & GENHD_FL_REMOVABLE)))
853 		return 0;
854 	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
855 		return 0;
856 
857 	/* show the full disk and all non-0 size partitions of it */
858 	disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
859 	while ((part = disk_part_iter_next(&piter)))
860 		seq_printf(seqf, "%4d  %7d %10llu %s\n",
861 			   MAJOR(part_devt(part)), MINOR(part_devt(part)),
862 			   (unsigned long long)part->nr_sects >> 1,
863 			   disk_name(sgp, part->partno, buf));
864 	disk_part_iter_exit(&piter);
865 
866 	return 0;
867 }
868 
869 static const struct seq_operations partitions_op = {
870 	.start	= show_partition_start,
871 	.next	= disk_seqf_next,
872 	.stop	= disk_seqf_stop,
873 	.show	= show_partition
874 };
875 
876 static int partitions_open(struct inode *inode, struct file *file)
877 {
878 	return seq_open(file, &partitions_op);
879 }
880 
881 static const struct file_operations proc_partitions_operations = {
882 	.open		= partitions_open,
883 	.read		= seq_read,
884 	.llseek		= seq_lseek,
885 	.release	= seq_release,
886 };
887 #endif
888 
889 
890 static struct kobject *base_probe(dev_t devt, int *partno, void *data)
891 {
892 	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
893 		/* Make old-style 2.4 aliases work */
894 		request_module("block-major-%d", MAJOR(devt));
895 	return NULL;
896 }
897 
898 static int __init genhd_device_init(void)
899 {
900 	int error;
901 
902 	block_class.dev_kobj = sysfs_dev_block_kobj;
903 	error = class_register(&block_class);
904 	if (unlikely(error))
905 		return error;
906 	bdev_map = kobj_map_init(base_probe, &block_class_lock);
907 	blk_dev_init();
908 
909 	register_blkdev(BLOCK_EXT_MAJOR, "blkext");
910 
911 	/* create top-level block dir */
912 	if (!sysfs_deprecated)
913 		block_depr = kobject_create_and_add("block", NULL);
914 	return 0;
915 }
916 
917 subsys_initcall(genhd_device_init);
918 
919 static ssize_t alias_show(struct device *dev,
920 			       struct device_attribute *attr, char *buf)
921 {
922 	struct gendisk *disk = dev_to_disk(dev);
923 	ssize_t ret = 0;
924 
925 	if (disk->alias)
926 		ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
927 	return ret;
928 }
929 
930 static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
931 			   const char *buf, size_t count)
932 {
933 	struct gendisk *disk = dev_to_disk(dev);
934 	char *alias;
935 	char *envp[] = { NULL, NULL };
936 	unsigned char c;
937 	int i;
938 	ssize_t ret = count;
939 
940 	if (!count)
941 		return -EINVAL;
942 
943 	if (count >= ALIAS_LEN) {
944 		printk(KERN_ERR "alias: alias is too long\n");
945 		return -EINVAL;
946 	}
947 
948 	/* Validation check */
949 	for (i = 0; i < count; i++) {
950 		c = buf[i];
951 		if (i == count - 1 && c == '\n')
952 			break;
953 		if (!isalnum(c) && c != '_' && c != '-') {
954 			printk(KERN_ERR "alias: invalid alias\n");
955 			return -EINVAL;
956 		}
957 	}
958 
959 	if (disk->alias) {
960 		printk(KERN_INFO "alias: %s is already assigned (%s)\n",
961 		       disk->disk_name, disk->alias);
962 		return -EINVAL;
963 	}
964 
965 	alias = kasprintf(GFP_KERNEL, "%s", buf);
966 	if (!alias)
967 		return -ENOMEM;
968 
969 	if (alias[count - 1] == '\n')
970 		alias[count - 1] = '\0';
971 
972 	envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
973 	if (!envp[0]) {
974 		kfree(alias);
975 		return -ENOMEM;
976 	}
977 
978 	disk->alias = alias;
979 	printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
980 
981 	kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
982 
983 	kfree(envp[0]);
984 	return ret;
985 }
986 
987 static ssize_t disk_range_show(struct device *dev,
988 			       struct device_attribute *attr, char *buf)
989 {
990 	struct gendisk *disk = dev_to_disk(dev);
991 
992 	return sprintf(buf, "%d\n", disk->minors);
993 }
994 
995 static ssize_t disk_ext_range_show(struct device *dev,
996 				   struct device_attribute *attr, char *buf)
997 {
998 	struct gendisk *disk = dev_to_disk(dev);
999 
1000 	return sprintf(buf, "%d\n", disk_max_parts(disk));
1001 }
1002 
1003 static ssize_t disk_removable_show(struct device *dev,
1004 				   struct device_attribute *attr, char *buf)
1005 {
1006 	struct gendisk *disk = dev_to_disk(dev);
1007 
1008 	return sprintf(buf, "%d\n",
1009 		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
1010 }
1011 
1012 static ssize_t disk_ro_show(struct device *dev,
1013 				   struct device_attribute *attr, char *buf)
1014 {
1015 	struct gendisk *disk = dev_to_disk(dev);
1016 
1017 	return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
1018 }
1019 
1020 static ssize_t disk_capability_show(struct device *dev,
1021 				    struct device_attribute *attr, char *buf)
1022 {
1023 	struct gendisk *disk = dev_to_disk(dev);
1024 
1025 	return sprintf(buf, "%x\n", disk->flags);
1026 }
1027 
1028 static ssize_t disk_alignment_offset_show(struct device *dev,
1029 					  struct device_attribute *attr,
1030 					  char *buf)
1031 {
1032 	struct gendisk *disk = dev_to_disk(dev);
1033 
1034 	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
1035 }
1036 
1037 static ssize_t disk_discard_alignment_show(struct device *dev,
1038 					   struct device_attribute *attr,
1039 					   char *buf)
1040 {
1041 	struct gendisk *disk = dev_to_disk(dev);
1042 
1043 	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1044 }
1045 
1046 static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
1047 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
1048 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
1049 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
1050 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
1051 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
1052 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
1053 static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
1054 		   NULL);
1055 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
1056 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
1057 static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
1058 #ifdef CONFIG_FAIL_MAKE_REQUEST
1059 static struct device_attribute dev_attr_fail =
1060 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
1061 #endif
1062 #ifdef CONFIG_FAIL_IO_TIMEOUT
1063 static struct device_attribute dev_attr_fail_timeout =
1064 	__ATTR(io-timeout-fail,  S_IRUGO|S_IWUSR, part_timeout_show,
1065 		part_timeout_store);
1066 #endif
1067 
1068 static struct attribute *disk_attrs[] = {
1069 	&dev_attr_alias.attr,
1070 	&dev_attr_range.attr,
1071 	&dev_attr_ext_range.attr,
1072 	&dev_attr_removable.attr,
1073 	&dev_attr_ro.attr,
1074 	&dev_attr_size.attr,
1075 	&dev_attr_alignment_offset.attr,
1076 	&dev_attr_discard_alignment.attr,
1077 	&dev_attr_capability.attr,
1078 	&dev_attr_stat.attr,
1079 	&dev_attr_inflight.attr,
1080 #ifdef CONFIG_FAIL_MAKE_REQUEST
1081 	&dev_attr_fail.attr,
1082 #endif
1083 #ifdef CONFIG_FAIL_IO_TIMEOUT
1084 	&dev_attr_fail_timeout.attr,
1085 #endif
1086 	NULL
1087 };
1088 
1089 static struct attribute_group disk_attr_group = {
1090 	.attrs = disk_attrs,
1091 };
1092 
1093 static const struct attribute_group *disk_attr_groups[] = {
1094 	&disk_attr_group,
1095 	NULL
1096 };
1097 
1098 /**
1099  * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
1100  * @disk: disk to replace part_tbl for
1101  * @new_ptbl: new part_tbl to install
1102  *
1103  * Replace disk->part_tbl with @new_ptbl in RCU-safe way.  The
1104  * original ptbl is freed using RCU callback.
1105  *
1106  * LOCKING:
1107  * Matching bd_mutex locked.
1108  */
1109 static void disk_replace_part_tbl(struct gendisk *disk,
1110 				  struct disk_part_tbl *new_ptbl)
1111 {
1112 	struct disk_part_tbl *old_ptbl = disk->part_tbl;
1113 
1114 	rcu_assign_pointer(disk->part_tbl, new_ptbl);
1115 
1116 	if (old_ptbl) {
1117 		rcu_assign_pointer(old_ptbl->last_lookup, NULL);
1118 		kfree_rcu(old_ptbl, rcu_head);
1119 	}
1120 }
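
/*
 * Editorial note: this is the usual RCU publish/retire pattern.  Readers
 * such as disk_get_part() only ever observe either the old or the new
 * table:
 *
 *	rcu_read_lock();
 *	ptbl = rcu_dereference(disk->part_tbl);
 *	... use ptbl ...
 *	rcu_read_unlock();
 *
 * and the old table is freed only after all such readers have finished,
 * courtesy of kfree_rcu().
 */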
1121 
1122 /**
1123  * disk_expand_part_tbl - expand disk->part_tbl
1124  * @disk: disk to expand part_tbl for
1125  * @partno: expand such that this partno can fit in
1126  *
1127  * Expand disk->part_tbl such that @partno can fit in.  disk->part_tbl
1128  * uses RCU to allow unlocked dereferencing for stats and other stuff.
1129  *
1130  * LOCKING:
1131  * Matching bd_mutex locked, might sleep.
1132  *
1133  * RETURNS:
1134  * 0 on success, -errno on failure.
1135  */
1136 int disk_expand_part_tbl(struct gendisk *disk, int partno)
1137 {
1138 	struct disk_part_tbl *old_ptbl = disk->part_tbl;
1139 	struct disk_part_tbl *new_ptbl;
1140 	int len = old_ptbl ? old_ptbl->len : 0;
1141 	int target = partno + 1;
1142 	size_t size;
1143 	int i;
1144 
1145 	/* disk_max_parts() is zero during initialization, ignore if so */
1146 	if (disk_max_parts(disk) && target > disk_max_parts(disk))
1147 		return -EINVAL;
1148 
1149 	if (target <= len)
1150 		return 0;
1151 
1152 	size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
1153 	new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
1154 	if (!new_ptbl)
1155 		return -ENOMEM;
1156 
1157 	new_ptbl->len = target;
1158 
1159 	for (i = 0; i < len; i++)
1160 		rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
1161 
1162 	disk_replace_part_tbl(disk, new_ptbl);
1163 	return 0;
1164 }
1165 
1166 static void disk_release(struct device *dev)
1167 {
1168 	struct gendisk *disk = dev_to_disk(dev);
1169 
1170 	disk_release_events(disk);
1171 	kfree(disk->random);
1172 	disk_replace_part_tbl(disk, NULL);
1173 	free_part_stats(&disk->part0);
1174 	free_part_info(&disk->part0);
1175 	if (disk->queue)
1176 		blk_put_queue(disk->queue);
1177 	kfree(disk);
1178 }
1179 struct class block_class = {
1180 	.name		= "block",
1181 };
1182 
1183 static char *block_devnode(struct device *dev, mode_t *mode)
1184 {
1185 	struct gendisk *disk = dev_to_disk(dev);
1186 
1187 	if (disk->devnode)
1188 		return disk->devnode(disk, mode);
1189 	return NULL;
1190 }
1191 
1192 static struct device_type disk_type = {
1193 	.name		= "disk",
1194 	.groups		= disk_attr_groups,
1195 	.release	= disk_release,
1196 	.devnode	= block_devnode,
1197 };
1198 
1199 #ifdef CONFIG_PROC_FS
1200 /*
1201  * aggregate disk stat collector.  Uses the same stats that the sysfs
1202  * entries do, above, but makes them available through one seq_file.
1203  *
1204  * The output looks suspiciously like /proc/partitions with a bunch of
1205  * extra fields.
1206  */
1207 static int diskstats_show(struct seq_file *seqf, void *v)
1208 {
1209 	struct gendisk *gp = v;
1210 	struct disk_part_iter piter;
1211 	struct hd_struct *hd;
1212 	char buf[BDEVNAME_SIZE];
1213 	int cpu;
1214 
1215 	/*
1216 	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
1217 		seq_puts(seqf,	"major minor name"
1218 				"     rio rmerge rsect ruse wio wmerge "
1219 				"wsect wuse running use aveq"
1220 				"\n\n");
1221 	*/
1222 
1223 	disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
1224 	while ((hd = disk_part_iter_next(&piter))) {
1225 		cpu = part_stat_lock();
1226 		part_round_stats(cpu, hd);
1227 		part_stat_unlock();
1228 		seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
1229 			   "%u %lu %lu %lu %u %u %u %u\n",
1230 			   MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
1231 			   disk_name(gp, hd->partno, buf),
1232 			   part_stat_read(hd, ios[READ]),
1233 			   part_stat_read(hd, merges[READ]),
1234 			   part_stat_read(hd, sectors[READ]),
1235 			   jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
1236 			   part_stat_read(hd, ios[WRITE]),
1237 			   part_stat_read(hd, merges[WRITE]),
1238 			   part_stat_read(hd, sectors[WRITE]),
1239 			   jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
1240 			   part_in_flight(hd),
1241 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
1242 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
1243 			);
1244 	}
1245 	disk_part_iter_exit(&piter);
1246 
1247 	return 0;
1248 }
1249 
1250 static const struct seq_operations diskstats_op = {
1251 	.start	= disk_seqf_start,
1252 	.next	= disk_seqf_next,
1253 	.stop	= disk_seqf_stop,
1254 	.show	= diskstats_show
1255 };
1256 
1257 static int diskstats_open(struct inode *inode, struct file *file)
1258 {
1259 	return seq_open(file, &diskstats_op);
1260 }
1261 
1262 static const struct file_operations proc_diskstats_operations = {
1263 	.open		= diskstats_open,
1264 	.read		= seq_read,
1265 	.llseek		= seq_lseek,
1266 	.release	= seq_release,
1267 };
1268 
1269 static int __init proc_genhd_init(void)
1270 {
1271 	proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
1272 	proc_create("partitions", 0, NULL, &proc_partitions_operations);
1273 	return 0;
1274 }
1275 module_init(proc_genhd_init);
1276 #endif /* CONFIG_PROC_FS */
1277 
1278 dev_t blk_lookup_devt(const char *name, int partno)
1279 {
1280 	dev_t devt = MKDEV(0, 0);
1281 	struct class_dev_iter iter;
1282 	struct device *dev;
1283 
1284 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1285 	while ((dev = class_dev_iter_next(&iter))) {
1286 		struct gendisk *disk = dev_to_disk(dev);
1287 		struct hd_struct *part;
1288 
1289 		if (strcmp(dev_name(dev), name))
1290 			continue;
1291 
1292 		if (partno < disk->minors) {
1293 			/* We need to return the right devno, even
1294 			 * if the partition doesn't exist yet.
1295 			 */
1296 			devt = MKDEV(MAJOR(dev->devt),
1297 				     MINOR(dev->devt) + partno);
1298 			break;
1299 		}
1300 		part = disk_get_part(disk, partno);
1301 		if (part) {
1302 			devt = part_devt(part);
1303 			disk_put_part(part);
1304 			break;
1305 		}
1306 		disk_put_part(part);
1307 	}
1308 	class_dev_iter_exit(&iter);
1309 	return devt;
1310 }
1311 EXPORT_SYMBOL(blk_lookup_devt);
1312 
1313 struct gendisk *alloc_disk(int minors)
1314 {
1315 	return alloc_disk_node(minors, -1);
1316 }
1317 EXPORT_SYMBOL(alloc_disk);
1318 
1319 struct gendisk *alloc_disk_node(int minors, int node_id)
1320 {
1321 	struct gendisk *disk;
1322 
1323 	disk = kmalloc_node(sizeof(struct gendisk),
1324 				GFP_KERNEL | __GFP_ZERO, node_id);
1325 	if (disk) {
1326 		if (!init_part_stats(&disk->part0)) {
1327 			kfree(disk);
1328 			return NULL;
1329 		}
1330 		disk->node_id = node_id;
1331 		if (disk_expand_part_tbl(disk, 0)) {
1332 			free_part_stats(&disk->part0);
1333 			kfree(disk);
1334 			return NULL;
1335 		}
1336 		disk->part_tbl->part[0] = &disk->part0;
1337 
1338 		hd_ref_init(&disk->part0);
1339 
1340 		disk->minors = minors;
1341 		rand_initialize_disk(disk);
1342 		disk_to_dev(disk)->class = &block_class;
1343 		disk_to_dev(disk)->type = &disk_type;
1344 		device_initialize(disk_to_dev(disk));
1345 	}
1346 	return disk;
1347 }
1348 EXPORT_SYMBOL(alloc_disk_node);
1349 
1350 struct kobject *get_disk(struct gendisk *disk)
1351 {
1352 	struct module *owner;
1353 	struct kobject *kobj;
1354 
1355 	if (!disk->fops)
1356 		return NULL;
1357 	owner = disk->fops->owner;
1358 	if (owner && !try_module_get(owner))
1359 		return NULL;
1360 	kobj = kobject_get(&disk_to_dev(disk)->kobj);
1361 	if (kobj == NULL) {
1362 		module_put(owner);
1363 		return NULL;
1364 	}
1365 	return kobj;
1366 
1367 }
1368 
1369 EXPORT_SYMBOL(get_disk);
1370 
1371 void put_disk(struct gendisk *disk)
1372 {
1373 	if (disk)
1374 		kobject_put(&disk_to_dev(disk)->kobj);
1375 }
1376 
1377 EXPORT_SYMBOL(put_disk);
1378 
1379 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1380 {
1381 	char event[] = "DISK_RO=1";
1382 	char *envp[] = { event, NULL };
1383 
1384 	if (!ro)
1385 		event[8] = '0';
1386 	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1387 }
1388 
1389 void set_device_ro(struct block_device *bdev, int flag)
1390 {
1391 	bdev->bd_part->policy = flag;
1392 }
1393 
1394 EXPORT_SYMBOL(set_device_ro);
1395 
1396 void set_disk_ro(struct gendisk *disk, int flag)
1397 {
1398 	struct disk_part_iter piter;
1399 	struct hd_struct *part;
1400 
1401 	if (disk->part0.policy != flag) {
1402 		set_disk_ro_uevent(disk, flag);
1403 		disk->part0.policy = flag;
1404 	}
1405 
1406 	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
1407 	while ((part = disk_part_iter_next(&piter)))
1408 		part->policy = flag;
1409 	disk_part_iter_exit(&piter);
1410 }
1411 
1412 EXPORT_SYMBOL(set_disk_ro);
1413 
1414 int bdev_read_only(struct block_device *bdev)
1415 {
1416 	if (!bdev)
1417 		return 0;
1418 	return bdev->bd_part->policy;
1419 }
1420 
1421 EXPORT_SYMBOL(bdev_read_only);
1422 
1423 int invalidate_partition(struct gendisk *disk, int partno)
1424 {
1425 	int res = 0;
1426 	struct block_device *bdev = bdget_disk(disk, partno);
1427 	if (bdev) {
1428 		fsync_bdev(bdev);
1429 		res = __invalidate_device(bdev, true);
1430 		bdput(bdev);
1431 	}
1432 	return res;
1433 }
1434 
1435 EXPORT_SYMBOL(invalidate_partition);
1436 
1437 /*
1438  * Disk events - monitor disk events like media change and eject request.
1439  */
1440 struct disk_events {
1441 	struct list_head	node;		/* all disk_event's */
1442 	struct gendisk		*disk;		/* the associated disk */
1443 	spinlock_t		lock;
1444 
1445 	struct mutex		block_mutex;	/* protects blocking */
1446 	int			block;		/* event blocking depth */
1447 	unsigned int		pending;	/* events already sent out */
1448 	unsigned int		clearing;	/* events being cleared */
1449 
1450 	long			poll_msecs;	/* interval, -1 for default */
1451 	struct delayed_work	dwork;
1452 };
1453 
1454 static const char *disk_events_strs[] = {
1455 	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
1456 	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
1457 };
1458 
1459 static char *disk_uevents[] = {
1460 	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
1461 	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
1462 };
1463 
1464 /* list of all disk_events */
1465 static DEFINE_MUTEX(disk_events_mutex);
1466 static LIST_HEAD(disk_events);
1467 
1468 /* disable in-kernel polling by default */
1469 static unsigned long disk_events_dfl_poll_msecs	= 0;
1470 
1471 static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1472 {
1473 	struct disk_events *ev = disk->ev;
1474 	long intv_msecs = 0;
1475 
1476 	/*
1477 	 * If device-specific poll interval is set, always use it.  If
1478 	 * the default is being used, poll iff there are events which
1479 	 * can't be monitored asynchronously.
1480 	 */
1481 	if (ev->poll_msecs >= 0)
1482 		intv_msecs = ev->poll_msecs;
1483 	else if (disk->events & ~disk->async_events)
1484 		intv_msecs = disk_events_dfl_poll_msecs;
1485 
1486 	return msecs_to_jiffies(intv_msecs);
1487 }
1488 
1489 /**
1490  * disk_block_events - block and flush disk event checking
1491  * @disk: disk to block events for
1492  *
1493  * On return from this function, it is guaranteed that event checking
1494  * isn't in progress and won't happen until unblocked by
1495  * disk_unblock_events().  Events blocking is counted and the actual
1496  * unblocking happens after the matching number of unblocks are done.
1497  *
1498  * Note that this intentionally does not block event checking from
1499  * disk_clear_events().
1500  *
1501  * CONTEXT:
1502  * Might sleep.
1503  */
1504 void disk_block_events(struct gendisk *disk)
1505 {
1506 	struct disk_events *ev = disk->ev;
1507 	unsigned long flags;
1508 	bool cancel;
1509 
1510 	if (!ev)
1511 		return;
1512 
1513 	/*
1514 	 * Outer mutex ensures that the first blocker completes canceling
1515 	 * the event work before further blockers are allowed to finish.
1516 	 */
1517 	mutex_lock(&ev->block_mutex);
1518 
1519 	spin_lock_irqsave(&ev->lock, flags);
1520 	cancel = !ev->block++;
1521 	spin_unlock_irqrestore(&ev->lock, flags);
1522 
1523 	if (cancel)
1524 		cancel_delayed_work_sync(&disk->ev->dwork);
1525 
1526 	mutex_unlock(&ev->block_mutex);
1527 }
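
/*
 * Example (editorial sketch): blocking nests like a lock and must be
 * balanced.  A caller that needs event checking quiesced around some
 * operation would do:
 *
 *	disk_block_events(disk);
 *	... operation that must not race with the event work ...
 *	disk_unblock_events(disk);
 */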
1528 
1529 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
1530 {
1531 	struct disk_events *ev = disk->ev;
1532 	unsigned long intv;
1533 	unsigned long flags;
1534 
1535 	spin_lock_irqsave(&ev->lock, flags);
1536 
1537 	if (WARN_ON_ONCE(ev->block <= 0))
1538 		goto out_unlock;
1539 
1540 	if (--ev->block)
1541 		goto out_unlock;
1542 
1543 	/*
1544 	 * Not exactly a latency critical operation, set poll timer
1545 	 * slack to 25% and kick event check.
1546 	 */
1547 	intv = disk_events_poll_jiffies(disk);
1548 	set_timer_slack(&ev->dwork.timer, intv / 4);
1549 	if (check_now)
1550 		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1551 	else if (intv)
1552 		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
1553 out_unlock:
1554 	spin_unlock_irqrestore(&ev->lock, flags);
1555 }
1556 
1557 /**
1558  * disk_unblock_events - unblock disk event checking
1559  * @disk: disk to unblock events for
1560  *
1561  * Undo disk_block_events().  When the block count reaches zero, it
1562  * starts events polling if configured.
1563  *
1564  * CONTEXT:
1565  * Don't care.  Safe to call from irq context.
1566  */
1567 void disk_unblock_events(struct gendisk *disk)
1568 {
1569 	if (disk->ev)
1570 		__disk_unblock_events(disk, false);
1571 }
1572 
1573 /**
1574  * disk_flush_events - schedule immediate event checking and flushing
1575  * @disk: disk to check and flush events for
1576  * @mask: events to flush
1577  *
1578  * Schedule immediate event checking on @disk if not blocked.  Events in
1579  * @mask are scheduled to be cleared from the driver.  Note that this
1580  * doesn't clear the events from @disk->ev.
1581  *
1582  * CONTEXT:
1583  * If @mask is non-zero must be called with bdev->bd_mutex held.
1584  */
1585 void disk_flush_events(struct gendisk *disk, unsigned int mask)
1586 {
1587 	struct disk_events *ev = disk->ev;
1588 
1589 	if (!ev)
1590 		return;
1591 
1592 	spin_lock_irq(&ev->lock);
1593 	ev->clearing |= mask;
1594 	if (!ev->block) {
1595 		cancel_delayed_work(&ev->dwork);
1596 		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1597 	}
1598 	spin_unlock_irq(&ev->lock);
1599 }
1600 
1601 /**
1602  * disk_clear_events - synchronously check, clear and return pending events
1603  * @disk: disk to fetch and clear events from
1604  * @mask: mask of events to be fetched and cleared
1605  *
1606  * Disk events are synchronously checked and pending events in @mask
1607  * are cleared and returned.  This ignores the block count.
1608  *
1609  * CONTEXT:
1610  * Might sleep.
1611  */
1612 unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1613 {
1614 	const struct block_device_operations *bdops = disk->fops;
1615 	struct disk_events *ev = disk->ev;
1616 	unsigned int pending;
1617 
1618 	if (!ev) {
1619 		/* for drivers still using the old ->media_changed method */
1620 		if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
1621 		    bdops->media_changed && bdops->media_changed(disk))
1622 			return DISK_EVENT_MEDIA_CHANGE;
1623 		return 0;
1624 	}
1625 
1626 	/* tell the workfn about the events being cleared */
1627 	spin_lock_irq(&ev->lock);
1628 	ev->clearing |= mask;
1629 	spin_unlock_irq(&ev->lock);
1630 
1631 	/* unconditionally schedule event check and wait for it to finish */
1632 	disk_block_events(disk);
1633 	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1634 	flush_delayed_work(&ev->dwork);
1635 	__disk_unblock_events(disk, false);
1636 
1637 	/* then, fetch and clear pending events */
1638 	spin_lock_irq(&ev->lock);
1639 	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
1640 	pending = ev->pending & mask;
1641 	ev->pending &= ~mask;
1642 	spin_unlock_irq(&ev->lock);
1643 
1644 	return pending;
1645 }
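
/*
 * Example (editorial sketch): a typical consumer asks only for the events
 * it cares about and reacts to whatever was pending:
 *
 *	unsigned int events;
 *
 *	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE);
 *	if (events & DISK_EVENT_MEDIA_CHANGE)
 *		... revalidate the medium ...
 */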
1646 
1647 static void disk_events_workfn(struct work_struct *work)
1648 {
1649 	struct delayed_work *dwork = to_delayed_work(work);
1650 	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
1651 	struct gendisk *disk = ev->disk;
1652 	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
1653 	unsigned int clearing = ev->clearing;
1654 	unsigned int events;
1655 	unsigned long intv;
1656 	int nr_events = 0, i;
1657 
1658 	/* check events */
1659 	events = disk->fops->check_events(disk, clearing);
1660 
1661 	/* accumulate pending events and schedule next poll if necessary */
1662 	spin_lock_irq(&ev->lock);
1663 
1664 	events &= ~ev->pending;
1665 	ev->pending |= events;
1666 	ev->clearing &= ~clearing;
1667 
1668 	intv = disk_events_poll_jiffies(disk);
1669 	if (!ev->block && intv)
1670 		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
1671 
1672 	spin_unlock_irq(&ev->lock);
1673 
1674 	/*
1675 	 * Tell userland about new events.  Only the events listed in
1676 	 * @disk->events are reported.  Unlisted events are processed the
1677 	 * same internally but never get reported to userland.
1678 	 */
1679 	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
1680 		if (events & disk->events & (1 << i))
1681 			envp[nr_events++] = disk_uevents[i];
1682 
1683 	if (nr_events)
1684 		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
1685 }
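
/*
 * Example (editorial sketch): the ->check_events() method polled above is
 * supplied by the driver through its block_device_operations.  A
 * hypothetical implementation that can detect media changes might look
 * like:
 *
 *	static unsigned int mydrv_check_events(struct gendisk *disk,
 *					       unsigned int clearing)
 *	{
 *		struct mydrv *md = disk->private_data;
 *
 *		return mydrv_media_changed(md) ? DISK_EVENT_MEDIA_CHANGE : 0;
 *	}
 *
 * where "struct mydrv" and mydrv_media_changed() are driver-specific
 * placeholders.
 */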
1686 
1687 /*
1688  * A disk events enabled device has the following sysfs nodes under
1689  * its /sys/block/X/ directory.
1690  *
1691  * events		: list of all supported events
1692  * events_async		: list of events which can be detected w/o polling
1693  * events_poll_msecs	: polling interval, 0: disable, -1: system default
1694  */
1695 static ssize_t __disk_events_show(unsigned int events, char *buf)
1696 {
1697 	const char *delim = "";
1698 	ssize_t pos = 0;
1699 	int i;
1700 
1701 	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
1702 		if (events & (1 << i)) {
1703 			pos += sprintf(buf + pos, "%s%s",
1704 				       delim, disk_events_strs[i]);
1705 			delim = " ";
1706 		}
1707 	if (pos)
1708 		pos += sprintf(buf + pos, "\n");
1709 	return pos;
1710 }
1711 
1712 static ssize_t disk_events_show(struct device *dev,
1713 				struct device_attribute *attr, char *buf)
1714 {
1715 	struct gendisk *disk = dev_to_disk(dev);
1716 
1717 	return __disk_events_show(disk->events, buf);
1718 }
1719 
1720 static ssize_t disk_events_async_show(struct device *dev,
1721 				      struct device_attribute *attr, char *buf)
1722 {
1723 	struct gendisk *disk = dev_to_disk(dev);
1724 
1725 	return __disk_events_show(disk->async_events, buf);
1726 }
1727 
1728 static ssize_t disk_events_poll_msecs_show(struct device *dev,
1729 					   struct device_attribute *attr,
1730 					   char *buf)
1731 {
1732 	struct gendisk *disk = dev_to_disk(dev);
1733 
1734 	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
1735 }
1736 
1737 static ssize_t disk_events_poll_msecs_store(struct device *dev,
1738 					    struct device_attribute *attr,
1739 					    const char *buf, size_t count)
1740 {
1741 	struct gendisk *disk = dev_to_disk(dev);
1742 	long intv;
1743 
1744 	if (!count || !sscanf(buf, "%ld", &intv))
1745 		return -EINVAL;
1746 
1747 	if (intv < 0 && intv != -1)
1748 		return -EINVAL;
1749 
1750 	disk_block_events(disk);
1751 	disk->ev->poll_msecs = intv;
1752 	__disk_unblock_events(disk, true);
1753 
1754 	return count;
1755 }
1756 
1757 static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
1758 static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
1759 static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
1760 			 disk_events_poll_msecs_show,
1761 			 disk_events_poll_msecs_store);
1762 
1763 static const struct attribute *disk_events_attrs[] = {
1764 	&dev_attr_events.attr,
1765 	&dev_attr_events_async.attr,
1766 	&dev_attr_events_poll_msecs.attr,
1767 	NULL,
1768 };
1769 
1770 /*
1771  * The default polling interval can be specified by the kernel
1772  * parameter block.events_dfl_poll_msecs which defaults to 0
1773  * (disable).  This can also be modified at runtime by writing to
1774  * /sys/module/block/events_dfl_poll_msecs.
1775  */
1776 static int disk_events_set_dfl_poll_msecs(const char *val,
1777 					  const struct kernel_param *kp)
1778 {
1779 	struct disk_events *ev;
1780 	int ret;
1781 
1782 	ret = param_set_ulong(val, kp);
1783 	if (ret < 0)
1784 		return ret;
1785 
1786 	mutex_lock(&disk_events_mutex);
1787 
1788 	list_for_each_entry(ev, &disk_events, node)
1789 		disk_flush_events(ev->disk, 0);
1790 
1791 	mutex_unlock(&disk_events_mutex);
1792 
1793 	return 0;
1794 }
1795 
1796 static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
1797 	.set	= disk_events_set_dfl_poll_msecs,
1798 	.get	= param_get_ulong,
1799 };
1800 
1801 #undef MODULE_PARAM_PREFIX
1802 #define MODULE_PARAM_PREFIX	"block."
1803 
1804 module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
1805 		&disk_events_dfl_poll_msecs, 0644);
1806 
1807 /*
1808  * disk_{add|del|release}_events - initialize and destroy disk_events.
1809  */
1810 static void disk_add_events(struct gendisk *disk)
1811 {
1812 	struct disk_events *ev;
1813 
1814 	if (!disk->fops->check_events)
1815 		return;
1816 
1817 	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
1818 	if (!ev) {
1819 		pr_warn("%s: failed to initialize events\n", disk->disk_name);
1820 		return;
1821 	}
1822 
1823 	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
1824 			       disk_events_attrs) < 0) {
1825 		pr_warn("%s: failed to create sysfs files for events\n",
1826 			disk->disk_name);
1827 		kfree(ev);
1828 		return;
1829 	}
1830 
1831 	disk->ev = ev;
1832 
1833 	INIT_LIST_HEAD(&ev->node);
1834 	ev->disk = disk;
1835 	spin_lock_init(&ev->lock);
1836 	mutex_init(&ev->block_mutex);
1837 	ev->block = 1;
1838 	ev->poll_msecs = -1;
1839 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
1840 
1841 	mutex_lock(&disk_events_mutex);
1842 	list_add_tail(&ev->node, &disk_events);
1843 	mutex_unlock(&disk_events_mutex);
1844 
1845 	/*
1846 	 * Block count is initialized to 1 and the following initial
1847 	 * unblock kicks it into action.
1848 	 */
1849 	__disk_unblock_events(disk, true);
1850 }
1851 
1852 static void disk_del_events(struct gendisk *disk)
1853 {
1854 	if (!disk->ev)
1855 		return;
1856 
1857 	disk_block_events(disk);
1858 
1859 	mutex_lock(&disk_events_mutex);
1860 	list_del_init(&disk->ev->node);
1861 	mutex_unlock(&disk_events_mutex);
1862 
1863 	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
1864 }
1865 
1866 static void disk_release_events(struct gendisk *disk)
1867 {
1868 	/* the block count should be 1 from disk_del_events() */
1869 	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
1870 	kfree(disk->ev);
1871 }
1872