Lines matching refs: bdev (identifier cross-references in block/bdev.c)

38 	struct block_device bdev;  member
47 static inline struct inode *BD_INODE(struct block_device *bdev) in BD_INODE() argument
49 return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; in BD_INODE()
54 return &BDEV_I(inode)->bdev; in I_BDEV()
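The three accessors above are inverse views of one allocation: struct bdev_inode embeds both the block_device and its VFS inode, so container_of() converts between them without storing a back pointer. A minimal sketch of the pattern (struct layout abbreviated to the two embedded members):

    struct bdev_inode {
    	struct block_device bdev;	/* embedded, not a pointer */
    	struct inode vfs_inode;		/* embedded VFS inode */
    };

    /* inode -> containing bdev_inode -> embedded bdev */
    static inline struct bdev_inode *BDEV_I(struct inode *inode)
    {
    	return container_of(inode, struct bdev_inode, vfs_inode);
    }

    /* bdev -> containing bdev_inode -> embedded inode (the reverse) */
    static inline struct inode *BD_INODE(struct block_device *bdev)
    {
    	return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
    }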
64 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
66 struct inode *inode = BD_INODE(bdev); in bdev_write_inode()
76 bdev, ret); in bdev_write_inode()
83 static void kill_bdev(struct block_device *bdev) in kill_bdev() argument
85 struct address_space *mapping = bdev->bd_mapping; in kill_bdev()
95 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
97 struct address_space *mapping = bdev->bd_mapping; in invalidate_bdev()
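kill_bdev() and invalidate_bdev() differ in aggressiveness: the former truncates every page cached for the device, dirty or not, so callers sync first when the data matters; the latter only evicts clean, unmapped pages and is safe while the device stays in use. A hedged sketch of the common sync-then-drop pattern (helper name hypothetical):

    /* hypothetical helper: flush, then drop the device's page cache */
    static int my_drop_bdev_cache(struct block_device *bdev)
    {
    	int ret = sync_blockdev(bdev);	/* write back dirty pages first */

    	if (ret)
    		return ret;
    	invalidate_bdev(bdev);		/* discard what is now clean */
    	return 0;
    }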
111 int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, in truncate_bdev_range() argument
120 int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); in truncate_bdev_range()
125 truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); in truncate_bdev_range()
127 bd_abort_claiming(bdev, truncate_bdev_range); in truncate_bdev_range()
135 return invalidate_inode_pages2_range(bdev->bd_mapping, in truncate_bdev_range()
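truncate_bdev_range() is the page-cache eviction step used before punching a range: when the caller did not open the device exclusively, it briefly claims it, using the function address itself as the holder token, so no new opener races the truncate, then aborts the claim afterwards. A hedged caller-side sketch (my_punch_range() is hypothetical):

    /* hypothetical: evict cache over a range before discarding it */
    static int my_punch_range(struct block_device *bdev, blk_mode_t mode,
    			  loff_t start, loff_t end)
    {
    	/* claims bdev internally if mode lacks BLK_OPEN_EXCL */
    	int ret = truncate_bdev_range(bdev, mode, start, end);

    	if (ret)
    		return ret;
    	/* ... issue the actual discard/zero-out here ... */
    	return 0;
    }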
140 static void set_init_blocksize(struct block_device *bdev) in set_init_blocksize() argument
142 unsigned int bsize = bdev_logical_block_size(bdev); in set_init_blocksize()
143 loff_t size = i_size_read(BD_INODE(bdev)); in set_init_blocksize()
150 BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
156 struct block_device *bdev = I_BDEV(inode); in set_blocksize() local
162 if (size < bdev_logical_block_size(bdev)) in set_blocksize()
170 sync_blockdev(bdev); in set_blocksize()
172 kill_bdev(bdev); in set_blocksize()
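set_blocksize() rejects anything below the device's logical block size and, when the size really changes, syncs and kills the old cache before switching i_blkbits. Filesystems normally reach it through sb_set_blocksize(); a hedged fill_super-time sketch (assumes the file-based set_blocksize() signature implied by the fragments above):

    /* hypothetical mount step: ask for a 4 KiB soft block size */
    static int my_pick_blocksize(struct super_block *sb)
    {
    	/* sb_set_blocksize() returns 0 if the size is not acceptable */
    	if (!sb_set_blocksize(sb, 4096))
    		return -EINVAL;
    	return 0;
    }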
202 int sync_blockdev_nowait(struct block_device *bdev) in sync_blockdev_nowait() argument
204 if (!bdev) in sync_blockdev_nowait()
206 return filemap_flush(bdev->bd_mapping); in sync_blockdev_nowait()
214 int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
216 if (!bdev) in sync_blockdev()
218 return filemap_write_and_wait(bdev->bd_mapping); in sync_blockdev()
222 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) in sync_blockdev_range() argument
224 return filemap_write_and_wait_range(bdev->bd_mapping, in sync_blockdev_range()
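The three sync helpers map directly onto the filemap layer: sync_blockdev_nowait() only kicks writeback (filemap_flush()), sync_blockdev() also waits for it, and sync_blockdev_range() waits on a byte span; the first two return 0 for a NULL bdev. A hedged sketch of the ranged variant (helper name hypothetical):

    /* hypothetical: persist one metadata region before reusing it */
    static int my_sync_meta(struct block_device *bdev, loff_t off, loff_t len)
    {
    	/* lend is inclusive, hence the -1 */
    	return sync_blockdev_range(bdev, off, off + len - 1);
    }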
243 int bdev_freeze(struct block_device *bdev) in bdev_freeze() argument
247 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
249 if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) { in bdev_freeze()
250 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
254 mutex_lock(&bdev->bd_holder_lock); in bdev_freeze()
255 if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) { in bdev_freeze()
256 error = bdev->bd_holder_ops->freeze(bdev); in bdev_freeze()
257 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_freeze()
259 mutex_unlock(&bdev->bd_holder_lock); in bdev_freeze()
260 error = sync_blockdev(bdev); in bdev_freeze()
264 atomic_dec(&bdev->bd_fsfreeze_count); in bdev_freeze()
266 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
279 int bdev_thaw(struct block_device *bdev) in bdev_thaw() argument
283 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
289 nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count); in bdev_thaw()
297 mutex_lock(&bdev->bd_holder_lock); in bdev_thaw()
298 if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) { in bdev_thaw()
299 error = bdev->bd_holder_ops->thaw(bdev); in bdev_thaw()
300 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_thaw()
302 mutex_unlock(&bdev->bd_holder_lock); in bdev_thaw()
306 atomic_inc(&bdev->bd_fsfreeze_count); in bdev_thaw()
308 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
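bdev_freeze() and bdev_thaw() nest through bd_fsfreeze_count, so only the first freeze and the last thaw do real work. When a holder registered freeze/thaw ops the filesystem is frozen through them; the lockdep_assert_not_held() lines record the contract that those callbacks drop bd_holder_lock themselves. Without holder ops the device is merely synced. A hedged sketch of quiescing a device around a risky operation (helper name hypothetical):

    /* hypothetical: quiesce I/O around a device reconfiguration */
    static int my_reconfigure(struct block_device *bdev)
    {
    	int ret = bdev_freeze(bdev);	/* freeze fs, or sync a raw bdev */

    	if (ret)
    		return ret;
    	/* ... reconfigure while no new writes reach the device ... */
    	return bdev_thaw(bdev);		/* matching unfreeze */
    }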
326 memset(&ei->bdev, 0, sizeof(ei->bdev)); in bdev_alloc_inode()
328 if (security_bdev_alloc(&ei->bdev)) { in bdev_alloc_inode()
337 struct block_device *bdev = I_BDEV(inode); in bdev_free_inode() local
339 free_percpu(bdev->bd_stats); in bdev_free_inode()
340 kfree(bdev->bd_meta_info); in bdev_free_inode()
341 security_bdev_free(bdev); in bdev_free_inode()
343 if (!bdev_is_partition(bdev)) { in bdev_free_inode()
344 if (bdev->bd_disk && bdev->bd_disk->bdi) in bdev_free_inode()
345 bdi_put(bdev->bd_disk->bdi); in bdev_free_inode()
346 kfree(bdev->bd_disk); in bdev_free_inode()
349 if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) in bdev_free_inode()
350 blk_free_ext_minor(MINOR(bdev->bd_dev)); in bdev_free_inode()
416 struct block_device *bdev; in bdev_alloc() local
427 bdev = I_BDEV(inode); in bdev_alloc()
428 mutex_init(&bdev->bd_fsfreeze_mutex); in bdev_alloc()
429 spin_lock_init(&bdev->bd_size_lock); in bdev_alloc()
430 mutex_init(&bdev->bd_holder_lock); in bdev_alloc()
431 atomic_set(&bdev->__bd_flags, partno); in bdev_alloc()
432 bdev->bd_mapping = &inode->i_data; in bdev_alloc()
433 bdev->bd_queue = disk->queue; in bdev_alloc()
435 bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); in bdev_alloc()
436 bdev->bd_stats = alloc_percpu(struct disk_stats); in bdev_alloc()
437 if (!bdev->bd_stats) { in bdev_alloc()
441 bdev->bd_disk = disk; in bdev_alloc()
442 return bdev; in bdev_alloc()
445 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) in bdev_set_nr_sectors() argument
447 spin_lock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
448 i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); in bdev_set_nr_sectors()
449 bdev->bd_nr_sectors = sectors; in bdev_set_nr_sectors()
450 spin_unlock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
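bdev_set_nr_sectors() updates bd_nr_sectors and the backing inode's i_size together under bd_size_lock, so readers never see the two disagree. Drivers do not usually call it directly; a hedged sketch assuming the conventional set_capacity() entry point on the gendisk:

    /* hypothetical driver resize path */
    static void my_resize(struct gendisk *disk, sector_t new_sectors)
    {
    	/* reaches bdev_set_nr_sectors() on disk->part0 internally */
    	set_capacity(disk, new_sectors);
    }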
453 void bdev_add(struct block_device *bdev, dev_t dev) in bdev_add() argument
455 struct inode *inode = BD_INODE(bdev); in bdev_add()
456 if (bdev_stable_writes(bdev)) in bdev_add()
457 mapping_set_stable_writes(bdev->bd_mapping); in bdev_add()
458 bdev->bd_dev = dev; in bdev_add()
464 void bdev_unhash(struct block_device *bdev) in bdev_unhash() argument
466 remove_inode_hash(BD_INODE(bdev)); in bdev_unhash()
469 void bdev_drop(struct block_device *bdev) in bdev_drop() argument
471 iput(BD_INODE(bdev)); in bdev_drop()
498 static bool bd_may_claim(struct block_device *bdev, void *holder, in bd_may_claim() argument
501 struct block_device *whole = bdev_whole(bdev); in bd_may_claim()
505 if (bdev->bd_holder) { in bd_may_claim()
509 if (bdev->bd_holder == holder) { in bd_may_claim()
510 if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) in bd_may_claim()
521 if (whole != bdev && in bd_may_claim()
540 int bd_prepare_to_claim(struct block_device *bdev, void *holder, in bd_prepare_to_claim() argument
543 struct block_device *whole = bdev_whole(bdev); in bd_prepare_to_claim()
550 if (!bd_may_claim(bdev, holder, hops)) { in bd_prepare_to_claim()
592 static void bd_finish_claiming(struct block_device *bdev, void *holder, in bd_finish_claiming() argument
595 struct block_device *whole = bdev_whole(bdev); in bd_finish_claiming()
598 BUG_ON(!bd_may_claim(bdev, holder, hops)); in bd_finish_claiming()
605 bdev->bd_holders++; in bd_finish_claiming()
606 mutex_lock(&bdev->bd_holder_lock); in bd_finish_claiming()
607 bdev->bd_holder = holder; in bd_finish_claiming()
608 bdev->bd_holder_ops = hops; in bd_finish_claiming()
609 mutex_unlock(&bdev->bd_holder_lock); in bd_finish_claiming()
623 void bd_abort_claiming(struct block_device *bdev, void *holder) in bd_abort_claiming() argument
626 bd_clear_claiming(bdev_whole(bdev), holder); in bd_abort_claiming()
631 static void bd_end_claim(struct block_device *bdev, void *holder) in bd_end_claim() argument
633 struct block_device *whole = bdev_whole(bdev); in bd_end_claim()
641 WARN_ON_ONCE(bdev->bd_holder != holder); in bd_end_claim()
642 WARN_ON_ONCE(--bdev->bd_holders < 0); in bd_end_claim()
644 if (!bdev->bd_holders) { in bd_end_claim()
645 mutex_lock(&bdev->bd_holder_lock); in bd_end_claim()
646 bdev->bd_holder = NULL; in bd_end_claim()
647 bdev->bd_holder_ops = NULL; in bd_end_claim()
648 mutex_unlock(&bdev->bd_holder_lock); in bd_end_claim()
649 if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) in bd_end_claim()
661 disk_unblock_events(bdev->bd_disk); in bd_end_claim()
662 bdev_clear_flag(bdev, BD_WRITE_HOLDER); in bd_end_claim()
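The claiming protocol has four stages visible above: bd_prepare_to_claim() stakes a claim on the whole device (failing fast if bd_may_claim() reports a conflicting holder), bd_finish_claiming() installs bd_holder/bd_holder_ops under bd_holder_lock, bd_abort_claiming() backs a pending claim out, and bd_end_claim() tears everything down when the last holder goes away. bd_finish_claiming() is static, so external code completes a claim by passing the holder into an open; the exported prepare/abort pair alone gives a temporary exclusion window, exactly as truncate_bdev_range() uses it. A hedged sketch of that pattern (helper name hypothetical):

    /* hypothetical: exclude other claimants for a short critical section */
    static int my_exclusive_window(struct block_device *bdev, void *holder)
    {
    	int ret = bd_prepare_to_claim(bdev, holder, NULL);

    	if (ret)
    		return ret;		/* -EBUSY: conflicting holder */
    	/* ... work that must not race an exclusive open ... */
    	bd_abort_claiming(bdev, holder);
    	return 0;
    }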
666 static void blkdev_flush_mapping(struct block_device *bdev) in blkdev_flush_mapping() argument
668 WARN_ON_ONCE(bdev->bd_holders); in blkdev_flush_mapping()
669 sync_blockdev(bdev); in blkdev_flush_mapping()
670 kill_bdev(bdev); in blkdev_flush_mapping()
671 bdev_write_inode(bdev); in blkdev_flush_mapping()
674 static void blkdev_put_whole(struct block_device *bdev) in blkdev_put_whole() argument
676 if (atomic_dec_and_test(&bdev->bd_openers)) in blkdev_put_whole()
677 blkdev_flush_mapping(bdev); in blkdev_put_whole()
678 if (bdev->bd_disk->fops->release) in blkdev_put_whole()
679 bdev->bd_disk->fops->release(bdev->bd_disk); in blkdev_put_whole()
682 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) in blkdev_get_whole() argument
684 struct gendisk *disk = bdev->bd_disk; in blkdev_get_whole()
698 if (!atomic_read(&bdev->bd_openers)) in blkdev_get_whole()
699 set_init_blocksize(bdev); in blkdev_get_whole()
700 atomic_inc(&bdev->bd_openers); in blkdev_get_whole()
708 blkdev_put_whole(bdev); in blkdev_get_whole()
778 struct block_device *bdev; in blkdev_get_no_open() local
793 bdev = &BDEV_I(inode)->bdev; in blkdev_get_no_open()
794 if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) in blkdev_get_no_open()
795 bdev = NULL; in blkdev_get_no_open()
797 return bdev; in blkdev_get_no_open()
800 void blkdev_put_no_open(struct block_device *bdev) in blkdev_put_no_open() argument
802 put_device(&bdev->bd_device); in blkdev_put_no_open()
805 static bool bdev_writes_blocked(struct block_device *bdev) in bdev_writes_blocked() argument
807 return bdev->bd_writers < 0; in bdev_writes_blocked()
810 static void bdev_block_writes(struct block_device *bdev) in bdev_block_writes() argument
812 bdev->bd_writers--; in bdev_block_writes()
815 static void bdev_unblock_writes(struct block_device *bdev) in bdev_unblock_writes() argument
817 bdev->bd_writers++; in bdev_unblock_writes()
820 static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode) in bdev_may_open() argument
825 if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev)) in bdev_may_open()
827 if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0) in bdev_may_open()
832 static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) in bdev_claim_write_access() argument
839 bdev_block_writes(bdev); in bdev_claim_write_access()
841 bdev->bd_writers++; in bdev_claim_write_access()
851 struct block_device *bdev; in bdev_yield_write_access() local
859 bdev = file_bdev(bdev_file); in bdev_yield_write_access()
862 bdev_unblock_writes(bdev); in bdev_yield_write_access()
864 bdev->bd_writers--; in bdev_yield_write_access()
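bd_writers is a signed counter with a sign convention: positive counts BLK_OPEN_WRITE openers, negative counts BLK_OPEN_RESTRICT_WRITES openers that have blocked writes, and the two states are mutually exclusive. bdev_may_open() enforces that before the counters move, and bdev_yield_write_access() undoes whichever direction the open took. A restatement of the admission rule as a hedged standalone predicate (my_may_open() is hypothetical):

    /* sketch: the admission rule the bd_writers counter implies */
    static bool my_may_open(int bd_writers, blk_mode_t mode)
    {
    	if ((mode & BLK_OPEN_WRITE) && bd_writers < 0)
    		return false;	/* writes are currently blocked */
    	if ((mode & BLK_OPEN_RESTRICT_WRITES) && bd_writers > 0)
    		return false;	/* cannot block: writers already exist */
    	return true;
    }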
884 int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, in bdev_open() argument
888 struct gendisk *disk = bdev->bd_disk; in bdev_open()
893 ret = bd_prepare_to_claim(bdev, holder, hops); in bdev_open()
910 if (!bdev_may_open(bdev, mode)) in bdev_open()
912 if (bdev_is_partition(bdev)) in bdev_open()
913 ret = blkdev_get_part(bdev, mode); in bdev_open()
915 ret = blkdev_get_whole(bdev, mode); in bdev_open()
918 bdev_claim_write_access(bdev, mode); in bdev_open()
920 bd_finish_claiming(bdev, holder, hops); in bdev_open()
930 !bdev_test_flag(bdev, BD_WRITE_HOLDER) && in bdev_open()
932 bdev_set_flag(bdev, BD_WRITE_HOLDER); in bdev_open()
943 if (bdev_nowait(bdev)) in bdev_open()
947 bdev_file->f_mapping = bdev->bd_mapping; in bdev_open()
956 bd_abort_claiming(bdev, holder); in bdev_open()
996 struct block_device *bdev; in bdev_file_open_by_dev() local
1004 bdev = blkdev_get_no_open(dev); in bdev_file_open_by_dev()
1005 if (!bdev) in bdev_file_open_by_dev()
1009 bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), in bdev_file_open_by_dev()
1012 blkdev_put_no_open(bdev); in bdev_file_open_by_dev()
1015 ihold(BD_INODE(bdev)); in bdev_file_open_by_dev()
1017 ret = bdev_open(bdev, mode, holder, hops, bdev_file); in bdev_file_open_by_dev()
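bdev_file_open_by_dev() is the modern entry point for in-kernel users: it returns a struct file whose mapping is the bdev's, and file_bdev() recovers the block_device from it. A hedged caller-side sketch (helper name hypothetical, error handling abbreviated):

    /* hypothetical: open a device by dev_t and report its size */
    static int my_open_bdev(dev_t devt)
    {
    	struct file *bdev_file;

    	bdev_file = bdev_file_open_by_dev(devt, BLK_OPEN_READ, NULL, NULL);
    	if (IS_ERR(bdev_file))
    		return PTR_ERR(bdev_file);

    	pr_info("size: %llu bytes\n",
    		(unsigned long long)bdev_nr_bytes(file_bdev(bdev_file)));

    	fput(bdev_file);	/* last ref runs the release path below */
    	return 0;
    }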
1054 struct block_device *bdev = file_bdev(bdev_file); in bd_yield_claim() local
1057 lockdep_assert_held(&bdev->bd_disk->open_mutex); in bd_yield_claim()
1063 bd_end_claim(bdev, holder); in bd_yield_claim()
1068 struct block_device *bdev = file_bdev(bdev_file); in bdev_release() local
1070 struct gendisk *disk = bdev->bd_disk; in bdev_release()
1083 if (atomic_read(&bdev->bd_openers) == 1) in bdev_release()
1084 sync_blockdev(bdev); in bdev_release()
1099 if (bdev_is_partition(bdev)) in bdev_release()
1100 blkdev_put_part(bdev); in bdev_release()
1102 blkdev_put_whole(bdev); in bdev_release()
1107 blkdev_put_no_open(bdev); in bdev_release()
1124 struct block_device *bdev = file_bdev(bdev_file); in bdev_fput() local
1125 struct gendisk *disk = bdev->bd_disk; in bdev_fput()
1196 void bdev_mark_dead(struct block_device *bdev, bool surprise) in bdev_mark_dead() argument
1198 mutex_lock(&bdev->bd_holder_lock); in bdev_mark_dead()
1199 if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) in bdev_mark_dead()
1200 bdev->bd_holder_ops->mark_dead(bdev, surprise); in bdev_mark_dead()
1202 mutex_unlock(&bdev->bd_holder_lock); in bdev_mark_dead()
1203 sync_blockdev(bdev); in bdev_mark_dead()
1206 invalidate_bdev(bdev); in bdev_mark_dead()
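bdev_mark_dead() gives an exclusive holder a device-gone notification before the caches are synced and invalidated; note the callback runs with bd_holder_lock held. A holder opts in by passing blk_holder_ops at open time; a hedged sketch (names hypothetical):

    /* hypothetical holder ops: learn about (surprise) removal */
    static void my_mark_dead(struct block_device *bdev, bool surprise)
    {
    	/* runs under bd_holder_lock, so keep this short */
    	pr_warn("%pg gone (%s)\n", bdev,
    		surprise ? "surprise" : "orderly");
    }

    static const struct blk_holder_ops my_holder_ops = {
    	.mark_dead	= my_mark_dead,
    };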
1223 struct block_device *bdev; in sync_bdevs() local
1244 bdev = I_BDEV(inode); in sync_bdevs()
1246 mutex_lock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1247 if (!atomic_read(&bdev->bd_openers)) { in sync_bdevs()
1260 mutex_unlock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1275 struct block_device *bdev; in bdev_statx() local
1288 bdev = blkdev_get_no_open(backing_inode->i_rdev); in bdev_statx()
1289 if (!bdev) in bdev_statx()
1293 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; in bdev_statx()
1294 stat->dio_offset_align = bdev_logical_block_size(bdev); in bdev_statx()
1298 if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { in bdev_statx()
1299 struct request_queue *bd_queue = bdev->bd_queue; in bdev_statx()
1306 blkdev_put_no_open(bdev); in bdev_statx()
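bdev_statx() is how statx(2) reports direct-I/O alignment (and, when asked, atomic-write limits) for a block device node. A hedged userspace sketch querying the DIO alignment fields (the device path is only an example):

    /* userspace sketch: query direct-I/O alignment of a device node */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
    	struct statx stx;

    	if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx))
    		return 1;
    	printf("dio mem align %u, dio offset align %u\n",
    	       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
    	return 0;
    }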
1315 unsigned int block_size(struct block_device *bdev) in block_size() argument
1317 return 1 << BD_INODE(bdev)->i_blkbits; in block_size()
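block_size() reports the soft block size derived from i_blkbits, i.e. whatever set_blocksize() last installed, which is distinct from the queue's logical block size floor. A short hedged contrast (helper name hypothetical):

    /* hypothetical: show soft vs logical block size of a device */
    static void my_report_sizes(struct block_device *bdev)
    {
    	pr_info("soft %u, logical %u\n",
    		block_size(bdev),			/* 1 << i_blkbits */
    		bdev_logical_block_size(bdev));		/* hardware floor */
    }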