Lines Matching full:md

(Identifier search results for `md` — the `struct mapped_device *` used throughout the device-mapper core, drivers/md/dm.c. Each entry gives the source line number, the matched line, and the enclosing function; the `argument` and `local` tags mark where `md` is a function parameter or a local variable. Only lines containing `md` are shown, so comments and multi-line statements appear as fragments.)

309 int dm_deleting_md(struct mapped_device *md)  in dm_deleting_md()  argument
311 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
316 struct mapped_device *md; in dm_blk_open() local
320 md = disk->private_data; in dm_blk_open()
321 if (!md) in dm_blk_open()
324 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
325 dm_deleting_md(md)) { in dm_blk_open()
326 md = NULL; in dm_blk_open()
330 dm_get(md); in dm_blk_open()
331 atomic_inc(&md->open_count); in dm_blk_open()
335 return md ? 0 : -ENXIO; in dm_blk_open()
340 struct mapped_device *md; in dm_blk_close() local
344 md = disk->private_data; in dm_blk_close()
345 if (WARN_ON(!md)) in dm_blk_close()
348 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
349 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
352 dm_put(md); in dm_blk_close()
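
dm_blk_open() and dm_blk_close() above implement a guarded-open pattern: an open fails with -ENXIO while DMF_FREEING or DMF_DELETING is set, and the last close of a device flagged DMF_DEFERRED_REMOVE triggers the remove that dm_lock_for_deletion() postponed. A minimal userspace analog of that gate (illustrative names, not the kernel API; the real code additionally holds a spinlock across the flag test and reference grab):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { F_FREEING = 1u << 0, F_DELETING = 1u << 1, F_DEFERRED_REMOVE = 1u << 2 };

    struct dev_gate {
        _Atomic unsigned int flags;
        atomic_int open_count;
        atomic_int holders;              /* dm_get()/dm_put() analog */
    };

    static bool gate_open(struct dev_gate *d)
    {
        if (atomic_load(&d->flags) & (F_FREEING | F_DELETING))
            return false;                /* -ENXIO in dm_blk_open() */
        atomic_fetch_add(&d->holders, 1);
        atomic_fetch_add(&d->open_count, 1);
        return true;
    }

    static void gate_close(struct dev_gate *d,
                           void (*deferred_remove)(struct dev_gate *))
    {
        if (atomic_fetch_sub(&d->open_count, 1) == 1 &&
            (atomic_load(&d->flags) & F_DEFERRED_REMOVE))
            deferred_remove(d);          /* the delete postponed while openers existed */
        atomic_fetch_sub(&d->holders, 1);
    }
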
357 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
359 return atomic_read(&md->open_count); in dm_open_count()
365 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
371 if (dm_open_count(md)) { in dm_lock_for_deletion()
374 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
375 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
378 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
385 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
391 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
394 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
408 struct mapped_device *md = disk->private_data; in dm_blk_getgeo() local
410 return dm_get_geometry(md, geo); in dm_blk_getgeo()
413 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl() argument
423 map = dm_get_live_table(md, srcu_idx); in dm_prepare_ioctl()
435 if (dm_suspended_md(md)) in dm_prepare_ioctl()
440 dm_put_live_table(md, *srcu_idx); in dm_prepare_ioctl()
448 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl() argument
450 dm_put_live_table(md, srcu_idx); in dm_unprepare_ioctl()
456 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
460 r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward); in dm_blk_ioctl()
483 dm_unprepare_ioctl(md, srcu_idx); in dm_blk_ioctl()
521 unlikely(dm_stats_used(&io->md->stats))) { in dm_io_acct()
529 dm_stats_account_io(&io->md->stats, bio_data_dir(bio), in dm_io_acct()
571 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) in alloc_io() argument
577 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); in alloc_io()
592 this_cpu_inc(*md->pending_io); in alloc_io()
594 io->md = md; in alloc_io()
598 if (blk_queue_io_stat(md->queue)) in alloc_io()
602 unlikely(dm_stats_used(&md->stats))) in alloc_io()
603 dm_stats_record_start(&md->stats, &io->stats_aux); in alloc_io()
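
alloc_io() gets its struct dm_io for free with the clone bio: the io_bs bio_set is created with front padding sized for the containing structures, so they are recovered by pointer arithmetic rather than a second allocation. A sketch of that recovery, mirroring dm.c's embedded layout where struct dm_target_io embeds the clone bio and struct dm_io embeds the tio:

    /* Sketch of the per-bio-data layout behind alloc_io()/alloc_tio(). */
    static inline struct dm_target_io *clone_to_tio(struct bio *clone)
    {
        return container_of(clone, struct dm_target_io, clone);
    }

    static inline struct dm_io *clone_to_io(struct bio *clone)
    {
        return container_of(clone_to_tio(clone), struct dm_io, tio);
    }
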
616 struct mapped_device *md = ci->io->md; in alloc_tio() local
627 &md->mempools->bs); in alloc_tio()
646 clone->bi_bdev = md->disk->part0; in alloc_tio()
648 bio_set_dev(clone, md->disk->part0); in alloc_tio()
669 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
673 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
674 bio_list_add(&md->deferred, bio); in queue_io()
675 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
676 queue_work(md->wq, &md->work); in queue_io()
681 * function to access the md->map field, and make sure they call
684 struct dm_table *dm_get_live_table(struct mapped_device *md, in dm_get_live_table() argument
685 int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
687 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
689 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
692 void dm_put_live_table(struct mapped_device *md, in dm_put_live_table() argument
693 int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
695 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
698 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
700 synchronize_srcu(&md->io_barrier); in dm_sync_table()
708 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
711 return rcu_dereference(md->map); in dm_get_live_table_fast()
714 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
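
dm_get_live_table()/dm_put_live_table() above are the reader half of an SRCU scheme: md->map may be swapped at any time, but a table handed out here cannot be freed until every reader has dropped its srcu_idx, which dm_sync_table() (synchronize_srcu) waits for. The canonical caller shape — dm_prepare_ioctl() below follows it — looks like this (sketch, kernel context assumed):

    int reader_sketch(struct mapped_device *md)
    {
        int srcu_idx;
        struct dm_table *map = dm_get_live_table(md, &srcu_idx);

        if (!map) {
            /* no table loaded yet; the index must still be released */
            dm_put_live_table(md, srcu_idx);
            return -EIO;
        }
        /* ... use 'map'; it stays valid until the put below ... */
        dm_put_live_table(md, srcu_idx);
        return 0;
    }
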
724 static struct table_device *open_table_device(struct mapped_device *md, in open_table_device() argument
733 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in open_table_device()
751 if (md->disk->slave_dir) { in open_table_device()
752 r = bd_link_disk_holder(bdev, md->disk); in open_table_device()
763 list_add(&td->list, &md->table_devices); in open_table_device()
776 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
778 if (md->disk->slave_dir) in close_table_device()
779 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); in close_table_device()
782 if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in close_table_device()
804 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, in dm_get_table_device() argument
809 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
810 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
812 td = open_table_device(md, dev, mode); in dm_get_table_device()
814 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
820 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
826 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
830 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
832 close_table_device(td, md); in dm_put_table_device()
833 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
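
dm_get_table_device() is a find-or-create under md->table_devices_lock: reuse an already-open underlying device when the same dev_t/mode pair is requested again, otherwise open and link a new one. Reconstruction sketch (the refcount bump on reuse is an assumption about the elided lines):

    int get_table_device_sketch(struct mapped_device *md, dev_t dev,
                                blk_mode_t mode, struct dm_dev **result)
    {
        struct table_device *td;

        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
            td = open_table_device(md, dev, mode);
            if (IS_ERR(td)) {
                mutex_unlock(&md->table_devices_lock);
                return PTR_ERR(td);
            }
        } else {
            refcount_inc(&td->count);    /* assumption: shared opens are counted */
        }
        mutex_unlock(&md->table_devices_lock);

        *result = &td->dm_dev;
        return 0;
    }
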
839 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
841 *geo = md->geometry; in dm_get_geometry()
849 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
858 md->geometry = *geo; in dm_set_geometry()
863 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
865 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
870 struct mapped_device *md = io->md; in dm_requeue_add_io() local
873 struct dm_io *next = md->requeue_list; in dm_requeue_add_io()
875 md->requeue_list = io; in dm_requeue_add_io()
878 bio_list_add_head(&md->deferred, io->orig_bio); in dm_requeue_add_io()
882 static void dm_kick_requeue(struct mapped_device *md, bool first_stage) in dm_kick_requeue() argument
885 queue_work(md->wq, &md->requeue_work); in dm_kick_requeue()
887 queue_work(md->wq, &md->work); in dm_kick_requeue()
900 struct mapped_device *md = io->md; in dm_handle_requeue() local
919 spin_lock_irqsave(&md->deferred_lock, flags); in dm_handle_requeue()
920 if ((__noflush_suspending(md) && in dm_handle_requeue()
921 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || in dm_handle_requeue()
932 spin_unlock_irqrestore(&md->deferred_lock, flags); in dm_handle_requeue()
936 dm_kick_requeue(md, first_stage); in dm_handle_requeue()
944 struct mapped_device *md = io->md; in __dm_io_complete() local
967 this_cpu_dec(*md->pending_io); in __dm_io_complete()
970 if (unlikely(wq_has_sleeper(&md->wait))) in __dm_io_complete()
971 wake_up(&md->wait); in __dm_io_complete()
983 queue_io(md, bio); in __dm_io_complete()
994 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_requeue_work() local
1000 spin_lock_irqsave(&md->deferred_lock, flags); in dm_wq_requeue_work()
1001 io = md->requeue_list; in dm_wq_requeue_work()
1002 md->requeue_list = NULL; in dm_wq_requeue_work()
1003 spin_unlock_irqrestore(&md->deferred_lock, flags); in dm_wq_requeue_work()
1008 dm_io_rewind(io, &md->disk->bio_split); in dm_wq_requeue_work()
1055 __noflush_suspending(io->md))) { in dm_io_set_error()
1071 * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
1073 static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
1075 return &md->queue->limits; in dm_get_queue_limits()
1090 struct mapped_device *md = io->md; in clone_endio() local
1095 blk_queue_disable_discard(md->queue); in clone_endio()
1098 blk_queue_disable_write_zeroes(md->queue); in clone_endio()
1116 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) in clone_endio()
1136 up(&md->swap_bios_semaphore); in clone_endio()
1168 min(max_sectors ? : queue_max_sectors(ti->table->md->queue), in __max_io_len()
1192 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target() argument
1194 __acquires(md->io_barrier) in dm_dax_get_live_target()
1199 map = dm_get_live_table(md, srcu_idx); in dm_dax_get_live_target()
1214 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access() local
1220 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_direct_access()
1233 dm_put_live_table(md, srcu_idx); in dm_dax_direct_access()
1241 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_zero_page_range() local
1247 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_zero_page_range()
1260 dm_put_live_table(md, srcu_idx); in dm_dax_zero_page_range()
1268 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_recovery_write() local
1274 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_recovery_write()
1280 dm_put_live_table(md, srcu_idx); in dm_dax_recovery_write()
1372 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk), in dm_submit_bio_remap()
1378 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) in __set_swap_bios_limit() argument
1380 mutex_lock(&md->swap_bios_lock); in __set_swap_bios_limit()
1381 while (latch < md->swap_bios) { in __set_swap_bios_limit()
1383 down(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1384 md->swap_bios--; in __set_swap_bios_limit()
1386 while (latch > md->swap_bios) { in __set_swap_bios_limit()
1388 up(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1389 md->swap_bios++; in __set_swap_bios_limit()
1391 mutex_unlock(&md->swap_bios_lock); in __set_swap_bios_limit()
1399 struct mapped_device *md = io->md; in __map_bio() local
1413 if (unlikely(latch != md->swap_bios)) in __map_bio()
1414 __set_swap_bios_limit(md, latch); in __map_bio()
1415 down(&md->swap_bios_semaphore); in __map_bio()
1438 up(&md->swap_bios_semaphore); in __map_bio()
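
__set_swap_bios_limit() resizes a counting semaphore in place: to shrink, it swallows permits one at a time (each down blocks until an in-flight swap bio completes and ups the semaphore in clone_endio()); to grow, it releases extras. A runnable userspace analog of the same move:

    #include <pthread.h>
    #include <semaphore.h>

    static pthread_mutex_t limit_lock = PTHREAD_MUTEX_INITIALIZER;
    static sem_t permits;   /* sem_init(&permits, 0, cur) done at startup */
    static int cur;         /* current permit count, protected by limit_lock */

    static void set_limit(int latch)
    {
        pthread_mutex_lock(&limit_lock);
        while (latch < cur) {       /* shrink: absorb permits, may block */
            sem_wait(&permits);
            cur--;
        }
        while (latch > cur) {       /* grow: hand out extra permits */
            sem_post(&permits);
            cur++;
        }
        pthread_mutex_unlock(&limit_lock);
    }
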
1477 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1487 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1540 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); in __send_empty_flush()
1638 struct queue_limits *limits = dm_get_queue_limits(ti->table->md); in __process_abnormal_io()
1803 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) in dm_zone_plug_bio() argument
1814 struct mapped_device *md = ci->io->md; in __send_zone_reset_all_emulated() local
1815 unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors; in __send_zone_reset_all_emulated()
1829 ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin, in __send_zone_reset_all_emulated()
1927 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) in dm_zone_plug_bio() argument
1940 static void dm_split_and_process_bio(struct mapped_device *md, in dm_split_and_process_bio() argument
1972 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio)) in dm_split_and_process_bio()
1986 io = alloc_io(md, bio, GFP_NOWAIT); in dm_split_and_process_bio()
1993 io = alloc_io(md, bio, GFP_NOIO); in dm_split_and_process_bio()
2060 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; in dm_submit_bio() local
2064 map = dm_get_live_table(md, &srcu_idx); in dm_submit_bio()
2067 dm_device_name(md)); in dm_submit_bio()
2073 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_submit_bio()
2079 queue_io(md, bio); in dm_submit_bio()
2083 dm_split_and_process_bio(md, map, bio); in dm_submit_bio()
2085 dm_put_live_table(md, srcu_idx); in dm_submit_bio()
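
dm_submit_bio() ties the earlier pieces together: take the SRCU read side, park the bio on md->deferred if the device is blocked for suspend, otherwise split and map it against the live table. Reconstruction sketch (the no-table, REQ_NOWAIT, and REQ_RAHEAD branches visible above are elided):

    static void submit_sketch(struct bio *bio)
    {
        struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
        int srcu_idx;
        struct dm_table *map;

        map = dm_get_live_table(md, &srcu_idx);

        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
            queue_io(md, bio);      /* replayed by dm_wq_work() after resume */
        } else {
            dm_split_and_process_bio(md, map, bio);
        }

        dm_put_live_table(md, srcu_idx);
    }
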
2220 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
2222 if (md->wq) in cleanup_mapped_device()
2223 destroy_workqueue(md->wq); in cleanup_mapped_device()
2224 dm_free_md_mempools(md->mempools); in cleanup_mapped_device()
2226 if (md->dax_dev) { in cleanup_mapped_device()
2227 dax_remove_host(md->disk); in cleanup_mapped_device()
2228 kill_dax(md->dax_dev); in cleanup_mapped_device()
2229 put_dax(md->dax_dev); in cleanup_mapped_device()
2230 md->dax_dev = NULL; in cleanup_mapped_device()
2233 if (md->disk) { in cleanup_mapped_device()
2235 md->disk->private_data = NULL; in cleanup_mapped_device()
2237 if (dm_get_md_type(md) != DM_TYPE_NONE) { in cleanup_mapped_device()
2240 dm_sysfs_exit(md); in cleanup_mapped_device()
2241 list_for_each_entry(td, &md->table_devices, list) { in cleanup_mapped_device()
2243 md->disk); in cleanup_mapped_device()
2250 mutex_lock(&md->table_devices_lock); in cleanup_mapped_device()
2251 del_gendisk(md->disk); in cleanup_mapped_device()
2252 mutex_unlock(&md->table_devices_lock); in cleanup_mapped_device()
2254 dm_queue_destroy_crypto_profile(md->queue); in cleanup_mapped_device()
2255 put_disk(md->disk); in cleanup_mapped_device()
2258 if (md->pending_io) { in cleanup_mapped_device()
2259 free_percpu(md->pending_io); in cleanup_mapped_device()
2260 md->pending_io = NULL; in cleanup_mapped_device()
2263 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
2265 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
2266 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
2267 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
2268 mutex_destroy(&md->swap_bios_lock); in cleanup_mapped_device()
2270 dm_mq_cleanup_mapped_device(md); in cleanup_mapped_device()
2280 struct mapped_device *md; in alloc_dev() local
2283 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); in alloc_dev()
2284 if (!md) { in alloc_dev()
2300 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
2304 md->numa_node_id = numa_node_id; in alloc_dev()
2305 md->init_tio_pdu = false; in alloc_dev()
2306 md->type = DM_TYPE_NONE; in alloc_dev()
2307 mutex_init(&md->suspend_lock); in alloc_dev()
2308 mutex_init(&md->type_lock); in alloc_dev()
2309 mutex_init(&md->table_devices_lock); in alloc_dev()
2310 spin_lock_init(&md->deferred_lock); in alloc_dev()
2311 atomic_set(&md->holders, 1); in alloc_dev()
2312 atomic_set(&md->open_count, 0); in alloc_dev()
2313 atomic_set(&md->event_nr, 0); in alloc_dev()
2314 atomic_set(&md->uevent_seq, 0); in alloc_dev()
2315 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
2316 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
2317 spin_lock_init(&md->uevent_lock); in alloc_dev()
2320 * default to bio-based until DM table is loaded and md->type in alloc_dev()
2324 md->disk = blk_alloc_disk(NULL, md->numa_node_id); in alloc_dev()
2325 if (IS_ERR(md->disk)) { in alloc_dev()
2326 md->disk = NULL; in alloc_dev()
2329 md->queue = md->disk->queue; in alloc_dev()
2331 init_waitqueue_head(&md->wait); in alloc_dev()
2332 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
2333 INIT_WORK(&md->requeue_work, dm_wq_requeue_work); in alloc_dev()
2334 init_waitqueue_head(&md->eventq); in alloc_dev()
2335 init_completion(&md->kobj_holder.completion); in alloc_dev()
2337 md->requeue_list = NULL; in alloc_dev()
2338 md->swap_bios = get_swap_bios(); in alloc_dev()
2339 sema_init(&md->swap_bios_semaphore, md->swap_bios); in alloc_dev()
2340 mutex_init(&md->swap_bios_lock); in alloc_dev()
2342 md->disk->major = _major; in alloc_dev()
2343 md->disk->first_minor = minor; in alloc_dev()
2344 md->disk->minors = 1; in alloc_dev()
2345 md->disk->flags |= GENHD_FL_NO_PART; in alloc_dev()
2346 md->disk->fops = &dm_blk_dops; in alloc_dev()
2347 md->disk->private_data = md; in alloc_dev()
2348 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
2350 dax_dev = alloc_dax(md, &dm_dax_ops); in alloc_dev()
2357 md->dax_dev = dax_dev; in alloc_dev()
2358 if (dax_add_host(dax_dev, md->disk)) in alloc_dev()
2362 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
2364 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name); in alloc_dev()
2365 if (!md->wq) in alloc_dev()
2368 md->pending_io = alloc_percpu(unsigned long); in alloc_dev()
2369 if (!md->pending_io) in alloc_dev()
2372 r = dm_stats_init(&md->stats); in alloc_dev()
2378 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
2383 return md; in alloc_dev()
2386 cleanup_mapped_device(md); in alloc_dev()
2392 kvfree(md); in alloc_dev()
2396 static void unlock_fs(struct mapped_device *md);
2398 static void free_dev(struct mapped_device *md) in free_dev() argument
2400 int minor = MINOR(disk_devt(md->disk)); in free_dev()
2402 unlock_fs(md); in free_dev()
2404 cleanup_mapped_device(md); in free_dev()
2406 WARN_ON_ONCE(!list_empty(&md->table_devices)); in free_dev()
2407 dm_stats_cleanup(&md->stats); in free_dev()
2411 kvfree(md); in free_dev()
2421 struct mapped_device *md = context; in event_callback() local
2423 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2424 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2425 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2427 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2429 atomic_inc(&md->event_nr); in event_callback()
2430 wake_up(&md->eventq); in event_callback()
2437 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
2444 lockdep_assert_held(&md->suspend_lock); in __bind()
2448 old_size = dm_get_size(md); in __bind()
2455 set_capacity(md->disk, size); in __bind()
2457 ret = dm_table_set_restrictions(t, md->queue, limits); in __bind()
2459 set_capacity(md->disk, old_size); in __bind()
2468 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2470 dm_table_event_callback(t, event_callback, md); in __bind()
2477 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
2487 if (!md->mempools) in __bind()
2488 md->mempools = t->mempools; in __bind()
2493 * The md may already have mempools that need changing. in __bind()
2497 dm_free_md_mempools(md->mempools); in __bind()
2498 md->mempools = t->mempools; in __bind()
2502 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2503 rcu_assign_pointer(md->map, (void *)t); in __bind()
2504 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2507 dm_sync_table(md); in __bind()
2515 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
2517 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2523 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2524 dm_sync_table(md); in __unbind()
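
__bind() and __unbind() are the writer half of the SRCU scheme: publish the new pointer under md->suspend_lock, then dm_sync_table() before the old table may be destroyed. The swap protocol in isolation (sketch; __unbind() passes NULL and uses RCU_INIT_POINTER rather than rcu_assign_pointer):

    /* Called with md->suspend_lock held. */
    static struct dm_table *swap_map_sketch(struct mapped_device *md,
                                            struct dm_table *t)
    {
        struct dm_table *old = rcu_dereference_protected(md->map,
                                    lockdep_is_held(&md->suspend_lock));

        rcu_assign_pointer(md->map, t);
        dm_sync_table(md);          /* wait out every dm_get_live_table() reader */
        return old;                 /* caller frees via dm_table_destroy() */
    }
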
2534 struct mapped_device *md; in dm_create() local
2536 md = alloc_dev(minor); in dm_create()
2537 if (!md) in dm_create()
2540 dm_ima_reset_data(md); in dm_create()
2542 *result = md; in dm_create()
2547 * Functions to manage md->type.
2548 * All are required to hold md->type_lock.
2550 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2552 mutex_lock(&md->type_lock); in dm_lock_md_type()
2555 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2557 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2560 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2562 return md->type; in dm_get_md_type()
2565 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2567 return md->immutable_target_type; in dm_get_immutable_target_type()
2571 * Setup the DM device's queue based on md's type
2573 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue() argument
2583 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2584 r = dm_mq_init_request_queue(md, t); in dm_setup_md_queue()
2596 r = dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2604 mutex_lock(&md->table_devices_lock); in dm_setup_md_queue()
2605 r = add_disk(md->disk); in dm_setup_md_queue()
2606 mutex_unlock(&md->table_devices_lock); in dm_setup_md_queue()
2614 list_for_each_entry(td, &md->table_devices, list) { in dm_setup_md_queue()
2615 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk); in dm_setup_md_queue()
2620 r = dm_sysfs_init(md); in dm_setup_md_queue()
2624 md->type = type; in dm_setup_md_queue()
2628 list_for_each_entry_continue_reverse(td, &md->table_devices, list) in dm_setup_md_queue()
2629 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); in dm_setup_md_queue()
2630 mutex_lock(&md->table_devices_lock); in dm_setup_md_queue()
2631 del_gendisk(md->disk); in dm_setup_md_queue()
2632 mutex_unlock(&md->table_devices_lock); in dm_setup_md_queue()
2638 struct mapped_device *md; in dm_get_md() local
2646 md = idr_find(&_minor_idr, minor); in dm_get_md()
2647 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2648 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2649 md = NULL; in dm_get_md()
2652 dm_get(md); in dm_get_md()
2656 return md; in dm_get_md()
2660 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2662 return md->interface_ptr; in dm_get_mdptr()
2665 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2667 md->interface_ptr = ptr; in dm_set_mdptr()
2670 void dm_get(struct mapped_device *md) in dm_get() argument
2672 atomic_inc(&md->holders); in dm_get()
2673 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2676 int dm_hold(struct mapped_device *md) in dm_hold() argument
2679 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2683 dm_get(md); in dm_hold()
2689 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2691 return md->name; in dm_device_name()
2695 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2703 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2704 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2707 blk_mark_disk_dead(md->disk); in __dm_destroy()
2713 mutex_lock(&md->suspend_lock); in __dm_destroy()
2714 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2715 if (!dm_suspended_md(md)) { in __dm_destroy()
2717 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2718 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2722 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2723 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2732 while (atomic_read(&md->holders)) in __dm_destroy()
2734 else if (atomic_read(&md->holders)) in __dm_destroy()
2736 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2738 dm_table_destroy(__unbind(md)); in __dm_destroy()
2739 free_dev(md); in __dm_destroy()
2742 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2744 __dm_destroy(md, true); in dm_destroy()
2747 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2749 __dm_destroy(md, false); in dm_destroy_immediate()
2752 void dm_put(struct mapped_device *md) in dm_put() argument
2754 atomic_dec(&md->holders); in dm_put()
2758 static bool dm_in_flight_bios(struct mapped_device *md) in dm_in_flight_bios() argument
2764 sum += *per_cpu_ptr(md->pending_io, cpu); in dm_in_flight_bios()
2769 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) in dm_wait_for_bios_completion() argument
2775 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2777 if (!dm_in_flight_bios(md)) in dm_wait_for_bios_completion()
2787 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2794 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) in dm_wait_for_completion() argument
2798 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2799 return dm_wait_for_bios_completion(md, task_state); in dm_wait_for_completion()
2802 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
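
dm_in_flight_bios() sums the lockless per-CPU pending_io counters; the sum is only trustworthy because dm_wait_for_bios_completion() rechecks it after prepare_to_wait(), and every completion (__dm_io_complete()) decrements the counter before waking md->wait. Reconstruction sketch of that loop (the exact error code on signal delivery is an assumption):

    static int wait_for_drain_sketch(struct mapped_device *md,
                                     unsigned int task_state)
    {
        int r = 0;
        DEFINE_WAIT(wait);

        while (true) {
            prepare_to_wait(&md->wait, &wait, task_state);
            if (!dm_in_flight_bios(md))
                break;
            if (signal_pending_state(task_state, current)) {
                r = -ERESTARTSYS;   /* assumption: interruptible waits bail out here */
                break;
            }
            io_schedule();
        }
        finish_wait(&md->wait, &wait);
        return r;
    }
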
2821 struct mapped_device *md = container_of(work, struct mapped_device, work); in dm_wq_work() local
2824 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2825 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2826 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2827 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2837 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2839 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2841 queue_work(md->wq, &md->work); in dm_queue_flush()
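
dm_wq_work() is the consumer for queue_io(): it pops md->deferred under deferred_lock until the list empties or DMF_BLOCK_IO_FOR_SUSPEND is set again, and dm_queue_flush() restarts it after resume by clearing the flag and re-kicking the work item. Consumer side as a sketch (resubmission via submit_bio_noacct() is an assumption about the elided lines):

    static void drain_deferred_sketch(struct mapped_device *md)
    {
        struct bio *bio;

        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
            spin_lock_irq(&md->deferred_lock);
            bio = bio_list_pop(&md->deferred);
            spin_unlock_irq(&md->deferred_lock);

            if (!bio)
                break;
            submit_bio_noacct(bio);     /* re-enter the normal submission path */
            cond_resched();             /* be nice under a long backlog */
        }
    }
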
2847 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
2853 mutex_lock(&md->suspend_lock); in dm_swap_table()
2856 if (!dm_suspended_md(md)) in dm_swap_table()
2866 live_map = dm_get_live_table_fast(md); in dm_swap_table()
2868 limits = md->queue->limits; in dm_swap_table()
2869 dm_put_live_table_fast(md); in dm_swap_table()
2880 map = __bind(md, table, &limits); in dm_swap_table()
2884 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2892 static int lock_fs(struct mapped_device *md) in lock_fs() argument
2896 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); in lock_fs()
2898 r = bdev_freeze(md->disk->part0); in lock_fs()
2900 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2904 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
2906 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2908 bdev_thaw(md->disk->part0); in unlock_fs()
2909 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2919 * are being added to md->deferred list.
2921 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
2929 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2936 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2938 DMDEBUG("%s: suspending with flush", dm_device_name(md)); in __dm_suspend()
2953 r = lock_fs(md); in __dm_suspend()
2969 * flush_workqueue(md->wq). in __dm_suspend()
2971 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2973 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2976 * Stop md->queue before flushing md->wq in case request-based in __dm_suspend()
2977 * dm defers requests to md->wq from md->queue. in __dm_suspend()
2979 if (map && dm_request_based(md)) { in __dm_suspend()
2980 dm_stop_queue(md->queue); in __dm_suspend()
2981 set_bit(DMF_QUEUE_STOPPED, &md->flags); in __dm_suspend()
2984 flush_workqueue(md->wq); in __dm_suspend()
2992 r = dm_wait_for_completion(md, task_state); in __dm_suspend()
2994 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2997 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2999 synchronize_srcu(&md->io_barrier); in __dm_suspend()
3003 dm_queue_flush(md); in __dm_suspend()
3005 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) in __dm_suspend()
3006 dm_start_queue(md->queue); in __dm_suspend()
3008 unlock_fs(md); in __dm_suspend()
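
Putting __dm_suspend() together: block new I/O, drain SRCU readers, stop the queue for request-based targets, then wait out in-flight I/O, rolling everything back if the wait is interrupted. Skeleton reconstruction (presuspend hooks, the lockfs-skip flag, and the noflush bookkeeping are elided):

    static int suspend_sketch(struct mapped_device *md, struct dm_table *map,
                              unsigned int suspend_flags, unsigned int task_state)
    {
        int r = 0;

        if (suspend_flags & DM_SUSPEND_NOFLUSH_FLAG) {
            set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        } else {
            r = lock_fs(md);        /* bdev_freeze(); skipped in noflush mode */
            if (r)
                return r;
        }

        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);  /* new bios -> md->deferred */
        synchronize_srcu(&md->io_barrier);              /* drain dm_submit_bio() readers */

        if (map && dm_request_based(md)) {
            dm_stop_queue(md->queue);
            set_bit(DMF_QUEUE_STOPPED, &md->flags);
        }

        flush_workqueue(md->wq);                        /* settle deferred work */

        r = dm_wait_for_completion(md, task_state);     /* wait out in-flight I/O */
        if (!r) {
            set_bit(DMF_SUSPENDED, &md->flags);         /* the caller picks the flag */
        } else {
            dm_queue_flush(md);                         /* roll back: unblock I/O */
            if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
                dm_start_queue(md->queue);
            unlock_fs(md);
        }
        return r;
    }
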
3032 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags) in dm_suspend() argument
3038 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
3040 if (dm_suspended_md(md)) { in dm_suspend()
3045 if (dm_suspended_internally_md(md)) { in dm_suspend()
3047 mutex_unlock(&md->suspend_lock); in dm_suspend()
3048 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
3054 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
3060 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); in dm_suspend()
3064 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
3066 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
3069 mutex_unlock(&md->suspend_lock); in dm_suspend()
3073 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
3082 dm_queue_flush(md); in __dm_resume()
3089 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags)) in __dm_resume()
3090 dm_start_queue(md->queue); in __dm_resume()
3092 unlock_fs(md); in __dm_resume()
3097 int dm_resume(struct mapped_device *md) in dm_resume() argument
3104 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
3106 if (!dm_suspended_md(md)) in dm_resume()
3109 if (dm_suspended_internally_md(md)) { in dm_resume()
3111 mutex_unlock(&md->suspend_lock); in dm_resume()
3112 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
3118 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
3122 r = __dm_resume(md, map); in dm_resume()
3126 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
3128 mutex_unlock(&md->suspend_lock); in dm_resume()
3139 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags) in __dm_internal_suspend() argument
3143 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
3145 if (md->internal_suspend_count++) in __dm_internal_suspend()
3148 if (dm_suspended_md(md)) { in __dm_internal_suspend()
3149 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
3153 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
3161 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, in __dm_internal_suspend()
3164 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
3166 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
3169 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
3174 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
3176 if (--md->internal_suspend_count) in __dm_internal_resume()
3179 if (dm_suspended_md(md)) in __dm_internal_resume()
3182 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_resume()
3183 r = __dm_resume(md, map); in __dm_internal_resume()
3197 set_bit(DMF_SUSPENDED, &md->flags); in __dm_internal_resume()
3200 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
3202 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
3205 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
3207 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
3208 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
3209 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
3213 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
3215 mutex_lock(&md->suspend_lock); in dm_internal_resume()
3216 __dm_internal_resume(md); in dm_internal_resume()
3217 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
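
__dm_internal_suspend()/__dm_internal_resume() add a nesting count on top of the same machinery: only the outermost pair does real work, and an internal suspend that finds the device already user-suspended just marks DMF_SUSPENDED_INTERNALLY so the matching resume can tell the two cases apart. The nesting rule in isolation (sketch):

    static void internal_suspend_sketch(struct mapped_device *md,
                                        unsigned int suspend_flags)
    {
        lockdep_assert_held(&md->suspend_lock);

        if (md->internal_suspend_count++)
            return;                     /* nested: already internally suspended */

        if (dm_suspended_md(md)) {
            set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
            return;                     /* already suspended by the user */
        }
        /* ... otherwise a real __dm_suspend(..., TASK_UNINTERRUPTIBLE, ...) ... */
    }
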
3222 * Fast variants of internal suspend/resume hold md->suspend_lock,
3226 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
3228 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
3229 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
3232 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
3233 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
3234 flush_workqueue(md->wq); in dm_internal_suspend_fast()
3235 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
3239 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
3241 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
3244 dm_queue_flush(md); in dm_internal_resume_fast()
3247 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
3256 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
3275 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp); in dm_kobject_uevent()
3282 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
3284 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
3287 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
3289 return atomic_read(&md->event_nr); in dm_get_event_nr()
3292 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
3294 return wait_event_interruptible(md->eventq, in dm_wait_event()
3295 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
3298 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
3302 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
3303 list_add(elist, &md->uevent_list); in dm_uevent_add()
3304 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
3309 * count on 'md'.
3311 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
3313 return md->disk; in dm_disk()
3317 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
3319 return &md->kobj_holder.kobj; in dm_kobject()
3324 struct mapped_device *md; in dm_get_from_kobject() local
3326 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
3329 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
3330 md = NULL; in dm_get_from_kobject()
3333 dm_get(md); in dm_get_from_kobject()
3337 return md; in dm_get_from_kobject()
3340 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
3342 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
3345 static int dm_post_suspending_md(struct mapped_device *md) in dm_post_suspending_md() argument
3347 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
3350 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
3352 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
3355 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
3357 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
3362 return dm_suspended_md(ti->table->md); in dm_suspended()
3368 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
3374 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
3414 struct mapped_device *md = disk->private_data; in dm_blk_get_unique_id() local
3424 table = dm_get_live_table(md, &srcu_idx); in dm_blk_get_unique_id()
3438 dm_put_live_table(md, srcu_idx); in dm_blk_get_unique_id()
3457 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr() local
3462 table = dm_get_live_table(md, &srcu_idx); in dm_call_pr()
3471 if (dm_suspended_md(md)) { in dm_call_pr()
3483 dm_put_live_table(md, srcu_idx); in dm_call_pr()
3666 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3672 r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward); in dm_pr_clear()
3683 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_clear()