Lines Matching refs:head

156 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
158 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
172 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
173 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
177 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
183 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
185 if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && in nvme_mpath_start_request()
210 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
223 if (!ns->head->disk) in nvme_kick_requeue_lists()
225 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
227 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
243 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path() local
247 if (!head) in nvme_mpath_clear_current_path()
251 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
252 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_clear_current_path()
269 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
276 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths() local
277 sector_t capacity = get_capacity(head->disk); in nvme_mpath_revalidate_paths()
281 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_revalidate_paths()
282 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_revalidate_paths()
283 srcu_read_lock_held(&head->srcu)) { in nvme_mpath_revalidate_paths()
287 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_revalidate_paths()
290 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_revalidate_paths()
291 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_revalidate_paths()
311 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) in __nvme_find_path() argument
316 list_for_each_entry_srcu(ns, &head->list, siblings, in __nvme_find_path()
317 srcu_read_lock_held(&head->srcu)) { in __nvme_find_path()
322 READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) in __nvme_find_path()
348 rcu_assign_pointer(head->current_path[node], found); in __nvme_find_path()
352 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, in nvme_next_ns() argument
355 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
359 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
362 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head) in nvme_round_robin_path() argument
366 struct nvme_ns *old = srcu_dereference(head->current_path[node], in nvme_round_robin_path()
367 &head->srcu); in nvme_round_robin_path()
370 return __nvme_find_path(head, node); in nvme_round_robin_path()
372 if (list_is_singular(&head->list)) { in nvme_round_robin_path()
378 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
380 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
406 rcu_assign_pointer(head->current_path[node], found); in nvme_round_robin_path()
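
The round-robin lines above (nvme_next_ns() and nvme_round_robin_path()) rotate a remembered per-node current path through the sibling list, wrapping back to the first entry when the end is reached, and publish the new choice with rcu_assign_pointer(). Below is a minimal user-space sketch of that rotation only; struct path, next_path() and pick_round_robin() are illustrative names, and the SRCU protection, ANA-state checks and optimized-path preference of the real code are omitted.

/*
 * User-space sketch of the wrap-around rotation visible in
 * nvme_next_ns()/nvme_round_robin_path().  Hypothetical types and names;
 * not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct path {
	const char *name;
	bool usable;		/* stands in for the kernel's per-path state checks */
	struct path *next;	/* circular list, i.e. wrapping back to the head */
};

/* list_next_or_null_rcu() plus the fall-back to the first element, collapsed. */
static struct path *next_path(struct path *p)
{
	return p->next;
}

/* Rotate from the previously used path to the next usable one. */
static struct path *pick_round_robin(struct path **current)
{
	struct path *start = *current, *p = start;

	do {
		p = next_path(p);
		if (p->usable) {
			*current = p;	/* like rcu_assign_pointer(current_path[node], found) */
			return p;
		}
	} while (p != start);

	return NULL;			/* no usable path at all */
}

int main(void)
{
	struct path a = { "path-a", true, NULL };
	struct path b = { "path-b", false, NULL };
	struct path c = { "path-c", true, NULL };
	struct path *cur = &a;

	a.next = &b; b.next = &c; c.next = &a;

	for (int i = 0; i < 4; i++)
		printf("I/O %d -> %s\n", i, pick_round_robin(&cur)->name);
	return 0;
}
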
410 static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head) in nvme_queue_depth_path() argument
416 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_queue_depth_path()
417 srcu_read_lock_held(&head->srcu)) { in nvme_queue_depth_path()
453 static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head) in nvme_numa_path() argument
458 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_numa_path()
460 return __nvme_find_path(head, node); in nvme_numa_path()
462 return __nvme_find_path(head, node); in nvme_numa_path()
466 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) in nvme_find_path() argument
468 switch (READ_ONCE(head->subsys->iopolicy)) { in nvme_find_path()
470 return nvme_queue_depth_path(head); in nvme_find_path()
472 return nvme_round_robin_path(head); in nvme_find_path()
474 return nvme_numa_path(head); in nvme_find_path()
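
nvme_find_path() above dispatches on READ_ONCE(head->subsys->iopolicy) to the queue-depth, round-robin or NUMA selector, and nvme_queue_depth_path() walks the sibling list to pick a path. The following user-space sketch shows the same dispatch shape with a toy lowest-in-flight scan; the names and the inflight counter are assumptions, not the kernel's data structures, and the real selectors run under srcu_read_lock().

/*
 * Sketch of the iopolicy dispatch plus a queue-depth style scan:
 * walk the candidate paths and take the one with the fewest
 * in-flight requests.  Illustrative user-space analogue only.
 */
#include <limits.h>
#include <stdio.h>

enum iopolicy { IOPOLICY_NUMA, IOPOLICY_RR, IOPOLICY_QD };

struct path {
	const char *name;
	int inflight;	/* stands in for the per-path request counter */
};

static struct path *queue_depth_path(struct path *paths, int n)
{
	struct path *best = NULL;
	int min = INT_MAX;

	for (int i = 0; i < n; i++) {
		if (paths[i].inflight < min) {
			min = paths[i].inflight;
			best = &paths[i];
		}
	}
	return best;
}

static struct path *find_path(struct path *paths, int n, enum iopolicy policy)
{
	switch (policy) {
	case IOPOLICY_QD:
		return queue_depth_path(paths, n);
	case IOPOLICY_RR:	/* see the round-robin sketch above */
	case IOPOLICY_NUMA:	/* the kernel also caches a per-node choice */
	default:
		return &paths[0];
	}
}

int main(void)
{
	struct path paths[] = {
		{ "path-a", 7 }, { "path-b", 2 }, { "path-c", 5 },
	};

	printf("%s\n", find_path(paths, 3, IOPOLICY_QD)->name);
	return 0;
}
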
478 static bool nvme_available_path(struct nvme_ns_head *head) in nvme_available_path() argument
482 if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) in nvme_available_path()
485 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_available_path()
486 srcu_read_lock_held(&head->srcu)) { in nvme_available_path()
508 return nvme_mpath_queue_if_no_path(head); in nvme_available_path()
513 struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; in nvme_ns_head_submit_bio() local
514 struct device *dev = disk_to_dev(head->disk); in nvme_ns_head_submit_bio()
527 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_submit_bio()
528 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
532 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
535 } else if (nvme_available_path(head)) { in nvme_ns_head_submit_bio()
538 spin_lock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
539 bio_list_add(&head->requeue_list, bio); in nvme_ns_head_submit_bio()
540 spin_unlock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
547 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_submit_bio()
565 struct nvme_ns_head *head = disk->private_data; in nvme_ns_head_get_unique_id() local
569 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_get_unique_id()
570 ns = nvme_find_path(head); in nvme_ns_head_get_unique_id()
573 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_get_unique_id()
581 struct nvme_ns_head *head = disk->private_data; in nvme_ns_head_report_zones() local
585 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_report_zones()
586 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
589 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_report_zones()
637 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) in nvme_add_ns_head_cdev() argument
641 head->cdev_device.parent = &head->subsys->dev; in nvme_add_ns_head_cdev()
642 ret = dev_set_name(&head->cdev_device, "ng%dn%d", in nvme_add_ns_head_cdev()
643 head->subsys->instance, head->instance); in nvme_add_ns_head_cdev()
646 ret = nvme_cdev_add(&head->cdev, &head->cdev_device, in nvme_add_ns_head_cdev()
653 struct nvme_ns_head *head = in nvme_partition_scan_work() local
657 &head->disk->state))) in nvme_partition_scan_work()
660 mutex_lock(&head->disk->open_mutex); in nvme_partition_scan_work()
661 bdev_disk_changed(head->disk, false); in nvme_partition_scan_work()
662 mutex_unlock(&head->disk->open_mutex); in nvme_partition_scan_work()
667 struct nvme_ns_head *head = in nvme_requeue_work() local
671 spin_lock_irq(&head->requeue_lock); in nvme_requeue_work()
672 next = bio_list_get(&head->requeue_list); in nvme_requeue_work()
673 spin_unlock_irq(&head->requeue_lock); in nvme_requeue_work()
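
Taken together, the nvme_failover_req(), nvme_ns_head_submit_bio() and nvme_requeue_work() lines show the requeue pattern: producers add bios to head->requeue_list under requeue_lock and kick requeue_work, and the worker detaches the whole list in one step (bio_list_get()) before resubmitting outside the lock. Here is a user-space analogue of that detach-and-drain pattern, with illustrative names only (struct io, requeue_add, requeue_work); it is not the kernel API.

/*
 * Producers append entries to a locked list; the worker takes the whole
 * list at once and processes it outside the lock, mirroring
 * bio_list_add()/bio_list_get() under requeue_lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct io {
	int id;
	struct io *next;
};

static pthread_mutex_t requeue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct io *requeue_list;	/* singly linked, newest first */

/* Producer side: what the failover/submit paths do under requeue_lock. */
static void requeue_add(struct io *io)
{
	pthread_mutex_lock(&requeue_lock);
	io->next = requeue_list;
	requeue_list = io;
	pthread_mutex_unlock(&requeue_lock);
	/* the kernel then kicks requeue_work via kblockd_schedule_work() */
}

/* Worker side: detach everything at once, then resubmit outside the lock. */
static void requeue_work(void)
{
	struct io *next;

	pthread_mutex_lock(&requeue_lock);
	next = requeue_list;		/* like bio_list_get(): take and clear */
	requeue_list = NULL;
	pthread_mutex_unlock(&requeue_lock);

	while (next) {
		struct io *io = next;

		next = io->next;
		printf("resubmitting io %d\n", io->id);
		free(io);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct io *io = malloc(sizeof(*io));

		io->id = i;
		requeue_add(io);
	}
	requeue_work();
	return 0;
}
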
683 static void nvme_remove_head(struct nvme_ns_head *head) in nvme_remove_head() argument
685 if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_remove_head()
690 kblockd_schedule_work(&head->requeue_work); in nvme_remove_head()
692 nvme_cdev_del(&head->cdev, &head->cdev_device); in nvme_remove_head()
693 synchronize_srcu(&head->srcu); in nvme_remove_head()
694 del_gendisk(head->disk); in nvme_remove_head()
696 nvme_put_ns_head(head); in nvme_remove_head()
701 struct nvme_ns_head *head = container_of(to_delayed_work(work), in nvme_remove_head_work() local
705 mutex_lock(&head->subsys->lock); in nvme_remove_head_work()
706 if (list_empty(&head->list)) { in nvme_remove_head_work()
707 list_del_init(&head->entry); in nvme_remove_head_work()
710 mutex_unlock(&head->subsys->lock); in nvme_remove_head_work()
712 nvme_remove_head(head); in nvme_remove_head_work()
717 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) in nvme_mpath_alloc_disk() argument
721 mutex_init(&head->lock); in nvme_mpath_alloc_disk()
722 bio_list_init(&head->requeue_list); in nvme_mpath_alloc_disk()
723 spin_lock_init(&head->requeue_lock); in nvme_mpath_alloc_disk()
724 INIT_WORK(&head->requeue_work, nvme_requeue_work); in nvme_mpath_alloc_disk()
725 INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); in nvme_mpath_alloc_disk()
726 INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work); in nvme_mpath_alloc_disk()
727 head->delayed_removal_secs = 0; in nvme_mpath_alloc_disk()
744 if (!nvme_is_unique_nsid(ctrl, head)) in nvme_mpath_alloc_disk()
751 if (head->ids.csi == NVME_CSI_ZNS) in nvme_mpath_alloc_disk()
754 head->disk = blk_alloc_disk(&lim, ctrl->numa_node); in nvme_mpath_alloc_disk()
755 if (IS_ERR(head->disk)) in nvme_mpath_alloc_disk()
756 return PTR_ERR(head->disk); in nvme_mpath_alloc_disk()
757 head->disk->fops = &nvme_ns_head_ops; in nvme_mpath_alloc_disk()
758 head->disk->private_data = head; in nvme_mpath_alloc_disk()
768 set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); in nvme_mpath_alloc_disk()
769 sprintf(head->disk->disk_name, "nvme%dn%d", in nvme_mpath_alloc_disk()
770 ctrl->subsys->instance, head->instance); in nvme_mpath_alloc_disk()
771 nvme_tryget_ns_head(head); in nvme_mpath_alloc_disk()
777 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live() local
780 if (!head->disk) in nvme_mpath_set_live()
788 if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_mpath_set_live()
789 rc = device_add_disk(&head->subsys->dev, head->disk, in nvme_mpath_set_live()
792 clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); in nvme_mpath_set_live()
795 nvme_add_ns_head_cdev(head); in nvme_mpath_set_live()
796 queue_work(nvme_wq, &head->partition_scan_work); in nvme_mpath_set_live()
799 nvme_mpath_add_sysfs_link(ns->head); in nvme_mpath_set_live()
801 mutex_lock(&head->lock); in nvme_mpath_set_live()
805 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_set_live()
807 __nvme_find_path(head, node); in nvme_mpath_set_live()
808 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_set_live()
810 mutex_unlock(&head->lock); in nvme_mpath_set_live()
812 synchronize_srcu(&head->srcu); in nvme_mpath_set_live()
813 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_set_live()
899 if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags)) in nvme_update_ns_ana_state()
900 nvme_mpath_add_sysfs_link(ns->head); in nvme_update_ns_ana_state()
928 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
930 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
934 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
1094 if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD) in queue_depth_show()
1108 struct nvme_ns_head *head = ns->head; in numa_nodes_show() local
1110 if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA) in numa_nodes_show()
1115 srcu_idx = srcu_read_lock(&head->srcu); in numa_nodes_show()
1117 current_ns = srcu_dereference(head->current_path[node], in numa_nodes_show()
1118 &head->srcu); in numa_nodes_show()
1122 srcu_read_unlock(&head->srcu, srcu_idx); in numa_nodes_show()
1132 struct nvme_ns_head *head = disk->private_data; in delayed_removal_secs_show() local
1135 mutex_lock(&head->subsys->lock); in delayed_removal_secs_show()
1136 ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs); in delayed_removal_secs_show()
1137 mutex_unlock(&head->subsys->lock); in delayed_removal_secs_show()
1145 struct nvme_ns_head *head = disk->private_data; in delayed_removal_secs_store() local
1153 mutex_lock(&head->subsys->lock); in delayed_removal_secs_store()
1154 head->delayed_removal_secs = sec; in delayed_removal_secs_store()
1156 set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); in delayed_removal_secs_store()
1158 clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); in delayed_removal_secs_store()
1159 mutex_unlock(&head->subsys->lock); in delayed_removal_secs_store()
1164 synchronize_srcu(&head->srcu); in delayed_removal_secs_store()
1183 void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head) in nvme_mpath_add_sysfs_link() argument
1194 if (!test_bit(GD_ADDED, &head->disk->state)) in nvme_mpath_add_sysfs_link()
1197 kobj = &disk_to_dev(head->disk)->kobj; in nvme_mpath_add_sysfs_link()
1203 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_add_sysfs_link()
1205 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_add_sysfs_link()
1206 srcu_read_lock_held(&head->srcu)) { in nvme_mpath_add_sysfs_link()
1237 dev_err(disk_to_dev(ns->head->disk), in nvme_mpath_add_sysfs_link()
1244 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_add_sysfs_link()
1256 kobj = &disk_to_dev(ns->head->disk)->kobj; in nvme_mpath_remove_sysfs_link()
1288 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
1289 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
1293 void nvme_mpath_remove_disk(struct nvme_ns_head *head) in nvme_mpath_remove_disk() argument
1297 if (!head->disk) in nvme_mpath_remove_disk()
1300 mutex_lock(&head->subsys->lock); in nvme_mpath_remove_disk()
1310 if (!list_empty(&head->list)) in nvme_mpath_remove_disk()
1313 if (head->delayed_removal_secs) { in nvme_mpath_remove_disk()
1320 mod_delayed_work(nvme_wq, &head->remove_work, in nvme_mpath_remove_disk()
1321 head->delayed_removal_secs * HZ); in nvme_mpath_remove_disk()
1323 list_del_init(&head->entry); in nvme_mpath_remove_disk()
1327 mutex_unlock(&head->subsys->lock); in nvme_mpath_remove_disk()
1329 nvme_remove_head(head); in nvme_mpath_remove_disk()
1332 void nvme_mpath_put_disk(struct nvme_ns_head *head) in nvme_mpath_put_disk() argument
1334 if (!head->disk) in nvme_mpath_put_disk()
1337 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_put_disk()
1338 flush_work(&head->requeue_work); in nvme_mpath_put_disk()
1339 flush_work(&head->partition_scan_work); in nvme_mpath_put_disk()
1340 put_disk(head->disk); in nvme_mpath_put_disk()