Lines matching "full:ns" in drivers/nvme/host/multipath.c (numbers on the left are file line numbers; "local"/"argument" note how ns is bound in each function)
139 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
144 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
151 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
152 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
153 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
156 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
158 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
172 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
173 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
177 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
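
These are the hits in nvme_failover_req(): the failing path is cleared from the per-node current_path cache, ana_work is queued when the status is an ANA error, and the request's bios are re-targeted at the head disk and stolen onto head->requeue_list under requeue_lock before kblockd re-drives them. A minimal userspace sketch of the steal-and-requeue step, with a toy bio list and a pthread spinlock standing in for the block-layer primitives:

#include <pthread.h>
#include <stddef.h>

struct bio_item {
    struct bio_item *next;
};

struct ns_head {
    pthread_spinlock_t requeue_lock;
    struct bio_item *requeue_list;    /* singly linked; LIFO for brevity */
};

/* Steal every bio of a failed request onto the head's requeue list,
 * in the spirit of blk_steal_bios() under head->requeue_lock. */
static void failover_requeue(struct ns_head *head, struct bio_item *bios)
{
    pthread_spin_lock(&head->requeue_lock);
    while (bios) {
        struct bio_item *next = bios->next;

        bios->next = head->requeue_list;  /* re-parent to the head node */
        head->requeue_list = bios;
        bios = next;
    }
    pthread_spin_unlock(&head->requeue_lock);
    /* the real code now kicks head->requeue_work via kblockd */
}
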
182 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_start_request() local
183 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
185 if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && in nvme_mpath_start_request()
187 atomic_inc(&ns->ctrl->nr_active); in nvme_mpath_start_request()
203 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_end_request() local
206 atomic_dec_if_positive(&ns->ctrl->nr_active); in nvme_mpath_end_request()
210 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
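
nvme_mpath_start_request() and nvme_mpath_end_request() bracket the queue-depth accounting: nr_active is bumped only under the NVME_IOPOLICY_QD policy, and completion uses atomic_dec_if_positive() so a request submitted before the policy flipped cannot drive the counter negative. A C11 model of the pairing (dec_if_positive() is rebuilt here from its kernel contract: decrement only while positive, return the old value minus one):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_active;

/* Reimplementation of the kernel's atomic_dec_if_positive() contract. */
static int dec_if_positive(atomic_int *v)
{
    int old = atomic_load(v);

    while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
        ;
    return old - 1;
}

static void start_request(bool qd_policy)
{
    if (qd_policy)
        atomic_fetch_add(&nr_active, 1);  /* counts toward queue depth */
}

static void end_request(void)
{
    /* tolerates an iopolicy switch mid-flight: a request started before
     * QD accounting was enabled must not push the counter negative */
    dec_if_positive(&nr_active);
}
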
217 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
221 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_kick_requeue_lists()
223 if (!ns->head->disk) in nvme_kick_requeue_lists()
225 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
226 if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) in nvme_kick_requeue_lists()
227 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
241 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
243 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
251 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
262 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
266 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_mpath_clear_ctrl_paths()
268 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
269 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
274 void nvme_mpath_revalidate_paths(struct nvme_ns *ns) in nvme_mpath_revalidate_paths() argument
276 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths()
282 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_revalidate_paths()
284 if (capacity != get_capacity(ns->disk)) in nvme_mpath_revalidate_paths()
285 clear_bit(NVME_NS_READY, &ns->flags); in nvme_mpath_revalidate_paths()
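
nvme_mpath_revalidate_paths() compares each sibling namespace's capacity with the head's and clears NVME_NS_READY on mismatch, benching mis-sized paths until the next rescan. The same check over a plain array:

#include <stdbool.h>
#include <stddef.h>

struct cap_path {
    unsigned long long capacity;  /* sectors, as get_capacity() returns */
    bool ready;                   /* NVME_NS_READY analogue */
};

/* Bench any sibling whose size disagrees with the head's capacity. */
static void revalidate_paths(struct cap_path *paths, size_t n,
                             unsigned long long head_capacity)
{
    for (size_t i = 0; i < n; i++)
        if (paths[i].capacity != head_capacity)
            paths[i].ready = false;
}
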
294 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
296 enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl); in nvme_path_is_disabled()
305 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
306 !test_bit(NVME_NS_READY, &ns->flags)) in nvme_path_is_disabled()
314 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
316 list_for_each_entry_srcu(ns, &head->list, siblings, in __nvme_find_path()
318 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
321 if (ns->ctrl->numa_node != NUMA_NO_NODE && in __nvme_find_path()
323 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
327 switch (ns->ana_state) { in __nvme_find_path()
331 found = ns; in __nvme_find_path()
337 fallback = ns; in __nvme_find_path()
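
__nvme_find_path() (lines 314-337) scans the sibling list and keeps the OPTIMIZED path with the smallest node_distance() to the requesting node, remembering the closest NONOPTIMIZED path as fallback. A hedged userspace model; the struct layout and the 10/20 distance table are this sketch's own:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct numa_path {
    enum ana_state ana_state;
    int numa_node;
    bool disabled;
};

/* Stand-in for node_distance(); a real SLIT gives 10 for local nodes. */
static int distance(int a, int b)
{
    return a == b ? 10 : 20;
}

/* Keep the closest OPTIMIZED path; remember the closest NONOPTIMIZED
 * one as fallback, as __nvme_find_path() does. */
static struct numa_path *find_path(struct numa_path *paths, size_t n,
                                   int node)
{
    struct numa_path *found = NULL, *fallback = NULL;
    int found_dist = INT_MAX, fallback_dist = INT_MAX;

    for (size_t i = 0; i < n; i++) {
        struct numa_path *p = &paths[i];
        int d;

        if (p->disabled)
            continue;
        d = distance(node, p->numa_node);
        if (p->ana_state == ANA_OPTIMIZED && d < found_dist) {
            found_dist = d;
            found = p;
        } else if (p->ana_state == ANA_NONOPTIMIZED &&
                   d < fallback_dist) {
            fallback_dist = d;
            fallback = p;
        }
    }
    return found ? found : fallback;
}
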
353 struct nvme_ns *ns) in nvme_next_ns() argument
355 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
357 if (ns) in nvme_next_ns()
358 return ns; in nvme_next_ns()
364 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
378 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
379 ns && ns != old; in nvme_round_robin_path()
380 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
381 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
384 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
385 found = ns; in nvme_round_robin_path()
388 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
389 found = ns; in nvme_round_robin_path()
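
nvme_round_robin_path() (lines 364-389) resumes the walk one sibling past the previously served path via nvme_next_ns() and wraps around the list: the first OPTIMIZED hit wins, and the last NONOPTIMIZED path seen is kept as fallback. An array-based analogue of the same policy:

#include <stdbool.h>
#include <stddef.h>

enum rr_state { RR_OPTIMIZED, RR_NONOPTIMIZED, RR_UNUSABLE };

struct rr_path {
    enum rr_state state;
    bool disabled;
};

/* Resume one slot past the previous choice (old) and wrap around. */
static struct rr_path *round_robin(struct rr_path *paths, size_t n,
                                   size_t old)
{
    struct rr_path *found = NULL;

    if (!n)
        return NULL;
    for (size_t i = (old + 1) % n; i != old; i = (i + 1) % n) {
        struct rr_path *p = &paths[i];

        if (p->disabled)
            continue;
        if (p->state == RR_OPTIMIZED)
            return p;          /* first optimized path wins */
        if (p->state == RR_NONOPTIMIZED)
            found = p;         /* remember a usable fallback */
    }
    return found;              /* the real code may still fall back to old */
}
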
412 struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns; in nvme_queue_depth_path() local
416 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_queue_depth_path()
418 if (nvme_path_is_disabled(ns)) in nvme_queue_depth_path()
421 depth = atomic_read(&ns->ctrl->nr_active); in nvme_queue_depth_path()
423 switch (ns->ana_state) { in nvme_queue_depth_path()
427 best_opt = ns; in nvme_queue_depth_path()
433 best_nonopt = ns; in nvme_queue_depth_path()
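
nvme_queue_depth_path() (lines 412-433) picks the path with the fewest in-flight requests, again preferring OPTIMIZED over NONOPTIMIZED. Modeled over a plain array (the real code also stops scanning early once it has seen idle paths of both classes, omitted here):

#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>

enum qd_state { QD_OPTIMIZED, QD_NONOPTIMIZED, QD_UNUSABLE };

struct qd_path {
    enum qd_state state;
    atomic_int nr_active;      /* in-flight requests on this controller */
};

/* Least-loaded OPTIMIZED path first; least-loaded NONOPTIMIZED as
 * fallback, as nvme_queue_depth_path() selects. */
static struct qd_path *queue_depth_path(struct qd_path *paths, size_t n)
{
    struct qd_path *best_opt = NULL, *best_nonopt = NULL;
    int min_opt = INT_MAX, min_nonopt = INT_MAX;

    for (size_t i = 0; i < n; i++) {
        int depth = atomic_load(&paths[i].nr_active);

        switch (paths[i].state) {
        case QD_OPTIMIZED:
            if (depth < min_opt) {
                min_opt = depth;
                best_opt = &paths[i];
            }
            break;
        case QD_NONOPTIMIZED:
            if (depth < min_nonopt) {
                min_nonopt = depth;
                best_nonopt = &paths[i];
            }
            break;
        default:
            break;
        }
    }
    return best_opt ? best_opt : best_nonopt;
}
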
447 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
449 return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && in nvme_path_is_optimized()
450 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
456 struct nvme_ns *ns; in nvme_numa_path() local
458 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_numa_path()
459 if (unlikely(!ns)) in nvme_numa_path()
461 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_numa_path()
463 return ns; in nvme_numa_path()
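
nvme_numa_path() (lines 456-463) is the fast path: it reads the per-node current_path cache under SRCU and falls back to a full scan only when the cached entry is gone or no longer optimized. A cache-then-recompute sketch; the array and helper names are illustrative:

#include <stdbool.h>
#include <stddef.h>

#define MAX_NODES 8            /* stand-in for nr_node_ids */

struct cached_path { bool optimized; };

static struct cached_path *current_path[MAX_NODES];

/* Stand-in for __nvme_find_path(): full scan, then cache the result. */
static struct cached_path *recompute_path(int node)
{
    (void)node;
    return NULL;
}

static struct cached_path *numa_path(int node)
{
    /* srcu_dereference() analogue: read the per-node cache */
    struct cached_path *p = current_path[node];

    if (p && p->optimized)
        return p;              /* fast path: cached choice still good */
    return recompute_path(node);
}
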
480 struct nvme_ns *ns; in nvme_available_path() local
485 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_available_path()
487 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) in nvme_available_path()
489 switch (nvme_ctrl_state(ns->ctrl)) { in nvme_available_path()
515 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
528 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
529 if (likely(ns)) { in nvme_ns_head_submit_bio()
530 bio_set_dev(bio, ns->disk->part0); in nvme_ns_head_submit_bio()
532 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
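
nvme_ns_head_submit_bio() (lines 515-532) remaps each bio onto whatever nvme_find_path() returns; with no usable path, nvme_available_path() (lines 480-489 below) decides between parking the bio on the requeue list and failing it outright. A compressed control-flow model with stubbed helpers:

#include <stdbool.h>
#include <stdio.h>

struct toy_bio { const char *dev; };

/* stubs standing in for nvme_find_path() / nvme_available_path() */
static const char *find_path(void)          { return "nvme1c1n1"; }
static bool        has_available_path(void) { return true; }

static void head_submit_bio(struct toy_bio *bio)
{
    const char *path = find_path();

    if (path) {
        bio->dev = path;                 /* bio_set_dev() analogue */
        printf("remapped to %s\n", bio->dev);
    } else if (has_available_path()) {
        puts("parked on requeue_list");  /* retried once a path returns */
    } else {
        puts("failed: no usable path");  /* every controller dead/deleting */
    }
}
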
566 struct nvme_ns *ns; in nvme_ns_head_get_unique_id() local
570 ns = nvme_find_path(head); in nvme_ns_head_get_unique_id()
571 if (ns) in nvme_ns_head_get_unique_id()
572 ret = nvme_ns_get_unique_id(ns, id, type); in nvme_ns_head_get_unique_id()
582 struct nvme_ns *ns; in nvme_ns_head_report_zones() local
586 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
587 if (ns) in nvme_ns_head_report_zones()
588 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
775 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
777 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
799 nvme_mpath_add_sysfs_link(ns->head); in nvme_mpath_set_live()
802 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
866 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
868 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
869 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
870 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
880 if (nvme_state_is_live(ns->ana_state) && in nvme_update_ns_ana_state()
881 nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) in nvme_update_ns_ana_state()
882 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
888 * or non-optimized) while we alloc the ns then sysfs link would in nvme_update_ns_ana_state()
899 if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags)) in nvme_update_ns_ana_state()
900 nvme_mpath_add_sysfs_link(ns->head); in nvme_update_ns_ana_state()
909 struct nvme_ns *ns; in nvme_update_ana_state() local
923 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_update_ana_state()
928 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
930 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
931 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
934 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
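
nvme_update_ana_state() (lines 909-934) walks ctrl->namespaces once per ANA group descriptor, relying on both the namespace list and the descriptor's NSID list being sorted: skip while below the target nsid, update on match, stop once past it. The same merge-join pattern over plain arrays (the names are illustrative):

#include <stddef.h>

/* Both arrays are sorted ascending, so one linear pass matches every
 * descriptor NSID against the namespace list, merge-join style. */
static void apply_ana_state(const unsigned *ns_ids, size_t n_ns,
                            const unsigned *desc_nsids, size_t n_desc,
                            int state, int *ns_state)
{
    size_t i = 0;

    for (size_t d = 0; d < n_desc; d++) {
        while (i < n_ns && ns_ids[i] < desc_nsids[d])
            i++;                     /* skip namespaces below this nsid */
        if (i < n_ns && ns_ids[i] == desc_nsids[d])
            ns_state[i] = state;     /* matched: record the ANA state */
        /* otherwise the descriptor names an nsid we don't have */
    }
}
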
1083 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
1085 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
1092 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in queue_depth_show() local
1094 if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD) in queue_depth_show()
1097 return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active)); in queue_depth_show()
1107 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in numa_nodes_show() local
1108 struct nvme_ns_head *head = ns->head; in numa_nodes_show()
1119 if (ns == current_ns) in numa_nodes_show()
1187 struct nvme_ns *ns; in nvme_mpath_add_sysfs_link() local
1200 * loop through each ns chained through the head->list and create the in nvme_mpath_add_sysfs_link()
1201 * sysfs link from head node to the ns path node in nvme_mpath_add_sysfs_link()
1205 list_for_each_entry_srcu(ns, &head->list, siblings, in nvme_mpath_add_sysfs_link()
1208 * Ensure that ns path disk node is already added otherwise we in nvme_mpath_add_sysfs_link()
1211 if (!test_bit(GD_ADDED, &ns->disk->state)) in nvme_mpath_add_sysfs_link()
1226 if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags)) in nvme_mpath_add_sysfs_link()
1229 target = disk_to_dev(ns->disk); in nvme_mpath_add_sysfs_link()
1232 * ns path gendisk kobject @target->kobj. in nvme_mpath_add_sysfs_link()
1237 dev_err(disk_to_dev(ns->head->disk), in nvme_mpath_add_sysfs_link()
1240 clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags); in nvme_mpath_add_sysfs_link()
1247 void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns) in nvme_mpath_remove_sysfs_link() argument
1252 if (!test_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags)) in nvme_mpath_remove_sysfs_link()
1255 target = disk_to_dev(ns->disk); in nvme_mpath_remove_sysfs_link()
1256 kobj = &disk_to_dev(ns->head->disk)->kobj; in nvme_mpath_remove_sysfs_link()
1259 clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags); in nvme_mpath_remove_sysfs_link()
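
nvme_mpath_add_sysfs_link() and nvme_mpath_remove_sysfs_link() (lines 1187-1259) use the NVME_NS_SYSFS_ATTR_LINK bit to keep the head-to-path link idempotent: test_and_set_bit() admits exactly one creator, and the remove path skips namespaces that never got a link. A C11 analogue of the guard (the flag and the printed side effects are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool link_created;

static void add_link(void)
{
    /* test_and_set_bit() analogue: only the first caller proceeds */
    if (atomic_exchange(&link_created, true))
        return;
    puts("sysfs_create_link(head, target)");   /* stand-in side effect */
}

static void remove_link(void)
{
    /* skip namespaces that never got a link */
    if (!atomic_exchange(&link_created, false))
        return;
    puts("sysfs_remove_link(head, target)");
}
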
1262 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) in nvme_mpath_add_disk() argument
1264 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
1270 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
1271 ns->ana_grpid = le32_to_cpu(anagrpid); in nvme_mpath_add_disk()
1272 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
1273 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
1276 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
1279 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
1280 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
1283 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
1284 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
1288 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
1289 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
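
nvme_mpath_add_disk() (lines 1262-1289) closes the listing with three initialization outcomes: an ANA controller that finds its group descriptor applies it via nvme_update_ns_ana_state(); a failed log parse sets NVME_NS_ANA_PENDING and defers to ana_work; a non-ANA controller defaults to NVME_ANA_OPTIMIZED and goes live immediately. As a small decision sketch:

#include <stdbool.h>

enum mpath_outcome {
    APPLY_DESC,          /* nvme_update_ns_ana_state() with the descriptor */
    DEFER_TO_ANA_WORK,   /* set NVME_NS_ANA_PENDING, queue ana_work */
    ASSUME_OPTIMIZED,    /* non-ANA controller: set live immediately */
};

static enum mpath_outcome add_disk_outcome(bool ctrl_uses_ana,
                                           bool desc_found)
{
    if (!ctrl_uses_ana)
        return ASSUME_OPTIMIZED;
    return desc_found ? APPLY_DESC : DEFER_TO_ANA_WORK;
}
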