Lines Matching +full:segment +full:- +full:no +full:- +full:remap

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
19 #include "rnbd-clt.h"
38 return refcount_inc_not_zero(&sess->refcount); in rnbd_clt_get_sess()
47 if (refcount_dec_and_test(&sess->refcount)) in rnbd_clt_put_sess()
55 if (!refcount_dec_and_test(&dev->refcount)) in rnbd_clt_put_dev()
58 ida_free(&index_ida, dev->clt_device_id); in rnbd_clt_put_dev()
59 kfree(dev->hw_queues); in rnbd_clt_put_dev()
60 kfree(dev->pathname); in rnbd_clt_put_dev()
61 rnbd_clt_put_sess(dev->sess); in rnbd_clt_put_dev()
62 mutex_destroy(&dev->lock); in rnbd_clt_put_dev()
68 return refcount_inc_not_zero(&dev->refcount); in rnbd_clt_get_dev()
74 if (get_capacity(dev->gd) == new_nsectors) in rnbd_clt_change_capacity()
81 get_capacity(dev->gd), new_nsectors); in rnbd_clt_change_capacity()
82 set_capacity_and_notify(dev->gd, new_nsectors); in rnbd_clt_change_capacity()
91 mutex_lock(&dev->lock); in process_msg_open_rsp()
92 if (dev->dev_state == DEV_STATE_UNMAPPED) { in process_msg_open_rsp()
94 "Ignoring Open-Response message from server for unmapped device\n"); in process_msg_open_rsp()
95 err = -ENOENT; in process_msg_open_rsp()
98 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) { in process_msg_open_rsp()
99 u64 nsectors = le64_to_cpu(rsp->nsectors); in process_msg_open_rsp()
102 gd_kobj = &disk_to_dev(dev->gd)->kobj; in process_msg_open_rsp()
106 if (!rsp->logical_block_size) { in process_msg_open_rsp()
107 err = -EINVAL; in process_msg_open_rsp()
110 dev->device_id = le32_to_cpu(rsp->device_id); in process_msg_open_rsp()
111 dev->dev_state = DEV_STATE_MAPPED; in process_msg_open_rsp()
114 mutex_unlock(&dev->lock); in process_msg_open_rsp()
123 mutex_lock(&dev->lock); in rnbd_clt_resize_disk()
124 if (dev->dev_state != DEV_STATE_MAPPED) { in rnbd_clt_resize_disk()
126 ret = -ENOENT; in rnbd_clt_resize_disk()
132 mutex_unlock(&dev->lock); in rnbd_clt_resize_disk()
139 if (WARN_ON(!q->hctx)) in rnbd_clt_dev_requeue()
143 blk_mq_run_hw_queue(q->hctx, true); in rnbd_clt_dev_requeue()
147 RNBD_DELAY_IFBUSY = -1,
151 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
157 * is not empty, it is marked with a bit. This function finds the first
166 bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu); in rnbd_get_cpu_qlist()
168 return per_cpu_ptr(sess->cpu_queues, bit); in rnbd_get_cpu_qlist()
171 bit = find_first_bit(sess->cpu_queues_bm, cpu); in rnbd_get_cpu_qlist()
173 return per_cpu_ptr(sess->cpu_queues, bit); in rnbd_get_cpu_qlist()
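
The comment above describes how rnbd_get_cpu_qlist() picks the next per-CPU requeue list: scan the CPU bitmap from the current CPU upwards, and if nothing is set there, wrap around and scan from CPU 0 up to the starting CPU. A minimal userspace sketch of that wrap-around scan over a plain bitmask (the helper next_busy_cpu() is made up for illustration and is not part of the driver):

#include <stdio.h>

/* Illustrative only: model the "this CPU's requeue list is non-empty"
 * bitmap as a plain unsigned long and scan it the same way. */
static int next_busy_cpu(unsigned long bm, int nr_cpus, int start)
{
        for (int cpu = start; cpu < nr_cpus; cpu++)     /* [start, nr_cpus) */
                if (bm & (1UL << cpu))
                        return cpu;
        for (int cpu = 0; cpu < start; cpu++)           /* wrap: [0, start) */
                if (bm & (1UL << cpu))
                        return cpu;
        return -1;                                      /* no CPU has queued work */
}

int main(void)
{
        unsigned long bm = (1UL << 1) | (1UL << 6);     /* CPUs 1 and 6 marked */

        printf("%d\n", next_busy_cpu(bm, 8, 4));        /* 6: next set bit at/after 4 */
        printf("%d\n", next_busy_cpu(bm, 8, 7));        /* 1: found after wrapping */
        return 0;
}
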
185 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
208 * try to wake up someone else in a round-robin manner. That of course in rnbd_rerun_if_needed()
211 cpup = get_cpu_ptr(sess->cpu_rr); in rnbd_rerun_if_needed()
213 cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) { in rnbd_rerun_if_needed()
214 if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags)) in rnbd_rerun_if_needed()
216 if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm)) in rnbd_rerun_if_needed()
218 q = list_first_entry_or_null(&cpu_q->requeue_list, in rnbd_rerun_if_needed()
222 list_del_init(&q->requeue_list); in rnbd_rerun_if_needed()
223 clear_bit_unlock(0, &q->in_list); in rnbd_rerun_if_needed()
225 if (list_empty(&cpu_q->requeue_list)) { in rnbd_rerun_if_needed()
228 clear_bit(cpu_q->cpu, sess->cpu_queues_bm); in rnbd_rerun_if_needed()
231 spin_unlock_irqrestore(&cpu_q->requeue_lock, flags); in rnbd_rerun_if_needed()
238 * Saves the CPU that is going to be requeued on the per-cpu var. Just in rnbd_rerun_if_needed()
245 *cpup = cpu_q->cpu; in rnbd_rerun_if_needed()
246 put_cpu_ptr(sess->cpu_rr); in rnbd_rerun_if_needed()
255 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
256 * session is idling (there are no requests
257 * in-flight).
261 * This function tries to rerun all stopped queues if there are no
262 * requests in-flight anymore. It tries to solve an obvious
274 * one who observes sess->busy == 0) must wake up all remaining queues.
285 } while (atomic_read(&sess->busy) == 0 && requeued); in rnbd_rerun_all_if_idle()
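
The rnbd_rerun_all_if_idle() comment above says the loop keeps rerunning stopped queues while the session is idle and the previous pass still found something to requeue, which closes the race with an IO permit being returned concurrently. A toy, single-threaded model of that loop shape (busy, stopped and rerun_one() are invented for illustration, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

static int busy;                /* 0 means the session is idle */
static int stopped = 3;         /* pretend three queues were stopped */

static bool rerun_one(void)
{
        if (!stopped)
                return false;   /* nothing left to rerun */
        stopped--;
        return true;
}

int main(void)
{
        bool requeued;

        do {
                requeued = rerun_one();
        } while (busy == 0 && requeued);        /* stop once busy or drained */

        printf("queues still stopped: %d\n", stopped);  /* 0 */
        return 0;
}
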
294 permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait); in rnbd_get_permit()
301 atomic_inc(&sess->busy); in rnbd_get_permit()
309 rtrs_clt_put_permit(sess->rtrs, permit); in rnbd_put_permit()
310 atomic_dec(&sess->busy); in rnbd_put_permit()
335 iu->permit = permit; in rnbd_get_iu()
344 atomic_set(&iu->refcount, 2); in rnbd_get_iu()
345 init_waitqueue_head(&iu->comp.wait); in rnbd_get_iu()
346 iu->comp.errno = INT_MAX; in rnbd_get_iu()
348 if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) { in rnbd_get_iu()
359 if (atomic_dec_and_test(&iu->refcount)) { in rnbd_put_iu()
360 sg_free_table(&iu->sgt); in rnbd_put_iu()
361 rnbd_put_permit(sess, iu->permit); in rnbd_put_iu()
368 struct rnbd_clt_dev *dev = rq->q->disk->private_data; in rnbd_softirq_done_fn()
369 struct rnbd_clt_session *sess = dev->sess; in rnbd_softirq_done_fn()
373 sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); in rnbd_softirq_done_fn()
374 rnbd_put_permit(sess, iu->permit); in rnbd_softirq_done_fn()
375 blk_mq_end_request(rq, errno_to_blk_status(iu->errno)); in rnbd_softirq_done_fn()
381 struct rnbd_clt_dev *dev = iu->dev; in msg_io_conf()
382 struct request *rq = iu->rq; in msg_io_conf()
385 iu->errno = errno; in msg_io_conf()
396 iu->comp.errno = errno; in wake_up_iu_comp()
397 wake_up(&iu->comp.wait); in wake_up_iu_comp()
404 iu->errno = errno; in msg_conf()
405 schedule_work(&iu->work); in msg_conf()
417 INIT_WORK(&iu->work, conf); in send_usr_msg()
422 err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit, in send_usr_msg()
425 wait_event(iu->comp.wait, iu->comp.errno != INT_MAX); in send_usr_msg()
426 *errno = iu->comp.errno; in send_usr_msg()
437 struct rnbd_clt_dev *dev = iu->dev; in msg_close_conf()
439 wake_up_iu_comp(iu, iu->errno); in msg_close_conf()
440 rnbd_put_iu(dev->sess, iu); in msg_close_conf()
447 struct rnbd_clt_session *sess = dev->sess; in send_msg_close()
458 return -ENOMEM; in send_msg_close()
460 iu->buf = NULL; in send_msg_close()
461 iu->dev = dev; in send_msg_close()
467 err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0, in send_msg_close()
483 struct rnbd_msg_open_rsp *rsp = iu->buf; in msg_open_conf()
484 struct rnbd_clt_dev *dev = iu->dev; in msg_open_conf()
485 int errno = iu->errno; in msg_open_conf()
489 if (dev->dev_state == DEV_STATE_INIT) in msg_open_conf()
499 u32 device_id = le32_to_cpu(rsp->device_id); in msg_open_conf()
511 rnbd_put_iu(dev->sess, iu); in msg_open_conf()
518 struct rnbd_msg_sess_info_rsp *rsp = iu->buf; in msg_sess_info_conf()
519 struct rnbd_clt_session *sess = iu->sess; in msg_sess_info_conf()
521 if (!iu->errno) in msg_sess_info_conf()
522 sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR); in msg_sess_info_conf()
525 wake_up_iu_comp(iu, iu->errno); in msg_sess_info_conf()
532 struct rnbd_clt_session *sess = dev->sess; in send_msg_open()
544 return -ENOMEM; in send_msg_open()
549 return -ENOMEM; in send_msg_open()
552 iu->buf = rsp; in send_msg_open()
553 iu->dev = dev; in send_msg_open()
555 sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); in send_msg_open()
558 msg.access_mode = dev->access_mode; in send_msg_open()
559 strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); in send_msg_open()
562 err = send_usr_msg(sess->rtrs, READ, iu, in send_msg_open()
563 &vec, sizeof(*rsp), iu->sgt.sgl, 1, in send_msg_open()
590 return -ENOMEM; in send_msg_sess_info()
595 return -ENOMEM; in send_msg_sess_info()
598 iu->buf = rsp; in send_msg_sess_info()
599 iu->sess = sess; in send_msg_sess_info()
600 sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); in send_msg_sess_info()
612 err = -ENODEV; in send_msg_sess_info()
615 err = send_usr_msg(sess->rtrs, READ, iu, in send_msg_sess_info()
616 &vec, sizeof(*rsp), iu->sgt.sgl, 1, in send_msg_sess_info()
635 mutex_lock(&sess->lock); in set_dev_states_to_disconnected()
636 list_for_each_entry(dev, &sess->devs_list, list) { in set_dev_states_to_disconnected()
639 mutex_lock(&dev->lock); in set_dev_states_to_disconnected()
640 if (dev->dev_state == DEV_STATE_MAPPED) { in set_dev_states_to_disconnected()
641 dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED; in set_dev_states_to_disconnected()
642 gd_kobj = &disk_to_dev(dev->gd)->kobj; in set_dev_states_to_disconnected()
645 mutex_unlock(&dev->lock); in set_dev_states_to_disconnected()
647 mutex_unlock(&sess->lock); in set_dev_states_to_disconnected()
669 pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err); in remap_devs()
673 err = rtrs_clt_query(sess->rtrs, &attrs); in remap_devs()
675 pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err); in remap_devs()
678 mutex_lock(&sess->lock); in remap_devs()
679 sess->max_io_size = attrs.max_io_size; in remap_devs()
681 list_for_each_entry(dev, &sess->devs_list, list) { in remap_devs()
684 mutex_lock(&dev->lock); in remap_devs()
685 skip = (dev->dev_state == DEV_STATE_INIT); in remap_devs()
686 mutex_unlock(&dev->lock); in remap_devs()
690 * time - do not remap, it will be closed soon. in remap_devs()
701 mutex_unlock(&sess->lock); in remap_devs()
717 ev, sess->sessname); in rnbd_clt_link_ev()
729 cpu_q->cpu = cpu; in rnbd_init_cpu_qlists()
730 INIT_LIST_HEAD(&cpu_q->requeue_list); in rnbd_init_cpu_qlists()
731 spin_lock_init(&cpu_q->requeue_lock); in rnbd_init_cpu_qlists()
737 if (sess->tag_set.tags) in destroy_mq_tags()
738 blk_mq_free_tag_set(&sess->tag_set); in destroy_mq_tags()
743 sess->rtrs_ready = true; in wake_up_rtrs_waiters()
744 wake_up_all(&sess->rtrs_waitq); in wake_up_rtrs_waiters()
751 if (!IS_ERR_OR_NULL(sess->rtrs)) { in close_rtrs()
752 rtrs_clt_close(sess->rtrs); in close_rtrs()
753 sess->rtrs = NULL; in close_rtrs()
760 WARN_ON(!list_empty(&sess->devs_list)); in free_sess()
766 if (!list_empty(&sess->list)) { in free_sess()
768 list_del(&sess->list); in free_sess()
771 free_percpu(sess->cpu_queues); in free_sess()
772 free_percpu(sess->cpu_rr); in free_sess()
773 mutex_destroy(&sess->lock); in free_sess()
784 return ERR_PTR(-ENOMEM); in alloc_sess()
785 strscpy(sess->sessname, sessname, sizeof(sess->sessname)); in alloc_sess()
786 atomic_set(&sess->busy, 0); in alloc_sess()
787 mutex_init(&sess->lock); in alloc_sess()
788 INIT_LIST_HEAD(&sess->devs_list); in alloc_sess()
789 INIT_LIST_HEAD(&sess->list); in alloc_sess()
790 bitmap_zero(sess->cpu_queues_bm, num_possible_cpus()); in alloc_sess()
791 init_waitqueue_head(&sess->rtrs_waitq); in alloc_sess()
792 refcount_set(&sess->refcount, 1); in alloc_sess()
794 sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist); in alloc_sess()
795 if (!sess->cpu_queues) { in alloc_sess()
796 err = -ENOMEM; in alloc_sess()
799 rnbd_init_cpu_qlists(sess->cpu_queues); in alloc_sess()
804 * to wake up queues in a round-robin manner. in alloc_sess()
806 sess->cpu_rr = alloc_percpu(int); in alloc_sess()
807 if (!sess->cpu_rr) { in alloc_sess()
808 err = -ENOMEM; in alloc_sess()
812 * per_cpu_ptr(sess->cpu_rr, cpu) = cpu; in alloc_sess()
824 wait_event(sess->rtrs_waitq, sess->rtrs_ready); in wait_for_rtrs_connection()
825 if (IS_ERR_OR_NULL(sess->rtrs)) in wait_for_rtrs_connection()
826 return -ECONNRESET; in wait_for_rtrs_connection()
837 prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE); in wait_for_rtrs_disconnection()
838 if (IS_ERR_OR_NULL(sess->rtrs)) { in wait_for_rtrs_disconnection()
839 finish_wait(&sess->rtrs_waitq, &wait); in wait_for_rtrs_disconnection()
862 if (strcmp(sessname, sess->sessname)) in __find_and_get_sess()
865 if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs)) in __find_and_get_sess()
867 * No RTRS connection, session is dying. in __find_and_get_sess()
916 list_add(&sess->list, &sess_list); in find_or_create_sess()
926 struct rnbd_clt_dev *dev = disk->private_data; in rnbd_client_open()
928 if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE)) in rnbd_client_open()
929 return -EPERM; in rnbd_client_open()
931 if (dev->dev_state == DEV_STATE_UNMAPPED || in rnbd_client_open()
933 return -EIO; in rnbd_client_open()
940 struct rnbd_clt_dev *dev = gen->private_data; in rnbd_client_release()
949 struct rnbd_clt_dev *dev = block_device->bd_disk->private_data; in rnbd_client_getgeo()
950 struct queue_limits *limit = &dev->queue->limits; in rnbd_client_getgeo()
952 size = dev->size * (limit->logical_block_size / SECTOR_SIZE); in rnbd_client_getgeo()
953 geo->cylinders = size >> 6; /* size/64 */ in rnbd_client_getgeo()
954 geo->heads = 4; in rnbd_client_getgeo()
955 geo->sectors = 16; in rnbd_client_getgeo()
956 geo->start = 0; in rnbd_client_getgeo()
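
rnbd_client_getgeo() reports a synthetic geometry of 4 heads and 16 sectors per track, i.e. 64 sectors per cylinder, which is why the cylinder count is the device size in sectors shifted right by 6. A small worked example (the 1 GiB size is just sample data):

#include <stdio.h>

int main(void)
{
        unsigned long long size_sectors = 2097152;      /* 1 GiB in 512-byte sectors */
        unsigned int heads = 4, sectors = 16;           /* 4 * 16 = 64 sectors/cylinder */
        unsigned long long cylinders = size_sectors >> 6; /* same as size / (heads * sectors) */

        printf("C/H/S = %llu/%u/%u\n", cylinders, heads, sectors); /* 32768/4/16 */
        return 0;
}
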
975 * of the scatter-gather list entries.
984 tsize += sg->length; in rnbd_clt_get_sg_size()
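
rnbd_clt_get_sg_size() simply sums the length of every scatter-gather entry to get the total IO size. An illustrative userspace analogue over a plain array of lengths (struct sg_entry and sg_total_size() are stand-ins, not the kernel's struct scatterlist API):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct scatterlist: only the field the sum needs. */
struct sg_entry { size_t length; };

static size_t sg_total_size(const struct sg_entry *sg, int sg_cnt)
{
        size_t tsize = 0;

        for (int i = 0; i < sg_cnt; i++)
                tsize += sg[i].length;  /* mirrors "tsize += sg->length" */
        return tsize;
}

int main(void)
{
        struct sg_entry sgl[] = { { 4096 }, { 8192 }, { 512 } };

        printf("%zu\n", sg_total_size(sgl, 3)); /* 12800 */
        return 0;
}
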
992 struct rtrs_clt_sess *rtrs = dev->sess->rtrs; in rnbd_client_xfer_request()
993 struct rtrs_permit *permit = iu->permit; in rnbd_client_xfer_request()
1001 iu->rq = rq; in rnbd_client_xfer_request()
1002 iu->dev = dev; in rnbd_client_xfer_request()
1009 * We only support discards/WRITE_ZEROES with a single segment for now. in rnbd_client_xfer_request()
1013 sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl); in rnbd_client_xfer_request()
1016 sg_mark_end(&iu->sgt.sgl[0]); in rnbd_client_xfer_request()
1019 msg.device_id = cpu_to_le32(dev->device_id); in rnbd_client_xfer_request()
1025 size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt); in rnbd_client_xfer_request()
1031 &vec, 1, size, iu->sgt.sgl, sg_cnt); in rnbd_client_xfer_request()
1042 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
1048 * are freed. If the session is not doing anything, the device is not added to
1054 struct rnbd_clt_session *sess = dev->sess; in rnbd_clt_dev_add_to_requeue()
1060 cpu_q = get_cpu_ptr(sess->cpu_queues); in rnbd_clt_dev_add_to_requeue()
1061 spin_lock_irqsave(&cpu_q->requeue_lock, flags); in rnbd_clt_dev_add_to_requeue()
1063 if (!test_and_set_bit_lock(0, &q->in_list)) { in rnbd_clt_dev_add_to_requeue()
1064 if (WARN_ON(!list_empty(&q->requeue_list))) in rnbd_clt_dev_add_to_requeue()
1067 need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm); in rnbd_clt_dev_add_to_requeue()
1069 set_bit(cpu_q->cpu, sess->cpu_queues_bm); in rnbd_clt_dev_add_to_requeue()
1075 if (atomic_read(&sess->busy)) { in rnbd_clt_dev_add_to_requeue()
1076 list_add_tail(&q->requeue_list, &cpu_q->requeue_list); in rnbd_clt_dev_add_to_requeue()
1083 clear_bit(cpu_q->cpu, sess->cpu_queues_bm); in rnbd_clt_dev_add_to_requeue()
1084 clear_bit_unlock(0, &q->in_list); in rnbd_clt_dev_add_to_requeue()
1089 spin_unlock_irqrestore(&cpu_q->requeue_lock, flags); in rnbd_clt_dev_add_to_requeue()
1090 put_cpu_ptr(sess->cpu_queues); in rnbd_clt_dev_add_to_requeue()
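
rnbd_clt_dev_add_to_requeue() first marks the queue (its in_list bit and the per-CPU bitmap bit) and only then re-checks sess->busy: if requests are still in flight the queue stays on the requeue list and a later completion reruns it, otherwise the marks are rolled back and the caller must rerun the queue itself. A simplified single-threaded sketch of that order of operations (all names invented, no locking shown; the real code holds requeue_lock and uses the atomic bit helpers):

#include <stdbool.h>
#include <stdio.h>

static bool q_in_list, cpu_bit_set;     /* toy state: one queue, one CPU slot */
static int sess_busy;                   /* in-flight request count */

static bool add_to_requeue(void)
{
        q_in_list = true;               /* 1. mark the queue as queued */
        cpu_bit_set = true;             /* 2. mark this CPU as having stopped queues */

        if (sess_busy)
                return true;            /* 3. a completion will rerun the queue */

        /* 4. session went idle meanwhile: undo the marks, caller reruns it */
        cpu_bit_set = false;
        q_in_list = false;
        return false;
}

int main(void)
{
        sess_busy = 0;
        printf("added=%d\n", add_to_requeue()); /* added=0: caller must rerun */
        sess_busy = 2;
        printf("added=%d\n", add_to_requeue()); /* added=1: completion path reruns */
        return 0;
}
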
1099 struct rnbd_queue *q = hctx->driver_data; in rnbd_clt_dev_kick_mq_queue()
1114 struct request *rq = bd->rq; in rnbd_queue_rq()
1115 struct rnbd_clt_dev *dev = rq->q->disk->private_data; in rnbd_queue_rq()
1120 if (dev->dev_state != DEV_STATE_MAPPED) in rnbd_queue_rq()
1123 iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON, in rnbd_queue_rq()
1125 if (!iu->permit) { in rnbd_queue_rq()
1130 iu->sgt.sgl = iu->first_sgl; in rnbd_queue_rq()
1131 err = sg_alloc_table_chained(&iu->sgt, in rnbd_queue_rq()
1132 /* Even if the request has no segment, in rnbd_queue_rq()
1136 iu->sgt.sgl, in rnbd_queue_rq()
1141 rnbd_put_permit(dev->sess, iu->permit); in rnbd_queue_rq()
1149 if (err == -EAGAIN || err == -ENOMEM) { in rnbd_queue_rq()
1153 sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); in rnbd_queue_rq()
1154 rnbd_put_permit(dev->sess, iu->permit); in rnbd_queue_rq()
1160 struct rnbd_queue *q = hctx->driver_data; in rnbd_rdma_poll()
1161 struct rnbd_clt_dev *dev = q->dev; in rnbd_rdma_poll()
1163 return rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num); in rnbd_rdma_poll()
1168 struct rnbd_clt_session *sess = set->driver_data; in rnbd_rdma_map_queues()
1171 set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus(); in rnbd_rdma_map_queues()
1172 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in rnbd_rdma_map_queues()
1173 set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus(); in rnbd_rdma_map_queues()
1174 set->map[HCTX_TYPE_READ].queue_offset = 0; in rnbd_rdma_map_queues()
1175 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); in rnbd_rdma_map_queues()
1176 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); in rnbd_rdma_map_queues()
1178 if (sess->nr_poll_queues) { in rnbd_rdma_map_queues()
1180 set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues; in rnbd_rdma_map_queues()
1181 set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset + in rnbd_rdma_map_queues()
1182 set->map[HCTX_TYPE_READ].nr_queues; in rnbd_rdma_map_queues()
1183 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); in rnbd_rdma_map_queues()
1185 sess->sessname, in rnbd_rdma_map_queues()
1186 set->map[HCTX_TYPE_DEFAULT].nr_queues, in rnbd_rdma_map_queues()
1187 set->map[HCTX_TYPE_READ].nr_queues, in rnbd_rdma_map_queues()
1188 set->map[HCTX_TYPE_POLL].nr_queues); in rnbd_rdma_map_queues()
1191 sess->sessname, in rnbd_rdma_map_queues()
1192 set->map[HCTX_TYPE_DEFAULT].nr_queues, in rnbd_rdma_map_queues()
1193 set->map[HCTX_TYPE_READ].nr_queues); in rnbd_rdma_map_queues()
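
rnbd_rdma_map_queues() lays the hardware queues out so that the DEFAULT and READ maps both cover one queue per online CPU starting at offset 0, while the POLL queues, if any were requested, start right after the READ queues. A tiny example of that arithmetic (8 CPUs and 2 poll queues are sample numbers):

#include <stdio.h>

int main(void)
{
        unsigned int online_cpus = 8, nr_poll_queues = 2;

        /* DEFAULT and READ share the same hardware queues, starting at 0. */
        unsigned int def_nr = online_cpus, def_off = 0;
        unsigned int read_nr = online_cpus, read_off = 0;

        /* POLL queues, when configured, sit after the READ queues. */
        unsigned int poll_nr = nr_poll_queues;
        unsigned int poll_off = read_off + read_nr;

        printf("default: %u@%u, read: %u@%u, poll: %u@%u\n",
               def_nr, def_off, read_nr, read_off, poll_nr, poll_off);
        /* default: 8@0, read: 8@0, poll: 2@8 */
        return 0;
}
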
1206 struct blk_mq_tag_set *tag_set = &sess->tag_set; in setup_mq_tags()
1209 tag_set->ops = &rnbd_mq_ops; in setup_mq_tags()
1210 tag_set->queue_depth = sess->queue_depth; in setup_mq_tags()
1211 tag_set->numa_node = NUMA_NO_NODE; in setup_mq_tags()
1212 tag_set->flags = BLK_MQ_F_SHOULD_MERGE | in setup_mq_tags()
1214 tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE; in setup_mq_tags()
1217 tag_set->nr_maps = sess->nr_poll_queues ? HCTX_MAX_TYPES : 2; in setup_mq_tags()
1222 tag_set->nr_hw_queues = num_online_cpus() + sess->nr_poll_queues; in setup_mq_tags()
1223 tag_set->driver_data = sess; in setup_mq_tags()
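
setup_mq_tags() sizes the shared tag set: two map types (DEFAULT and READ) when no poll queues are requested, all HCTX map types when polling is enabled, and one hardware queue per online CPU plus the requested poll queues. A quick numeric illustration (assuming HCTX_MAX_TYPES is 3, as in the current block-mq enum; the CPU and poll counts are sample values):

#include <stdio.h>

#define HCTX_MAX_TYPES 3        /* default, read, poll */

int main(void)
{
        unsigned int online_cpus = 8, nr_poll_queues = 2;

        unsigned int nr_maps = nr_poll_queues ? HCTX_MAX_TYPES : 2;
        unsigned int nr_hw_queues = online_cpus + nr_poll_queues;

        printf("nr_maps=%u nr_hw_queues=%u\n", nr_maps, nr_hw_queues); /* 3 and 10 */
        return 0;
}
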
1240 if (sess == ERR_PTR(-ENOMEM)) { in find_and_get_or_create_sess()
1241 return ERR_PTR(-ENOMEM); in find_and_get_or_create_sess()
1242 } else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) { in find_and_get_or_create_sess()
1244 * A device MUST have its own session to use polling mode. in find_and_get_or_create_sess()
1247 err = -EINVAL; in find_and_get_or_create_sess()
1256 err = -ENXIO; in find_and_get_or_create_sess()
1267 sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname, in find_and_get_or_create_sess()
1272 if (IS_ERR(sess->rtrs)) { in find_and_get_or_create_sess()
1273 err = PTR_ERR(sess->rtrs); in find_and_get_or_create_sess()
1277 err = rtrs_clt_query(sess->rtrs, &attrs); in find_and_get_or_create_sess()
1281 sess->max_io_size = attrs.max_io_size; in find_and_get_or_create_sess()
1282 sess->queue_depth = attrs.queue_depth; in find_and_get_or_create_sess()
1283 sess->nr_poll_queues = nr_poll_queues; in find_and_get_or_create_sess()
1284 sess->max_segments = attrs.max_segments; in find_and_get_or_create_sess()
1314 INIT_LIST_HEAD(&q->requeue_list); in rnbd_init_hw_queue()
1315 q->dev = dev; in rnbd_init_hw_queue()
1316 q->hctx = hctx; in rnbd_init_hw_queue()
1325 queue_for_each_hw_ctx(dev->queue, hctx, i) { in rnbd_init_mq_hw_queues()
1326 q = &dev->hw_queues[i]; in rnbd_init_mq_hw_queues()
1328 hctx->driver_data = q; in rnbd_init_mq_hw_queues()
1337 dev->gd->major = rnbd_client_major; in rnbd_clt_setup_gen_disk()
1338 dev->gd->first_minor = idx << RNBD_PART_BITS; in rnbd_clt_setup_gen_disk()
1339 dev->gd->minors = 1 << RNBD_PART_BITS; in rnbd_clt_setup_gen_disk()
1340 dev->gd->fops = &rnbd_client_ops; in rnbd_clt_setup_gen_disk()
1341 dev->gd->queue = dev->queue; in rnbd_clt_setup_gen_disk()
1342 dev->gd->private_data = dev; in rnbd_clt_setup_gen_disk()
1343 snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d", in rnbd_clt_setup_gen_disk()
1346 dev->gd->disk_name, in rnbd_clt_setup_gen_disk()
1347 le64_to_cpu(rsp->nsectors) * in rnbd_clt_setup_gen_disk()
1348 (le16_to_cpu(rsp->logical_block_size) / SECTOR_SIZE)); in rnbd_clt_setup_gen_disk()
1350 set_capacity(dev->gd, le64_to_cpu(rsp->nsectors)); in rnbd_clt_setup_gen_disk()
1352 if (dev->access_mode == RNBD_ACCESS_RO) in rnbd_clt_setup_gen_disk()
1353 set_disk_ro(dev->gd, true); in rnbd_clt_setup_gen_disk()
1355 err = add_disk(dev->gd); in rnbd_clt_setup_gen_disk()
1357 put_disk(dev->gd); in rnbd_clt_setup_gen_disk()
1366 .logical_block_size = le16_to_cpu(rsp->logical_block_size), in rnbd_client_setup_device()
1367 .physical_block_size = le16_to_cpu(rsp->physical_block_size), in rnbd_client_setup_device()
1368 .io_opt = dev->sess->max_io_size, in rnbd_client_setup_device()
1369 .max_hw_sectors = dev->sess->max_io_size / SECTOR_SIZE, in rnbd_client_setup_device()
1370 .max_hw_discard_sectors = le32_to_cpu(rsp->max_discard_sectors), in rnbd_client_setup_device()
1371 .discard_granularity = le32_to_cpu(rsp->discard_granularity), in rnbd_client_setup_device()
1372 .discard_alignment = le32_to_cpu(rsp->discard_alignment), in rnbd_client_setup_device()
1373 .max_segments = dev->sess->max_segments, in rnbd_client_setup_device()
1374 .virt_boundary_mask = SZ_4K - 1, in rnbd_client_setup_device()
1376 le32_to_cpu(rsp->max_write_zeroes_sectors), in rnbd_client_setup_device()
1378 int idx = dev->clt_device_id; in rnbd_client_setup_device()
1380 dev->size = le64_to_cpu(rsp->nsectors) * in rnbd_client_setup_device()
1381 le16_to_cpu(rsp->logical_block_size); in rnbd_client_setup_device()
1383 if (rsp->secure_discard) { in rnbd_client_setup_device()
1385 le32_to_cpu(rsp->max_discard_sectors); in rnbd_client_setup_device()
1388 if (rsp->cache_policy & RNBD_WRITEBACK) { in rnbd_client_setup_device()
1390 if (rsp->cache_policy & RNBD_FUA) in rnbd_client_setup_device()
1394 dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev); in rnbd_client_setup_device()
1395 if (IS_ERR(dev->gd)) in rnbd_client_setup_device()
1396 return PTR_ERR(dev->gd); in rnbd_client_setup_device()
1397 dev->queue = dev->gd->queue; in rnbd_client_setup_device()
1413 return ERR_PTR(-ENOMEM); in init_dev()
1419 dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues, in init_dev()
1420 sizeof(*dev->hw_queues), in init_dev()
1422 if (!dev->hw_queues) { in init_dev()
1423 ret = -ENOMEM; in init_dev()
1427 ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1, in init_dev()
1431 pathname, sess->sessname, ret); in init_dev()
1435 dev->pathname = kstrdup(pathname, GFP_KERNEL); in init_dev()
1436 if (!dev->pathname) { in init_dev()
1437 ret = -ENOMEM; in init_dev()
1441 dev->clt_device_id = ret; in init_dev()
1442 dev->sess = sess; in init_dev()
1443 dev->access_mode = access_mode; in init_dev()
1444 dev->nr_poll_queues = nr_poll_queues; in init_dev()
1445 mutex_init(&dev->lock); in init_dev()
1446 refcount_set(&dev->refcount, 1); in init_dev()
1447 dev->dev_state = DEV_STATE_INIT; in init_dev()
1450 * Here we are called from a sysfs entry, thus clt-sysfs is in init_dev()
1458 kfree(dev->hw_queues); in init_dev()
1471 if (sessname && strncmp(sess->sessname, sessname, in __exists_dev()
1472 sizeof(sess->sessname))) in __exists_dev()
1474 mutex_lock(&sess->lock); in __exists_dev()
1475 list_for_each_entry(dev, &sess->devs_list, list) { in __exists_dev()
1476 if (strlen(dev->pathname) == strlen(pathname) && in __exists_dev()
1477 !strcmp(dev->pathname, pathname)) { in __exists_dev()
1482 mutex_unlock(&sess->lock); in __exists_dev()
1504 struct rnbd_clt_session *sess = dev->sess; in insert_dev_if_not_exists_devpath()
1507 found = __exists_dev(dev->pathname, sess->sessname); in insert_dev_if_not_exists_devpath()
1509 mutex_lock(&sess->lock); in insert_dev_if_not_exists_devpath()
1510 list_add_tail(&dev->list, &sess->devs_list); in insert_dev_if_not_exists_devpath()
1511 mutex_unlock(&sess->lock); in insert_dev_if_not_exists_devpath()
1520 struct rnbd_clt_session *sess = dev->sess; in delete_dev()
1522 mutex_lock(&sess->lock); in delete_dev()
1523 list_del(&dev->list); in delete_dev()
1524 mutex_unlock(&sess->lock); in delete_dev()
1546 return ERR_PTR(-EEXIST); in rnbd_clt_map_device()
1555 pathname, sess->sessname, dev); in rnbd_clt_map_device()
1560 ret = -EEXIST; in rnbd_clt_map_device()
1566 ret = -ENOMEM; in rnbd_clt_map_device()
1572 ret = -ENOMEM; in rnbd_clt_map_device()
1576 iu->buf = rsp; in rnbd_clt_map_device()
1577 iu->dev = dev; in rnbd_clt_map_device()
1578 sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); in rnbd_clt_map_device()
1581 msg.access_mode = dev->access_mode; in rnbd_clt_map_device()
1582 strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); in rnbd_clt_map_device()
1585 ret = send_usr_msg(sess->rtrs, READ, iu, in rnbd_clt_map_device()
1586 &vec, sizeof(*rsp), iu->sgt.sgl, 1, in rnbd_clt_map_device()
1600 mutex_lock(&dev->lock); in rnbd_clt_map_device()
1602 sess->sessname, pathname); in rnbd_clt_map_device()
1608 mutex_unlock(&dev->lock); in rnbd_clt_map_device()
1614 dev->gd->disk_name, le64_to_cpu(rsp->nsectors), in rnbd_clt_map_device()
1615 le16_to_cpu(rsp->logical_block_size), in rnbd_clt_map_device()
1616 le16_to_cpu(rsp->physical_block_size), in rnbd_clt_map_device()
1617 le32_to_cpu(rsp->max_write_zeroes_sectors), in rnbd_clt_map_device()
1618 le32_to_cpu(rsp->max_discard_sectors), in rnbd_clt_map_device()
1619 le32_to_cpu(rsp->discard_granularity), in rnbd_clt_map_device()
1620 le32_to_cpu(rsp->discard_alignment), in rnbd_clt_map_device()
1621 le16_to_cpu(rsp->secure_discard), in rnbd_clt_map_device()
1622 sess->max_segments, sess->max_io_size / SECTOR_SIZE, in rnbd_clt_map_device()
1623 !!(rsp->cache_policy & RNBD_WRITEBACK), in rnbd_clt_map_device()
1624 !!(rsp->cache_policy & RNBD_FUA)); in rnbd_clt_map_device()
1626 mutex_unlock(&dev->lock); in rnbd_clt_map_device()
1634 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); in rnbd_clt_map_device()
1650 del_gendisk(dev->gd); in destroy_gen_disk()
1651 put_disk(dev->gd); in destroy_gen_disk()
1658 if (dev->kobj.state_initialized) { in destroy_sysfs()
1661 sysfs_remove_file_self(&dev->kobj, sysfs_self); in destroy_sysfs()
1662 kobject_del(&dev->kobj); in destroy_sysfs()
1663 kobject_put(&dev->kobj); in destroy_sysfs()
1670 struct rnbd_clt_session *sess = dev->sess; in rnbd_clt_unmap_device()
1674 mutex_lock(&dev->lock); in rnbd_clt_unmap_device()
1675 if (dev->dev_state == DEV_STATE_UNMAPPED) { in rnbd_clt_unmap_device()
1677 ret = -EALREADY; in rnbd_clt_unmap_device()
1680 refcount = refcount_read(&dev->refcount); in rnbd_clt_unmap_device()
1684 refcount - 1); in rnbd_clt_unmap_device()
1685 ret = -EBUSY; in rnbd_clt_unmap_device()
1688 was_mapped = (dev->dev_state == DEV_STATE_MAPPED); in rnbd_clt_unmap_device()
1689 dev->dev_state = DEV_STATE_UNMAPPED; in rnbd_clt_unmap_device()
1690 mutex_unlock(&dev->lock); in rnbd_clt_unmap_device()
1695 if (was_mapped && sess->rtrs) in rnbd_clt_unmap_device()
1696 send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT); in rnbd_clt_unmap_device()
1709 mutex_unlock(&dev->lock); in rnbd_clt_unmap_device()
1718 mutex_lock(&dev->lock); in rnbd_clt_remap_device()
1719 if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) in rnbd_clt_remap_device()
1721 else if (dev->dev_state == DEV_STATE_UNMAPPED) in rnbd_clt_remap_device()
1722 err = -ENODEV; in rnbd_clt_remap_device()
1723 else if (dev->dev_state == DEV_STATE_MAPPED) in rnbd_clt_remap_device()
1724 err = -EALREADY; in rnbd_clt_remap_device()
1726 err = -EBUSY; in rnbd_clt_remap_device()
1727 mutex_unlock(&dev->lock); in rnbd_clt_remap_device()
1755 * At this point there is no concurrent access to sessions in rnbd_destroy_sessions()
1757 * 1. A new session or device can't be created - session sysfs files in rnbd_destroy_sessions()
1759 * 2. A device or session can't be removed - module reference is taken in rnbd_destroy_sessions()
1761 * 3. No IO requests are in flight - each file open of block_dev increases in rnbd_destroy_sessions()
1773 list_for_each_entry_safe(dev, tn, &sess->devs_list, list) { in rnbd_destroy_sessions()
1780 INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work); in rnbd_destroy_sessions()
1781 queue_work(rnbd_clt_wq, &dev->unmap_on_rmmod_work); in rnbd_destroy_sessions()
1803 return -EBUSY; in rnbd_client_init()
1818 err = -ENOMEM; in rnbd_client_init()