// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>

#include "rnbd-clt.h"

MODULE_DESCRIPTION("RDMA Network Block Device Client");
MODULE_LICENSE("GPL");

static int rnbd_client_major;
static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);

/*
 * Maximum number of partitions an instance can have.
 * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
 */
#define RNBD_PART_BITS 6

static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
{
        return refcount_inc_not_zero(&sess->refcount);
}

static void free_sess(struct rnbd_clt_session *sess);

static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
{
        might_sleep();

        if (refcount_dec_and_test(&sess->refcount))
                free_sess(sess);
}

static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
{
        might_sleep();

        if (!refcount_dec_and_test(&dev->refcount))
                return;

        mutex_lock(&ida_lock);
        ida_simple_remove(&index_ida, dev->clt_device_id);
        mutex_unlock(&ida_lock);
        kfree(dev->hw_queues);
        rnbd_clt_put_sess(dev->sess);
        mutex_destroy(&dev->lock);
        kfree(dev);
}

static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
{
        return refcount_inc_not_zero(&dev->refcount);
}

static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
                                 const struct rnbd_msg_open_rsp *rsp)
{
        struct rnbd_clt_session *sess = dev->sess;

        if (!rsp->logical_block_size)
                return -EINVAL;
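
        /* Cache the device geometry and limits reported by the server (all wire fields are little-endian). */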

        dev->device_id = le32_to_cpu(rsp->device_id);
        dev->nsectors = le64_to_cpu(rsp->nsectors);
        dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
        dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
        dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
        dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
        dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
        dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
        dev->secure_discard = le16_to_cpu(rsp->secure_discard);
        dev->rotational = rsp->rotational;

        dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
        dev->max_segments = BMAX_SEGMENTS;

        dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
                                    le32_to_cpu(rsp->max_hw_sectors));
        dev->max_segments = min_t(u16, dev->max_segments,
                                  le16_to_cpu(rsp->max_segments));

        return 0;
}

static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
                                    size_t new_nsectors)
{
        int err = 0;

        rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
                      dev->nsectors, new_nsectors);
        dev->nsectors = new_nsectors;
        set_capacity(dev->gd, dev->nsectors);
        err = revalidate_disk(dev->gd);
        if (err)
                rnbd_clt_err(dev,
                             "Failed to change device size from %zu to %zu, err: %d\n",
                             dev->nsectors, new_nsectors, err);
        return err;
}

static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
                                struct rnbd_msg_open_rsp *rsp)
{
        int err = 0;

        mutex_lock(&dev->lock);
        if (dev->dev_state == DEV_STATE_UNMAPPED) {
                rnbd_clt_info(dev,
                              "Ignoring Open-Response message from server for unmapped device\n");
                err = -ENOENT;
                goto out;
        }
        if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
                u64 nsectors = le64_to_cpu(rsp->nsectors);

                /*
                 * If the device was remapped and the size changed in the
                 * meantime we need to revalidate it
                 */
                if (dev->nsectors != nsectors)
                        rnbd_clt_change_capacity(dev, nsectors);
                rnbd_clt_info(dev, "Device online, device remapped successfully\n");
        }
        err = rnbd_clt_set_dev_attr(dev, rsp);
        if (err)
                goto out;
        dev->dev_state = DEV_STATE_MAPPED;

out:
        mutex_unlock(&dev->lock);

        return err;
}

int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
{
        int ret = 0;

        mutex_lock(&dev->lock);
        if (dev->dev_state != DEV_STATE_MAPPED) {
                pr_err("Failed to set new size of the device, device is not opened\n");
                ret = -ENOENT;
                goto out;
        }
        ret = rnbd_clt_change_capacity(dev, newsize);

out:
        mutex_unlock(&dev->lock);

        return ret;
}

static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
{
        if (WARN_ON(!q->hctx))
                return;

        /* We can come here from interrupt, thus async=true */
        blk_mq_run_hw_queue(q->hctx, true);
}

enum {
        RNBD_DELAY_IFBUSY = -1,
};

/**
 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
 * @sess: Session to find a queue for
 * @cpu: Cpu to start the search from
 *
 * Description:
 *     Each CPU has a list of HW queues, which need to be rerun. If a list
 *     is not empty - it is marked with a bit. This function finds the first
 *     set bit in a bitmap and returns the corresponding CPU list.
 */
static struct rnbd_cpu_qlist *
rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
{
        int bit;

        /* Search from cpu to nr_cpu_ids */
        bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
        if (bit < nr_cpu_ids) {
                return per_cpu_ptr(sess->cpu_queues, bit);
        } else if (cpu != 0) {
                /* Search from 0 to cpu */
                bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
                if (bit < cpu)
                        return per_cpu_ptr(sess->cpu_queues, bit);
        }

        return NULL;
}

static inline int nxt_cpu(int cpu)
{
        return (cpu + 1) % nr_cpu_ids;
}

/**
 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
 * @sess: Session to rerun a queue on
 *
 * Description:
 *     Each CPU has its own list of HW queues, which should be rerun.
 *     Function finds such list with HW queues, takes a list lock, picks up
 *     the first HW queue out of the list and requeues it.
 *
 * Return:
 *     True if the queue was requeued, false otherwise.
 *
 * Context:
 *     Does not matter.
 */
static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
{
        struct rnbd_queue *q = NULL;
        struct rnbd_cpu_qlist *cpu_q;
        unsigned long flags;
        int *cpup;

        /*
         * To keep fairness and not to let other queues starve we always
         * try to wake up someone else in round-robin manner. That of course
         * increases latency but queues always have a chance to be executed.
         */
        cpup = get_cpu_ptr(sess->cpu_rr);
        for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
             cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
                if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
                        continue;
                if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
                        goto unlock;
                q = list_first_entry_or_null(&cpu_q->requeue_list,
                                             typeof(*q), requeue_list);
                if (WARN_ON(!q))
                        goto clear_bit;
                list_del_init(&q->requeue_list);
                clear_bit_unlock(0, &q->in_list);

                if (list_empty(&cpu_q->requeue_list)) {
                        /* Clear bit if nothing is left */
clear_bit:
                        clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
                }
unlock:
                spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);

                if (q)
                        break;
        }

        /**
         * Saves the CPU that is going to be requeued on the per-cpu var. Just
         * incrementing it doesn't work because rnbd_get_cpu_qlist() will
         * always return the first CPU with something on the queue list when the
         * value stored on the var is greater than the last CPU with something
         * on the list.
         */
        if (cpu_q)
                *cpup = cpu_q->cpu;
        put_cpu_var(sess->cpu_rr);

        if (q)
                rnbd_clt_dev_requeue(q);

        return q;
}

/**
 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
 *                            session is idling (there are no requests
 *                            in-flight).
 * @sess: Session to rerun the queues on
 *
 * Description:
 *     This function tries to rerun all stopped queues if there are no
 *     requests in-flight anymore. This function tries to solve an obvious
 *     problem, when the number of tags is smaller than the number of queues
 *     (hctx), which are stopped and put to sleep. If the last permit, which
 *     has been just put, does not wake up all the remaining queues (hctxs),
 *     IO requests hang forever.
 *
 *     That can happen when all permits, say N of them, have been exhausted
 *     from one CPU, and we have many block devices per session, say M.
 *     Each block device has its own queue (hctx) for each CPU, so eventually
 *     we can put that number of queues (hctxs) to sleep: M x nr_cpu_ids.
 *     If the number of permits N < M x nr_cpu_ids, we finally get an IO hang.
 *
 *     To avoid this hang, the last caller of rnbd_put_permit() (the last
 *     caller is the one who observes sess->busy == 0) must wake up all
 *     remaining queues.
 *
 * Context:
 *     Does not matter.
 */
static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
{
        bool requeued;

        do {
                requeued = rnbd_rerun_if_needed(sess);
        } while (atomic_read(&sess->busy) == 0 && requeued);
}

static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
                                           enum rtrs_clt_con_type con_type,
                                           int wait)
{
        struct rtrs_permit *permit;

        permit = rtrs_clt_get_permit(sess->rtrs, con_type,
                                     wait ? RTRS_PERMIT_WAIT :
                                     RTRS_PERMIT_NOWAIT);
        if (likely(permit))
                /* We have a subtle rare case here, when all permits can be
                 * consumed before busy counter increased. This is safe,
                 * because loser will get NULL as a permit, observe 0 busy
                 * counter and immediately restart the queue himself.
                 */
                atomic_inc(&sess->busy);

        return permit;
}

static void rnbd_put_permit(struct rnbd_clt_session *sess,
                            struct rtrs_permit *permit)
{
        rtrs_clt_put_permit(sess->rtrs, permit);
        atomic_dec(&sess->busy);
        /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
         * and then check queue bits.
         */
        smp_mb__after_atomic();
        rnbd_rerun_all_if_idle(sess);
}

static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
                                   enum rtrs_clt_con_type con_type,
                                   int wait)
{
        struct rnbd_iu *iu;
        struct rtrs_permit *permit;

        permit = rnbd_get_permit(sess, con_type,
                                 wait ? RTRS_PERMIT_WAIT :
                                 RTRS_PERMIT_NOWAIT);
        if (unlikely(!permit))
                return NULL;
        iu = rtrs_permit_to_pdu(permit);
        iu->permit = permit;
        /*
         * 1st reference is dropped after finishing sending a "user" message,
         * 2nd reference is dropped after confirmation with the response is
         * returned.
         * 1st and 2nd can happen in any order, so the rnbd_iu should be
         * released (rtrs_permit returned to rtrs) only after both
         * are finished.
         */
        atomic_set(&iu->refcount, 2);
        init_waitqueue_head(&iu->comp.wait);
        iu->comp.errno = INT_MAX;

        return iu;
}

static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
        if (atomic_dec_and_test(&iu->refcount))
                rnbd_put_permit(sess, iu->permit);
}

static void rnbd_softirq_done_fn(struct request *rq)
{
        struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
        struct rnbd_clt_session *sess = dev->sess;
        struct rnbd_iu *iu;

        iu = blk_mq_rq_to_pdu(rq);
        rnbd_put_permit(sess, iu->permit);
        blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
}

static void msg_io_conf(void *priv, int errno)
{
        struct rnbd_iu *iu = priv;
        struct rnbd_clt_dev *dev = iu->dev;
        struct request *rq = iu->rq;
        int rw = rq_data_dir(rq);

        iu->errno = errno;

        blk_mq_complete_request(rq);

        if (errno)
                rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
                                 rw == READ ? "read" : "write", errno);
"read" : "write", errno); 414*f7a7a5c2SJack Wang } 415*f7a7a5c2SJack Wang 416*f7a7a5c2SJack Wang static void wake_up_iu_comp(struct rnbd_iu *iu, int errno) 417*f7a7a5c2SJack Wang { 418*f7a7a5c2SJack Wang iu->comp.errno = errno; 419*f7a7a5c2SJack Wang wake_up(&iu->comp.wait); 420*f7a7a5c2SJack Wang } 421*f7a7a5c2SJack Wang 422*f7a7a5c2SJack Wang static void msg_conf(void *priv, int errno) 423*f7a7a5c2SJack Wang { 424*f7a7a5c2SJack Wang struct rnbd_iu *iu = priv; 425*f7a7a5c2SJack Wang 426*f7a7a5c2SJack Wang iu->errno = errno; 427*f7a7a5c2SJack Wang schedule_work(&iu->work); 428*f7a7a5c2SJack Wang } 429*f7a7a5c2SJack Wang 430*f7a7a5c2SJack Wang enum wait_type { 431*f7a7a5c2SJack Wang NO_WAIT = 0, 432*f7a7a5c2SJack Wang WAIT = 1 433*f7a7a5c2SJack Wang }; 434*f7a7a5c2SJack Wang 435*f7a7a5c2SJack Wang static int send_usr_msg(struct rtrs_clt *rtrs, int dir, 436*f7a7a5c2SJack Wang struct rnbd_iu *iu, struct kvec *vec, size_t nr, 437*f7a7a5c2SJack Wang size_t len, struct scatterlist *sg, unsigned int sg_len, 438*f7a7a5c2SJack Wang void (*conf)(struct work_struct *work), 439*f7a7a5c2SJack Wang int *errno, enum wait_type wait) 440*f7a7a5c2SJack Wang { 441*f7a7a5c2SJack Wang int err; 442*f7a7a5c2SJack Wang struct rtrs_clt_req_ops req_ops; 443*f7a7a5c2SJack Wang 444*f7a7a5c2SJack Wang INIT_WORK(&iu->work, conf); 445*f7a7a5c2SJack Wang req_ops = (struct rtrs_clt_req_ops) { 446*f7a7a5c2SJack Wang .priv = iu, 447*f7a7a5c2SJack Wang .conf_fn = msg_conf, 448*f7a7a5c2SJack Wang }; 449*f7a7a5c2SJack Wang err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit, 450*f7a7a5c2SJack Wang vec, nr, len, sg, sg_len); 451*f7a7a5c2SJack Wang if (!err && wait) { 452*f7a7a5c2SJack Wang wait_event(iu->comp.wait, iu->comp.errno != INT_MAX); 453*f7a7a5c2SJack Wang *errno = iu->comp.errno; 454*f7a7a5c2SJack Wang } else { 455*f7a7a5c2SJack Wang *errno = 0; 456*f7a7a5c2SJack Wang } 457*f7a7a5c2SJack Wang 458*f7a7a5c2SJack Wang return err; 459*f7a7a5c2SJack Wang } 460*f7a7a5c2SJack Wang 461*f7a7a5c2SJack Wang static void msg_close_conf(struct work_struct *work) 462*f7a7a5c2SJack Wang { 463*f7a7a5c2SJack Wang struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work); 464*f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = iu->dev; 465*f7a7a5c2SJack Wang 466*f7a7a5c2SJack Wang wake_up_iu_comp(iu, iu->errno); 467*f7a7a5c2SJack Wang rnbd_put_iu(dev->sess, iu); 468*f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 469*f7a7a5c2SJack Wang } 470*f7a7a5c2SJack Wang 471*f7a7a5c2SJack Wang static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait) 472*f7a7a5c2SJack Wang { 473*f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 474*f7a7a5c2SJack Wang struct rnbd_msg_close msg; 475*f7a7a5c2SJack Wang struct rnbd_iu *iu; 476*f7a7a5c2SJack Wang struct kvec vec = { 477*f7a7a5c2SJack Wang .iov_base = &msg, 478*f7a7a5c2SJack Wang .iov_len = sizeof(msg) 479*f7a7a5c2SJack Wang }; 480*f7a7a5c2SJack Wang int err, errno; 481*f7a7a5c2SJack Wang 482*f7a7a5c2SJack Wang iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT); 483*f7a7a5c2SJack Wang if (!iu) 484*f7a7a5c2SJack Wang return -ENOMEM; 485*f7a7a5c2SJack Wang 486*f7a7a5c2SJack Wang iu->buf = NULL; 487*f7a7a5c2SJack Wang iu->dev = dev; 488*f7a7a5c2SJack Wang 489*f7a7a5c2SJack Wang sg_mark_end(&iu->sglist[0]); 490*f7a7a5c2SJack Wang 491*f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE); 492*f7a7a5c2SJack Wang msg.device_id = cpu_to_le32(device_id); 493*f7a7a5c2SJack Wang 494*f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_dev(dev)); 495*f7a7a5c2SJack Wang err = 
        err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
                           msg_close_conf, &errno, wait);
        if (err) {
                rnbd_clt_put_dev(dev);
                rnbd_put_iu(sess, iu);
        } else {
                err = errno;
        }

        rnbd_put_iu(sess, iu);
        return err;
}

static void msg_open_conf(struct work_struct *work)
{
        struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
        struct rnbd_msg_open_rsp *rsp = iu->buf;
        struct rnbd_clt_dev *dev = iu->dev;
        int errno = iu->errno;

        if (errno) {
                rnbd_clt_err(dev,
                             "Opening failed, server responded: %d\n",
                             errno);
        } else {
                errno = process_msg_open_rsp(dev, rsp);
                if (errno) {
                        u32 device_id = le32_to_cpu(rsp->device_id);
                        /*
                         * If server thinks it's fine, but we fail to process
                         * then be nice and send a close to server.
                         */
                        (void)send_msg_close(dev, device_id, NO_WAIT);
                }
        }
        kfree(rsp);
        wake_up_iu_comp(iu, errno);
        rnbd_put_iu(dev->sess, iu);
        rnbd_clt_put_dev(dev);
}

static void msg_sess_info_conf(struct work_struct *work)
{
        struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
        struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
        struct rnbd_clt_session *sess = iu->sess;

        if (!iu->errno)
                sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);

        kfree(rsp);
        wake_up_iu_comp(iu, iu->errno);
        rnbd_put_iu(sess, iu);
        rnbd_clt_put_sess(sess);
}

static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
{
        struct rnbd_clt_session *sess = dev->sess;
        struct rnbd_msg_open_rsp *rsp;
        struct rnbd_msg_open msg;
        struct rnbd_iu *iu;
        struct kvec vec = {
                .iov_base = &msg,
                .iov_len  = sizeof(msg)
        };
        int err, errno;

        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
        if (!iu) {
                kfree(rsp);
                return -ENOMEM;
        }

        iu->buf = rsp;
        iu->dev = dev;
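
        /* Receive the server's open response directly into @rsp via a single-entry scatterlist. */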
        sg_init_one(iu->sglist, rsp, sizeof(*rsp));

        msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
        msg.access_mode = dev->access_mode;
        strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));

        WARN_ON(!rnbd_clt_get_dev(dev));
        err = send_usr_msg(sess->rtrs, READ, iu,
                           &vec, 1, sizeof(*rsp), iu->sglist, 1,
                           msg_open_conf, &errno, wait);
        if (err) {
                rnbd_clt_put_dev(dev);
                rnbd_put_iu(sess, iu);
                kfree(rsp);
        } else {
                err = errno;
        }

        rnbd_put_iu(sess, iu);
        return err;
}

static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
{
        struct rnbd_msg_sess_info_rsp *rsp;
        struct rnbd_msg_sess_info msg;
        struct rnbd_iu *iu;
        struct kvec vec = {
                .iov_base = &msg,
                .iov_len  = sizeof(msg)
        };
        int err, errno;

        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
        if (!iu) {
                kfree(rsp);
                return -ENOMEM;
        }

        iu->buf = rsp;
        iu->sess = sess;

        sg_init_one(iu->sglist, rsp, sizeof(*rsp));

        msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
        msg.ver = RNBD_PROTO_VER_MAJOR;

        if (!rnbd_clt_get_sess(sess)) {
                /*
                 * That can happen only in one case, when RTRS has reestablished
                 * the connection and link_ev() is called, but the session is
                 * almost dead, the last reference on the session is put and the
                 * caller is waiting for RTRS to close everything.
                 */
                err = -ENODEV;
                goto put_iu;
        }
        err = send_usr_msg(sess->rtrs, READ, iu,
                           &vec, 1, sizeof(*rsp), iu->sglist, 1,
                           msg_sess_info_conf, &errno, wait);
        if (err) {
                rnbd_clt_put_sess(sess);
put_iu:
                rnbd_put_iu(sess, iu);
                kfree(rsp);
        } else {
                err = errno;
        }

        rnbd_put_iu(sess, iu);
        return err;
}

static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
{
        struct rnbd_clt_dev *dev;

        mutex_lock(&sess->lock);
        list_for_each_entry(dev, &sess->devs_list, list) {
                rnbd_clt_err(dev, "Device disconnected.\n");

                mutex_lock(&dev->lock);
                if (dev->dev_state == DEV_STATE_MAPPED)
                        dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
                mutex_unlock(&dev->lock);
        }
        mutex_unlock(&sess->lock);
}

static void remap_devs(struct rnbd_clt_session *sess)
{
        struct rnbd_clt_dev *dev;
        struct rtrs_attrs attrs;
        int err;

        /*
         * Careful here: we are called from RTRS link event directly,
         * thus we can't send any RTRS request and wait for response
         * or RTRS will not be able to complete request with failure
         * if something goes wrong (failing of outstanding requests
         * happens exactly from the context where we are blocking now).
         *
         * So to avoid deadlocks each usr message sent from here must
         * be asynchronous.
         */

        err = send_msg_sess_info(sess, NO_WAIT);
        if (err) {
                pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
                return;
        }

        rtrs_clt_query(sess->rtrs, &attrs);
        mutex_lock(&sess->lock);
        sess->max_io_size = attrs.max_io_size;

        list_for_each_entry(dev, &sess->devs_list, list) {
                bool skip;

                mutex_lock(&dev->lock);
                skip = (dev->dev_state == DEV_STATE_INIT);
                mutex_unlock(&dev->lock);
                if (skip)
                        /*
                         * When device is establishing connection for the first
                         * time - do not remap, it will be closed soon.
                         */
                        continue;

                rnbd_clt_info(dev, "session reconnected, remapping device\n");
                err = send_msg_open(dev, NO_WAIT);
                if (err) {
                        rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
                        break;
                }
        }
        mutex_unlock(&sess->lock);
}

static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
        struct rnbd_clt_session *sess = priv;

        switch (ev) {
        case RTRS_CLT_LINK_EV_DISCONNECTED:
                set_dev_states_to_disconnected(sess);
                break;
        case RTRS_CLT_LINK_EV_RECONNECTED:
                remap_devs(sess);
                break;
        default:
                pr_err("Unknown session event received (%d), session: %s\n",
                       ev, sess->sessname);
        }
}

static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
{
        unsigned int cpu;
        struct rnbd_cpu_qlist *cpu_q;

        for_each_possible_cpu(cpu) {
                cpu_q = per_cpu_ptr(cpu_queues, cpu);

                cpu_q->cpu = cpu;
                INIT_LIST_HEAD(&cpu_q->requeue_list);
                spin_lock_init(&cpu_q->requeue_lock);
        }
}

static void destroy_mq_tags(struct rnbd_clt_session *sess)
{
        if (sess->tag_set.tags)
                blk_mq_free_tag_set(&sess->tag_set);
}

static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
{
        sess->rtrs_ready = true;
        wake_up_all(&sess->rtrs_waitq);
}

static void close_rtrs(struct rnbd_clt_session *sess)
{
        might_sleep();

        if (!IS_ERR_OR_NULL(sess->rtrs)) {
                rtrs_clt_close(sess->rtrs);
                sess->rtrs = NULL;
                wake_up_rtrs_waiters(sess);
        }
}

static void free_sess(struct rnbd_clt_session *sess)
{
        WARN_ON(!list_empty(&sess->devs_list));

        might_sleep();

        close_rtrs(sess);
        destroy_mq_tags(sess);
        if (!list_empty(&sess->list)) {
                mutex_lock(&sess_lock);
                list_del(&sess->list);
                mutex_unlock(&sess_lock);
        }
        free_percpu(sess->cpu_queues);
        free_percpu(sess->cpu_rr);
        mutex_destroy(&sess->lock);
        kfree(sess);
}

static struct rnbd_clt_session *alloc_sess(const char *sessname)
{
        struct rnbd_clt_session *sess;
        int err, cpu;

        sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
        if (!sess)
                return ERR_PTR(-ENOMEM);
        strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
        atomic_set(&sess->busy, 0);
        mutex_init(&sess->lock);
        INIT_LIST_HEAD(&sess->devs_list);
        INIT_LIST_HEAD(&sess->list);
        bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
        init_waitqueue_head(&sess->rtrs_waitq);
        refcount_set(&sess->refcount, 1);

        sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
        if (!sess->cpu_queues) {
                err = -ENOMEM;
                goto err;
        }
        rnbd_init_cpu_qlists(sess->cpu_queues);

        /*
         * That is a simple percpu variable which stores cpu indices, which are
         * incremented on each access. We need that for the sake of fairness
         * to wake up queues in a round-robin manner.
         */
        sess->cpu_rr = alloc_percpu(int);
        if (!sess->cpu_rr) {
                err = -ENOMEM;
                goto err;
        }
        for_each_possible_cpu(cpu)
                * per_cpu_ptr(sess->cpu_rr, cpu) = cpu;

        return sess;

err:
        free_sess(sess);

        return ERR_PTR(err);
}

static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
{
        wait_event(sess->rtrs_waitq, sess->rtrs_ready);
        if (IS_ERR_OR_NULL(sess->rtrs))
                return -ECONNRESET;

        return 0;
}

static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
        __releases(&sess_lock)
        __acquires(&sess_lock)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
        if (IS_ERR_OR_NULL(sess->rtrs)) {
                finish_wait(&sess->rtrs_waitq, &wait);
                return;
        }
        mutex_unlock(&sess_lock);
        /* loop in caller, see __find_and_get_sess().
         * You can't leave mutex locked and call schedule(), you will catch a
         * deadlock with a caller of free_sess(), which has just put the last
         * reference and is about to take the sess_lock in order to delete
         * the session from the list.
         */
        schedule();
        mutex_lock(&sess_lock);
}

static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
        __releases(&sess_lock)
        __acquires(&sess_lock)
{
        struct rnbd_clt_session *sess, *sn;
        int err;

again:
        list_for_each_entry_safe(sess, sn, &sess_list, list) {
                if (strcmp(sessname, sess->sessname))
                        continue;

                if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
                        /*
                         * No RTRS connection, session is dying.
                         */
                        continue;

                if (rnbd_clt_get_sess(sess)) {
                        /*
                         * Alive session is found, wait for RTRS connection.
                         */
                        mutex_unlock(&sess_lock);
                        err = wait_for_rtrs_connection(sess);
                        if (err)
                                rnbd_clt_put_sess(sess);
                        mutex_lock(&sess_lock);

                        if (err)
                                /* Session is dying, repeat the loop */
                                goto again;

                        return sess;
                }
                /*
                 * Ref is 0, session is dying, wait for RTRS disconnect
                 * in order to avoid session names clashes.
                 */
                wait_for_rtrs_disconnection(sess);
                /*
                 * RTRS is disconnected and soon session will be freed,
                 * so repeat a loop.
                 */
                goto again;
        }

        return NULL;
}

static struct
rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
{
        struct rnbd_clt_session *sess = NULL;

        mutex_lock(&sess_lock);
        sess = __find_and_get_sess(sessname);
        if (!sess) {
                sess = alloc_sess(sessname);
                if (sess) {
                        list_add(&sess->list, &sess_list);
                        *first = true;
                } else {
                        mutex_unlock(&sess_lock);
                        return ERR_PTR(-ENOMEM);
                }
        } else
                *first = false;
        mutex_unlock(&sess_lock);

        return sess;
}

static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
        struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;

        if (dev->read_only && (mode & FMODE_WRITE))
                return -EPERM;

        if (dev->dev_state == DEV_STATE_UNMAPPED ||
            !rnbd_clt_get_dev(dev))
                return -EIO;

        return 0;
}

static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
{
        struct rnbd_clt_dev *dev = gen->private_data;

        rnbd_clt_put_dev(dev);
}

static int rnbd_client_getgeo(struct block_device *block_device,
                              struct hd_geometry *geo)
{
        u64 size;
        struct rnbd_clt_dev *dev;

        dev = block_device->bd_disk->private_data;
        size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
        geo->cylinders = size >> 6;     /* size/64 */
        geo->heads = 4;
        geo->sectors = 16;
        geo->start = 0;

        return 0;
}

static const struct block_device_operations rnbd_client_ops = {
        .owner   = THIS_MODULE,
        .open    = rnbd_client_open,
        .release = rnbd_client_release,
        .getgeo  = rnbd_client_getgeo
};

/* The amount of data that belongs to an I/O and the amount of data that
 * should be read or written to the disk (bi_size) can differ.
 *
 * E.g. When WRITE_SAME is used, only a small amount of data is
 * transferred that is then written repeatedly over a lot of sectors.
 *
 * Get the size of data to be transferred via RTRS by summing up the size
 * of the scatter-gather list entries.
 */
static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
{
        struct scatterlist *sg;
        size_t tsize = 0;
        int i;

        for_each_sg(sglist, sg, len, i)
                tsize += sg->length;
        return tsize;
}

static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
                                    struct request *rq,
                                    struct rnbd_iu *iu)
{
        struct rtrs_clt *rtrs = dev->sess->rtrs;
        struct rtrs_permit *permit = iu->permit;
        struct rnbd_msg_io msg;
        struct rtrs_clt_req_ops req_ops;
        unsigned int sg_cnt = 0;
        struct kvec vec;
        size_t size;
        int err;

        iu->rq = rq;
        iu->dev = dev;
        msg.sector = cpu_to_le64(blk_rq_pos(rq));
        msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
        msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
        msg.prio = cpu_to_le16(req_get_ioprio(rq));

        /*
         * We only support discards with single segment for now.
         * See queue limits.
         */
        if (req_op(rq) != REQ_OP_DISCARD)
                sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);

        if (sg_cnt == 0)
                /* Do not forget to mark the end */
                sg_mark_end(&iu->sglist[0]);

        msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
        msg.device_id = cpu_to_le32(dev->device_id);

        vec = (struct kvec) {
                .iov_base = &msg,
                .iov_len  = sizeof(msg)
        };
        size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
        req_ops = (struct rtrs_clt_req_ops) {
                .priv = iu,
                .conf_fn = msg_io_conf,
        };
        err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
                               &vec, 1, size, iu->sglist, sg_cnt);
        if (unlikely(err)) {
                rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
                                err);
                return err;
        }

        return 0;
}

/**
 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
 * @dev: Device to be checked
 * @q: Queue to be added to the requeue list if required
 *
 * Description:
 *     If session is busy, that means someone will requeue us when resources
 *     are freed. If session is not doing anything - device is not added to
 *     the list and @false is returned.
 */
static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
                                        struct rnbd_queue *q)
{
        struct rnbd_clt_session *sess = dev->sess;
        struct rnbd_cpu_qlist *cpu_q;
        unsigned long flags;
        bool added = true;
        bool need_set;

        cpu_q = get_cpu_ptr(sess->cpu_queues);
        spin_lock_irqsave(&cpu_q->requeue_lock, flags);

        if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
                if (WARN_ON(!list_empty(&q->requeue_list)))
                        goto unlock;

                need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
                if (need_set) {
                        set_bit(cpu_q->cpu, sess->cpu_queues_bm);
                        /* Paired with rnbd_put_permit(). Set a bit first
                         * and then observe the busy counter.
                         */
                        smp_mb__before_atomic();
                }
                if (likely(atomic_read(&sess->busy))) {
                        list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
                } else {
                        /* Very unlikely, but possible: busy counter was
                         * observed as zero.
                         * Drop all bits and return
                         * false to restart the queue by ourselves.
                         */
                        if (need_set)
                                clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
                        clear_bit_unlock(0, &q->in_list);
                        added = false;
                }
        }
unlock:
        spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
        put_cpu_ptr(sess->cpu_queues);

        return added;
}

static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
                                       struct blk_mq_hw_ctx *hctx,
                                       int delay)
{
        struct rnbd_queue *q = hctx->driver_data;

        if (delay != RNBD_DELAY_IFBUSY)
                blk_mq_delay_run_hw_queue(hctx, delay);
        else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
                /*
                 * If session is not busy we have to restart
                 * the queue ourselves.
                 */
                blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
}

static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
        struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
        int err;

        if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
                return BLK_STS_IOERR;

        iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
                                     RTRS_PERMIT_NOWAIT);
        if (unlikely(!iu->permit)) {
                rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
                return BLK_STS_RESOURCE;
        }

        blk_mq_start_request(rq);
        err = rnbd_client_xfer_request(dev, rq, iu);
        if (likely(err == 0))
                return BLK_STS_OK;
        if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
                rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
                rnbd_put_permit(dev->sess, iu->permit);
                return BLK_STS_RESOURCE;
        }

        rnbd_put_permit(dev->sess, iu->permit);
        return BLK_STS_IOERR;
}

static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
                             unsigned int hctx_idx, unsigned int numa_node)
{
        struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);

        sg_init_table(iu->sglist, BMAX_SEGMENTS);
        return 0;
}

static struct blk_mq_ops rnbd_mq_ops = {
        .queue_rq     = rnbd_queue_rq,
        .init_request = rnbd_init_request,
        .complete     = rnbd_softirq_done_fn,
};

static int setup_mq_tags(struct rnbd_clt_session *sess)
{
        struct blk_mq_tag_set *tag_set = &sess->tag_set;

        memset(tag_set, 0, sizeof(*tag_set));
        tag_set->ops = &rnbd_mq_ops;
        tag_set->queue_depth = sess->queue_depth;
        tag_set->numa_node = NUMA_NO_NODE;
        tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
                         BLK_MQ_F_TAG_SHARED;
        tag_set->cmd_size = sizeof(struct rnbd_iu);
        tag_set->nr_hw_queues = num_online_cpus();

        return blk_mq_alloc_tag_set(tag_set);
}

static struct rnbd_clt_session *
find_and_get_or_create_sess(const char *sessname,
                            const struct rtrs_addr *paths,
                            size_t path_cnt, u16 port_nr)
{
        struct rnbd_clt_session *sess;
        struct rtrs_attrs attrs;
        int err;
        bool first;
        struct rtrs_clt_ops rtrs_ops;

        sess = find_or_create_sess(sessname, &first);
        if (sess == ERR_PTR(-ENOMEM))
                return ERR_PTR(-ENOMEM);
        else if (!first)
                return sess;

        rtrs_ops = (struct rtrs_clt_ops) {
                .priv = sess,
                .link_ev = rnbd_clt_link_ev,
        };
        /*
         * Nothing was found, establish rtrs connection and proceed further.
         */
        sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
                                   paths, path_cnt, port_nr,
                                   sizeof(struct rnbd_iu),
                                   RECONNECT_DELAY, BMAX_SEGMENTS,
                                   MAX_RECONNECTS);
        if (IS_ERR(sess->rtrs)) {
                err = PTR_ERR(sess->rtrs);
                goto wake_up_and_put;
        }
        rtrs_clt_query(sess->rtrs, &attrs);
        sess->max_io_size = attrs.max_io_size;
        sess->queue_depth = attrs.queue_depth;

        err = setup_mq_tags(sess);
        if (err)
                goto close_rtrs;

        err = send_msg_sess_info(sess, WAIT);
        if (err)
                goto close_rtrs;

        wake_up_rtrs_waiters(sess);

        return sess;

close_rtrs:
        close_rtrs(sess);
put_sess:
        rnbd_clt_put_sess(sess);

        return ERR_PTR(err);

wake_up_and_put:
        wake_up_rtrs_waiters(sess);
        goto put_sess;
}

static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
                                      struct rnbd_queue *q,
                                      struct blk_mq_hw_ctx *hctx)
{
        INIT_LIST_HEAD(&q->requeue_list);
        q->dev = dev;
        q->hctx = hctx;
}

static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
        int i;
        struct blk_mq_hw_ctx *hctx;
        struct rnbd_queue *q;

        queue_for_each_hw_ctx(dev->queue, hctx, i) {
                q = &dev->hw_queues[i];
                rnbd_init_hw_queue(dev, q, hctx);
                hctx->driver_data = q;
        }
}

static int setup_mq_dev(struct rnbd_clt_dev *dev)
{
        dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
        if (IS_ERR(dev->queue)) {
                rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
                             PTR_ERR(dev->queue));
                return PTR_ERR(dev->queue);
        }
        rnbd_init_mq_hw_queues(dev);
        return 0;
}

static void setup_request_queue(struct rnbd_clt_dev *dev)
{
        blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
        blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
        blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
        blk_queue_max_write_same_sectors(dev->queue,
static void setup_request_queue(struct rnbd_clt_dev *dev)
{
	blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
	blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
	blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
	blk_queue_max_write_same_sectors(dev->queue,
					 dev->max_write_same_sectors);

	/*
	 * We don't support discards to "discontiguous" segments
	 * in one request.
	 */
	blk_queue_max_discard_segments(dev->queue, 1);

	blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
	dev->queue->limits.discard_granularity = dev->discard_granularity;
	dev->queue->limits.discard_alignment = dev->discard_alignment;
	if (dev->max_discard_sectors)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
	if (dev->secure_discard)
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);

	blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
	blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
	blk_queue_max_segments(dev->queue, dev->max_segments);
	blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
	blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
	blk_queue_write_cache(dev->queue, true, true);
	dev->queue->queuedata = dev;
}

static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
	dev->gd->major = rnbd_client_major;
	dev->gd->first_minor = idx << RNBD_PART_BITS;
	dev->gd->fops = &rnbd_client_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
		 idx);
	pr_debug("disk_name=%s, capacity=%zu\n",
		 dev->gd->disk_name,
		 dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
		 );

	set_capacity(dev->gd, dev->nsectors);

	if (dev->access_mode == RNBD_ACCESS_RO) {
		dev->read_only = true;
		set_disk_ro(dev->gd, true);
	} else {
		dev->read_only = false;
	}

	if (!dev->rotational)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
}

static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
				    struct rnbd_clt_dev *dev, int idx)
{
	int err;

	dev->size = dev->nsectors * dev->logical_block_size;

	err = setup_mq_dev(dev);
	if (err)
		return err;

	setup_request_queue(dev);

	dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
	if (!dev->gd) {
		blk_cleanup_queue(dev->queue);
		return -ENOMEM;
	}

	rnbd_clt_setup_gen_disk(dev, idx);

	return 0;
}
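/*
 * Allocate and initialise the client-side representation of a remote device:
 * the per-CPU hardware queue array, a unique index from index_ida, the
 * pathname, the device lock and the initial reference count.  A reference on
 * the session is taken on behalf of the new device.
 */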
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
				     enum rnbd_access_mode access_mode,
				     const char *pathname)
{
	struct rnbd_clt_dev *dev;
	int ret;

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues),
				 GFP_KERNEL);
	if (!dev->hw_queues) {
		ret = -ENOMEM;
		goto out_alloc;
	}

	mutex_lock(&ida_lock);
	ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
			     GFP_KERNEL);
	mutex_unlock(&ida_lock);
	if (ret < 0) {
		pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
		       pathname, sess->sessname, ret);
		goto out_queues;
	}
	dev->clt_device_id = ret;
	dev->sess = sess;
	dev->access_mode = access_mode;
	strlcpy(dev->pathname, pathname, sizeof(dev->pathname));
	mutex_init(&dev->lock);
	refcount_set(&dev->refcount, 1);
	dev->dev_state = DEV_STATE_INIT;

	/*
	 * We are called from a sysfs entry here, thus clt-sysfs is
	 * responsible that the session does not disappear.
	 */
	WARN_ON(!rnbd_clt_get_sess(sess));

	return dev;

out_queues:
	kfree(dev->hw_queues);
out_alloc:
	kfree(dev);
	return ERR_PTR(ret);
}
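/*
 * Return true if a device with the given pathname is already mapped in any
 * session.  The caller must hold sess_lock.
 */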
static bool __exists_dev(const char *pathname)
{
	struct rnbd_clt_session *sess;
	struct rnbd_clt_dev *dev;
	bool found = false;

	list_for_each_entry(sess, &sess_list, list) {
		mutex_lock(&sess->lock);
		list_for_each_entry(dev, &sess->devs_list, list) {
			if (!strncmp(dev->pathname, pathname,
				     sizeof(dev->pathname))) {
				found = true;
				break;
			}
		}
		mutex_unlock(&sess->lock);
		if (found)
			break;
	}

	return found;
}

static bool exists_devpath(const char *pathname)
{
	bool found;

	mutex_lock(&sess_lock);
	found = __exists_dev(pathname);
	mutex_unlock(&sess_lock);

	return found;
}

static bool insert_dev_if_not_exists_devpath(const char *pathname,
					     struct rnbd_clt_session *sess,
					     struct rnbd_clt_dev *dev)
{
	bool found;

	mutex_lock(&sess_lock);
	found = __exists_dev(pathname);
	if (!found) {
		mutex_lock(&sess->lock);
		list_add_tail(&dev->list, &sess->devs_list);
		mutex_unlock(&sess->lock);
	}
	mutex_unlock(&sess_lock);

	return found;
}

static void delete_dev(struct rnbd_clt_dev *dev)
{
	struct rnbd_clt_session *sess = dev->sess;

	mutex_lock(&sess->lock);
	list_del(&dev->list);
	mutex_unlock(&sess->lock);
}
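/*
 * Map a remote device: reuse or create the session named by @sessname,
 * allocate the client device, ask the server to open @pathname and register
 * the resulting block device.  Returns the new device or an ERR_PTR() on
 * error.
 */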
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
					 struct rtrs_addr *paths,
					 size_t path_cnt, u16 port_nr,
					 const char *pathname,
					 enum rnbd_access_mode access_mode)
{
	struct rnbd_clt_session *sess;
	struct rnbd_clt_dev *dev;
	int ret;

	if (exists_devpath(pathname))
		return ERR_PTR(-EEXIST);

	sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
	if (IS_ERR(sess))
		return ERR_CAST(sess);

	dev = init_dev(sess, access_mode, pathname);
	if (IS_ERR(dev)) {
		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
		       pathname, sess->sessname, PTR_ERR(dev));
		ret = PTR_ERR(dev);
		goto put_sess;
	}
	if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
		ret = -EEXIST;
		goto put_dev;
	}
	ret = send_msg_open(dev, WAIT);
	if (ret) {
		rnbd_clt_err(dev,
			     "map_device: failed, can't open remote device, err: %d\n",
			     ret);
		goto del_dev;
	}
	mutex_lock(&dev->lock);
	pr_debug("Opened remote device: session=%s, path='%s'\n",
		 sess->sessname, pathname);
	ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
	if (ret) {
		rnbd_clt_err(dev,
			     "map_device: Failed to configure device, err: %d\n",
			     ret);
		mutex_unlock(&dev->lock);
		goto del_dev;
	}

	rnbd_clt_info(dev,
		      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
		      dev->gd->disk_name, dev->nsectors,
		      dev->logical_block_size, dev->physical_block_size,
		      dev->max_write_same_sectors, dev->max_discard_sectors,
		      dev->discard_granularity, dev->discard_alignment,
		      dev->secure_discard, dev->max_segments,
		      dev->max_hw_sectors, dev->rotational);

	mutex_unlock(&dev->lock);

	add_disk(dev->gd);
	rnbd_clt_put_sess(sess);

	return dev;

del_dev:
	delete_dev(dev);
put_dev:
	rnbd_clt_put_dev(dev);
put_sess:
	rnbd_clt_put_sess(sess);

	return ERR_PTR(ret);
}

static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
	del_gendisk(dev->gd);
	blk_cleanup_queue(dev->queue);
	put_disk(dev->gd);
}

static void destroy_sysfs(struct rnbd_clt_dev *dev,
			  const struct attribute *sysfs_self)
{
	rnbd_clt_remove_dev_symlink(dev);
	if (dev->kobj.state_initialized) {
		if (sysfs_self)
			/* To avoid deadlock, remove the sysfs file itself first */
			sysfs_remove_file_self(&dev->kobj, sysfs_self);
		kobject_del(&dev->kobj);
		kobject_put(&dev->kobj);
	}
}
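/*
 * Unmap a device: mark it unmapped, remove it from the session device list,
 * tear down its sysfs entries and gendisk and, if it was still mapped, tell
 * the server to close it.  Unless @force is set, unmapping fails with -EBUSY
 * while the device is still opened by someone.
 */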
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
			  const struct attribute *sysfs_self)
{
	struct rnbd_clt_session *sess = dev->sess;
	int refcount, ret = 0;
	bool was_mapped;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_UNMAPPED) {
		rnbd_clt_info(dev, "Device is already being unmapped\n");
		ret = -EALREADY;
		goto err;
	}
	refcount = refcount_read(&dev->refcount);
	if (!force && refcount > 1) {
		rnbd_clt_err(dev,
			     "Closing device failed, device is in use, (%d device users)\n",
			     refcount - 1);
		ret = -EBUSY;
		goto err;
	}
	was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
	dev->dev_state = DEV_STATE_UNMAPPED;
	mutex_unlock(&dev->lock);

	delete_dev(dev);
	destroy_sysfs(dev, sysfs_self);
	destroy_gen_disk(dev);
	if (was_mapped && sess->rtrs)
		send_msg_close(dev, dev->device_id, WAIT);

	rnbd_clt_info(dev, "Device is unmapped\n");

	/* Likely last reference put */
	rnbd_clt_put_dev(dev);

	/*
	 * From this point on both the device and the session may already
	 * be gone.
	 */

	return 0;
err:
	mutex_unlock(&dev->lock);

	return ret;
}
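/*
 * Re-open a device that is mapped but currently disconnected by sending a
 * new open message to the server.
 */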
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
{
	int err;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
		err = 0;
	else if (dev->dev_state == DEV_STATE_UNMAPPED)
		err = -ENODEV;
	else if (dev->dev_state == DEV_STATE_MAPPED)
		err = -EALREADY;
	else
		err = -EBUSY;
	mutex_unlock(&dev->lock);
	if (!err) {
		rnbd_clt_info(dev, "Remapping device.\n");
		err = send_msg_open(dev, WAIT);
		if (err)
			rnbd_clt_err(dev, "remap_device: %d\n", err);
	}

	return err;
}

static void unmap_device_work(struct work_struct *work)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
	rnbd_clt_unmap_device(dev, true, NULL);
}
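/*
 * Called on module unload: unmap all devices and close all sessions.  The
 * per-device unmap work is queued on system_long_wq so that many devices
 * can be torn down in parallel.
 */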
static void rnbd_destroy_sessions(void)
{
	struct rnbd_clt_session *sess, *sn;
	struct rnbd_clt_dev *dev, *tn;

	/* First forbid access through the sysfs interface */
	rnbd_clt_destroy_default_group();
	rnbd_clt_destroy_sysfs_files();

	/*
	 * At this point there is no concurrent access to the sessions list
	 * and the devices list:
	 *   1. New sessions or devices can't be created - the session sysfs
	 *      files have been removed.
	 *   2. Devices or sessions can't be removed - the module reference
	 *      is taken in the unmap device sysfs callback.
	 *   3. No IO requests are in flight - each open of the block device
	 *      takes a module reference in get_disk().
	 *
	 * But there can still be user requests in flight, sent by the
	 * asynchronous send_msg_*() functions, thus before unmapping the
	 * devices the RTRS session must be explicitly closed.
	 */

	list_for_each_entry_safe(sess, sn, &sess_list, list) {
		WARN_ON(!rnbd_clt_get_sess(sess));
		close_rtrs(sess);
		list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
			/*
			 * Unmapping happens in parallel for only one reason:
			 * blk_cleanup_queue() takes around half a second, so
			 * with a huge number of devices the whole module
			 * unload procedure would otherwise take minutes.
			 */
			INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
			queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
		}
		rnbd_clt_put_sess(sess);
	}
	/* Wait for all scheduled unmap works */
	flush_workqueue(system_long_wq);
	WARN_ON(!list_empty(&sess_list));
}

static int __init rnbd_client_init(void)
{
	int err = 0;

	BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
	rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
	if (rnbd_client_major <= 0) {
		pr_err("Failed to load module, block device registration failed\n");
		return -EBUSY;
	}

	err = rnbd_clt_create_sysfs_files();
	if (err) {
		pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
		       err);
		unregister_blkdev(rnbd_client_major, "rnbd");
	}

	return err;
}

static void __exit rnbd_client_exit(void)
{
	rnbd_destroy_sessions();
	unregister_blkdev(rnbd_client_major, "rnbd");
	ida_destroy(&index_ida);
}

module_init(rnbd_client_init);
module_exit(rnbd_client_exit);