1aef9ec39SRoland Dreier /* 2aef9ec39SRoland Dreier * Copyright (c) 2005 Cisco Systems. All rights reserved. 3aef9ec39SRoland Dreier * 4aef9ec39SRoland Dreier * This software is available to you under a choice of one of two 5aef9ec39SRoland Dreier * licenses. You may choose to be licensed under the terms of the GNU 6aef9ec39SRoland Dreier * General Public License (GPL) Version 2, available from the file 7aef9ec39SRoland Dreier * COPYING in the main directory of this source tree, or the 8aef9ec39SRoland Dreier * OpenIB.org BSD license below: 9aef9ec39SRoland Dreier * 10aef9ec39SRoland Dreier * Redistribution and use in source and binary forms, with or 11aef9ec39SRoland Dreier * without modification, are permitted provided that the following 12aef9ec39SRoland Dreier * conditions are met: 13aef9ec39SRoland Dreier * 14aef9ec39SRoland Dreier * - Redistributions of source code must retain the above 15aef9ec39SRoland Dreier * copyright notice, this list of conditions and the following 16aef9ec39SRoland Dreier * disclaimer. 17aef9ec39SRoland Dreier * 18aef9ec39SRoland Dreier * - Redistributions in binary form must reproduce the above 19aef9ec39SRoland Dreier * copyright notice, this list of conditions and the following 20aef9ec39SRoland Dreier * disclaimer in the documentation and/or other materials 21aef9ec39SRoland Dreier * provided with the distribution. 22aef9ec39SRoland Dreier * 23aef9ec39SRoland Dreier * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24aef9ec39SRoland Dreier * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25aef9ec39SRoland Dreier * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26aef9ec39SRoland Dreier * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27aef9ec39SRoland Dreier * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28aef9ec39SRoland Dreier * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29aef9ec39SRoland Dreier * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30aef9ec39SRoland Dreier * SOFTWARE. 31aef9ec39SRoland Dreier */ 32aef9ec39SRoland Dreier 33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34e0bda7d8SBart Van Assche 35aef9ec39SRoland Dreier #include <linux/module.h> 36aef9ec39SRoland Dreier #include <linux/init.h> 37aef9ec39SRoland Dreier #include <linux/slab.h> 38aef9ec39SRoland Dreier #include <linux/err.h> 39aef9ec39SRoland Dreier #include <linux/string.h> 40aef9ec39SRoland Dreier #include <linux/parser.h> 41aef9ec39SRoland Dreier #include <linux/random.h> 42de25968cSTim Schmielau #include <linux/jiffies.h> 4356b5390cSBart Van Assche #include <rdma/ib_cache.h> 44aef9ec39SRoland Dreier 4560063497SArun Sharma #include <linux/atomic.h> 46aef9ec39SRoland Dreier 47aef9ec39SRoland Dreier #include <scsi/scsi.h> 48aef9ec39SRoland Dreier #include <scsi/scsi_device.h> 49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h> 5071444b97SJack Wang #include <scsi/scsi_tcq.h> 51aef9ec39SRoland Dreier #include <scsi/srp.h> 523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h> 53aef9ec39SRoland Dreier 54aef9ec39SRoland Dreier #include "ib_srp.h" 55aef9ec39SRoland Dreier 56aef9ec39SRoland Dreier #define DRV_NAME "ib_srp" 57aef9ec39SRoland Dreier #define PFX DRV_NAME ": " 58*713ef24eSBart Van Assche #define DRV_VERSION "2.0" 59*713ef24eSBart Van Assche #define DRV_RELDATE "July 26, 2015" 60aef9ec39SRoland Dreier 61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier"); 6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); 63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL"); 6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION); 6533ab3e5bSBart 
Van Assche MODULE_INFO(release_date, DRV_RELDATE); 66aef9ec39SRoland Dreier 6749248644SDavid Dillow static unsigned int srp_sg_tablesize; 6849248644SDavid Dillow static unsigned int cmd_sg_entries; 69c07d424dSDavid Dillow static unsigned int indirect_sg_entries; 70c07d424dSDavid Dillow static bool allow_ext_sg; 715cfb1782SBart Van Assche static bool prefer_fr; 72b1b8854dSBart Van Assche static bool register_always; 73aef9ec39SRoland Dreier static int topspin_workarounds = 1; 74aef9ec39SRoland Dreier 7549248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444); 7649248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries"); 7749248644SDavid Dillow 7849248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444); 7949248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries, 8049248644SDavid Dillow "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); 8149248644SDavid Dillow 82c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444); 83c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries, 84c07d424dSDavid Dillow "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); 85c07d424dSDavid Dillow 86c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444); 87c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg, 88c07d424dSDavid Dillow "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)"); 89c07d424dSDavid Dillow 90aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444); 91aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds, 92aef9ec39SRoland Dreier "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); 93aef9ec39SRoland Dreier 945cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444); 955cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr, 965cfb1782SBart Van Assche "Whether to use fast registration if both FMR and 
fast registration are supported"); 975cfb1782SBart Van Assche 98b1b8854dSBart Van Assche module_param(register_always, bool, 0444); 99b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always, 100b1b8854dSBart Van Assche "Use memory registration even for contiguous memory regions"); 101b1b8854dSBart Van Assche 1029c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops; 103ed9b2264SBart Van Assche 104a95cadb9SBart Van Assche static int srp_reconnect_delay = 10; 105a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, 106a95cadb9SBart Van Assche S_IRUGO | S_IWUSR); 107a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); 108a95cadb9SBart Van Assche 109ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15; 110ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, 111ed9b2264SBart Van Assche S_IRUGO | S_IWUSR); 112ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo, 113ed9b2264SBart Van Assche "Number of seconds between the observation of a transport" 114ed9b2264SBart Van Assche " layer error and failing all I/O. \"off\" means that this" 115ed9b2264SBart Van Assche " functionality is disabled."); 116ed9b2264SBart Van Assche 117a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600; 118ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, 119ed9b2264SBart Van Assche S_IRUGO | S_IWUSR); 120ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo, 121ed9b2264SBart Van Assche "Maximum number of seconds that the SRP transport should" 122ed9b2264SBart Van Assche " insulate transport layer errors. After this time has been" 123ed9b2264SBart Van Assche " exceeded the SCSI host is removed. Should be" 124ed9b2264SBart Van Assche " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) 125ed9b2264SBart Van Assche " if fast_io_fail_tmo has not been set. 
\"off\" means that" 126ed9b2264SBart Van Assche " this functionality is disabled."); 127ed9b2264SBart Van Assche 128d92c0da7SBart Van Assche static unsigned ch_count; 129d92c0da7SBart Van Assche module_param(ch_count, uint, 0444); 130d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count, 131d92c0da7SBart Van Assche "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); 132d92c0da7SBart Van Assche 133aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device); 1347c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data); 135509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr); 136509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr); 137aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 138aef9ec39SRoland Dreier 1393236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template; 140bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq; 1413236822bSFUJITA Tomonori 142aef9ec39SRoland Dreier static struct ib_client srp_client = { 143aef9ec39SRoland Dreier .name = "srp", 144aef9ec39SRoland Dreier .add = srp_add_one, 145aef9ec39SRoland Dreier .remove = srp_remove_one 146aef9ec39SRoland Dreier }; 147aef9ec39SRoland Dreier 148c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client; 149c1a0b23bSMichael S. 
Tsirkin 150ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp) 151ed9b2264SBart Van Assche { 152ed9b2264SBart Van Assche int tmo = *(int *)kp->arg; 153ed9b2264SBart Van Assche 154ed9b2264SBart Van Assche if (tmo >= 0) 155ed9b2264SBart Van Assche return sprintf(buffer, "%d", tmo); 156ed9b2264SBart Van Assche else 157ed9b2264SBart Van Assche return sprintf(buffer, "off"); 158ed9b2264SBart Van Assche } 159ed9b2264SBart Van Assche 160ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp) 161ed9b2264SBart Van Assche { 162ed9b2264SBart Van Assche int tmo, res; 163ed9b2264SBart Van Assche 1643fdf70acSSagi Grimberg res = srp_parse_tmo(&tmo, val); 165ed9b2264SBart Van Assche if (res) 166ed9b2264SBart Van Assche goto out; 1673fdf70acSSagi Grimberg 168a95cadb9SBart Van Assche if (kp->arg == &srp_reconnect_delay) 169a95cadb9SBart Van Assche res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, 170a95cadb9SBart Van Assche srp_dev_loss_tmo); 171a95cadb9SBart Van Assche else if (kp->arg == &srp_fast_io_fail_tmo) 172a95cadb9SBart Van Assche res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo); 173ed9b2264SBart Van Assche else 174a95cadb9SBart Van Assche res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo, 175a95cadb9SBart Van Assche tmo); 176ed9b2264SBart Van Assche if (res) 177ed9b2264SBart Van Assche goto out; 178ed9b2264SBart Van Assche *(int *)kp->arg = tmo; 179ed9b2264SBart Van Assche 180ed9b2264SBart Van Assche out: 181ed9b2264SBart Van Assche return res; 182ed9b2264SBart Van Assche } 183ed9b2264SBart Van Assche 1849c27847dSLuis R. 
Rodriguez static const struct kernel_param_ops srp_tmo_ops = { 185ed9b2264SBart Van Assche .get = srp_tmo_get, 186ed9b2264SBart Van Assche .set = srp_tmo_set, 187ed9b2264SBart Van Assche }; 188ed9b2264SBart Van Assche 189aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) 190aef9ec39SRoland Dreier { 191aef9ec39SRoland Dreier return (struct srp_target_port *) host->hostdata; 192aef9ec39SRoland Dreier } 193aef9ec39SRoland Dreier 194aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host) 195aef9ec39SRoland Dreier { 196aef9ec39SRoland Dreier return host_to_target(host)->target_name; 197aef9ec39SRoland Dreier } 198aef9ec39SRoland Dreier 1995d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target) 2005d7cbfd6SRoland Dreier { 2015d7cbfd6SRoland Dreier static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; 2023d1ff48dSRaghava Kondapalli static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d }; 2035d7cbfd6SRoland Dreier 2045d7cbfd6SRoland Dreier return topspin_workarounds && 2053d1ff48dSRaghava Kondapalli (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || 2063d1ff48dSRaghava Kondapalli !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); 2075d7cbfd6SRoland Dreier } 2085d7cbfd6SRoland Dreier 209aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, 210aef9ec39SRoland Dreier gfp_t gfp_mask, 211aef9ec39SRoland Dreier enum dma_data_direction direction) 212aef9ec39SRoland Dreier { 213aef9ec39SRoland Dreier struct srp_iu *iu; 214aef9ec39SRoland Dreier 215aef9ec39SRoland Dreier iu = kmalloc(sizeof *iu, gfp_mask); 216aef9ec39SRoland Dreier if (!iu) 217aef9ec39SRoland Dreier goto out; 218aef9ec39SRoland Dreier 219aef9ec39SRoland Dreier iu->buf = kzalloc(size, gfp_mask); 220aef9ec39SRoland Dreier if (!iu->buf) 221aef9ec39SRoland Dreier goto out_free_iu; 222aef9ec39SRoland Dreier 22305321937SGreg Kroah-Hartman iu->dma = 
ib_dma_map_single(host->srp_dev->dev, iu->buf, size, 22405321937SGreg Kroah-Hartman direction); 22505321937SGreg Kroah-Hartman if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) 226aef9ec39SRoland Dreier goto out_free_buf; 227aef9ec39SRoland Dreier 228aef9ec39SRoland Dreier iu->size = size; 229aef9ec39SRoland Dreier iu->direction = direction; 230aef9ec39SRoland Dreier 231aef9ec39SRoland Dreier return iu; 232aef9ec39SRoland Dreier 233aef9ec39SRoland Dreier out_free_buf: 234aef9ec39SRoland Dreier kfree(iu->buf); 235aef9ec39SRoland Dreier out_free_iu: 236aef9ec39SRoland Dreier kfree(iu); 237aef9ec39SRoland Dreier out: 238aef9ec39SRoland Dreier return NULL; 239aef9ec39SRoland Dreier } 240aef9ec39SRoland Dreier 241aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) 242aef9ec39SRoland Dreier { 243aef9ec39SRoland Dreier if (!iu) 244aef9ec39SRoland Dreier return; 245aef9ec39SRoland Dreier 24605321937SGreg Kroah-Hartman ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, 24705321937SGreg Kroah-Hartman iu->direction); 248aef9ec39SRoland Dreier kfree(iu->buf); 249aef9ec39SRoland Dreier kfree(iu); 250aef9ec39SRoland Dreier } 251aef9ec39SRoland Dreier 252aef9ec39SRoland Dreier static void srp_qp_event(struct ib_event *event, void *context) 253aef9ec39SRoland Dreier { 25457363d98SSagi Grimberg pr_debug("QP event %s (%d)\n", 25557363d98SSagi Grimberg ib_event_msg(event->event), event->event); 256aef9ec39SRoland Dreier } 257aef9ec39SRoland Dreier 258aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target, 259aef9ec39SRoland Dreier struct ib_qp *qp) 260aef9ec39SRoland Dreier { 261aef9ec39SRoland Dreier struct ib_qp_attr *attr; 262aef9ec39SRoland Dreier int ret; 263aef9ec39SRoland Dreier 264aef9ec39SRoland Dreier attr = kmalloc(sizeof *attr, GFP_KERNEL); 265aef9ec39SRoland Dreier if (!attr) 266aef9ec39SRoland Dreier return -ENOMEM; 267aef9ec39SRoland Dreier 26856b5390cSBart Van Assche ret = 
ib_find_cached_pkey(target->srp_host->srp_dev->dev, 269aef9ec39SRoland Dreier target->srp_host->port, 270747fe000SBart Van Assche be16_to_cpu(target->pkey), 271aef9ec39SRoland Dreier &attr->pkey_index); 272aef9ec39SRoland Dreier if (ret) 273aef9ec39SRoland Dreier goto out; 274aef9ec39SRoland Dreier 275aef9ec39SRoland Dreier attr->qp_state = IB_QPS_INIT; 276aef9ec39SRoland Dreier attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | 277aef9ec39SRoland Dreier IB_ACCESS_REMOTE_WRITE); 278aef9ec39SRoland Dreier attr->port_num = target->srp_host->port; 279aef9ec39SRoland Dreier 280aef9ec39SRoland Dreier ret = ib_modify_qp(qp, attr, 281aef9ec39SRoland Dreier IB_QP_STATE | 282aef9ec39SRoland Dreier IB_QP_PKEY_INDEX | 283aef9ec39SRoland Dreier IB_QP_ACCESS_FLAGS | 284aef9ec39SRoland Dreier IB_QP_PORT); 285aef9ec39SRoland Dreier 286aef9ec39SRoland Dreier out: 287aef9ec39SRoland Dreier kfree(attr); 288aef9ec39SRoland Dreier return ret; 289aef9ec39SRoland Dreier } 290aef9ec39SRoland Dreier 291509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch) 2929fe4bcf4SDavid Dillow { 293509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2949fe4bcf4SDavid Dillow struct ib_cm_id *new_cm_id; 2959fe4bcf4SDavid Dillow 29605321937SGreg Kroah-Hartman new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, 297509c07bcSBart Van Assche srp_cm_handler, ch); 2989fe4bcf4SDavid Dillow if (IS_ERR(new_cm_id)) 2999fe4bcf4SDavid Dillow return PTR_ERR(new_cm_id); 3009fe4bcf4SDavid Dillow 301509c07bcSBart Van Assche if (ch->cm_id) 302509c07bcSBart Van Assche ib_destroy_cm_id(ch->cm_id); 303509c07bcSBart Van Assche ch->cm_id = new_cm_id; 304509c07bcSBart Van Assche ch->path.sgid = target->sgid; 305509c07bcSBart Van Assche ch->path.dgid = target->orig_dgid; 306509c07bcSBart Van Assche ch->path.pkey = target->pkey; 307509c07bcSBart Van Assche ch->path.service_id = target->service_id; 3089fe4bcf4SDavid Dillow 3099fe4bcf4SDavid Dillow return 0; 3109fe4bcf4SDavid Dillow } 
3119fe4bcf4SDavid Dillow 312d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target) 313d1b4289eSBart Van Assche { 314d1b4289eSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 315d1b4289eSBart Van Assche struct ib_fmr_pool_param fmr_param; 316d1b4289eSBart Van Assche 317d1b4289eSBart Van Assche memset(&fmr_param, 0, sizeof(fmr_param)); 318d1b4289eSBart Van Assche fmr_param.pool_size = target->scsi_host->can_queue; 319d1b4289eSBart Van Assche fmr_param.dirty_watermark = fmr_param.pool_size / 4; 320d1b4289eSBart Van Assche fmr_param.cache = 1; 32152ede08fSBart Van Assche fmr_param.max_pages_per_fmr = dev->max_pages_per_mr; 32252ede08fSBart Van Assche fmr_param.page_shift = ilog2(dev->mr_page_size); 323d1b4289eSBart Van Assche fmr_param.access = (IB_ACCESS_LOCAL_WRITE | 324d1b4289eSBart Van Assche IB_ACCESS_REMOTE_WRITE | 325d1b4289eSBart Van Assche IB_ACCESS_REMOTE_READ); 326d1b4289eSBart Van Assche 327d1b4289eSBart Van Assche return ib_create_fmr_pool(dev->pd, &fmr_param); 328d1b4289eSBart Van Assche } 329d1b4289eSBart Van Assche 3305cfb1782SBart Van Assche /** 3315cfb1782SBart Van Assche * srp_destroy_fr_pool() - free the resources owned by a pool 3325cfb1782SBart Van Assche * @pool: Fast registration pool to be destroyed. 
3335cfb1782SBart Van Assche */ 3345cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool) 3355cfb1782SBart Van Assche { 3365cfb1782SBart Van Assche int i; 3375cfb1782SBart Van Assche struct srp_fr_desc *d; 3385cfb1782SBart Van Assche 3395cfb1782SBart Van Assche if (!pool) 3405cfb1782SBart Van Assche return; 3415cfb1782SBart Van Assche 3425cfb1782SBart Van Assche for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 3435cfb1782SBart Van Assche if (d->frpl) 3445cfb1782SBart Van Assche ib_free_fast_reg_page_list(d->frpl); 3455cfb1782SBart Van Assche if (d->mr) 3465cfb1782SBart Van Assche ib_dereg_mr(d->mr); 3475cfb1782SBart Van Assche } 3485cfb1782SBart Van Assche kfree(pool); 3495cfb1782SBart Van Assche } 3505cfb1782SBart Van Assche 3515cfb1782SBart Van Assche /** 3525cfb1782SBart Van Assche * srp_create_fr_pool() - allocate and initialize a pool for fast registration 3535cfb1782SBart Van Assche * @device: IB device to allocate fast registration descriptors for. 3545cfb1782SBart Van Assche * @pd: Protection domain associated with the FR descriptors. 3555cfb1782SBart Van Assche * @pool_size: Number of descriptors to allocate. 3565cfb1782SBart Van Assche * @max_page_list_len: Maximum fast registration work request page list length. 
3575cfb1782SBart Van Assche */ 3585cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, 3595cfb1782SBart Van Assche struct ib_pd *pd, int pool_size, 3605cfb1782SBart Van Assche int max_page_list_len) 3615cfb1782SBart Van Assche { 3625cfb1782SBart Van Assche struct srp_fr_pool *pool; 3635cfb1782SBart Van Assche struct srp_fr_desc *d; 3645cfb1782SBart Van Assche struct ib_mr *mr; 3655cfb1782SBart Van Assche struct ib_fast_reg_page_list *frpl; 3665cfb1782SBart Van Assche int i, ret = -EINVAL; 3675cfb1782SBart Van Assche 3685cfb1782SBart Van Assche if (pool_size <= 0) 3695cfb1782SBart Van Assche goto err; 3705cfb1782SBart Van Assche ret = -ENOMEM; 3715cfb1782SBart Van Assche pool = kzalloc(sizeof(struct srp_fr_pool) + 3725cfb1782SBart Van Assche pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL); 3735cfb1782SBart Van Assche if (!pool) 3745cfb1782SBart Van Assche goto err; 3755cfb1782SBart Van Assche pool->size = pool_size; 3765cfb1782SBart Van Assche pool->max_page_list_len = max_page_list_len; 3775cfb1782SBart Van Assche spin_lock_init(&pool->lock); 3785cfb1782SBart Van Assche INIT_LIST_HEAD(&pool->free_list); 3795cfb1782SBart Van Assche 3805cfb1782SBart Van Assche for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 381563b67c5SSagi Grimberg mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 382563b67c5SSagi Grimberg max_page_list_len); 3835cfb1782SBart Van Assche if (IS_ERR(mr)) { 3845cfb1782SBart Van Assche ret = PTR_ERR(mr); 3855cfb1782SBart Van Assche goto destroy_pool; 3865cfb1782SBart Van Assche } 3875cfb1782SBart Van Assche d->mr = mr; 3885cfb1782SBart Van Assche frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len); 3895cfb1782SBart Van Assche if (IS_ERR(frpl)) { 3905cfb1782SBart Van Assche ret = PTR_ERR(frpl); 3915cfb1782SBart Van Assche goto destroy_pool; 3925cfb1782SBart Van Assche } 3935cfb1782SBart Van Assche d->frpl = frpl; 3945cfb1782SBart Van Assche list_add_tail(&d->entry, &pool->free_list); 
3955cfb1782SBart Van Assche } 3965cfb1782SBart Van Assche 3975cfb1782SBart Van Assche out: 3985cfb1782SBart Van Assche return pool; 3995cfb1782SBart Van Assche 4005cfb1782SBart Van Assche destroy_pool: 4015cfb1782SBart Van Assche srp_destroy_fr_pool(pool); 4025cfb1782SBart Van Assche 4035cfb1782SBart Van Assche err: 4045cfb1782SBart Van Assche pool = ERR_PTR(ret); 4055cfb1782SBart Van Assche goto out; 4065cfb1782SBart Van Assche } 4075cfb1782SBart Van Assche 4085cfb1782SBart Van Assche /** 4095cfb1782SBart Van Assche * srp_fr_pool_get() - obtain a descriptor suitable for fast registration 4105cfb1782SBart Van Assche * @pool: Pool to obtain descriptor from. 4115cfb1782SBart Van Assche */ 4125cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool) 4135cfb1782SBart Van Assche { 4145cfb1782SBart Van Assche struct srp_fr_desc *d = NULL; 4155cfb1782SBart Van Assche unsigned long flags; 4165cfb1782SBart Van Assche 4175cfb1782SBart Van Assche spin_lock_irqsave(&pool->lock, flags); 4185cfb1782SBart Van Assche if (!list_empty(&pool->free_list)) { 4195cfb1782SBart Van Assche d = list_first_entry(&pool->free_list, typeof(*d), entry); 4205cfb1782SBart Van Assche list_del(&d->entry); 4215cfb1782SBart Van Assche } 4225cfb1782SBart Van Assche spin_unlock_irqrestore(&pool->lock, flags); 4235cfb1782SBart Van Assche 4245cfb1782SBart Van Assche return d; 4255cfb1782SBart Van Assche } 4265cfb1782SBart Van Assche 4275cfb1782SBart Van Assche /** 4285cfb1782SBart Van Assche * srp_fr_pool_put() - put an FR descriptor back in the free list 4295cfb1782SBart Van Assche * @pool: Pool the descriptor was allocated from. 4305cfb1782SBart Van Assche * @desc: Pointer to an array of fast registration descriptor pointers. 4315cfb1782SBart Van Assche * @n: Number of descriptors to put back. 
4325cfb1782SBart Van Assche * 4335cfb1782SBart Van Assche * Note: The caller must already have queued an invalidation request for 4345cfb1782SBart Van Assche * desc->mr->rkey before calling this function. 4355cfb1782SBart Van Assche */ 4365cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc, 4375cfb1782SBart Van Assche int n) 4385cfb1782SBart Van Assche { 4395cfb1782SBart Van Assche unsigned long flags; 4405cfb1782SBart Van Assche int i; 4415cfb1782SBart Van Assche 4425cfb1782SBart Van Assche spin_lock_irqsave(&pool->lock, flags); 4435cfb1782SBart Van Assche for (i = 0; i < n; i++) 4445cfb1782SBart Van Assche list_add(&desc[i]->entry, &pool->free_list); 4455cfb1782SBart Van Assche spin_unlock_irqrestore(&pool->lock, flags); 4465cfb1782SBart Van Assche } 4475cfb1782SBart Van Assche 4485cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) 4495cfb1782SBart Van Assche { 4505cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 4515cfb1782SBart Van Assche 4525cfb1782SBart Van Assche return srp_create_fr_pool(dev->dev, dev->pd, 4535cfb1782SBart Van Assche target->scsi_host->can_queue, 4545cfb1782SBart Van Assche dev->max_pages_per_mr); 4555cfb1782SBart Van Assche } 4565cfb1782SBart Van Assche 4577dad6b2eSBart Van Assche /** 4587dad6b2eSBart Van Assche * srp_destroy_qp() - destroy an RDMA queue pair 4597dad6b2eSBart Van Assche * @ch: SRP RDMA channel. 4607dad6b2eSBart Van Assche * 4617dad6b2eSBart Van Assche * Change a queue pair into the error state and wait until all receive 4627dad6b2eSBart Van Assche * completions have been processed before destroying it. This avoids that 4637dad6b2eSBart Van Assche * the receive completion handler can access the queue pair while it is 4647dad6b2eSBart Van Assche * being destroyed. 
4657dad6b2eSBart Van Assche */ 4667dad6b2eSBart Van Assche static void srp_destroy_qp(struct srp_rdma_ch *ch) 4677dad6b2eSBart Van Assche { 4687dad6b2eSBart Van Assche static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 4697dad6b2eSBart Van Assche static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID }; 4707dad6b2eSBart Van Assche struct ib_recv_wr *bad_wr; 4717dad6b2eSBart Van Assche int ret; 4727dad6b2eSBart Van Assche 4737dad6b2eSBart Van Assche /* Destroying a QP and reusing ch->done is only safe if not connected */ 474c014c8cdSBart Van Assche WARN_ON_ONCE(ch->connected); 4757dad6b2eSBart Van Assche 4767dad6b2eSBart Van Assche ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); 4777dad6b2eSBart Van Assche WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret); 4787dad6b2eSBart Van Assche if (ret) 4797dad6b2eSBart Van Assche goto out; 4807dad6b2eSBart Van Assche 4817dad6b2eSBart Van Assche init_completion(&ch->done); 4827dad6b2eSBart Van Assche ret = ib_post_recv(ch->qp, &wr, &bad_wr); 4837dad6b2eSBart Van Assche WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret); 4847dad6b2eSBart Van Assche if (ret == 0) 4857dad6b2eSBart Van Assche wait_for_completion(&ch->done); 4867dad6b2eSBart Van Assche 4877dad6b2eSBart Van Assche out: 4887dad6b2eSBart Van Assche ib_destroy_qp(ch->qp); 4897dad6b2eSBart Van Assche } 4907dad6b2eSBart Van Assche 491509c07bcSBart Van Assche static int srp_create_ch_ib(struct srp_rdma_ch *ch) 492aef9ec39SRoland Dreier { 493509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 49462154b2eSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 495aef9ec39SRoland Dreier struct ib_qp_init_attr *init_attr; 49673aa89edSIshai Rabinovitz struct ib_cq *recv_cq, *send_cq; 49773aa89edSIshai Rabinovitz struct ib_qp *qp; 498d1b4289eSBart Van Assche struct ib_fmr_pool *fmr_pool = NULL; 4995cfb1782SBart Van Assche struct srp_fr_pool *fr_pool = NULL; 5005cfb1782SBart Van Assche const int m = 1 + dev->use_fast_reg; 
5018e37210bSMatan Barak struct ib_cq_init_attr cq_attr = {}; 502aef9ec39SRoland Dreier int ret; 503aef9ec39SRoland Dreier 504aef9ec39SRoland Dreier init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); 505aef9ec39SRoland Dreier if (!init_attr) 506aef9ec39SRoland Dreier return -ENOMEM; 507aef9ec39SRoland Dreier 5087dad6b2eSBart Van Assche /* + 1 for SRP_LAST_WR_ID */ 5098e37210bSMatan Barak cq_attr.cqe = target->queue_size + 1; 5108e37210bSMatan Barak cq_attr.comp_vector = ch->comp_vector; 511509c07bcSBart Van Assche recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, 5128e37210bSMatan Barak &cq_attr); 51373aa89edSIshai Rabinovitz if (IS_ERR(recv_cq)) { 51473aa89edSIshai Rabinovitz ret = PTR_ERR(recv_cq); 515da9d2f07SRoland Dreier goto err; 516aef9ec39SRoland Dreier } 517aef9ec39SRoland Dreier 5188e37210bSMatan Barak cq_attr.cqe = m * target->queue_size; 5198e37210bSMatan Barak cq_attr.comp_vector = ch->comp_vector; 520509c07bcSBart Van Assche send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, 5218e37210bSMatan Barak &cq_attr); 52273aa89edSIshai Rabinovitz if (IS_ERR(send_cq)) { 52373aa89edSIshai Rabinovitz ret = PTR_ERR(send_cq); 524da9d2f07SRoland Dreier goto err_recv_cq; 5259c03dc9fSBart Van Assche } 5269c03dc9fSBart Van Assche 52773aa89edSIshai Rabinovitz ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP); 528aef9ec39SRoland Dreier 529aef9ec39SRoland Dreier init_attr->event_handler = srp_qp_event; 5305cfb1782SBart Van Assche init_attr->cap.max_send_wr = m * target->queue_size; 5317dad6b2eSBart Van Assche init_attr->cap.max_recv_wr = target->queue_size + 1; 532aef9ec39SRoland Dreier init_attr->cap.max_recv_sge = 1; 533aef9ec39SRoland Dreier init_attr->cap.max_send_sge = 1; 5345cfb1782SBart Van Assche init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; 535aef9ec39SRoland Dreier init_attr->qp_type = IB_QPT_RC; 53673aa89edSIshai Rabinovitz init_attr->send_cq = send_cq; 53773aa89edSIshai Rabinovitz init_attr->recv_cq = recv_cq; 538aef9ec39SRoland Dreier 
53962154b2eSBart Van Assche qp = ib_create_qp(dev->pd, init_attr); 54073aa89edSIshai Rabinovitz if (IS_ERR(qp)) { 54173aa89edSIshai Rabinovitz ret = PTR_ERR(qp); 542da9d2f07SRoland Dreier goto err_send_cq; 543aef9ec39SRoland Dreier } 544aef9ec39SRoland Dreier 54573aa89edSIshai Rabinovitz ret = srp_init_qp(target, qp); 546da9d2f07SRoland Dreier if (ret) 547da9d2f07SRoland Dreier goto err_qp; 548aef9ec39SRoland Dreier 5495cfb1782SBart Van Assche if (dev->use_fast_reg && dev->has_fr) { 5505cfb1782SBart Van Assche fr_pool = srp_alloc_fr_pool(target); 5515cfb1782SBart Van Assche if (IS_ERR(fr_pool)) { 5525cfb1782SBart Van Assche ret = PTR_ERR(fr_pool); 5535cfb1782SBart Van Assche shost_printk(KERN_WARNING, target->scsi_host, PFX 5545cfb1782SBart Van Assche "FR pool allocation failed (%d)\n", ret); 5555cfb1782SBart Van Assche goto err_qp; 5565cfb1782SBart Van Assche } 557509c07bcSBart Van Assche if (ch->fr_pool) 558509c07bcSBart Van Assche srp_destroy_fr_pool(ch->fr_pool); 559509c07bcSBart Van Assche ch->fr_pool = fr_pool; 5605cfb1782SBart Van Assche } else if (!dev->use_fast_reg && dev->has_fmr) { 561d1b4289eSBart Van Assche fmr_pool = srp_alloc_fmr_pool(target); 562d1b4289eSBart Van Assche if (IS_ERR(fmr_pool)) { 563d1b4289eSBart Van Assche ret = PTR_ERR(fmr_pool); 564d1b4289eSBart Van Assche shost_printk(KERN_WARNING, target->scsi_host, PFX 565d1b4289eSBart Van Assche "FMR pool allocation failed (%d)\n", ret); 566d1b4289eSBart Van Assche goto err_qp; 567d1b4289eSBart Van Assche } 568509c07bcSBart Van Assche if (ch->fmr_pool) 569509c07bcSBart Van Assche ib_destroy_fmr_pool(ch->fmr_pool); 570509c07bcSBart Van Assche ch->fmr_pool = fmr_pool; 571d1b4289eSBart Van Assche } 572d1b4289eSBart Van Assche 573509c07bcSBart Van Assche if (ch->qp) 5747dad6b2eSBart Van Assche srp_destroy_qp(ch); 575509c07bcSBart Van Assche if (ch->recv_cq) 576509c07bcSBart Van Assche ib_destroy_cq(ch->recv_cq); 577509c07bcSBart Van Assche if (ch->send_cq) 578509c07bcSBart Van Assche 
ib_destroy_cq(ch->send_cq); 57973aa89edSIshai Rabinovitz 580509c07bcSBart Van Assche ch->qp = qp; 581509c07bcSBart Van Assche ch->recv_cq = recv_cq; 582509c07bcSBart Van Assche ch->send_cq = send_cq; 58373aa89edSIshai Rabinovitz 584da9d2f07SRoland Dreier kfree(init_attr); 585da9d2f07SRoland Dreier return 0; 586da9d2f07SRoland Dreier 587da9d2f07SRoland Dreier err_qp: 58873aa89edSIshai Rabinovitz ib_destroy_qp(qp); 589da9d2f07SRoland Dreier 590da9d2f07SRoland Dreier err_send_cq: 59173aa89edSIshai Rabinovitz ib_destroy_cq(send_cq); 592da9d2f07SRoland Dreier 593da9d2f07SRoland Dreier err_recv_cq: 59473aa89edSIshai Rabinovitz ib_destroy_cq(recv_cq); 595da9d2f07SRoland Dreier 596da9d2f07SRoland Dreier err: 597aef9ec39SRoland Dreier kfree(init_attr); 598aef9ec39SRoland Dreier return ret; 599aef9ec39SRoland Dreier } 600aef9ec39SRoland Dreier 6014d73f95fSBart Van Assche /* 6024d73f95fSBart Van Assche * Note: this function may be called without srp_alloc_iu_bufs() having been 603509c07bcSBart Van Assche * invoked. Hence the ch->[rt]x_ring checks. 6044d73f95fSBart Van Assche */ 605509c07bcSBart Van Assche static void srp_free_ch_ib(struct srp_target_port *target, 606509c07bcSBart Van Assche struct srp_rdma_ch *ch) 607aef9ec39SRoland Dreier { 6085cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 609aef9ec39SRoland Dreier int i; 610aef9ec39SRoland Dreier 611d92c0da7SBart Van Assche if (!ch->target) 612d92c0da7SBart Van Assche return; 613d92c0da7SBart Van Assche 614509c07bcSBart Van Assche if (ch->cm_id) { 615509c07bcSBart Van Assche ib_destroy_cm_id(ch->cm_id); 616509c07bcSBart Van Assche ch->cm_id = NULL; 617394c595eSBart Van Assche } 618394c595eSBart Van Assche 619d92c0da7SBart Van Assche /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. 
*/ 620d92c0da7SBart Van Assche if (!ch->qp) 621d92c0da7SBart Van Assche return; 622d92c0da7SBart Van Assche 6235cfb1782SBart Van Assche if (dev->use_fast_reg) { 624509c07bcSBart Van Assche if (ch->fr_pool) 625509c07bcSBart Van Assche srp_destroy_fr_pool(ch->fr_pool); 6265cfb1782SBart Van Assche } else { 627509c07bcSBart Van Assche if (ch->fmr_pool) 628509c07bcSBart Van Assche ib_destroy_fmr_pool(ch->fmr_pool); 6295cfb1782SBart Van Assche } 6307dad6b2eSBart Van Assche srp_destroy_qp(ch); 631509c07bcSBart Van Assche ib_destroy_cq(ch->send_cq); 632509c07bcSBart Van Assche ib_destroy_cq(ch->recv_cq); 633aef9ec39SRoland Dreier 634d92c0da7SBart Van Assche /* 635d92c0da7SBart Van Assche * Avoid that the SCSI error handler tries to use this channel after 636d92c0da7SBart Van Assche * it has been freed. The SCSI error handler can namely continue 637d92c0da7SBart Van Assche * trying to perform recovery actions after scsi_remove_host() 638d92c0da7SBart Van Assche * returned. 639d92c0da7SBart Van Assche */ 640d92c0da7SBart Van Assche ch->target = NULL; 641d92c0da7SBart Van Assche 642509c07bcSBart Van Assche ch->qp = NULL; 643509c07bcSBart Van Assche ch->send_cq = ch->recv_cq = NULL; 64473aa89edSIshai Rabinovitz 645509c07bcSBart Van Assche if (ch->rx_ring) { 6464d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) 647509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 648509c07bcSBart Van Assche kfree(ch->rx_ring); 649509c07bcSBart Van Assche ch->rx_ring = NULL; 6504d73f95fSBart Van Assche } 651509c07bcSBart Van Assche if (ch->tx_ring) { 6524d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) 653509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 654509c07bcSBart Van Assche kfree(ch->tx_ring); 655509c07bcSBart Van Assche ch->tx_ring = NULL; 6564d73f95fSBart Van Assche } 657aef9ec39SRoland Dreier } 658aef9ec39SRoland Dreier 659aef9ec39SRoland Dreier static void srp_path_rec_completion(int status, 660aef9ec39SRoland 
Dreier struct ib_sa_path_rec *pathrec, 661509c07bcSBart Van Assche void *ch_ptr) 662aef9ec39SRoland Dreier { 663509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 664509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 665aef9ec39SRoland Dreier 666509c07bcSBart Van Assche ch->status = status; 667aef9ec39SRoland Dreier if (status) 6687aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 6697aa54bd7SDavid Dillow PFX "Got failed path rec status %d\n", status); 670aef9ec39SRoland Dreier else 671509c07bcSBart Van Assche ch->path = *pathrec; 672509c07bcSBart Van Assche complete(&ch->done); 673aef9ec39SRoland Dreier } 674aef9ec39SRoland Dreier 675509c07bcSBart Van Assche static int srp_lookup_path(struct srp_rdma_ch *ch) 676aef9ec39SRoland Dreier { 677509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 678a702adceSBart Van Assche int ret; 679a702adceSBart Van Assche 680509c07bcSBart Van Assche ch->path.numb_path = 1; 681aef9ec39SRoland Dreier 682509c07bcSBart Van Assche init_completion(&ch->done); 683aef9ec39SRoland Dreier 684509c07bcSBart Van Assche ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, 68505321937SGreg Kroah-Hartman target->srp_host->srp_dev->dev, 686aef9ec39SRoland Dreier target->srp_host->port, 687509c07bcSBart Van Assche &ch->path, 688247e020eSSean Hefty IB_SA_PATH_REC_SERVICE_ID | 689aef9ec39SRoland Dreier IB_SA_PATH_REC_DGID | 690aef9ec39SRoland Dreier IB_SA_PATH_REC_SGID | 691aef9ec39SRoland Dreier IB_SA_PATH_REC_NUMB_PATH | 692aef9ec39SRoland Dreier IB_SA_PATH_REC_PKEY, 693aef9ec39SRoland Dreier SRP_PATH_REC_TIMEOUT_MS, 694aef9ec39SRoland Dreier GFP_KERNEL, 695aef9ec39SRoland Dreier srp_path_rec_completion, 696509c07bcSBart Van Assche ch, &ch->path_query); 697509c07bcSBart Van Assche if (ch->path_query_id < 0) 698509c07bcSBart Van Assche return ch->path_query_id; 699aef9ec39SRoland Dreier 700509c07bcSBart Van Assche ret = wait_for_completion_interruptible(&ch->done); 701a702adceSBart Van Assche 
if (ret < 0) 702a702adceSBart Van Assche return ret; 703aef9ec39SRoland Dreier 704509c07bcSBart Van Assche if (ch->status < 0) 7057aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 7067aa54bd7SDavid Dillow PFX "Path record query failed\n"); 707aef9ec39SRoland Dreier 708509c07bcSBart Van Assche return ch->status; 709aef9ec39SRoland Dreier } 710aef9ec39SRoland Dreier 711d92c0da7SBart Van Assche static int srp_send_req(struct srp_rdma_ch *ch, bool multich) 712aef9ec39SRoland Dreier { 713509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 714aef9ec39SRoland Dreier struct { 715aef9ec39SRoland Dreier struct ib_cm_req_param param; 716aef9ec39SRoland Dreier struct srp_login_req priv; 717aef9ec39SRoland Dreier } *req = NULL; 718aef9ec39SRoland Dreier int status; 719aef9ec39SRoland Dreier 720aef9ec39SRoland Dreier req = kzalloc(sizeof *req, GFP_KERNEL); 721aef9ec39SRoland Dreier if (!req) 722aef9ec39SRoland Dreier return -ENOMEM; 723aef9ec39SRoland Dreier 724509c07bcSBart Van Assche req->param.primary_path = &ch->path; 725aef9ec39SRoland Dreier req->param.alternate_path = NULL; 726aef9ec39SRoland Dreier req->param.service_id = target->service_id; 727509c07bcSBart Van Assche req->param.qp_num = ch->qp->qp_num; 728509c07bcSBart Van Assche req->param.qp_type = ch->qp->qp_type; 729aef9ec39SRoland Dreier req->param.private_data = &req->priv; 730aef9ec39SRoland Dreier req->param.private_data_len = sizeof req->priv; 731aef9ec39SRoland Dreier req->param.flow_control = 1; 732aef9ec39SRoland Dreier 733aef9ec39SRoland Dreier get_random_bytes(&req->param.starting_psn, 4); 734aef9ec39SRoland Dreier req->param.starting_psn &= 0xffffff; 735aef9ec39SRoland Dreier 736aef9ec39SRoland Dreier /* 737aef9ec39SRoland Dreier * Pick some arbitrary defaults here; we could make these 738aef9ec39SRoland Dreier * module parameters if anyone cared about setting them. 
739aef9ec39SRoland Dreier */ 740aef9ec39SRoland Dreier req->param.responder_resources = 4; 741aef9ec39SRoland Dreier req->param.remote_cm_response_timeout = 20; 742aef9ec39SRoland Dreier req->param.local_cm_response_timeout = 20; 7437bb312e4SVu Pham req->param.retry_count = target->tl_retry_count; 744aef9ec39SRoland Dreier req->param.rnr_retry_count = 7; 745aef9ec39SRoland Dreier req->param.max_cm_retries = 15; 746aef9ec39SRoland Dreier 747aef9ec39SRoland Dreier req->priv.opcode = SRP_LOGIN_REQ; 748aef9ec39SRoland Dreier req->priv.tag = 0; 74949248644SDavid Dillow req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); 750aef9ec39SRoland Dreier req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 751aef9ec39SRoland Dreier SRP_BUF_FORMAT_INDIRECT); 752d92c0da7SBart Van Assche req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI : 753d92c0da7SBart Van Assche SRP_MULTICHAN_SINGLE); 7540c0450dbSRamachandra K /* 7550c0450dbSRamachandra K * In the published SRP specification (draft rev. 16a), the 7560c0450dbSRamachandra K * port identifier format is 8 bytes of ID extension followed 7570c0450dbSRamachandra K * by 8 bytes of GUID. Older drafts put the two halves in the 7580c0450dbSRamachandra K * opposite order, so that the GUID comes first. 7590c0450dbSRamachandra K * 7600c0450dbSRamachandra K * Targets conforming to these obsolete drafts can be 7610c0450dbSRamachandra K * recognized by the I/O Class they report. 
7620c0450dbSRamachandra K */ 7630c0450dbSRamachandra K if (target->io_class == SRP_REV10_IB_IO_CLASS) { 7640c0450dbSRamachandra K memcpy(req->priv.initiator_port_id, 765747fe000SBart Van Assche &target->sgid.global.interface_id, 8); 7660c0450dbSRamachandra K memcpy(req->priv.initiator_port_id + 8, 76701cb9bcbSIshai Rabinovitz &target->initiator_ext, 8); 7680c0450dbSRamachandra K memcpy(req->priv.target_port_id, &target->ioc_guid, 8); 7690c0450dbSRamachandra K memcpy(req->priv.target_port_id + 8, &target->id_ext, 8); 7700c0450dbSRamachandra K } else { 7710c0450dbSRamachandra K memcpy(req->priv.initiator_port_id, 77201cb9bcbSIshai Rabinovitz &target->initiator_ext, 8); 77301cb9bcbSIshai Rabinovitz memcpy(req->priv.initiator_port_id + 8, 774747fe000SBart Van Assche &target->sgid.global.interface_id, 8); 7750c0450dbSRamachandra K memcpy(req->priv.target_port_id, &target->id_ext, 8); 7760c0450dbSRamachandra K memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); 7770c0450dbSRamachandra K } 7780c0450dbSRamachandra K 779aef9ec39SRoland Dreier /* 780aef9ec39SRoland Dreier * Topspin/Cisco SRP targets will reject our login unless we 78101cb9bcbSIshai Rabinovitz * zero out the first 8 bytes of our initiator port ID and set 78201cb9bcbSIshai Rabinovitz * the second 8 bytes to the local node GUID. 
783aef9ec39SRoland Dreier */ 7845d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 7857aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 7867aa54bd7SDavid Dillow PFX "Topspin/Cisco initiator port ID workaround " 787aef9ec39SRoland Dreier "activated for target GUID %016llx\n", 78845c37cadSBart Van Assche be64_to_cpu(target->ioc_guid)); 789aef9ec39SRoland Dreier memset(req->priv.initiator_port_id, 0, 8); 79001cb9bcbSIshai Rabinovitz memcpy(req->priv.initiator_port_id + 8, 79105321937SGreg Kroah-Hartman &target->srp_host->srp_dev->dev->node_guid, 8); 792aef9ec39SRoland Dreier } 793aef9ec39SRoland Dreier 794509c07bcSBart Van Assche status = ib_send_cm_req(ch->cm_id, &req->param); 795aef9ec39SRoland Dreier 796aef9ec39SRoland Dreier kfree(req); 797aef9ec39SRoland Dreier 798aef9ec39SRoland Dreier return status; 799aef9ec39SRoland Dreier } 800aef9ec39SRoland Dreier 801ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target) 802ef6c49d8SBart Van Assche { 803ef6c49d8SBart Van Assche bool changed = false; 804ef6c49d8SBart Van Assche 805ef6c49d8SBart Van Assche spin_lock_irq(&target->lock); 806ef6c49d8SBart Van Assche if (target->state != SRP_TARGET_REMOVED) { 807ef6c49d8SBart Van Assche target->state = SRP_TARGET_REMOVED; 808ef6c49d8SBart Van Assche changed = true; 809ef6c49d8SBart Van Assche } 810ef6c49d8SBart Van Assche spin_unlock_irq(&target->lock); 811ef6c49d8SBart Van Assche 812ef6c49d8SBart Van Assche if (changed) 813bcc05910SBart Van Assche queue_work(srp_remove_wq, &target->remove_work); 814ef6c49d8SBart Van Assche 815ef6c49d8SBart Van Assche return changed; 816ef6c49d8SBart Van Assche } 817ef6c49d8SBart Van Assche 818aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target) 819aef9ec39SRoland Dreier { 820d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 821d92c0da7SBart Van Assche int i; 822509c07bcSBart Van Assche 823aef9ec39SRoland Dreier /* XXX should send SRP_I_LOGOUT request 
*/ 824aef9ec39SRoland Dreier 825d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 826d92c0da7SBart Van Assche ch = &target->ch[i]; 827c014c8cdSBart Van Assche ch->connected = false; 828d92c0da7SBart Van Assche if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { 8297aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 8307aa54bd7SDavid Dillow PFX "Sending CM DREQ failed\n"); 831aef9ec39SRoland Dreier } 832294c875aSBart Van Assche } 833294c875aSBart Van Assche } 834aef9ec39SRoland Dreier 835509c07bcSBart Van Assche static void srp_free_req_data(struct srp_target_port *target, 836509c07bcSBart Van Assche struct srp_rdma_ch *ch) 8378f26c9ffSDavid Dillow { 8385cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 8395cfb1782SBart Van Assche struct ib_device *ibdev = dev->dev; 8408f26c9ffSDavid Dillow struct srp_request *req; 8418f26c9ffSDavid Dillow int i; 8428f26c9ffSDavid Dillow 84347513cf4SBart Van Assche if (!ch->req_ring) 8444d73f95fSBart Van Assche return; 8454d73f95fSBart Van Assche 8464d73f95fSBart Van Assche for (i = 0; i < target->req_ring_size; ++i) { 847509c07bcSBart Van Assche req = &ch->req_ring[i]; 8485cfb1782SBart Van Assche if (dev->use_fast_reg) 8495cfb1782SBart Van Assche kfree(req->fr_list); 8505cfb1782SBart Van Assche else 8518f26c9ffSDavid Dillow kfree(req->fmr_list); 8528f26c9ffSDavid Dillow kfree(req->map_page); 853c07d424dSDavid Dillow if (req->indirect_dma_addr) { 854c07d424dSDavid Dillow ib_dma_unmap_single(ibdev, req->indirect_dma_addr, 855c07d424dSDavid Dillow target->indirect_size, 856c07d424dSDavid Dillow DMA_TO_DEVICE); 857c07d424dSDavid Dillow } 858c07d424dSDavid Dillow kfree(req->indirect_desc); 8598f26c9ffSDavid Dillow } 8604d73f95fSBart Van Assche 861509c07bcSBart Van Assche kfree(ch->req_ring); 862509c07bcSBart Van Assche ch->req_ring = NULL; 8638f26c9ffSDavid Dillow } 8648f26c9ffSDavid Dillow 865509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch) 
866b81d00bdSBart Van Assche { 867509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 868b81d00bdSBart Van Assche struct srp_device *srp_dev = target->srp_host->srp_dev; 869b81d00bdSBart Van Assche struct ib_device *ibdev = srp_dev->dev; 870b81d00bdSBart Van Assche struct srp_request *req; 8715cfb1782SBart Van Assche void *mr_list; 872b81d00bdSBart Van Assche dma_addr_t dma_addr; 873b81d00bdSBart Van Assche int i, ret = -ENOMEM; 874b81d00bdSBart Van Assche 875509c07bcSBart Van Assche ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), 876509c07bcSBart Van Assche GFP_KERNEL); 877509c07bcSBart Van Assche if (!ch->req_ring) 8784d73f95fSBart Van Assche goto out; 8794d73f95fSBart Van Assche 8804d73f95fSBart Van Assche for (i = 0; i < target->req_ring_size; ++i) { 881509c07bcSBart Van Assche req = &ch->req_ring[i]; 8825cfb1782SBart Van Assche mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), 883b81d00bdSBart Van Assche GFP_KERNEL); 8845cfb1782SBart Van Assche if (!mr_list) 8855cfb1782SBart Van Assche goto out; 8865cfb1782SBart Van Assche if (srp_dev->use_fast_reg) 8875cfb1782SBart Van Assche req->fr_list = mr_list; 8885cfb1782SBart Van Assche else 8895cfb1782SBart Van Assche req->fmr_list = mr_list; 89052ede08fSBart Van Assche req->map_page = kmalloc(srp_dev->max_pages_per_mr * 891d1b4289eSBart Van Assche sizeof(void *), GFP_KERNEL); 8925cfb1782SBart Van Assche if (!req->map_page) 8935cfb1782SBart Van Assche goto out; 894b81d00bdSBart Van Assche req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); 8955cfb1782SBart Van Assche if (!req->indirect_desc) 896b81d00bdSBart Van Assche goto out; 897b81d00bdSBart Van Assche 898b81d00bdSBart Van Assche dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, 899b81d00bdSBart Van Assche target->indirect_size, 900b81d00bdSBart Van Assche DMA_TO_DEVICE); 901b81d00bdSBart Van Assche if (ib_dma_mapping_error(ibdev, dma_addr)) 902b81d00bdSBart Van Assche goto out; 903b81d00bdSBart Van 
Assche 904b81d00bdSBart Van Assche req->indirect_dma_addr = dma_addr; 905b81d00bdSBart Van Assche } 906b81d00bdSBart Van Assche ret = 0; 907b81d00bdSBart Van Assche 908b81d00bdSBart Van Assche out: 909b81d00bdSBart Van Assche return ret; 910b81d00bdSBart Van Assche } 911b81d00bdSBart Van Assche 912683b159aSBart Van Assche /** 913683b159aSBart Van Assche * srp_del_scsi_host_attr() - Remove attributes defined in the host template. 914683b159aSBart Van Assche * @shost: SCSI host whose attributes to remove from sysfs. 915683b159aSBart Van Assche * 916683b159aSBart Van Assche * Note: Any attributes defined in the host template and that did not exist 917683b159aSBart Van Assche * before invocation of this function will be ignored. 918683b159aSBart Van Assche */ 919683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost) 920683b159aSBart Van Assche { 921683b159aSBart Van Assche struct device_attribute **attr; 922683b159aSBart Van Assche 923683b159aSBart Van Assche for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) 924683b159aSBart Van Assche device_remove_file(&shost->shost_dev, *attr); 925683b159aSBart Van Assche } 926683b159aSBart Van Assche 927ee12d6a8SBart Van Assche static void srp_remove_target(struct srp_target_port *target) 928ee12d6a8SBart Van Assche { 929d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 930d92c0da7SBart Van Assche int i; 931509c07bcSBart Van Assche 932ef6c49d8SBart Van Assche WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 933ef6c49d8SBart Van Assche 934ee12d6a8SBart Van Assche srp_del_scsi_host_attr(target->scsi_host); 9359dd69a60SBart Van Assche srp_rport_get(target->rport); 936ee12d6a8SBart Van Assche srp_remove_host(target->scsi_host); 937ee12d6a8SBart Van Assche scsi_remove_host(target->scsi_host); 93893079162SBart Van Assche srp_stop_rport_timers(target->rport); 939ef6c49d8SBart Van Assche srp_disconnect_target(target); 940d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 
941d92c0da7SBart Van Assche ch = &target->ch[i]; 942509c07bcSBart Van Assche srp_free_ch_ib(target, ch); 943d92c0da7SBart Van Assche } 944c1120f89SBart Van Assche cancel_work_sync(&target->tl_err_work); 9459dd69a60SBart Van Assche srp_rport_put(target->rport); 946d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 947d92c0da7SBart Van Assche ch = &target->ch[i]; 948509c07bcSBart Van Assche srp_free_req_data(target, ch); 949d92c0da7SBart Van Assche } 950d92c0da7SBart Van Assche kfree(target->ch); 951d92c0da7SBart Van Assche target->ch = NULL; 95265d7dd2fSVu Pham 95365d7dd2fSVu Pham spin_lock(&target->srp_host->target_lock); 95465d7dd2fSVu Pham list_del(&target->list); 95565d7dd2fSVu Pham spin_unlock(&target->srp_host->target_lock); 95665d7dd2fSVu Pham 957ee12d6a8SBart Van Assche scsi_host_put(target->scsi_host); 958ee12d6a8SBart Van Assche } 959ee12d6a8SBart Van Assche 960c4028958SDavid Howells static void srp_remove_work(struct work_struct *work) 961aef9ec39SRoland Dreier { 962c4028958SDavid Howells struct srp_target_port *target = 963ef6c49d8SBart Van Assche container_of(work, struct srp_target_port, remove_work); 964aef9ec39SRoland Dreier 965ef6c49d8SBart Van Assche WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 966aef9ec39SRoland Dreier 96796fc248aSBart Van Assche srp_remove_target(target); 968aef9ec39SRoland Dreier } 969aef9ec39SRoland Dreier 970dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport) 971dc1bdbd9SBart Van Assche { 972dc1bdbd9SBart Van Assche struct srp_target_port *target = rport->lld_data; 973dc1bdbd9SBart Van Assche 974dc1bdbd9SBart Van Assche srp_queue_remove_work(target); 975dc1bdbd9SBart Van Assche } 976dc1bdbd9SBart Van Assche 977c014c8cdSBart Van Assche /** 978c014c8cdSBart Van Assche * srp_connected_ch() - number of connected channels 979c014c8cdSBart Van Assche * @target: SRP target port. 
980c014c8cdSBart Van Assche */ 981c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target) 982c014c8cdSBart Van Assche { 983c014c8cdSBart Van Assche int i, c = 0; 984c014c8cdSBart Van Assche 985c014c8cdSBart Van Assche for (i = 0; i < target->ch_count; i++) 986c014c8cdSBart Van Assche c += target->ch[i].connected; 987c014c8cdSBart Van Assche 988c014c8cdSBart Van Assche return c; 989c014c8cdSBart Van Assche } 990c014c8cdSBart Van Assche 991d92c0da7SBart Van Assche static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) 992aef9ec39SRoland Dreier { 993509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 994aef9ec39SRoland Dreier int ret; 995aef9ec39SRoland Dreier 996c014c8cdSBart Van Assche WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); 997294c875aSBart Van Assche 998509c07bcSBart Van Assche ret = srp_lookup_path(ch); 999aef9ec39SRoland Dreier if (ret) 1000aef9ec39SRoland Dreier return ret; 1001aef9ec39SRoland Dreier 1002aef9ec39SRoland Dreier while (1) { 1003509c07bcSBart Van Assche init_completion(&ch->done); 1004d92c0da7SBart Van Assche ret = srp_send_req(ch, multich); 1005aef9ec39SRoland Dreier if (ret) 1006aef9ec39SRoland Dreier return ret; 1007509c07bcSBart Van Assche ret = wait_for_completion_interruptible(&ch->done); 1008a702adceSBart Van Assche if (ret < 0) 1009a702adceSBart Van Assche return ret; 1010aef9ec39SRoland Dreier 1011aef9ec39SRoland Dreier /* 1012aef9ec39SRoland Dreier * The CM event handling code will set status to 1013aef9ec39SRoland Dreier * SRP_PORT_REDIRECT if we get a port redirect REJ 1014aef9ec39SRoland Dreier * back, or SRP_DLID_REDIRECT if we get a lid/qp 1015aef9ec39SRoland Dreier * redirect REJ back. 
1016aef9ec39SRoland Dreier */ 1017509c07bcSBart Van Assche switch (ch->status) { 1018aef9ec39SRoland Dreier case 0: 1019c014c8cdSBart Van Assche ch->connected = true; 1020aef9ec39SRoland Dreier return 0; 1021aef9ec39SRoland Dreier 1022aef9ec39SRoland Dreier case SRP_PORT_REDIRECT: 1023509c07bcSBart Van Assche ret = srp_lookup_path(ch); 1024aef9ec39SRoland Dreier if (ret) 1025aef9ec39SRoland Dreier return ret; 1026aef9ec39SRoland Dreier break; 1027aef9ec39SRoland Dreier 1028aef9ec39SRoland Dreier case SRP_DLID_REDIRECT: 1029aef9ec39SRoland Dreier break; 1030aef9ec39SRoland Dreier 10319fe4bcf4SDavid Dillow case SRP_STALE_CONN: 10329fe4bcf4SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 10339fe4bcf4SDavid Dillow "giving up on stale connection\n"); 1034509c07bcSBart Van Assche ch->status = -ECONNRESET; 1035509c07bcSBart Van Assche return ch->status; 10369fe4bcf4SDavid Dillow 1037aef9ec39SRoland Dreier default: 1038509c07bcSBart Van Assche return ch->status; 1039aef9ec39SRoland Dreier } 1040aef9ec39SRoland Dreier } 1041aef9ec39SRoland Dreier } 1042aef9ec39SRoland Dreier 1043509c07bcSBart Van Assche static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) 10445cfb1782SBart Van Assche { 10455cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 10465cfb1782SBart Van Assche struct ib_send_wr wr = { 10475cfb1782SBart Van Assche .opcode = IB_WR_LOCAL_INV, 10485cfb1782SBart Van Assche .wr_id = LOCAL_INV_WR_ID_MASK, 10495cfb1782SBart Van Assche .next = NULL, 10505cfb1782SBart Van Assche .num_sge = 0, 10515cfb1782SBart Van Assche .send_flags = 0, 10525cfb1782SBart Van Assche .ex.invalidate_rkey = rkey, 10535cfb1782SBart Van Assche }; 10545cfb1782SBart Van Assche 1055509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 10565cfb1782SBart Van Assche } 10575cfb1782SBart Van Assche 1058d945e1dfSRoland Dreier static void srp_unmap_data(struct scsi_cmnd *scmnd, 1059509c07bcSBart Van Assche struct srp_rdma_ch *ch, 1060d945e1dfSRoland Dreier struct srp_request 
*req) 1061d945e1dfSRoland Dreier { 1062509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 10635cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 10645cfb1782SBart Van Assche struct ib_device *ibdev = dev->dev; 10655cfb1782SBart Van Assche int i, res; 10668f26c9ffSDavid Dillow 1067bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || 1068d945e1dfSRoland Dreier (scmnd->sc_data_direction != DMA_TO_DEVICE && 1069d945e1dfSRoland Dreier scmnd->sc_data_direction != DMA_FROM_DEVICE)) 1070d945e1dfSRoland Dreier return; 1071d945e1dfSRoland Dreier 10725cfb1782SBart Van Assche if (dev->use_fast_reg) { 10735cfb1782SBart Van Assche struct srp_fr_desc **pfr; 10745cfb1782SBart Van Assche 10755cfb1782SBart Van Assche for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { 1076509c07bcSBart Van Assche res = srp_inv_rkey(ch, (*pfr)->mr->rkey); 10775cfb1782SBart Van Assche if (res < 0) { 10785cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 10795cfb1782SBart Van Assche "Queueing INV WR for rkey %#x failed (%d)\n", 10805cfb1782SBart Van Assche (*pfr)->mr->rkey, res); 10815cfb1782SBart Van Assche queue_work(system_long_wq, 10825cfb1782SBart Van Assche &target->tl_err_work); 10835cfb1782SBart Van Assche } 10845cfb1782SBart Van Assche } 10855cfb1782SBart Van Assche if (req->nmdesc) 1086509c07bcSBart Van Assche srp_fr_pool_put(ch->fr_pool, req->fr_list, 10875cfb1782SBart Van Assche req->nmdesc); 10885cfb1782SBart Van Assche } else { 10895cfb1782SBart Van Assche struct ib_pool_fmr **pfmr; 10905cfb1782SBart Van Assche 10915cfb1782SBart Van Assche for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++) 10925cfb1782SBart Van Assche ib_fmr_pool_unmap(*pfmr); 10935cfb1782SBart Van Assche } 1094f5358a17SRoland Dreier 10958f26c9ffSDavid Dillow ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), 10968f26c9ffSDavid Dillow scmnd->sc_data_direction); 1097d945e1dfSRoland Dreier } 1098d945e1dfSRoland Dreier 
109922032991SBart Van Assche /** 110022032991SBart Van Assche * srp_claim_req - Take ownership of the scmnd associated with a request. 1101509c07bcSBart Van Assche * @ch: SRP RDMA channel. 110222032991SBart Van Assche * @req: SRP request. 1103b3fe628dSBart Van Assche * @sdev: If not NULL, only take ownership for this SCSI device. 110422032991SBart Van Assche * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 110522032991SBart Van Assche * ownership of @req->scmnd if it equals @scmnd. 110622032991SBart Van Assche * 110722032991SBart Van Assche * Return value: 110822032991SBart Van Assche * Either NULL or a pointer to the SCSI command the caller became owner of. 110922032991SBart Van Assche */ 1110509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, 111122032991SBart Van Assche struct srp_request *req, 1112b3fe628dSBart Van Assche struct scsi_device *sdev, 111322032991SBart Van Assche struct scsi_cmnd *scmnd) 1114526b4caaSIshai Rabinovitz { 111594a9174cSBart Van Assche unsigned long flags; 111694a9174cSBart Van Assche 1117509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1118b3fe628dSBart Van Assche if (req->scmnd && 1119b3fe628dSBart Van Assche (!sdev || req->scmnd->device == sdev) && 1120b3fe628dSBart Van Assche (!scmnd || req->scmnd == scmnd)) { 112122032991SBart Van Assche scmnd = req->scmnd; 112222032991SBart Van Assche req->scmnd = NULL; 112322032991SBart Van Assche } else { 112422032991SBart Van Assche scmnd = NULL; 112522032991SBart Van Assche } 1126509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 112722032991SBart Van Assche 112822032991SBart Van Assche return scmnd; 112922032991SBart Van Assche } 113022032991SBart Van Assche 113122032991SBart Van Assche /** 113222032991SBart Van Assche * srp_free_req() - Unmap data and add request to the free request list. 1133509c07bcSBart Van Assche * @ch: SRP RDMA channel. 1134af24663bSBart Van Assche * @req: Request to be freed. 
1135af24663bSBart Van Assche * @scmnd: SCSI command associated with @req. 1136af24663bSBart Van Assche * @req_lim_delta: Amount to be added to @target->req_lim. 113722032991SBart Van Assche */ 1138509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1139509c07bcSBart Van Assche struct scsi_cmnd *scmnd, s32 req_lim_delta) 114022032991SBart Van Assche { 114122032991SBart Van Assche unsigned long flags; 114222032991SBart Van Assche 1143509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 114422032991SBart Van Assche 1145509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1146509c07bcSBart Van Assche ch->req_lim += req_lim_delta; 1147509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 1148526b4caaSIshai Rabinovitz } 1149526b4caaSIshai Rabinovitz 1150509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1151509c07bcSBart Van Assche struct scsi_device *sdev, int result) 1152526b4caaSIshai Rabinovitz { 1153509c07bcSBart Van Assche struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 115422032991SBart Van Assche 115522032991SBart Van Assche if (scmnd) { 1156509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 1157ed9b2264SBart Van Assche scmnd->result = result; 115822032991SBart Van Assche scmnd->scsi_done(scmnd); 115922032991SBart Van Assche } 1160526b4caaSIshai Rabinovitz } 1161526b4caaSIshai Rabinovitz 1162ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport) 1163aef9ec39SRoland Dreier { 1164ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1165d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1166b3fe628dSBart Van Assche struct Scsi_Host *shost = target->scsi_host; 1167b3fe628dSBart Van Assche struct scsi_device *sdev; 1168d92c0da7SBart Van Assche int i, j; 1169aef9ec39SRoland Dreier 1170b3fe628dSBart Van Assche /* 1171b3fe628dSBart Van Assche * Invoking srp_terminate_io() while 
srp_queuecommand() is running 1172b3fe628dSBart Van Assche * is not safe. Hence the warning statement below. 1173b3fe628dSBart Van Assche */ 1174b3fe628dSBart Van Assche shost_for_each_device(sdev, shost) 1175b3fe628dSBart Van Assche WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1176b3fe628dSBart Van Assche 1177d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1178d92c0da7SBart Van Assche ch = &target->ch[i]; 1179509c07bcSBart Van Assche 1180d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1181d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1182d92c0da7SBart Van Assche 1183d92c0da7SBart Van Assche srp_finish_req(ch, req, NULL, 1184d92c0da7SBart Van Assche DID_TRANSPORT_FAILFAST << 16); 1185d92c0da7SBart Van Assche } 1186ed9b2264SBart Van Assche } 1187ed9b2264SBart Van Assche } 1188ed9b2264SBart Van Assche 1189ed9b2264SBart Van Assche /* 1190ed9b2264SBart Van Assche * It is up to the caller to ensure that srp_rport_reconnect() calls are 1191ed9b2264SBart Van Assche * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1192ed9b2264SBart Van Assche * srp_reset_device() or srp_reset_host() calls will occur while this function 1193ed9b2264SBart Van Assche * is in progress. One way to realize that is not to call this function 1194ed9b2264SBart Van Assche * directly but to call srp_reconnect_rport() instead since that last function 1195ed9b2264SBart Van Assche * serializes calls of this function via rport->mutex and also blocks 1196ed9b2264SBart Van Assche * srp_queuecommand() calls before invoking this function. 
1197ed9b2264SBart Van Assche */ 1198ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport) 1199ed9b2264SBart Van Assche { 1200ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1201d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1202d92c0da7SBart Van Assche int i, j, ret = 0; 1203d92c0da7SBart Van Assche bool multich = false; 120409be70a2SBart Van Assche 1205aef9ec39SRoland Dreier srp_disconnect_target(target); 120634aa654eSBart Van Assche 120734aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 120834aa654eSBart Van Assche return -ENODEV; 120934aa654eSBart Van Assche 1210aef9ec39SRoland Dreier /* 1211c7c4e7ffSBart Van Assche * Now get a new local CM ID so that we avoid confusing the target in 1212c7c4e7ffSBart Van Assche * case things are really fouled up. Doing so also ensures that all CM 1213c7c4e7ffSBart Van Assche * callbacks will have finished before a new QP is allocated. 1214aef9ec39SRoland Dreier */ 1215d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1216d92c0da7SBart Van Assche ch = &target->ch[i]; 1217d92c0da7SBart Van Assche ret += srp_new_cm_id(ch); 1218d92c0da7SBart Van Assche } 1219d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1220d92c0da7SBart Van Assche ch = &target->ch[i]; 1221d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1222d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1223509c07bcSBart Van Assche 1224509c07bcSBart Van Assche srp_finish_req(ch, req, NULL, DID_RESET << 16); 1225536ae14eSBart Van Assche } 1226d92c0da7SBart Van Assche } 1227d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1228d92c0da7SBart Van Assche ch = &target->ch[i]; 12295cfb1782SBart Van Assche /* 12305cfb1782SBart Van Assche * Whether or not creating a new CM ID succeeded, create a new 1231d92c0da7SBart Van Assche * QP. 
This guarantees that all completion callback function 1232d92c0da7SBart Van Assche * invocations have finished before request resetting starts. 12335cfb1782SBart Van Assche */ 1234509c07bcSBart Van Assche ret += srp_create_ch_ib(ch); 12355cfb1782SBart Van Assche 1236509c07bcSBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 1237d92c0da7SBart Van Assche for (j = 0; j < target->queue_size; ++j) 1238d92c0da7SBart Van Assche list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1239d92c0da7SBart Van Assche } 12408de9fe3aSBart Van Assche 12418de9fe3aSBart Van Assche target->qp_in_error = false; 12428de9fe3aSBart Van Assche 1243d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1244d92c0da7SBart Van Assche ch = &target->ch[i]; 1245bbac5ccfSBart Van Assche if (ret) 1246d92c0da7SBart Van Assche break; 1247d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 1248d92c0da7SBart Van Assche multich = true; 1249d92c0da7SBart Van Assche } 125009be70a2SBart Van Assche 1251ed9b2264SBart Van Assche if (ret == 0) 1252ed9b2264SBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 1253ed9b2264SBart Van Assche PFX "reconnect succeeded\n"); 1254aef9ec39SRoland Dreier 1255aef9ec39SRoland Dreier return ret; 1256aef9ec39SRoland Dreier } 1257aef9ec39SRoland Dreier 12588f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 12598f26c9ffSDavid Dillow unsigned int dma_len, u32 rkey) 1260f5358a17SRoland Dreier { 12618f26c9ffSDavid Dillow struct srp_direct_buf *desc = state->desc; 12628f26c9ffSDavid Dillow 12638f26c9ffSDavid Dillow desc->va = cpu_to_be64(dma_addr); 12648f26c9ffSDavid Dillow desc->key = cpu_to_be32(rkey); 12658f26c9ffSDavid Dillow desc->len = cpu_to_be32(dma_len); 12668f26c9ffSDavid Dillow 12678f26c9ffSDavid Dillow state->total_len += dma_len; 12688f26c9ffSDavid Dillow state->desc++; 12698f26c9ffSDavid Dillow state->ndesc++; 12708f26c9ffSDavid Dillow } 12718f26c9ffSDavid Dillow 12728f26c9ffSDavid Dillow static int 
srp_map_finish_fmr(struct srp_map_state *state, 1273509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12748f26c9ffSDavid Dillow { 12758f26c9ffSDavid Dillow struct ib_pool_fmr *fmr; 1276f5358a17SRoland Dreier u64 io_addr = 0; 12778f26c9ffSDavid Dillow 1278509c07bcSBart Van Assche fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, 12798f26c9ffSDavid Dillow state->npages, io_addr); 12808f26c9ffSDavid Dillow if (IS_ERR(fmr)) 12818f26c9ffSDavid Dillow return PTR_ERR(fmr); 12828f26c9ffSDavid Dillow 12838f26c9ffSDavid Dillow *state->next_fmr++ = fmr; 128452ede08fSBart Van Assche state->nmdesc++; 12858f26c9ffSDavid Dillow 128652ede08fSBart Van Assche srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey); 1287539dde6fSBart Van Assche 12888f26c9ffSDavid Dillow return 0; 12898f26c9ffSDavid Dillow } 12908f26c9ffSDavid Dillow 12915cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state, 1292509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12935cfb1782SBart Van Assche { 1294509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 12955cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 12965cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 12975cfb1782SBart Van Assche struct ib_send_wr wr; 12985cfb1782SBart Van Assche struct srp_fr_desc *desc; 12995cfb1782SBart Van Assche u32 rkey; 13005cfb1782SBart Van Assche 1301509c07bcSBart Van Assche desc = srp_fr_pool_get(ch->fr_pool); 13025cfb1782SBart Van Assche if (!desc) 13035cfb1782SBart Van Assche return -ENOMEM; 13045cfb1782SBart Van Assche 13055cfb1782SBart Van Assche rkey = ib_inc_rkey(desc->mr->rkey); 13065cfb1782SBart Van Assche ib_update_fast_reg_key(desc->mr, rkey); 13075cfb1782SBart Van Assche 13085cfb1782SBart Van Assche memcpy(desc->frpl->page_list, state->pages, 13095cfb1782SBart Van Assche sizeof(state->pages[0]) * state->npages); 13105cfb1782SBart Van Assche 13115cfb1782SBart Van Assche memset(&wr, 0, sizeof(wr)); 13125cfb1782SBart Van Assche wr.opcode = 
IB_WR_FAST_REG_MR; 13135cfb1782SBart Van Assche wr.wr_id = FAST_REG_WR_ID_MASK; 13145cfb1782SBart Van Assche wr.wr.fast_reg.iova_start = state->base_dma_addr; 13155cfb1782SBart Van Assche wr.wr.fast_reg.page_list = desc->frpl; 13165cfb1782SBart Van Assche wr.wr.fast_reg.page_list_len = state->npages; 13175cfb1782SBart Van Assche wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); 13185cfb1782SBart Van Assche wr.wr.fast_reg.length = state->dma_len; 13195cfb1782SBart Van Assche wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | 13205cfb1782SBart Van Assche IB_ACCESS_REMOTE_READ | 13215cfb1782SBart Van Assche IB_ACCESS_REMOTE_WRITE); 13225cfb1782SBart Van Assche wr.wr.fast_reg.rkey = desc->mr->lkey; 13235cfb1782SBart Van Assche 13245cfb1782SBart Van Assche *state->next_fr++ = desc; 13255cfb1782SBart Van Assche state->nmdesc++; 13265cfb1782SBart Van Assche 13275cfb1782SBart Van Assche srp_map_desc(state, state->base_dma_addr, state->dma_len, 13285cfb1782SBart Van Assche desc->mr->rkey); 13295cfb1782SBart Van Assche 1330509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 13315cfb1782SBart Van Assche } 13325cfb1782SBart Van Assche 1333539dde6fSBart Van Assche static int srp_finish_mapping(struct srp_map_state *state, 1334509c07bcSBart Van Assche struct srp_rdma_ch *ch) 1335539dde6fSBart Van Assche { 1336509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1337539dde6fSBart Van Assche int ret = 0; 1338539dde6fSBart Van Assche 1339539dde6fSBart Van Assche if (state->npages == 0) 1340539dde6fSBart Van Assche return 0; 1341539dde6fSBart Van Assche 1342b1b8854dSBart Van Assche if (state->npages == 1 && !register_always) 134352ede08fSBart Van Assche srp_map_desc(state, state->base_dma_addr, state->dma_len, 1344539dde6fSBart Van Assche target->rkey); 1345539dde6fSBart Van Assche else 13465cfb1782SBart Van Assche ret = target->srp_host->srp_dev->use_fast_reg ? 
1347509c07bcSBart Van Assche srp_map_finish_fr(state, ch) : 1348509c07bcSBart Van Assche srp_map_finish_fmr(state, ch); 1349539dde6fSBart Van Assche 1350539dde6fSBart Van Assche if (ret == 0) { 1351539dde6fSBart Van Assche state->npages = 0; 135252ede08fSBart Van Assche state->dma_len = 0; 1353539dde6fSBart Van Assche } 1354539dde6fSBart Van Assche 1355539dde6fSBart Van Assche return ret; 1356539dde6fSBart Van Assche } 1357539dde6fSBart Van Assche 13588f26c9ffSDavid Dillow static void srp_map_update_start(struct srp_map_state *state, 13598f26c9ffSDavid Dillow struct scatterlist *sg, int sg_index, 13608f26c9ffSDavid Dillow dma_addr_t dma_addr) 13618f26c9ffSDavid Dillow { 13628f26c9ffSDavid Dillow state->unmapped_sg = sg; 13638f26c9ffSDavid Dillow state->unmapped_index = sg_index; 13648f26c9ffSDavid Dillow state->unmapped_addr = dma_addr; 13658f26c9ffSDavid Dillow } 13668f26c9ffSDavid Dillow 13678f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state, 1368509c07bcSBart Van Assche struct srp_rdma_ch *ch, 13698f26c9ffSDavid Dillow struct scatterlist *sg, int sg_index, 13705cfb1782SBart Van Assche bool use_mr) 13718f26c9ffSDavid Dillow { 1372509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 137305321937SGreg Kroah-Hartman struct srp_device *dev = target->srp_host->srp_dev; 137485507bccSRalph Campbell struct ib_device *ibdev = dev->dev; 13758f26c9ffSDavid Dillow dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1376bb350d1dSFUJITA Tomonori unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 13778f26c9ffSDavid Dillow unsigned int len; 13788f26c9ffSDavid Dillow int ret; 137985507bccSRalph Campbell 13808f26c9ffSDavid Dillow if (!dma_len) 13818f26c9ffSDavid Dillow return 0; 13828f26c9ffSDavid Dillow 13835cfb1782SBart Van Assche if (!use_mr) { 13845cfb1782SBart Van Assche /* 13855cfb1782SBart Van Assche * Once we're in direct map mode for a request, we don't 13865cfb1782SBart Van Assche * go back to FMR or FR mode, so no need to update 
anything 13878f26c9ffSDavid Dillow * other than the descriptor. 13888f26c9ffSDavid Dillow */ 13898f26c9ffSDavid Dillow srp_map_desc(state, dma_addr, dma_len, target->rkey); 13908f26c9ffSDavid Dillow return 0; 1391f5358a17SRoland Dreier } 1392f5358a17SRoland Dreier 13935cfb1782SBart Van Assche /* 13945cfb1782SBart Van Assche * Since not all RDMA HW drivers support non-zero page offsets for 13955cfb1782SBart Van Assche * FMR, if we start at an offset into a page, don't merge into the 13965cfb1782SBart Van Assche * current FMR mapping. Finish it out, and use the kernel's MR for 13975cfb1782SBart Van Assche * this sg entry. 13988f26c9ffSDavid Dillow */ 13995cfb1782SBart Van Assche if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || 14005cfb1782SBart Van Assche dma_len > dev->mr_max_size) { 1401509c07bcSBart Van Assche ret = srp_finish_mapping(state, ch); 14028f26c9ffSDavid Dillow if (ret) 14038f26c9ffSDavid Dillow return ret; 14048f26c9ffSDavid Dillow 14058f26c9ffSDavid Dillow srp_map_desc(state, dma_addr, dma_len, target->rkey); 14068f26c9ffSDavid Dillow srp_map_update_start(state, NULL, 0, 0); 14078f26c9ffSDavid Dillow return 0; 1408f5358a17SRoland Dreier } 1409f5358a17SRoland Dreier 14105cfb1782SBart Van Assche /* 14115cfb1782SBart Van Assche * If this is the first sg that will be mapped via FMR or via FR, save 14125cfb1782SBart Van Assche * our position. We need to know the first unmapped entry, its index, 14135cfb1782SBart Van Assche * and the first unmapped address within that entry to be able to 14145cfb1782SBart Van Assche * restart mapping after an error. 
14158f26c9ffSDavid Dillow */ 14168f26c9ffSDavid Dillow if (!state->unmapped_sg) 14178f26c9ffSDavid Dillow srp_map_update_start(state, sg, sg_index, dma_addr); 1418f5358a17SRoland Dreier 14198f26c9ffSDavid Dillow while (dma_len) { 14205cfb1782SBart Van Assche unsigned offset = dma_addr & ~dev->mr_page_mask; 14215cfb1782SBart Van Assche if (state->npages == dev->max_pages_per_mr || offset != 0) { 1422509c07bcSBart Van Assche ret = srp_finish_mapping(state, ch); 14238f26c9ffSDavid Dillow if (ret) 14248f26c9ffSDavid Dillow return ret; 1425f5358a17SRoland Dreier 14268f26c9ffSDavid Dillow srp_map_update_start(state, sg, sg_index, dma_addr); 142785507bccSRalph Campbell } 1428f5358a17SRoland Dreier 14295cfb1782SBart Van Assche len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); 14308f26c9ffSDavid Dillow 14318f26c9ffSDavid Dillow if (!state->npages) 14328f26c9ffSDavid Dillow state->base_dma_addr = dma_addr; 14335cfb1782SBart Van Assche state->pages[state->npages++] = dma_addr & dev->mr_page_mask; 143452ede08fSBart Van Assche state->dma_len += len; 14358f26c9ffSDavid Dillow dma_addr += len; 14368f26c9ffSDavid Dillow dma_len -= len; 1437f5358a17SRoland Dreier } 1438f5358a17SRoland Dreier 14395cfb1782SBart Van Assche /* 14405cfb1782SBart Van Assche * If the last entry of the MR wasn't a full page, then we need to 14418f26c9ffSDavid Dillow * close it out and start a new one -- we can only merge at page 14428f26c9ffSDavid Dillow * boundries. 
14438f26c9ffSDavid Dillow */ 1444f5358a17SRoland Dreier ret = 0; 144552ede08fSBart Van Assche if (len != dev->mr_page_size) { 1446509c07bcSBart Van Assche ret = srp_finish_mapping(state, ch); 14478f26c9ffSDavid Dillow if (!ret) 14488f26c9ffSDavid Dillow srp_map_update_start(state, NULL, 0, 0); 14498f26c9ffSDavid Dillow } 1450f5358a17SRoland Dreier return ret; 1451f5358a17SRoland Dreier } 1452f5358a17SRoland Dreier 1453509c07bcSBart Van Assche static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, 1454509c07bcSBart Van Assche struct srp_request *req, struct scatterlist *scat, 1455509c07bcSBart Van Assche int count) 145676bc1e1dSBart Van Assche { 1457509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 145876bc1e1dSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 145976bc1e1dSBart Van Assche struct ib_device *ibdev = dev->dev; 146076bc1e1dSBart Van Assche struct scatterlist *sg; 14615cfb1782SBart Van Assche int i; 14625cfb1782SBart Van Assche bool use_mr; 146376bc1e1dSBart Van Assche 146476bc1e1dSBart Van Assche state->desc = req->indirect_desc; 146576bc1e1dSBart Van Assche state->pages = req->map_page; 14665cfb1782SBart Van Assche if (dev->use_fast_reg) { 14675cfb1782SBart Van Assche state->next_fr = req->fr_list; 1468509c07bcSBart Van Assche use_mr = !!ch->fr_pool; 14695cfb1782SBart Van Assche } else { 147076bc1e1dSBart Van Assche state->next_fmr = req->fmr_list; 1471509c07bcSBart Van Assche use_mr = !!ch->fmr_pool; 14725cfb1782SBart Van Assche } 147376bc1e1dSBart Van Assche 147476bc1e1dSBart Van Assche for_each_sg(scat, sg, count, i) { 1475509c07bcSBart Van Assche if (srp_map_sg_entry(state, ch, sg, i, use_mr)) { 14765cfb1782SBart Van Assche /* 14775cfb1782SBart Van Assche * Memory registration failed, so backtrack to the 14785cfb1782SBart Van Assche * first unmapped entry and continue on without using 14795cfb1782SBart Van Assche * memory registration. 
148076bc1e1dSBart Van Assche */ 148176bc1e1dSBart Van Assche dma_addr_t dma_addr; 148276bc1e1dSBart Van Assche unsigned int dma_len; 148376bc1e1dSBart Van Assche 148476bc1e1dSBart Van Assche backtrack: 148576bc1e1dSBart Van Assche sg = state->unmapped_sg; 148676bc1e1dSBart Van Assche i = state->unmapped_index; 148776bc1e1dSBart Van Assche 148876bc1e1dSBart Van Assche dma_addr = ib_sg_dma_address(ibdev, sg); 148976bc1e1dSBart Van Assche dma_len = ib_sg_dma_len(ibdev, sg); 149076bc1e1dSBart Van Assche dma_len -= (state->unmapped_addr - dma_addr); 149176bc1e1dSBart Van Assche dma_addr = state->unmapped_addr; 14925cfb1782SBart Van Assche use_mr = false; 149376bc1e1dSBart Van Assche srp_map_desc(state, dma_addr, dma_len, target->rkey); 149476bc1e1dSBart Van Assche } 149576bc1e1dSBart Van Assche } 149676bc1e1dSBart Van Assche 1497509c07bcSBart Van Assche if (use_mr && srp_finish_mapping(state, ch)) 149876bc1e1dSBart Van Assche goto backtrack; 149976bc1e1dSBart Van Assche 150052ede08fSBart Van Assche req->nmdesc = state->nmdesc; 15015cfb1782SBart Van Assche 15025cfb1782SBart Van Assche return 0; 150376bc1e1dSBart Van Assche } 150476bc1e1dSBart Van Assche 1505509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1506aef9ec39SRoland Dreier struct srp_request *req) 1507aef9ec39SRoland Dreier { 1508509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 150976bc1e1dSBart Van Assche struct scatterlist *scat; 1510aef9ec39SRoland Dreier struct srp_cmd *cmd = req->cmd->buf; 151176bc1e1dSBart Van Assche int len, nents, count; 151285507bccSRalph Campbell struct srp_device *dev; 151385507bccSRalph Campbell struct ib_device *ibdev; 15148f26c9ffSDavid Dillow struct srp_map_state state; 15158f26c9ffSDavid Dillow struct srp_indirect_buf *indirect_hdr; 15168f26c9ffSDavid Dillow u32 table_len; 15178f26c9ffSDavid Dillow u8 fmt; 1518aef9ec39SRoland Dreier 1519bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || 
scmnd->sc_data_direction == DMA_NONE) 1520aef9ec39SRoland Dreier return sizeof (struct srp_cmd); 1521aef9ec39SRoland Dreier 1522aef9ec39SRoland Dreier if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1523aef9ec39SRoland Dreier scmnd->sc_data_direction != DMA_TO_DEVICE) { 15247aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 15257aa54bd7SDavid Dillow PFX "Unhandled data direction %d\n", 1526aef9ec39SRoland Dreier scmnd->sc_data_direction); 1527aef9ec39SRoland Dreier return -EINVAL; 1528aef9ec39SRoland Dreier } 1529aef9ec39SRoland Dreier 1530bb350d1dSFUJITA Tomonori nents = scsi_sg_count(scmnd); 1531bb350d1dSFUJITA Tomonori scat = scsi_sglist(scmnd); 1532aef9ec39SRoland Dreier 153305321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev; 153485507bccSRalph Campbell ibdev = dev->dev; 153585507bccSRalph Campbell 153685507bccSRalph Campbell count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 15378f26c9ffSDavid Dillow if (unlikely(count == 0)) 15388f26c9ffSDavid Dillow return -EIO; 1539aef9ec39SRoland Dreier 1540aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_DIRECT; 1541f5358a17SRoland Dreier len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1542f5358a17SRoland Dreier 1543b1b8854dSBart Van Assche if (count == 1 && !register_always) { 1544f5358a17SRoland Dreier /* 1545f5358a17SRoland Dreier * The midlayer only generated a single gather/scatter 1546f5358a17SRoland Dreier * entry, or DMA mapping coalesced everything to a 1547f5358a17SRoland Dreier * single entry. So a direct descriptor along with 1548f5358a17SRoland Dreier * the DMA MR suffices. 
1549f5358a17SRoland Dreier */ 1550f5358a17SRoland Dreier struct srp_direct_buf *buf = (void *) cmd->add_data; 1551aef9ec39SRoland Dreier 155285507bccSRalph Campbell buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 15539af76271SDavid Dillow buf->key = cpu_to_be32(target->rkey); 155485507bccSRalph Campbell buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 15558f26c9ffSDavid Dillow 155652ede08fSBart Van Assche req->nmdesc = 0; 15578f26c9ffSDavid Dillow goto map_complete; 15588f26c9ffSDavid Dillow } 15598f26c9ffSDavid Dillow 15605cfb1782SBart Van Assche /* 15615cfb1782SBart Van Assche * We have more than one scatter/gather entry, so build our indirect 15625cfb1782SBart Van Assche * descriptor table, trying to merge as many entries as we can. 1563f5358a17SRoland Dreier */ 15648f26c9ffSDavid Dillow indirect_hdr = (void *) cmd->add_data; 15658f26c9ffSDavid Dillow 1566c07d424dSDavid Dillow ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1567c07d424dSDavid Dillow target->indirect_size, DMA_TO_DEVICE); 1568c07d424dSDavid Dillow 15698f26c9ffSDavid Dillow memset(&state, 0, sizeof(state)); 1570509c07bcSBart Van Assche srp_map_sg(&state, ch, req, scat, count); 15718f26c9ffSDavid Dillow 1572c07d424dSDavid Dillow /* We've mapped the request, now pull as much of the indirect 1573c07d424dSDavid Dillow * descriptor table as we can into the command buffer. If this 1574c07d424dSDavid Dillow * target is not using an external indirect table, we are 1575c07d424dSDavid Dillow * guaranteed to fit into the command, as the SCSI layer won't 1576c07d424dSDavid Dillow * give us more S/G entries than we allow. 15778f26c9ffSDavid Dillow */ 15788f26c9ffSDavid Dillow if (state.ndesc == 1) { 15795cfb1782SBart Van Assche /* 15805cfb1782SBart Van Assche * Memory registration collapsed the sg-list into one entry, 15818f26c9ffSDavid Dillow * so use a direct descriptor. 
15828f26c9ffSDavid Dillow */ 15838f26c9ffSDavid Dillow struct srp_direct_buf *buf = (void *) cmd->add_data; 15848f26c9ffSDavid Dillow 1585c07d424dSDavid Dillow *buf = req->indirect_desc[0]; 15868f26c9ffSDavid Dillow goto map_complete; 15878f26c9ffSDavid Dillow } 15888f26c9ffSDavid Dillow 1589c07d424dSDavid Dillow if (unlikely(target->cmd_sg_cnt < state.ndesc && 1590c07d424dSDavid Dillow !target->allow_ext_sg)) { 1591c07d424dSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1592c07d424dSDavid Dillow "Could not fit S/G list into SRP_CMD\n"); 1593c07d424dSDavid Dillow return -EIO; 1594c07d424dSDavid Dillow } 1595c07d424dSDavid Dillow 1596c07d424dSDavid Dillow count = min(state.ndesc, target->cmd_sg_cnt); 15978f26c9ffSDavid Dillow table_len = state.ndesc * sizeof (struct srp_direct_buf); 1598aef9ec39SRoland Dreier 1599aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_INDIRECT; 16008f26c9ffSDavid Dillow len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); 1601c07d424dSDavid Dillow len += count * sizeof (struct srp_direct_buf); 1602f5358a17SRoland Dreier 1603c07d424dSDavid Dillow memcpy(indirect_hdr->desc_list, req->indirect_desc, 1604c07d424dSDavid Dillow count * sizeof (struct srp_direct_buf)); 160585507bccSRalph Campbell 1606c07d424dSDavid Dillow indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 16078f26c9ffSDavid Dillow indirect_hdr->table_desc.key = cpu_to_be32(target->rkey); 16088f26c9ffSDavid Dillow indirect_hdr->table_desc.len = cpu_to_be32(table_len); 16098f26c9ffSDavid Dillow indirect_hdr->len = cpu_to_be32(state.total_len); 1610aef9ec39SRoland Dreier 1611aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1612c07d424dSDavid Dillow cmd->data_out_desc_cnt = count; 1613aef9ec39SRoland Dreier else 1614c07d424dSDavid Dillow cmd->data_in_desc_cnt = count; 1615c07d424dSDavid Dillow 1616c07d424dSDavid Dillow ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1617c07d424dSDavid Dillow DMA_TO_DEVICE); 
1618aef9ec39SRoland Dreier 16198f26c9ffSDavid Dillow map_complete: 1620aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1621aef9ec39SRoland Dreier cmd->buf_fmt = fmt << 4; 1622aef9ec39SRoland Dreier else 1623aef9ec39SRoland Dreier cmd->buf_fmt = fmt; 1624aef9ec39SRoland Dreier 1625aef9ec39SRoland Dreier return len; 1626aef9ec39SRoland Dreier } 1627aef9ec39SRoland Dreier 162805a1d750SDavid Dillow /* 162976c75b25SBart Van Assche * Return an IU and possible credit to the free pool 163076c75b25SBart Van Assche */ 1631509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, 163276c75b25SBart Van Assche enum srp_iu_type iu_type) 163376c75b25SBart Van Assche { 163476c75b25SBart Van Assche unsigned long flags; 163576c75b25SBart Van Assche 1636509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1637509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 163876c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) 1639509c07bcSBart Van Assche ++ch->req_lim; 1640509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 164176c75b25SBart Van Assche } 164276c75b25SBart Van Assche 164376c75b25SBart Van Assche /* 1644509c07bcSBart Van Assche * Must be called with ch->lock held to protect req_lim and free_tx. 1645e9684678SBart Van Assche * If IU is not sent, it must be returned using srp_put_tx_iu(). 164605a1d750SDavid Dillow * 164705a1d750SDavid Dillow * Note: 164805a1d750SDavid Dillow * An upper limit for the number of allocated information units for each 164905a1d750SDavid Dillow * request type is: 165005a1d750SDavid Dillow * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 165105a1d750SDavid Dillow * more than Scsi_Host.can_queue requests. 165205a1d750SDavid Dillow * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 165305a1d750SDavid Dillow * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 165405a1d750SDavid Dillow * one unanswered SRP request to an initiator. 
165505a1d750SDavid Dillow */ 1656509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 165705a1d750SDavid Dillow enum srp_iu_type iu_type) 165805a1d750SDavid Dillow { 1659509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 166005a1d750SDavid Dillow s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 166105a1d750SDavid Dillow struct srp_iu *iu; 166205a1d750SDavid Dillow 1663509c07bcSBart Van Assche srp_send_completion(ch->send_cq, ch); 166405a1d750SDavid Dillow 1665509c07bcSBart Van Assche if (list_empty(&ch->free_tx)) 166605a1d750SDavid Dillow return NULL; 166705a1d750SDavid Dillow 166805a1d750SDavid Dillow /* Initiator responses to target requests do not consume credits */ 166976c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) { 1670509c07bcSBart Van Assche if (ch->req_lim <= rsv) { 167105a1d750SDavid Dillow ++target->zero_req_lim; 167205a1d750SDavid Dillow return NULL; 167305a1d750SDavid Dillow } 167405a1d750SDavid Dillow 1675509c07bcSBart Van Assche --ch->req_lim; 167676c75b25SBart Van Assche } 167776c75b25SBart Van Assche 1678509c07bcSBart Van Assche iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 167976c75b25SBart Van Assche list_del(&iu->list); 168005a1d750SDavid Dillow return iu; 168105a1d750SDavid Dillow } 168205a1d750SDavid Dillow 1683509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 168405a1d750SDavid Dillow { 1685509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 168605a1d750SDavid Dillow struct ib_sge list; 168705a1d750SDavid Dillow struct ib_send_wr wr, *bad_wr; 168805a1d750SDavid Dillow 168905a1d750SDavid Dillow list.addr = iu->dma; 169005a1d750SDavid Dillow list.length = len; 16919af76271SDavid Dillow list.lkey = target->lkey; 169205a1d750SDavid Dillow 169305a1d750SDavid Dillow wr.next = NULL; 1694dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 169505a1d750SDavid Dillow wr.sg_list = &list; 
169605a1d750SDavid Dillow wr.num_sge = 1; 169705a1d750SDavid Dillow wr.opcode = IB_WR_SEND; 169805a1d750SDavid Dillow wr.send_flags = IB_SEND_SIGNALED; 169905a1d750SDavid Dillow 1700509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 170105a1d750SDavid Dillow } 170205a1d750SDavid Dillow 1703509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1704c996bb47SBart Van Assche { 1705509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1706c996bb47SBart Van Assche struct ib_recv_wr wr, *bad_wr; 1707dcb4cb85SBart Van Assche struct ib_sge list; 1708c996bb47SBart Van Assche 1709c996bb47SBart Van Assche list.addr = iu->dma; 1710c996bb47SBart Van Assche list.length = iu->size; 17119af76271SDavid Dillow list.lkey = target->lkey; 1712c996bb47SBart Van Assche 1713c996bb47SBart Van Assche wr.next = NULL; 1714dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 1715c996bb47SBart Van Assche wr.sg_list = &list; 1716c996bb47SBart Van Assche wr.num_sge = 1; 1717c996bb47SBart Van Assche 1718509c07bcSBart Van Assche return ib_post_recv(ch->qp, &wr, &bad_wr); 1719c996bb47SBart Van Assche } 1720c996bb47SBart Van Assche 1721509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1722aef9ec39SRoland Dreier { 1723509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1724aef9ec39SRoland Dreier struct srp_request *req; 1725aef9ec39SRoland Dreier struct scsi_cmnd *scmnd; 1726aef9ec39SRoland Dreier unsigned long flags; 1727aef9ec39SRoland Dreier 1728aef9ec39SRoland Dreier if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1729509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1730509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1731509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 173294a9174cSBart Van Assche 1733509c07bcSBart Van Assche ch->tsk_mgmt_status = -1; 1734f8b6e31eSDavid Dillow if (be32_to_cpu(rsp->resp_data_len) 
>= 4) 1735509c07bcSBart Van Assche ch->tsk_mgmt_status = rsp->data[3]; 1736509c07bcSBart Van Assche complete(&ch->tsk_mgmt_done); 1737aef9ec39SRoland Dreier } else { 173877f2c1a4SBart Van Assche scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 173977f2c1a4SBart Van Assche if (scmnd) { 174077f2c1a4SBart Van Assche req = (void *)scmnd->host_scribble; 174177f2c1a4SBart Van Assche scmnd = srp_claim_req(ch, req, NULL, scmnd); 174277f2c1a4SBart Van Assche } 174322032991SBart Van Assche if (!scmnd) { 17447aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1745d92c0da7SBart Van Assche "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1746d92c0da7SBart Van Assche rsp->tag, ch - target->ch, ch->qp->qp_num); 174722032991SBart Van Assche 1748509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1749509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1750509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 175122032991SBart Van Assche 175222032991SBart Van Assche return; 175322032991SBart Van Assche } 1754aef9ec39SRoland Dreier scmnd->result = rsp->status; 1755aef9ec39SRoland Dreier 1756aef9ec39SRoland Dreier if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1757aef9ec39SRoland Dreier memcpy(scmnd->sense_buffer, rsp->data + 1758aef9ec39SRoland Dreier be32_to_cpu(rsp->resp_data_len), 1759aef9ec39SRoland Dreier min_t(int, be32_to_cpu(rsp->sense_data_len), 1760aef9ec39SRoland Dreier SCSI_SENSE_BUFFERSIZE)); 1761aef9ec39SRoland Dreier } 1762aef9ec39SRoland Dreier 1763e714531aSBart Van Assche if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1764bb350d1dSFUJITA Tomonori scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1765e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) 1766e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); 1767e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1768e714531aSBart Van Assche 
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1769e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) 1770e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1771aef9ec39SRoland Dreier 1772509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 177322032991SBart Van Assche be32_to_cpu(rsp->req_lim_delta)); 177422032991SBart Van Assche 1775f8b6e31eSDavid Dillow scmnd->host_scribble = NULL; 1776aef9ec39SRoland Dreier scmnd->scsi_done(scmnd); 1777aef9ec39SRoland Dreier } 1778aef9ec39SRoland Dreier } 1779aef9ec39SRoland Dreier 1780509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1781bb12588aSDavid Dillow void *rsp, int len) 1782bb12588aSDavid Dillow { 1783509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 178476c75b25SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1785bb12588aSDavid Dillow unsigned long flags; 1786bb12588aSDavid Dillow struct srp_iu *iu; 178776c75b25SBart Van Assche int err; 1788bb12588aSDavid Dillow 1789509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1790509c07bcSBart Van Assche ch->req_lim += req_delta; 1791509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 1792509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 179376c75b25SBart Van Assche 1794bb12588aSDavid Dillow if (!iu) { 1795bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1796bb12588aSDavid Dillow "no IU available to send response\n"); 179776c75b25SBart Van Assche return 1; 1798bb12588aSDavid Dillow } 1799bb12588aSDavid Dillow 1800bb12588aSDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 1801bb12588aSDavid Dillow memcpy(iu->buf, rsp, len); 1802bb12588aSDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1803bb12588aSDavid Dillow 1804509c07bcSBart Van Assche err = srp_post_send(ch, iu, len); 180576c75b25SBart Van Assche if 
(err) { 1806bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1807bb12588aSDavid Dillow "unable to post response: %d\n", err); 1808509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_RSP); 180976c75b25SBart Van Assche } 1810bb12588aSDavid Dillow 1811bb12588aSDavid Dillow return err; 1812bb12588aSDavid Dillow } 1813bb12588aSDavid Dillow 1814509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch, 1815bb12588aSDavid Dillow struct srp_cred_req *req) 1816bb12588aSDavid Dillow { 1817bb12588aSDavid Dillow struct srp_cred_rsp rsp = { 1818bb12588aSDavid Dillow .opcode = SRP_CRED_RSP, 1819bb12588aSDavid Dillow .tag = req->tag, 1820bb12588aSDavid Dillow }; 1821bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1822bb12588aSDavid Dillow 1823509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1824509c07bcSBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, PFX 1825bb12588aSDavid Dillow "problems processing SRP_CRED_REQ\n"); 1826bb12588aSDavid Dillow } 1827bb12588aSDavid Dillow 1828509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch, 1829bb12588aSDavid Dillow struct srp_aer_req *req) 1830bb12588aSDavid Dillow { 1831509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1832bb12588aSDavid Dillow struct srp_aer_rsp rsp = { 1833bb12588aSDavid Dillow .opcode = SRP_AER_RSP, 1834bb12588aSDavid Dillow .tag = req->tag, 1835bb12588aSDavid Dillow }; 1836bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1837bb12588aSDavid Dillow 1838bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1839985aa495SBart Van Assche "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 1840bb12588aSDavid Dillow 1841509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1842bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1843bb12588aSDavid Dillow "problems processing SRP_AER_REQ\n"); 
1844bb12588aSDavid Dillow } 1845bb12588aSDavid Dillow 1846509c07bcSBart Van Assche static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) 1847aef9ec39SRoland Dreier { 1848509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1849dcb4cb85SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1850737b94ebSRoland Dreier struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1851c996bb47SBart Van Assche int res; 1852aef9ec39SRoland Dreier u8 opcode; 1853aef9ec39SRoland Dreier 1854509c07bcSBart Van Assche ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 185585507bccSRalph Campbell DMA_FROM_DEVICE); 1856aef9ec39SRoland Dreier 1857aef9ec39SRoland Dreier opcode = *(u8 *) iu->buf; 1858aef9ec39SRoland Dreier 1859aef9ec39SRoland Dreier if (0) { 18607aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 18617aa54bd7SDavid Dillow PFX "recv completion, opcode 0x%02x\n", opcode); 18627a700811SBart Van Assche print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 18637a700811SBart Van Assche iu->buf, wc->byte_len, true); 1864aef9ec39SRoland Dreier } 1865aef9ec39SRoland Dreier 1866aef9ec39SRoland Dreier switch (opcode) { 1867aef9ec39SRoland Dreier case SRP_RSP: 1868509c07bcSBart Van Assche srp_process_rsp(ch, iu->buf); 1869aef9ec39SRoland Dreier break; 1870aef9ec39SRoland Dreier 1871bb12588aSDavid Dillow case SRP_CRED_REQ: 1872509c07bcSBart Van Assche srp_process_cred_req(ch, iu->buf); 1873bb12588aSDavid Dillow break; 1874bb12588aSDavid Dillow 1875bb12588aSDavid Dillow case SRP_AER_REQ: 1876509c07bcSBart Van Assche srp_process_aer_req(ch, iu->buf); 1877bb12588aSDavid Dillow break; 1878bb12588aSDavid Dillow 1879aef9ec39SRoland Dreier case SRP_T_LOGOUT: 1880aef9ec39SRoland Dreier /* XXX Handle target logout */ 18817aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 18827aa54bd7SDavid Dillow PFX "Got target logout request\n"); 1883aef9ec39SRoland Dreier break; 1884aef9ec39SRoland Dreier 
1885aef9ec39SRoland Dreier default: 18867aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 18877aa54bd7SDavid Dillow PFX "Unhandled SRP opcode 0x%02x\n", opcode); 1888aef9ec39SRoland Dreier break; 1889aef9ec39SRoland Dreier } 1890aef9ec39SRoland Dreier 1891509c07bcSBart Van Assche ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 189285507bccSRalph Campbell DMA_FROM_DEVICE); 1893c996bb47SBart Van Assche 1894509c07bcSBart Van Assche res = srp_post_recv(ch, iu); 1895c996bb47SBart Van Assche if (res != 0) 1896c996bb47SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 1897c996bb47SBart Van Assche PFX "Recv failed with error code %d\n", res); 1898aef9ec39SRoland Dreier } 1899aef9ec39SRoland Dreier 1900c1120f89SBart Van Assche /** 1901c1120f89SBart Van Assche * srp_tl_err_work() - handle a transport layer error 1902af24663bSBart Van Assche * @work: Work structure embedded in an SRP target port. 1903c1120f89SBart Van Assche * 1904c1120f89SBart Van Assche * Note: This function may get invoked before the rport has been created, 1905c1120f89SBart Van Assche * hence the target->rport test. 
1906c1120f89SBart Van Assche */ 1907c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work) 1908c1120f89SBart Van Assche { 1909c1120f89SBart Van Assche struct srp_target_port *target; 1910c1120f89SBart Van Assche 1911c1120f89SBart Van Assche target = container_of(work, struct srp_target_port, tl_err_work); 1912c1120f89SBart Van Assche if (target->rport) 1913c1120f89SBart Van Assche srp_start_tl_fail_timers(target->rport); 1914c1120f89SBart Van Assche } 1915c1120f89SBart Van Assche 19165cfb1782SBart Van Assche static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 19177dad6b2eSBart Van Assche bool send_err, struct srp_rdma_ch *ch) 1918948d1e88SBart Van Assche { 19197dad6b2eSBart Van Assche struct srp_target_port *target = ch->target; 19207dad6b2eSBart Van Assche 19217dad6b2eSBart Van Assche if (wr_id == SRP_LAST_WR_ID) { 19227dad6b2eSBart Van Assche complete(&ch->done); 19237dad6b2eSBart Van Assche return; 19247dad6b2eSBart Van Assche } 19257dad6b2eSBart Van Assche 1926c014c8cdSBart Van Assche if (ch->connected && !target->qp_in_error) { 19275cfb1782SBart Van Assche if (wr_id & LOCAL_INV_WR_ID_MASK) { 19285cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 192957363d98SSagi Grimberg "LOCAL_INV failed with status %s (%d)\n", 193057363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19315cfb1782SBart Van Assche } else if (wr_id & FAST_REG_WR_ID_MASK) { 19325cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 193357363d98SSagi Grimberg "FAST_REG_MR failed status %s (%d)\n", 193457363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19355cfb1782SBart Van Assche } else { 19365cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 193757363d98SSagi Grimberg PFX "failed %s status %s (%d) for iu %p\n", 19385cfb1782SBart Van Assche send_err ? 
"send" : "receive", 193957363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status, 194057363d98SSagi Grimberg (void *)(uintptr_t)wr_id); 19415cfb1782SBart Van Assche } 1942c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 19434f0af697SBart Van Assche } 1944948d1e88SBart Van Assche target->qp_in_error = true; 1945948d1e88SBart Van Assche } 1946948d1e88SBart Van Assche 1947509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) 1948aef9ec39SRoland Dreier { 1949509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 1950aef9ec39SRoland Dreier struct ib_wc wc; 1951aef9ec39SRoland Dreier 1952aef9ec39SRoland Dreier ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1953aef9ec39SRoland Dreier while (ib_poll_cq(cq, 1, &wc) > 0) { 1954948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 1955509c07bcSBart Van Assche srp_handle_recv(ch, &wc); 1956948d1e88SBart Van Assche } else { 19577dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, false, ch); 1958aef9ec39SRoland Dreier } 19599c03dc9fSBart Van Assche } 19609c03dc9fSBart Van Assche } 19619c03dc9fSBart Van Assche 1962509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) 19639c03dc9fSBart Van Assche { 1964509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 19659c03dc9fSBart Van Assche struct ib_wc wc; 1966dcb4cb85SBart Van Assche struct srp_iu *iu; 19679c03dc9fSBart Van Assche 19689c03dc9fSBart Van Assche while (ib_poll_cq(cq, 1, &wc) > 0) { 1969948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 1970737b94ebSRoland Dreier iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 1971509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 1972948d1e88SBart Van Assche } else { 19737dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, true, ch); 1974948d1e88SBart Van Assche } 1975aef9ec39SRoland Dreier } 1976aef9ec39SRoland Dreier } 1977aef9ec39SRoland Dreier 197876c75b25SBart Van Assche static 
int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 1979aef9ec39SRoland Dreier { 198076c75b25SBart Van Assche struct srp_target_port *target = host_to_target(shost); 1981a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 1982509c07bcSBart Van Assche struct srp_rdma_ch *ch; 1983aef9ec39SRoland Dreier struct srp_request *req; 1984aef9ec39SRoland Dreier struct srp_iu *iu; 1985aef9ec39SRoland Dreier struct srp_cmd *cmd; 198685507bccSRalph Campbell struct ib_device *dev; 198776c75b25SBart Van Assche unsigned long flags; 198877f2c1a4SBart Van Assche u32 tag; 198977f2c1a4SBart Van Assche u16 idx; 1990d1b4289eSBart Van Assche int len, ret; 1991a95cadb9SBart Van Assche const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 1992a95cadb9SBart Van Assche 1993a95cadb9SBart Van Assche /* 1994a95cadb9SBart Van Assche * The SCSI EH thread is the only context from which srp_queuecommand() 1995a95cadb9SBart Van Assche * can get invoked for blocked devices (SDEV_BLOCK / 1996a95cadb9SBart Van Assche * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by 1997a95cadb9SBart Van Assche * locking the rport mutex if invoked from inside the SCSI EH. 
1998a95cadb9SBart Van Assche */ 1999a95cadb9SBart Van Assche if (in_scsi_eh) 2000a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2001aef9ec39SRoland Dreier 2002d1b4289eSBart Van Assche scmnd->result = srp_chkready(target->rport); 2003d1b4289eSBart Van Assche if (unlikely(scmnd->result)) 2004d1b4289eSBart Van Assche goto err; 20052ce19e72SBart Van Assche 200677f2c1a4SBart Van Assche WARN_ON_ONCE(scmnd->request->tag < 0); 200777f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2008d92c0da7SBart Van Assche ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 200977f2c1a4SBart Van Assche idx = blk_mq_unique_tag_to_tag(tag); 201077f2c1a4SBart Van Assche WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", 201177f2c1a4SBart Van Assche dev_name(&shost->shost_gendev), tag, idx, 201277f2c1a4SBart Van Assche target->req_ring_size); 2013509c07bcSBart Van Assche 2014509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 2015509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_CMD); 2016509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 2017aef9ec39SRoland Dreier 201877f2c1a4SBart Van Assche if (!iu) 201977f2c1a4SBart Van Assche goto err; 202077f2c1a4SBart Van Assche 202177f2c1a4SBart Van Assche req = &ch->req_ring[idx]; 202205321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev->dev; 202349248644SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 202485507bccSRalph Campbell DMA_TO_DEVICE); 2025aef9ec39SRoland Dreier 2026f8b6e31eSDavid Dillow scmnd->host_scribble = (void *) req; 2027aef9ec39SRoland Dreier 2028aef9ec39SRoland Dreier cmd = iu->buf; 2029aef9ec39SRoland Dreier memset(cmd, 0, sizeof *cmd); 2030aef9ec39SRoland Dreier 2031aef9ec39SRoland Dreier cmd->opcode = SRP_CMD; 2032985aa495SBart Van Assche int_to_scsilun(scmnd->device->lun, &cmd->lun); 203377f2c1a4SBart Van Assche cmd->tag = tag; 2034aef9ec39SRoland Dreier memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2035aef9ec39SRoland 
Dreier 2036aef9ec39SRoland Dreier req->scmnd = scmnd; 2037aef9ec39SRoland Dreier req->cmd = iu; 2038aef9ec39SRoland Dreier 2039509c07bcSBart Van Assche len = srp_map_data(scmnd, ch, req); 2040aef9ec39SRoland Dreier if (len < 0) { 20417aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 2042d1b4289eSBart Van Assche PFX "Failed to map data (%d)\n", len); 2043d1b4289eSBart Van Assche /* 2044d1b4289eSBart Van Assche * If we ran out of memory descriptors (-ENOMEM) because an 2045d1b4289eSBart Van Assche * application is queuing many requests with more than 204652ede08fSBart Van Assche * max_pages_per_mr sg-list elements, tell the SCSI mid-layer 2047d1b4289eSBart Van Assche * to reduce queue depth temporarily. 2048d1b4289eSBart Van Assche */ 2049d1b4289eSBart Van Assche scmnd->result = len == -ENOMEM ? 2050d1b4289eSBart Van Assche DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; 205176c75b25SBart Van Assche goto err_iu; 2052aef9ec39SRoland Dreier } 2053aef9ec39SRoland Dreier 205449248644SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 205585507bccSRalph Campbell DMA_TO_DEVICE); 2056aef9ec39SRoland Dreier 2057509c07bcSBart Van Assche if (srp_post_send(ch, iu, len)) { 20587aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2059aef9ec39SRoland Dreier goto err_unmap; 2060aef9ec39SRoland Dreier } 2061aef9ec39SRoland Dreier 2062d1b4289eSBart Van Assche ret = 0; 2063d1b4289eSBart Van Assche 2064a95cadb9SBart Van Assche unlock_rport: 2065a95cadb9SBart Van Assche if (in_scsi_eh) 2066a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2067a95cadb9SBart Van Assche 2068d1b4289eSBart Van Assche return ret; 2069aef9ec39SRoland Dreier 2070aef9ec39SRoland Dreier err_unmap: 2071509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 2072aef9ec39SRoland Dreier 207376c75b25SBart Van Assche err_iu: 2074509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_CMD); 207576c75b25SBart Van Assche 2076024ca901SBart Van 
Assche /* 2077024ca901SBart Van Assche * Avoid that the loops that iterate over the request ring can 2078024ca901SBart Van Assche * encounter a dangling SCSI command pointer. 2079024ca901SBart Van Assche */ 2080024ca901SBart Van Assche req->scmnd = NULL; 2081024ca901SBart Van Assche 2082d1b4289eSBart Van Assche err: 2083d1b4289eSBart Van Assche if (scmnd->result) { 2084d1b4289eSBart Van Assche scmnd->scsi_done(scmnd); 2085d1b4289eSBart Van Assche ret = 0; 2086d1b4289eSBart Van Assche } else { 2087d1b4289eSBart Van Assche ret = SCSI_MLQUEUE_HOST_BUSY; 2088d1b4289eSBart Van Assche } 2089a95cadb9SBart Van Assche 2090d1b4289eSBart Van Assche goto unlock_rport; 2091aef9ec39SRoland Dreier } 2092aef9ec39SRoland Dreier 20934d73f95fSBart Van Assche /* 20944d73f95fSBart Van Assche * Note: the resources allocated in this function are freed in 2095509c07bcSBart Van Assche * srp_free_ch_ib(). 20964d73f95fSBart Van Assche */ 2097509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2098aef9ec39SRoland Dreier { 2099509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2100aef9ec39SRoland Dreier int i; 2101aef9ec39SRoland Dreier 2102509c07bcSBart Van Assche ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), 21034d73f95fSBart Van Assche GFP_KERNEL); 2104509c07bcSBart Van Assche if (!ch->rx_ring) 21054d73f95fSBart Van Assche goto err_no_ring; 2106509c07bcSBart Van Assche ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), 21074d73f95fSBart Van Assche GFP_KERNEL); 2108509c07bcSBart Van Assche if (!ch->tx_ring) 21094d73f95fSBart Van Assche goto err_no_ring; 21104d73f95fSBart Van Assche 21114d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2112509c07bcSBart Van Assche ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2113509c07bcSBart Van Assche ch->max_ti_iu_len, 2114aef9ec39SRoland Dreier GFP_KERNEL, DMA_FROM_DEVICE); 2115509c07bcSBart Van Assche if (!ch->rx_ring[i]) 2116aef9ec39SRoland Dreier goto err; 
2117aef9ec39SRoland Dreier } 2118aef9ec39SRoland Dreier 21194d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2120509c07bcSBart Van Assche ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 212149248644SDavid Dillow target->max_iu_len, 2122aef9ec39SRoland Dreier GFP_KERNEL, DMA_TO_DEVICE); 2123509c07bcSBart Van Assche if (!ch->tx_ring[i]) 2124aef9ec39SRoland Dreier goto err; 2125dcb4cb85SBart Van Assche 2126509c07bcSBart Van Assche list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2127aef9ec39SRoland Dreier } 2128aef9ec39SRoland Dreier 2129aef9ec39SRoland Dreier return 0; 2130aef9ec39SRoland Dreier 2131aef9ec39SRoland Dreier err: 21324d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2133509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 2134509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 2135aef9ec39SRoland Dreier } 2136aef9ec39SRoland Dreier 21374d73f95fSBart Van Assche 21384d73f95fSBart Van Assche err_no_ring: 2139509c07bcSBart Van Assche kfree(ch->tx_ring); 2140509c07bcSBart Van Assche ch->tx_ring = NULL; 2141509c07bcSBart Van Assche kfree(ch->rx_ring); 2142509c07bcSBart Van Assche ch->rx_ring = NULL; 2143aef9ec39SRoland Dreier 2144aef9ec39SRoland Dreier return -ENOMEM; 2145aef9ec39SRoland Dreier } 2146aef9ec39SRoland Dreier 2147c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2148c9b03c1aSBart Van Assche { 2149c9b03c1aSBart Van Assche uint64_t T_tr_ns, max_compl_time_ms; 2150c9b03c1aSBart Van Assche uint32_t rq_tmo_jiffies; 2151c9b03c1aSBart Van Assche 2152c9b03c1aSBart Van Assche /* 2153c9b03c1aSBart Van Assche * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2154c9b03c1aSBart Van Assche * table 91), both the QP timeout and the retry count have to be set 2155c9b03c1aSBart Van Assche * for RC QP's during the RTR to RTS transition. 
2156c9b03c1aSBart Van Assche */ 2157c9b03c1aSBart Van Assche WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2158c9b03c1aSBart Van Assche (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2159c9b03c1aSBart Van Assche 2160c9b03c1aSBart Van Assche /* 2161c9b03c1aSBart Van Assche * Set target->rq_tmo_jiffies to one second more than the largest time 2162c9b03c1aSBart Van Assche * it can take before an error completion is generated. See also 2163c9b03c1aSBart Van Assche * C9-140..142 in the IBTA spec for more information about how to 2164c9b03c1aSBart Van Assche * convert the QP Local ACK Timeout value to nanoseconds. 2165c9b03c1aSBart Van Assche */ 2166c9b03c1aSBart Van Assche T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2167c9b03c1aSBart Van Assche max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2168c9b03c1aSBart Van Assche do_div(max_compl_time_ms, NSEC_PER_MSEC); 2169c9b03c1aSBart Van Assche rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2170c9b03c1aSBart Van Assche 2171c9b03c1aSBart Van Assche return rq_tmo_jiffies; 2172c9b03c1aSBart Van Assche } 2173c9b03c1aSBart Van Assche 2174961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2175e6300cbdSBart Van Assche const struct srp_login_rsp *lrsp, 2176509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2177961e0be8SDavid Dillow { 2178509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2179961e0be8SDavid Dillow struct ib_qp_attr *qp_attr = NULL; 2180961e0be8SDavid Dillow int attr_mask = 0; 2181961e0be8SDavid Dillow int ret; 2182961e0be8SDavid Dillow int i; 2183961e0be8SDavid Dillow 2184961e0be8SDavid Dillow if (lrsp->opcode == SRP_LOGIN_RSP) { 2185509c07bcSBart Van Assche ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2186509c07bcSBart Van Assche ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2187961e0be8SDavid Dillow 2188961e0be8SDavid Dillow /* 2189961e0be8SDavid Dillow * Reserve credits for task management so we don't 2190961e0be8SDavid Dillow * 
bounce requests back to the SCSI mid-layer. 2191961e0be8SDavid Dillow */ 2192961e0be8SDavid Dillow target->scsi_host->can_queue 2193509c07bcSBart Van Assche = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2194961e0be8SDavid Dillow target->scsi_host->can_queue); 21954d73f95fSBart Van Assche target->scsi_host->cmd_per_lun 21964d73f95fSBart Van Assche = min_t(int, target->scsi_host->can_queue, 21974d73f95fSBart Van Assche target->scsi_host->cmd_per_lun); 2198961e0be8SDavid Dillow } else { 2199961e0be8SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 2200961e0be8SDavid Dillow PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2201961e0be8SDavid Dillow ret = -ECONNRESET; 2202961e0be8SDavid Dillow goto error; 2203961e0be8SDavid Dillow } 2204961e0be8SDavid Dillow 2205509c07bcSBart Van Assche if (!ch->rx_ring) { 2206509c07bcSBart Van Assche ret = srp_alloc_iu_bufs(ch); 2207961e0be8SDavid Dillow if (ret) 2208961e0be8SDavid Dillow goto error; 2209961e0be8SDavid Dillow } 2210961e0be8SDavid Dillow 2211961e0be8SDavid Dillow ret = -ENOMEM; 2212961e0be8SDavid Dillow qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 2213961e0be8SDavid Dillow if (!qp_attr) 2214961e0be8SDavid Dillow goto error; 2215961e0be8SDavid Dillow 2216961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTR; 2217961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2218961e0be8SDavid Dillow if (ret) 2219961e0be8SDavid Dillow goto error_free; 2220961e0be8SDavid Dillow 2221509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2222961e0be8SDavid Dillow if (ret) 2223961e0be8SDavid Dillow goto error_free; 2224961e0be8SDavid Dillow 22254d73f95fSBart Van Assche for (i = 0; i < target->queue_size; i++) { 2226509c07bcSBart Van Assche struct srp_iu *iu = ch->rx_ring[i]; 2227509c07bcSBart Van Assche 2228509c07bcSBart Van Assche ret = srp_post_recv(ch, iu); 2229961e0be8SDavid Dillow if (ret) 2230961e0be8SDavid Dillow goto error_free; 2231961e0be8SDavid Dillow } 2232961e0be8SDavid 
Dillow 2233961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTS; 2234961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2235961e0be8SDavid Dillow if (ret) 2236961e0be8SDavid Dillow goto error_free; 2237961e0be8SDavid Dillow 2238c9b03c1aSBart Van Assche target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2239c9b03c1aSBart Van Assche 2240509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2241961e0be8SDavid Dillow if (ret) 2242961e0be8SDavid Dillow goto error_free; 2243961e0be8SDavid Dillow 2244961e0be8SDavid Dillow ret = ib_send_cm_rtu(cm_id, NULL, 0); 2245961e0be8SDavid Dillow 2246961e0be8SDavid Dillow error_free: 2247961e0be8SDavid Dillow kfree(qp_attr); 2248961e0be8SDavid Dillow 2249961e0be8SDavid Dillow error: 2250509c07bcSBart Van Assche ch->status = ret; 2251961e0be8SDavid Dillow } 2252961e0be8SDavid Dillow 2253aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2254aef9ec39SRoland Dreier struct ib_cm_event *event, 2255509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2256aef9ec39SRoland Dreier { 2257509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 22587aa54bd7SDavid Dillow struct Scsi_Host *shost = target->scsi_host; 2259aef9ec39SRoland Dreier struct ib_class_port_info *cpi; 2260aef9ec39SRoland Dreier int opcode; 2261aef9ec39SRoland Dreier 2262aef9ec39SRoland Dreier switch (event->param.rej_rcvd.reason) { 2263aef9ec39SRoland Dreier case IB_CM_REJ_PORT_CM_REDIRECT: 2264aef9ec39SRoland Dreier cpi = event->param.rej_rcvd.ari; 2265509c07bcSBart Van Assche ch->path.dlid = cpi->redirect_lid; 2266509c07bcSBart Van Assche ch->path.pkey = cpi->redirect_pkey; 2267aef9ec39SRoland Dreier cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2268509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); 2269aef9ec39SRoland Dreier 2270509c07bcSBart Van Assche ch->status = ch->path.dlid ? 
2271aef9ec39SRoland Dreier SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2272aef9ec39SRoland Dreier break; 2273aef9ec39SRoland Dreier 2274aef9ec39SRoland Dreier case IB_CM_REJ_PORT_REDIRECT: 22755d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 2276aef9ec39SRoland Dreier /* 2277aef9ec39SRoland Dreier * Topspin/Cisco SRP gateways incorrectly send 2278aef9ec39SRoland Dreier * reject reason code 25 when they mean 24 2279aef9ec39SRoland Dreier * (port redirect). 2280aef9ec39SRoland Dreier */ 2281509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, 2282aef9ec39SRoland Dreier event->param.rej_rcvd.ari, 16); 2283aef9ec39SRoland Dreier 22847aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, shost, 22857aa54bd7SDavid Dillow PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2286509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.subnet_prefix), 2287509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.interface_id)); 2288aef9ec39SRoland Dreier 2289509c07bcSBart Van Assche ch->status = SRP_PORT_REDIRECT; 2290aef9ec39SRoland Dreier } else { 22917aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 22927aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2293509c07bcSBart Van Assche ch->status = -ECONNRESET; 2294aef9ec39SRoland Dreier } 2295aef9ec39SRoland Dreier break; 2296aef9ec39SRoland Dreier 2297aef9ec39SRoland Dreier case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 22987aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 22997aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2300509c07bcSBart Van Assche ch->status = -ECONNRESET; 2301aef9ec39SRoland Dreier break; 2302aef9ec39SRoland Dreier 2303aef9ec39SRoland Dreier case IB_CM_REJ_CONSUMER_DEFINED: 2304aef9ec39SRoland Dreier opcode = *(u8 *) event->private_data; 2305aef9ec39SRoland Dreier if (opcode == SRP_LOGIN_REJ) { 2306aef9ec39SRoland Dreier struct srp_login_rej *rej = event->private_data; 2307aef9ec39SRoland Dreier u32 reason = be32_to_cpu(rej->reason); 
2308aef9ec39SRoland Dreier 2309aef9ec39SRoland Dreier if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 23107aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23117aa54bd7SDavid Dillow PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2312aef9ec39SRoland Dreier else 2313e7ffde01SBart Van Assche shost_printk(KERN_WARNING, shost, PFX 2314e7ffde01SBart Van Assche "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2315747fe000SBart Van Assche target->sgid.raw, 2316747fe000SBart Van Assche target->orig_dgid.raw, reason); 2317aef9ec39SRoland Dreier } else 23187aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23197aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2320aef9ec39SRoland Dreier " opcode 0x%02x\n", opcode); 2321509c07bcSBart Van Assche ch->status = -ECONNRESET; 2322aef9ec39SRoland Dreier break; 2323aef9ec39SRoland Dreier 23249fe4bcf4SDavid Dillow case IB_CM_REJ_STALE_CONN: 23259fe4bcf4SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2326509c07bcSBart Van Assche ch->status = SRP_STALE_CONN; 23279fe4bcf4SDavid Dillow break; 23289fe4bcf4SDavid Dillow 2329aef9ec39SRoland Dreier default: 23307aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2331aef9ec39SRoland Dreier event->param.rej_rcvd.reason); 2332509c07bcSBart Van Assche ch->status = -ECONNRESET; 2333aef9ec39SRoland Dreier } 2334aef9ec39SRoland Dreier } 2335aef9ec39SRoland Dreier 2336aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2337aef9ec39SRoland Dreier { 2338509c07bcSBart Van Assche struct srp_rdma_ch *ch = cm_id->context; 2339509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2340aef9ec39SRoland Dreier int comp = 0; 2341aef9ec39SRoland Dreier 2342aef9ec39SRoland Dreier switch (event->event) { 2343aef9ec39SRoland Dreier case IB_CM_REQ_ERROR: 23447aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 23457aa54bd7SDavid 
Dillow PFX "Sending CM REQ failed\n"); 2346aef9ec39SRoland Dreier comp = 1; 2347509c07bcSBart Van Assche ch->status = -ECONNRESET; 2348aef9ec39SRoland Dreier break; 2349aef9ec39SRoland Dreier 2350aef9ec39SRoland Dreier case IB_CM_REP_RECEIVED: 2351aef9ec39SRoland Dreier comp = 1; 2352509c07bcSBart Van Assche srp_cm_rep_handler(cm_id, event->private_data, ch); 2353aef9ec39SRoland Dreier break; 2354aef9ec39SRoland Dreier 2355aef9ec39SRoland Dreier case IB_CM_REJ_RECEIVED: 23567aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2357aef9ec39SRoland Dreier comp = 1; 2358aef9ec39SRoland Dreier 2359509c07bcSBart Van Assche srp_cm_rej_handler(cm_id, event, ch); 2360aef9ec39SRoland Dreier break; 2361aef9ec39SRoland Dreier 2362b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_RECEIVED: 23637aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 23647aa54bd7SDavid Dillow PFX "DREQ received - connection closed\n"); 2365c014c8cdSBart Van Assche ch->connected = false; 2366b7ac4ab4SIshai Rabinovitz if (ib_send_cm_drep(cm_id, NULL, 0)) 23677aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 23687aa54bd7SDavid Dillow PFX "Sending CM DREP failed\n"); 2369c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 2370aef9ec39SRoland Dreier break; 2371aef9ec39SRoland Dreier 2372aef9ec39SRoland Dreier case IB_CM_TIMEWAIT_EXIT: 23737aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 23747aa54bd7SDavid Dillow PFX "connection closed\n"); 2375ac72d766SBart Van Assche comp = 1; 2376aef9ec39SRoland Dreier 2377509c07bcSBart Van Assche ch->status = 0; 2378aef9ec39SRoland Dreier break; 2379aef9ec39SRoland Dreier 2380b7ac4ab4SIshai Rabinovitz case IB_CM_MRA_RECEIVED: 2381b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_ERROR: 2382b7ac4ab4SIshai Rabinovitz case IB_CM_DREP_RECEIVED: 2383b7ac4ab4SIshai Rabinovitz break; 2384b7ac4ab4SIshai Rabinovitz 2385aef9ec39SRoland Dreier default: 23867aa54bd7SDavid Dillow 
shost_printk(KERN_WARNING, target->scsi_host, 23877aa54bd7SDavid Dillow PFX "Unhandled CM event %d\n", event->event); 2388aef9ec39SRoland Dreier break; 2389aef9ec39SRoland Dreier } 2390aef9ec39SRoland Dreier 2391aef9ec39SRoland Dreier if (comp) 2392509c07bcSBart Van Assche complete(&ch->done); 2393aef9ec39SRoland Dreier 2394aef9ec39SRoland Dreier return 0; 2395aef9ec39SRoland Dreier } 2396aef9ec39SRoland Dreier 239771444b97SJack Wang /** 239871444b97SJack Wang * srp_change_queue_depth - setting device queue depth 239971444b97SJack Wang * @sdev: scsi device struct 240071444b97SJack Wang * @qdepth: requested queue depth 240171444b97SJack Wang * 240271444b97SJack Wang * Returns queue depth. 240371444b97SJack Wang */ 240471444b97SJack Wang static int 2405db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 240671444b97SJack Wang { 240771444b97SJack Wang if (!sdev->tagged_supported) 24081e6f2416SChristoph Hellwig qdepth = 1; 2409db5ed4dfSChristoph Hellwig return scsi_change_queue_depth(sdev, qdepth); 241071444b97SJack Wang } 241171444b97SJack Wang 2412985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2413985aa495SBart Van Assche u8 func) 2414aef9ec39SRoland Dreier { 2415509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2416a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 241719081f31SDavid Dillow struct ib_device *dev = target->srp_host->srp_dev->dev; 2418aef9ec39SRoland Dreier struct srp_iu *iu; 2419aef9ec39SRoland Dreier struct srp_tsk_mgmt *tsk_mgmt; 2420aef9ec39SRoland Dreier 2421c014c8cdSBart Van Assche if (!ch->connected || target->qp_in_error) 24223780d1f0SBart Van Assche return -1; 24233780d1f0SBart Van Assche 2424509c07bcSBart Van Assche init_completion(&ch->tsk_mgmt_done); 2425aef9ec39SRoland Dreier 2426a95cadb9SBart Van Assche /* 2427509c07bcSBart Van Assche * Lock the rport mutex to avoid that srp_create_ch_ib() is 2428a95cadb9SBart Van 
Assche * invoked while a task management function is being sent. 2429a95cadb9SBart Van Assche */ 2430a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2431509c07bcSBart Van Assche spin_lock_irq(&ch->lock); 2432509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2433509c07bcSBart Van Assche spin_unlock_irq(&ch->lock); 243476c75b25SBart Van Assche 2435a95cadb9SBart Van Assche if (!iu) { 2436a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2437a95cadb9SBart Van Assche 243876c75b25SBart Van Assche return -1; 2439a95cadb9SBart Van Assche } 2440aef9ec39SRoland Dreier 244119081f31SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 244219081f31SDavid Dillow DMA_TO_DEVICE); 2443aef9ec39SRoland Dreier tsk_mgmt = iu->buf; 2444aef9ec39SRoland Dreier memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2445aef9ec39SRoland Dreier 2446aef9ec39SRoland Dreier tsk_mgmt->opcode = SRP_TSK_MGMT; 2447985aa495SBart Van Assche int_to_scsilun(lun, &tsk_mgmt->lun); 2448f8b6e31eSDavid Dillow tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 2449aef9ec39SRoland Dreier tsk_mgmt->tsk_mgmt_func = func; 2450f8b6e31eSDavid Dillow tsk_mgmt->task_tag = req_tag; 2451aef9ec39SRoland Dreier 245219081f31SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 245319081f31SDavid Dillow DMA_TO_DEVICE); 2454509c07bcSBart Van Assche if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2455509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2456a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2457a95cadb9SBart Van Assche 245876c75b25SBart Van Assche return -1; 245976c75b25SBart Van Assche } 2460a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2461d945e1dfSRoland Dreier 2462509c07bcSBart Van Assche if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, 2463aef9ec39SRoland Dreier msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2464d945e1dfSRoland Dreier return -1; 2465aef9ec39SRoland Dreier 2466d945e1dfSRoland Dreier return 0; 2467d945e1dfSRoland Dreier } 
2468d945e1dfSRoland Dreier 2469aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd) 2470aef9ec39SRoland Dreier { 2471d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2472f8b6e31eSDavid Dillow struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 247377f2c1a4SBart Van Assche u32 tag; 2474d92c0da7SBart Van Assche u16 ch_idx; 2475509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2476086f44f5SBart Van Assche int ret; 2477d945e1dfSRoland Dreier 24787aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2479aef9ec39SRoland Dreier 2480d92c0da7SBart Van Assche if (!req) 248199b6697aSBart Van Assche return SUCCESS; 248277f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2483d92c0da7SBart Van Assche ch_idx = blk_mq_unique_tag_to_hwq(tag); 2484d92c0da7SBart Van Assche if (WARN_ON_ONCE(ch_idx >= target->ch_count)) 2485d92c0da7SBart Van Assche return SUCCESS; 2486d92c0da7SBart Van Assche ch = &target->ch[ch_idx]; 2487d92c0da7SBart Van Assche if (!srp_claim_req(ch, req, NULL, scmnd)) 2488d92c0da7SBart Van Assche return SUCCESS; 2489d92c0da7SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 2490d92c0da7SBart Van Assche "Sending SRP abort for tag %#x\n", tag); 249177f2c1a4SBart Van Assche if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, 249280d5e8a2SBart Van Assche SRP_TSK_ABORT_TASK) == 0) 2493086f44f5SBart Van Assche ret = SUCCESS; 2494ed9b2264SBart Van Assche else if (target->rport->state == SRP_RPORT_LOST) 249599e1c139SBart Van Assche ret = FAST_IO_FAIL; 2496086f44f5SBart Van Assche else 2497086f44f5SBart Van Assche ret = FAILED; 2498509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 2499d945e1dfSRoland Dreier scmnd->result = DID_ABORT << 16; 2500d8536670SBart Van Assche scmnd->scsi_done(scmnd); 2501d945e1dfSRoland Dreier 2502086f44f5SBart Van Assche return ret; 2503aef9ec39SRoland Dreier } 2504aef9ec39SRoland Dreier 2505aef9ec39SRoland 
Dreier static int srp_reset_device(struct scsi_cmnd *scmnd) 2506aef9ec39SRoland Dreier { 2507d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2508d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2509536ae14eSBart Van Assche int i; 2510d945e1dfSRoland Dreier 25117aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2512aef9ec39SRoland Dreier 2513d92c0da7SBart Van Assche ch = &target->ch[0]; 2514509c07bcSBart Van Assche if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, 2515f8b6e31eSDavid Dillow SRP_TSK_LUN_RESET)) 2516d945e1dfSRoland Dreier return FAILED; 2517509c07bcSBart Van Assche if (ch->tsk_mgmt_status) 2518d945e1dfSRoland Dreier return FAILED; 2519d945e1dfSRoland Dreier 2520d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2521d92c0da7SBart Van Assche ch = &target->ch[i]; 25224d73f95fSBart Van Assche for (i = 0; i < target->req_ring_size; ++i) { 2523509c07bcSBart Van Assche struct srp_request *req = &ch->req_ring[i]; 2524509c07bcSBart Van Assche 2525509c07bcSBart Van Assche srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2526536ae14eSBart Van Assche } 2527d92c0da7SBart Van Assche } 2528d945e1dfSRoland Dreier 2529d945e1dfSRoland Dreier return SUCCESS; 2530aef9ec39SRoland Dreier } 2531aef9ec39SRoland Dreier 2532aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd) 2533aef9ec39SRoland Dreier { 2534aef9ec39SRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2535aef9ec39SRoland Dreier 25367aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 2537aef9ec39SRoland Dreier 2538ed9b2264SBart Van Assche return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; 2539aef9ec39SRoland Dreier } 2540aef9ec39SRoland Dreier 2541c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev) 2542c9b03c1aSBart Van Assche { 2543c9b03c1aSBart Van Assche struct Scsi_Host *shost = sdev->host; 2544c9b03c1aSBart Van Assche struct srp_target_port *target = host_to_target(shost); 2545c9b03c1aSBart Van Assche struct request_queue *q = sdev->request_queue; 2546c9b03c1aSBart Van Assche unsigned long timeout; 2547c9b03c1aSBart Van Assche 2548c9b03c1aSBart Van Assche if (sdev->type == TYPE_DISK) { 2549c9b03c1aSBart Van Assche timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2550c9b03c1aSBart Van Assche blk_queue_rq_timeout(q, timeout); 2551c9b03c1aSBart Van Assche } 2552c9b03c1aSBart Van Assche 2553c9b03c1aSBart Van Assche return 0; 2554c9b03c1aSBart Van Assche } 2555c9b03c1aSBart Van Assche 2556ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 2557ee959b00STony Jones char *buf) 25586ecb0c84SRoland Dreier { 2559ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25606ecb0c84SRoland Dreier 256145c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 25626ecb0c84SRoland Dreier } 25636ecb0c84SRoland Dreier 2564ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 2565ee959b00STony Jones char *buf) 25666ecb0c84SRoland Dreier { 2567ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25686ecb0c84SRoland Dreier 256945c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 25706ecb0c84SRoland Dreier } 25716ecb0c84SRoland Dreier 2572ee959b00STony Jones static ssize_t show_service_id(struct device *dev, 2573ee959b00STony Jones struct device_attribute *attr, char *buf) 25746ecb0c84SRoland Dreier { 2575ee959b00STony Jones struct srp_target_port *target = 
host_to_target(class_to_shost(dev)); 25766ecb0c84SRoland Dreier 257745c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); 25786ecb0c84SRoland Dreier } 25796ecb0c84SRoland Dreier 2580ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 2581ee959b00STony Jones char *buf) 25826ecb0c84SRoland Dreier { 2583ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25846ecb0c84SRoland Dreier 2585747fe000SBart Van Assche return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); 25866ecb0c84SRoland Dreier } 25876ecb0c84SRoland Dreier 2588848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2589848b3082SBart Van Assche char *buf) 2590848b3082SBart Van Assche { 2591848b3082SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2592848b3082SBart Van Assche 2593747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->sgid.raw); 2594848b3082SBart Van Assche } 2595848b3082SBart Van Assche 2596ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2597ee959b00STony Jones char *buf) 25986ecb0c84SRoland Dreier { 2599ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2600d92c0da7SBart Van Assche struct srp_rdma_ch *ch = &target->ch[0]; 26016ecb0c84SRoland Dreier 2602509c07bcSBart Van Assche return sprintf(buf, "%pI6\n", ch->path.dgid.raw); 26036ecb0c84SRoland Dreier } 26046ecb0c84SRoland Dreier 2605ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev, 2606ee959b00STony Jones struct device_attribute *attr, char *buf) 26073633b3d0SIshai Rabinovitz { 2608ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26093633b3d0SIshai Rabinovitz 2610747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->orig_dgid.raw); 26113633b3d0SIshai Rabinovitz } 
/* sysfs 'req_lim' attribute: smallest SRP request limit over all channels. */
static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

/* sysfs 'zero_req_lim' attribute: how often the request limit hit zero. */
static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

/* sysfs 'local_ib_port' attribute: local HCA port number in use. */
static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

/* sysfs 'local_ib_device' attribute: name of the local HCA in use. */
static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

/* sysfs 'ch_count' attribute: number of RDMA channels to the target. */
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

/* sysfs 'comp_vector' attribute: completion vector used for the CQs. */
static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

/* sysfs 'tl_retry_count' attribute: IB transport-layer retry count. */
static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

/* sysfs 'cmd_sg_entries' attribute: max s/g entries per SRP command IU. */
static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

/* sysfs 'allow_ext_sg' attribute: whether external indirect s/g is allowed. */
static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

/* Read-only sysfs attributes exported on the SCSI host. */
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,	    S_IRUGO, show_ch_count,	   NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,	   NULL);

/* NULL-terminated attribute list hooked into srp_template.shost_attrs. */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

/* SCSI host template for SRP initiator hosts. */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};

/* Count the SCSI devices attached to a host (signature continues below). */
static int srp_sdev_count(struct
			  Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * srp_add_target() - register a freshly logged-in target with the SCSI
 * mid-layer and the SRP transport class, then scan it for LUNs.
 *
 * Note: always returns 0; a failed scan schedules asynchronous removal
 * of the SCSI host instead of reporting an error to the caller.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* The rport identifier is the concatenation of id_ext and ioc_guid. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	/* If a channel dropped during the scan, tear the host down again. */
	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

/* Device release callback of srp_class: wakes up srp_remove_one(). */
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		/* Two logins clash if id_ext, ioc_guid and initiator_ext match. */
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	/* Options that are mandatory in an add_target string. */
	SRP_OPT_ALL = (SRP_OPT_ID_EXT |
		       SRP_OPT_IOC_GUID |
		       SRP_OPT_DGID |
		       SRP_OPT_PKEY |
		       SRP_OPT_SERVICE_ID),
};

/* match_token() table mapping option names to SRP_OPT_* flags. */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" 	},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x" 		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s" 	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u" 	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u" 	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u" 	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u" 	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u" 	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d" 	},
	{ SRP_OPT_ERR,			NULL 			}
};

/*
 * srp_parse_options() - parse an add_target option string into @target.
 *
 * @buf is a comma- or newline-separated list of key=value options (see
 * srp_opt_tokens).  Returns 0 on success, -EINVAL on a malformed or
 * missing mandatory option, or -ENOMEM on allocation failure.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];	/* holds one two-hex-digit byte of the GID + NUL */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	/* Work on a writable copy since strsep() modifies its argument. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* A GID is exactly 16 bytes, i.e. 32 hex digits. */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert two hex digits at a time into raw bytes. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			/* Reserve extra slots for RSP and task management IUs. */
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	/* All mandatory options must have been seen; warn about each gap. */
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}

/*
 * srp_create_target() - 'add_target' sysfs store handler: allocate and
 * log in a new target port.  (Definition continues beyond this excerpt.)
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	/* Reject a second login to a target we are already connected to. */
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ?
be64_to_cpu(target->initiator_ext)); 318296fc248aSBart Van Assche ret = -EEXIST; 3183fb49c8bbSBart Van Assche goto out; 318496fc248aSBart Van Assche } 318596fc248aSBart Van Assche 31865cfb1782SBart Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && 3187c07d424dSDavid Dillow target->cmd_sg_cnt < target->sg_tablesize) { 31885cfb1782SBart Van Assche pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3189c07d424dSDavid Dillow target->sg_tablesize = target->cmd_sg_cnt; 3190c07d424dSDavid Dillow } 3191c07d424dSDavid Dillow 3192c07d424dSDavid Dillow target_host->sg_tablesize = target->sg_tablesize; 3193c07d424dSDavid Dillow target->indirect_size = target->sg_tablesize * 3194c07d424dSDavid Dillow sizeof (struct srp_direct_buf); 319549248644SDavid Dillow target->max_iu_len = sizeof (struct srp_cmd) + 319649248644SDavid Dillow sizeof (struct srp_indirect_buf) + 319749248644SDavid Dillow target->cmd_sg_cnt * sizeof (struct srp_direct_buf); 319849248644SDavid Dillow 3199c1120f89SBart Van Assche INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3200ef6c49d8SBart Van Assche INIT_WORK(&target->remove_work, srp_remove_work); 32018f26c9ffSDavid Dillow spin_lock_init(&target->lock); 3202747fe000SBart Van Assche ret = ib_query_gid(ibdev, host->port, 0, &target->sgid); 32032088ca66SSagi Grimberg if (ret) 3204fb49c8bbSBart Van Assche goto out; 3205d92c0da7SBart Van Assche 3206d92c0da7SBart Van Assche ret = -ENOMEM; 3207d92c0da7SBart Van Assche target->ch_count = max_t(unsigned, num_online_nodes(), 3208d92c0da7SBart Van Assche min(ch_count ? 
: 3209d92c0da7SBart Van Assche min(4 * num_online_nodes(), 3210d92c0da7SBart Van Assche ibdev->num_comp_vectors), 3211d92c0da7SBart Van Assche num_online_cpus())); 3212d92c0da7SBart Van Assche target->ch = kcalloc(target->ch_count, sizeof(*target->ch), 3213d92c0da7SBart Van Assche GFP_KERNEL); 3214d92c0da7SBart Van Assche if (!target->ch) 3215fb49c8bbSBart Van Assche goto out; 3216d92c0da7SBart Van Assche 3217d92c0da7SBart Van Assche node_idx = 0; 3218d92c0da7SBart Van Assche for_each_online_node(node) { 3219d92c0da7SBart Van Assche const int ch_start = (node_idx * target->ch_count / 3220d92c0da7SBart Van Assche num_online_nodes()); 3221d92c0da7SBart Van Assche const int ch_end = ((node_idx + 1) * target->ch_count / 3222d92c0da7SBart Van Assche num_online_nodes()); 3223d92c0da7SBart Van Assche const int cv_start = (node_idx * ibdev->num_comp_vectors / 3224d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3225d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3226d92c0da7SBart Van Assche const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / 3227d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3228d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3229d92c0da7SBart Van Assche int cpu_idx = 0; 3230d92c0da7SBart Van Assche 3231d92c0da7SBart Van Assche for_each_online_cpu(cpu) { 3232d92c0da7SBart Van Assche if (cpu_to_node(cpu) != node) 3233d92c0da7SBart Van Assche continue; 3234d92c0da7SBart Van Assche if (ch_start + cpu_idx >= ch_end) 3235d92c0da7SBart Van Assche continue; 3236d92c0da7SBart Van Assche ch = &target->ch[ch_start + cpu_idx]; 3237d92c0da7SBart Van Assche ch->target = target; 3238d92c0da7SBart Van Assche ch->comp_vector = cv_start == cv_end ? 
cv_start : 3239d92c0da7SBart Van Assche cv_start + cpu_idx % (cv_end - cv_start); 3240d92c0da7SBart Van Assche spin_lock_init(&ch->lock); 3241d92c0da7SBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 3242d92c0da7SBart Van Assche ret = srp_new_cm_id(ch); 3243d92c0da7SBart Van Assche if (ret) 3244d92c0da7SBart Van Assche goto err_disconnect; 3245aef9ec39SRoland Dreier 3246509c07bcSBart Van Assche ret = srp_create_ch_ib(ch); 3247aef9ec39SRoland Dreier if (ret) 3248d92c0da7SBart Van Assche goto err_disconnect; 3249aef9ec39SRoland Dreier 3250d92c0da7SBart Van Assche ret = srp_alloc_req_data(ch); 32519fe4bcf4SDavid Dillow if (ret) 3252d92c0da7SBart Van Assche goto err_disconnect; 3253aef9ec39SRoland Dreier 3254d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 3255aef9ec39SRoland Dreier if (ret) { 32567aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 3257d92c0da7SBart Van Assche PFX "Connection %d/%d failed\n", 3258d92c0da7SBart Van Assche ch_start + cpu_idx, 3259d92c0da7SBart Van Assche target->ch_count); 3260d92c0da7SBart Van Assche if (node_idx == 0 && cpu_idx == 0) { 3261d92c0da7SBart Van Assche goto err_disconnect; 3262d92c0da7SBart Van Assche } else { 3263d92c0da7SBart Van Assche srp_free_ch_ib(target, ch); 3264d92c0da7SBart Van Assche srp_free_req_data(target, ch); 3265d92c0da7SBart Van Assche target->ch_count = ch - target->ch; 3266c257ea6fSBart Van Assche goto connected; 3267aef9ec39SRoland Dreier } 3268d92c0da7SBart Van Assche } 3269d92c0da7SBart Van Assche 3270d92c0da7SBart Van Assche multich = true; 3271d92c0da7SBart Van Assche cpu_idx++; 3272d92c0da7SBart Van Assche } 3273d92c0da7SBart Van Assche node_idx++; 3274d92c0da7SBart Van Assche } 3275d92c0da7SBart Van Assche 3276c257ea6fSBart Van Assche connected: 3277d92c0da7SBart Van Assche target->scsi_host->nr_hw_queues = target->ch_count; 3278aef9ec39SRoland Dreier 3279aef9ec39SRoland Dreier ret = srp_add_target(host, target); 3280aef9ec39SRoland Dreier if (ret) 3281aef9ec39SRoland Dreier 
goto err_disconnect; 3282aef9ec39SRoland Dreier 328334aa654eSBart Van Assche if (target->state != SRP_TARGET_REMOVED) { 3284e7ffde01SBart Van Assche shost_printk(KERN_DEBUG, target->scsi_host, PFX 3285e7ffde01SBart Van Assche "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 3286e7ffde01SBart Van Assche be64_to_cpu(target->id_ext), 3287e7ffde01SBart Van Assche be64_to_cpu(target->ioc_guid), 3288747fe000SBart Van Assche be16_to_cpu(target->pkey), 3289e7ffde01SBart Van Assche be64_to_cpu(target->service_id), 3290747fe000SBart Van Assche target->sgid.raw, target->orig_dgid.raw); 329134aa654eSBart Van Assche } 3292e7ffde01SBart Van Assche 32932d7091bcSBart Van Assche ret = count; 32942d7091bcSBart Van Assche 32952d7091bcSBart Van Assche out: 32962d7091bcSBart Van Assche mutex_unlock(&host->add_target_mutex); 329734aa654eSBart Van Assche 329834aa654eSBart Van Assche scsi_host_put(target->scsi_host); 329934aa654eSBart Van Assche 33002d7091bcSBart Van Assche return ret; 3301aef9ec39SRoland Dreier 3302aef9ec39SRoland Dreier err_disconnect: 3303aef9ec39SRoland Dreier srp_disconnect_target(target); 3304aef9ec39SRoland Dreier 3305d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 3306d92c0da7SBart Van Assche ch = &target->ch[i]; 3307509c07bcSBart Van Assche srp_free_ch_ib(target, ch); 3308509c07bcSBart Van Assche srp_free_req_data(target, ch); 3309d92c0da7SBart Van Assche } 3310d92c0da7SBart Van Assche 3311d92c0da7SBart Van Assche kfree(target->ch); 33122d7091bcSBart Van Assche goto out; 3313aef9ec39SRoland Dreier } 3314aef9ec39SRoland Dreier 3315ee959b00STony Jones static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); 3316aef9ec39SRoland Dreier 3317ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 3318ee959b00STony Jones char *buf) 3319aef9ec39SRoland Dreier { 3320ee959b00STony Jones struct srp_host *host = container_of(dev, struct srp_host, dev); 
3321aef9ec39SRoland Dreier 332205321937SGreg Kroah-Hartman return sprintf(buf, "%s\n", host->srp_dev->dev->name); 3323aef9ec39SRoland Dreier } 3324aef9ec39SRoland Dreier 3325ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 3326aef9ec39SRoland Dreier 3327ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr, 3328ee959b00STony Jones char *buf) 3329aef9ec39SRoland Dreier { 3330ee959b00STony Jones struct srp_host *host = container_of(dev, struct srp_host, dev); 3331aef9ec39SRoland Dreier 3332aef9ec39SRoland Dreier return sprintf(buf, "%d\n", host->port); 3333aef9ec39SRoland Dreier } 3334aef9ec39SRoland Dreier 3335ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); 3336aef9ec39SRoland Dreier 3337f5358a17SRoland Dreier static struct srp_host *srp_add_port(struct srp_device *device, u8 port) 3338aef9ec39SRoland Dreier { 3339aef9ec39SRoland Dreier struct srp_host *host; 3340aef9ec39SRoland Dreier 3341aef9ec39SRoland Dreier host = kzalloc(sizeof *host, GFP_KERNEL); 3342aef9ec39SRoland Dreier if (!host) 3343aef9ec39SRoland Dreier return NULL; 3344aef9ec39SRoland Dreier 3345aef9ec39SRoland Dreier INIT_LIST_HEAD(&host->target_list); 3346b3589fd4SMatthew Wilcox spin_lock_init(&host->target_lock); 3347aef9ec39SRoland Dreier init_completion(&host->released); 33482d7091bcSBart Van Assche mutex_init(&host->add_target_mutex); 334905321937SGreg Kroah-Hartman host->srp_dev = device; 3350aef9ec39SRoland Dreier host->port = port; 3351aef9ec39SRoland Dreier 3352ee959b00STony Jones host->dev.class = &srp_class; 3353ee959b00STony Jones host->dev.parent = device->dev->dma_device; 3354d927e38cSKay Sievers dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); 3355aef9ec39SRoland Dreier 3356ee959b00STony Jones if (device_register(&host->dev)) 3357f5358a17SRoland Dreier goto free_host; 3358ee959b00STony Jones if (device_create_file(&host->dev, &dev_attr_add_target)) 3359aef9ec39SRoland Dreier goto 
err_class; 3360ee959b00STony Jones if (device_create_file(&host->dev, &dev_attr_ibdev)) 3361aef9ec39SRoland Dreier goto err_class; 3362ee959b00STony Jones if (device_create_file(&host->dev, &dev_attr_port)) 3363aef9ec39SRoland Dreier goto err_class; 3364aef9ec39SRoland Dreier 3365aef9ec39SRoland Dreier return host; 3366aef9ec39SRoland Dreier 3367aef9ec39SRoland Dreier err_class: 3368ee959b00STony Jones device_unregister(&host->dev); 3369aef9ec39SRoland Dreier 3370f5358a17SRoland Dreier free_host: 3371aef9ec39SRoland Dreier kfree(host); 3372aef9ec39SRoland Dreier 3373aef9ec39SRoland Dreier return NULL; 3374aef9ec39SRoland Dreier } 3375aef9ec39SRoland Dreier 3376aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device) 3377aef9ec39SRoland Dreier { 3378f5358a17SRoland Dreier struct srp_device *srp_dev; 3379f5358a17SRoland Dreier struct ib_device_attr *dev_attr; 3380aef9ec39SRoland Dreier struct srp_host *host; 33814139032bSHal Rosenstock int mr_page_shift, p; 338252ede08fSBart Van Assche u64 max_pages_per_mr; 3383aef9ec39SRoland Dreier 3384f5358a17SRoland Dreier dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 3385f5358a17SRoland Dreier if (!dev_attr) 3386cf311cd4SSean Hefty return; 3387aef9ec39SRoland Dreier 3388f5358a17SRoland Dreier if (ib_query_device(device, dev_attr)) { 3389e0bda7d8SBart Van Assche pr_warn("Query device failed for %s\n", device->name); 3390f5358a17SRoland Dreier goto free_attr; 3391f5358a17SRoland Dreier } 3392f5358a17SRoland Dreier 3393f5358a17SRoland Dreier srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); 3394f5358a17SRoland Dreier if (!srp_dev) 3395f5358a17SRoland Dreier goto free_attr; 3396f5358a17SRoland Dreier 3397d1b4289eSBart Van Assche srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && 3398d1b4289eSBart Van Assche device->map_phys_fmr && device->unmap_fmr); 33995cfb1782SBart Van Assche srp_dev->has_fr = (dev_attr->device_cap_flags & 34005cfb1782SBart Van Assche IB_DEVICE_MEM_MGT_EXTENSIONS); 34015cfb1782SBart 
Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr) 34025cfb1782SBart Van Assche dev_warn(&device->dev, "neither FMR nor FR is supported\n"); 34035cfb1782SBart Van Assche 34045cfb1782SBart Van Assche srp_dev->use_fast_reg = (srp_dev->has_fr && 34055cfb1782SBart Van Assche (!srp_dev->has_fmr || prefer_fr)); 3406d1b4289eSBart Van Assche 3407f5358a17SRoland Dreier /* 3408f5358a17SRoland Dreier * Use the smallest page size supported by the HCA, down to a 34098f26c9ffSDavid Dillow * minimum of 4096 bytes. We're unlikely to build large sglists 34108f26c9ffSDavid Dillow * out of smaller entries. 3411f5358a17SRoland Dreier */ 341252ede08fSBart Van Assche mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1); 341352ede08fSBart Van Assche srp_dev->mr_page_size = 1 << mr_page_shift; 341452ede08fSBart Van Assche srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); 341552ede08fSBart Van Assche max_pages_per_mr = dev_attr->max_mr_size; 341652ede08fSBart Van Assche do_div(max_pages_per_mr, srp_dev->mr_page_size); 341752ede08fSBart Van Assche srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, 341852ede08fSBart Van Assche max_pages_per_mr); 34195cfb1782SBart Van Assche if (srp_dev->use_fast_reg) { 34205cfb1782SBart Van Assche srp_dev->max_pages_per_mr = 34215cfb1782SBart Van Assche min_t(u32, srp_dev->max_pages_per_mr, 34225cfb1782SBart Van Assche dev_attr->max_fast_reg_page_list_len); 34235cfb1782SBart Van Assche } 342452ede08fSBart Van Assche srp_dev->mr_max_size = srp_dev->mr_page_size * 342552ede08fSBart Van Assche srp_dev->max_pages_per_mr; 34265cfb1782SBart Van Assche pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", 342752ede08fSBart Van Assche device->name, mr_page_shift, dev_attr->max_mr_size, 34285cfb1782SBart Van Assche dev_attr->max_fast_reg_page_list_len, 342952ede08fSBart Van Assche srp_dev->max_pages_per_mr, srp_dev->mr_max_size); 
3430f5358a17SRoland Dreier 3431f5358a17SRoland Dreier INIT_LIST_HEAD(&srp_dev->dev_list); 3432f5358a17SRoland Dreier 3433f5358a17SRoland Dreier srp_dev->dev = device; 3434f5358a17SRoland Dreier srp_dev->pd = ib_alloc_pd(device); 3435f5358a17SRoland Dreier if (IS_ERR(srp_dev->pd)) 3436f5358a17SRoland Dreier goto free_dev; 3437f5358a17SRoland Dreier 3438f5358a17SRoland Dreier srp_dev->mr = ib_get_dma_mr(srp_dev->pd, 3439f5358a17SRoland Dreier IB_ACCESS_LOCAL_WRITE | 3440f5358a17SRoland Dreier IB_ACCESS_REMOTE_READ | 3441f5358a17SRoland Dreier IB_ACCESS_REMOTE_WRITE); 3442f5358a17SRoland Dreier if (IS_ERR(srp_dev->mr)) 3443f5358a17SRoland Dreier goto err_pd; 3444f5358a17SRoland Dreier 34454139032bSHal Rosenstock for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 3446f5358a17SRoland Dreier host = srp_add_port(srp_dev, p); 3447aef9ec39SRoland Dreier if (host) 3448f5358a17SRoland Dreier list_add_tail(&host->list, &srp_dev->dev_list); 3449aef9ec39SRoland Dreier } 3450aef9ec39SRoland Dreier 3451f5358a17SRoland Dreier ib_set_client_data(device, &srp_client, srp_dev); 3452f5358a17SRoland Dreier 3453f5358a17SRoland Dreier goto free_attr; 3454f5358a17SRoland Dreier 3455f5358a17SRoland Dreier err_pd: 3456f5358a17SRoland Dreier ib_dealloc_pd(srp_dev->pd); 3457f5358a17SRoland Dreier 3458f5358a17SRoland Dreier free_dev: 3459f5358a17SRoland Dreier kfree(srp_dev); 3460f5358a17SRoland Dreier 3461f5358a17SRoland Dreier free_attr: 3462f5358a17SRoland Dreier kfree(dev_attr); 3463aef9ec39SRoland Dreier } 3464aef9ec39SRoland Dreier 34657c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data) 3466aef9ec39SRoland Dreier { 3467f5358a17SRoland Dreier struct srp_device *srp_dev; 3468aef9ec39SRoland Dreier struct srp_host *host, *tmp_host; 3469ef6c49d8SBart Van Assche struct srp_target_port *target; 3470aef9ec39SRoland Dreier 34717c1eb45aSHaggai Eran srp_dev = client_data; 34721fe0cb84SDotan Barak if (!srp_dev) 34731fe0cb84SDotan Barak 
return; 3474aef9ec39SRoland Dreier 3475f5358a17SRoland Dreier list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { 3476ee959b00STony Jones device_unregister(&host->dev); 3477aef9ec39SRoland Dreier /* 3478aef9ec39SRoland Dreier * Wait for the sysfs entry to go away, so that no new 3479aef9ec39SRoland Dreier * target ports can be created. 3480aef9ec39SRoland Dreier */ 3481aef9ec39SRoland Dreier wait_for_completion(&host->released); 3482aef9ec39SRoland Dreier 3483aef9ec39SRoland Dreier /* 3484ef6c49d8SBart Van Assche * Remove all target ports. 3485aef9ec39SRoland Dreier */ 3486b3589fd4SMatthew Wilcox spin_lock(&host->target_lock); 3487ef6c49d8SBart Van Assche list_for_each_entry(target, &host->target_list, list) 3488ef6c49d8SBart Van Assche srp_queue_remove_work(target); 3489b3589fd4SMatthew Wilcox spin_unlock(&host->target_lock); 3490aef9ec39SRoland Dreier 3491aef9ec39SRoland Dreier /* 3492bcc05910SBart Van Assche * Wait for tl_err and target port removal tasks. 3493aef9ec39SRoland Dreier */ 3494ef6c49d8SBart Van Assche flush_workqueue(system_long_wq); 3495bcc05910SBart Van Assche flush_workqueue(srp_remove_wq); 3496aef9ec39SRoland Dreier 3497aef9ec39SRoland Dreier kfree(host); 3498aef9ec39SRoland Dreier } 3499aef9ec39SRoland Dreier 3500f5358a17SRoland Dreier ib_dereg_mr(srp_dev->mr); 3501f5358a17SRoland Dreier ib_dealloc_pd(srp_dev->pd); 3502f5358a17SRoland Dreier 3503f5358a17SRoland Dreier kfree(srp_dev); 3504aef9ec39SRoland Dreier } 3505aef9ec39SRoland Dreier 35063236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = { 3507ed9b2264SBart Van Assche .has_rport_state = true, 3508ed9b2264SBart Van Assche .reset_timer_if_blocked = true, 3509a95cadb9SBart Van Assche .reconnect_delay = &srp_reconnect_delay, 3510ed9b2264SBart Van Assche .fast_io_fail_tmo = &srp_fast_io_fail_tmo, 3511ed9b2264SBart Van Assche .dev_loss_tmo = &srp_dev_loss_tmo, 3512ed9b2264SBart Van Assche .reconnect = srp_rport_reconnect, 3513dc1bdbd9SBart 
Van Assche .rport_delete = srp_rport_delete, 3514ed9b2264SBart Van Assche .terminate_rport_io = srp_terminate_io, 35153236822bSFUJITA Tomonori }; 35163236822bSFUJITA Tomonori 3517aef9ec39SRoland Dreier static int __init srp_init_module(void) 3518aef9ec39SRoland Dreier { 3519aef9ec39SRoland Dreier int ret; 3520aef9ec39SRoland Dreier 3521dcb4cb85SBart Van Assche BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); 3522dd5e6e38SBart Van Assche 352349248644SDavid Dillow if (srp_sg_tablesize) { 3524e0bda7d8SBart Van Assche pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n"); 352549248644SDavid Dillow if (!cmd_sg_entries) 352649248644SDavid Dillow cmd_sg_entries = srp_sg_tablesize; 352749248644SDavid Dillow } 352849248644SDavid Dillow 352949248644SDavid Dillow if (!cmd_sg_entries) 353049248644SDavid Dillow cmd_sg_entries = SRP_DEF_SG_TABLESIZE; 353149248644SDavid Dillow 353249248644SDavid Dillow if (cmd_sg_entries > 255) { 3533e0bda7d8SBart Van Assche pr_warn("Clamping cmd_sg_entries to 255\n"); 353449248644SDavid Dillow cmd_sg_entries = 255; 35351e89a194SDavid Dillow } 35361e89a194SDavid Dillow 3537c07d424dSDavid Dillow if (!indirect_sg_entries) 3538c07d424dSDavid Dillow indirect_sg_entries = cmd_sg_entries; 3539c07d424dSDavid Dillow else if (indirect_sg_entries < cmd_sg_entries) { 3540e0bda7d8SBart Van Assche pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", 3541e0bda7d8SBart Van Assche cmd_sg_entries); 3542c07d424dSDavid Dillow indirect_sg_entries = cmd_sg_entries; 3543c07d424dSDavid Dillow } 3544c07d424dSDavid Dillow 3545bcc05910SBart Van Assche srp_remove_wq = create_workqueue("srp_remove"); 3546da05be29SWei Yongjun if (!srp_remove_wq) { 3547da05be29SWei Yongjun ret = -ENOMEM; 3548bcc05910SBart Van Assche goto out; 3549bcc05910SBart Van Assche } 3550bcc05910SBart Van Assche 3551bcc05910SBart Van Assche ret = -ENOMEM; 35523236822bSFUJITA Tomonori ib_srp_transport_template = 35533236822bSFUJITA Tomonori 
srp_attach_transport(&ib_srp_transport_functions); 35543236822bSFUJITA Tomonori if (!ib_srp_transport_template) 3555bcc05910SBart Van Assche goto destroy_wq; 35563236822bSFUJITA Tomonori 3557aef9ec39SRoland Dreier ret = class_register(&srp_class); 3558aef9ec39SRoland Dreier if (ret) { 3559e0bda7d8SBart Van Assche pr_err("couldn't register class infiniband_srp\n"); 3560bcc05910SBart Van Assche goto release_tr; 3561aef9ec39SRoland Dreier } 3562aef9ec39SRoland Dreier 3563c1a0b23bSMichael S. Tsirkin ib_sa_register_client(&srp_sa_client); 3564c1a0b23bSMichael S. Tsirkin 3565aef9ec39SRoland Dreier ret = ib_register_client(&srp_client); 3566aef9ec39SRoland Dreier if (ret) { 3567e0bda7d8SBart Van Assche pr_err("couldn't register IB client\n"); 3568bcc05910SBart Van Assche goto unreg_sa; 3569aef9ec39SRoland Dreier } 3570aef9ec39SRoland Dreier 3571bcc05910SBart Van Assche out: 3572bcc05910SBart Van Assche return ret; 3573bcc05910SBart Van Assche 3574bcc05910SBart Van Assche unreg_sa: 3575bcc05910SBart Van Assche ib_sa_unregister_client(&srp_sa_client); 3576bcc05910SBart Van Assche class_unregister(&srp_class); 3577bcc05910SBart Van Assche 3578bcc05910SBart Van Assche release_tr: 3579bcc05910SBart Van Assche srp_release_transport(ib_srp_transport_template); 3580bcc05910SBart Van Assche 3581bcc05910SBart Van Assche destroy_wq: 3582bcc05910SBart Van Assche destroy_workqueue(srp_remove_wq); 3583bcc05910SBart Van Assche goto out; 3584aef9ec39SRoland Dreier } 3585aef9ec39SRoland Dreier 3586aef9ec39SRoland Dreier static void __exit srp_cleanup_module(void) 3587aef9ec39SRoland Dreier { 3588aef9ec39SRoland Dreier ib_unregister_client(&srp_client); 3589c1a0b23bSMichael S. 
Tsirkin ib_sa_unregister_client(&srp_sa_client); 3590aef9ec39SRoland Dreier class_unregister(&srp_class); 35913236822bSFUJITA Tomonori srp_release_transport(ib_srp_transport_template); 3592bcc05910SBart Van Assche destroy_workqueue(srp_remove_wq); 3593aef9ec39SRoland Dreier } 3594aef9ec39SRoland Dreier 3595aef9ec39SRoland Dreier module_init(srp_init_module); 3596aef9ec39SRoland Dreier module_exit(srp_cleanup_module); 3597