/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors."
		 " The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

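/*
 * The three timeout parameters above are writable at runtime because
 * module_param_cb() ties them to the kernel_param_ops below, so a write such
 * as
 *
 *	echo 25 > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * goes through srp_tmo_set(), which parses the value with srp_parse_tmo()
 * and only stores it if srp_tmo_valid() accepts the resulting combination of
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo (see
 * drivers/scsi/scsi_transport_srp.c for the exact constraints).
 */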
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->frpl)
			ib_free_fast_reg_page_list(d->frpl);
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		if (IS_ERR(frpl)) {
			ret = PTR_ERR(frpl);
			goto destroy_pool;
		}
		d->frpl = frpl;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it.
 * This prevents the receive completion handler from accessing the queue
 * pair while it is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

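	/*
	 * Sizing note (roughly, based on the code above and below): the
	 * receive queue and receive CQ hold queue_size entries plus one for
	 * the SRP_LAST_WR_ID drain request posted by srp_destroy_qp(), while
	 * the send queue and send CQ are m = 1 + use_fast_reg times larger
	 * because, with fast registration, each command may post a memory
	 * registration work request in addition to its SEND.
	 */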
	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
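	/*
	 * What follows builds an IB CM REQ whose private data carries the
	 * SRP_LOGIN_REQ information unit: opcode, requested initiator-to-
	 * target IU length, supported buffer formats, the multichannel flag
	 * and the 16-byte initiator/target port identifiers filled in below.
	 */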
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id &&
		    ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

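/*
 * srp_rport_delete() below is the callback the SRP transport class invokes
 * when an rport is being deleted (for example after dev_loss_tmo expires),
 * assuming it is registered as the rport_delete member of this driver's
 * srp_function_template elsewhere in this file; all it needs to do is queue
 * the same removal work used above.
 */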
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
ib_pool_fmr **pfmr; 10945cfb1782SBart Van Assche 10955cfb1782SBart Van Assche for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++) 10965cfb1782SBart Van Assche ib_fmr_pool_unmap(*pfmr); 10975cfb1782SBart Van Assche } 1098f5358a17SRoland Dreier 10998f26c9ffSDavid Dillow ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), 11008f26c9ffSDavid Dillow scmnd->sc_data_direction); 1101d945e1dfSRoland Dreier } 1102d945e1dfSRoland Dreier 110322032991SBart Van Assche /** 110422032991SBart Van Assche * srp_claim_req - Take ownership of the scmnd associated with a request. 1105509c07bcSBart Van Assche * @ch: SRP RDMA channel. 110622032991SBart Van Assche * @req: SRP request. 1107b3fe628dSBart Van Assche * @sdev: If not NULL, only take ownership for this SCSI device. 110822032991SBart Van Assche * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 110922032991SBart Van Assche * ownership of @req->scmnd if it equals @scmnd. 111022032991SBart Van Assche * 111122032991SBart Van Assche * Return value: 111222032991SBart Van Assche * Either NULL or a pointer to the SCSI command the caller became owner of. 111322032991SBart Van Assche */ 1114509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, 111522032991SBart Van Assche struct srp_request *req, 1116b3fe628dSBart Van Assche struct scsi_device *sdev, 111722032991SBart Van Assche struct scsi_cmnd *scmnd) 1118526b4caaSIshai Rabinovitz { 111994a9174cSBart Van Assche unsigned long flags; 112094a9174cSBart Van Assche 1121509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1122b3fe628dSBart Van Assche if (req->scmnd && 1123b3fe628dSBart Van Assche (!sdev || req->scmnd->device == sdev) && 1124b3fe628dSBart Van Assche (!scmnd || req->scmnd == scmnd)) { 112522032991SBart Van Assche scmnd = req->scmnd; 112622032991SBart Van Assche req->scmnd = NULL; 112722032991SBart Van Assche } else { 112822032991SBart Van Assche scmnd = NULL; 112922032991SBart Van Assche } 1130509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 113122032991SBart Van Assche 113222032991SBart Van Assche return scmnd; 113322032991SBart Van Assche } 113422032991SBart Van Assche 113522032991SBart Van Assche /** 113622032991SBart Van Assche * srp_free_req() - Unmap data and add request to the free request list. 1137509c07bcSBart Van Assche * @ch: SRP RDMA channel. 1138af24663bSBart Van Assche * @req: Request to be freed. 1139af24663bSBart Van Assche * @scmnd: SCSI command associated with @req. 1140af24663bSBart Van Assche * @req_lim_delta: Amount to be added to @target->req_lim. 
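 *
 * The request itself is reclaimed by the caller through srp_claim_req();
 * this function only unmaps the data buffers and credits @req_lim_delta
 * back to ch->req_lim.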
114122032991SBart Van Assche */ 1142509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1143509c07bcSBart Van Assche struct scsi_cmnd *scmnd, s32 req_lim_delta) 114422032991SBart Van Assche { 114522032991SBart Van Assche unsigned long flags; 114622032991SBart Van Assche 1147509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 114822032991SBart Van Assche 1149509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1150509c07bcSBart Van Assche ch->req_lim += req_lim_delta; 1151509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 1152526b4caaSIshai Rabinovitz } 1153526b4caaSIshai Rabinovitz 1154509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1155509c07bcSBart Van Assche struct scsi_device *sdev, int result) 1156526b4caaSIshai Rabinovitz { 1157509c07bcSBart Van Assche struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 115822032991SBart Van Assche 115922032991SBart Van Assche if (scmnd) { 1160509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 1161ed9b2264SBart Van Assche scmnd->result = result; 116222032991SBart Van Assche scmnd->scsi_done(scmnd); 116322032991SBart Van Assche } 1164526b4caaSIshai Rabinovitz } 1165526b4caaSIshai Rabinovitz 1166ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport) 1167aef9ec39SRoland Dreier { 1168ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1169d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1170b3fe628dSBart Van Assche struct Scsi_Host *shost = target->scsi_host; 1171b3fe628dSBart Van Assche struct scsi_device *sdev; 1172d92c0da7SBart Van Assche int i, j; 1173aef9ec39SRoland Dreier 1174b3fe628dSBart Van Assche /* 1175b3fe628dSBart Van Assche * Invoking srp_terminate_io() while srp_queuecommand() is running 1176b3fe628dSBart Van Assche * is not safe. Hence the warning statement below. 1177b3fe628dSBart Van Assche */ 1178b3fe628dSBart Van Assche shost_for_each_device(sdev, shost) 1179b3fe628dSBart Van Assche WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1180b3fe628dSBart Van Assche 1181d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1182d92c0da7SBart Van Assche ch = &target->ch[i]; 1183509c07bcSBart Van Assche 1184d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1185d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1186d92c0da7SBart Van Assche 1187d92c0da7SBart Van Assche srp_finish_req(ch, req, NULL, 1188d92c0da7SBart Van Assche DID_TRANSPORT_FAILFAST << 16); 1189d92c0da7SBart Van Assche } 1190ed9b2264SBart Van Assche } 1191ed9b2264SBart Van Assche } 1192ed9b2264SBart Van Assche 1193ed9b2264SBart Van Assche /* 1194ed9b2264SBart Van Assche * It is up to the caller to ensure that srp_rport_reconnect() calls are 1195ed9b2264SBart Van Assche * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1196ed9b2264SBart Van Assche * srp_reset_device() or srp_reset_host() calls will occur while this function 1197ed9b2264SBart Van Assche * is in progress. One way to realize that is not to call this function 1198ed9b2264SBart Van Assche * directly but to call srp_reconnect_rport() instead since that last function 1199ed9b2264SBart Van Assche * serializes calls of this function via rport->mutex and also blocks 1200ed9b2264SBart Van Assche * srp_queuecommand() calls before invoking this function. 
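 *
 * The reconnect sequence below is: allocate a new CM ID for every RDMA
 * channel, terminate all outstanding requests with DID_RESET, recreate
 * the queue pair and repopulate the free_tx list of each channel, clear
 * qp_in_error and finally reconnect the channels one by one (multich is
 * set after the first channel has been connected).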
1201ed9b2264SBart Van Assche */ 1202ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport) 1203ed9b2264SBart Van Assche { 1204ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1205d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1206d92c0da7SBart Van Assche int i, j, ret = 0; 1207d92c0da7SBart Van Assche bool multich = false; 120809be70a2SBart Van Assche 1209aef9ec39SRoland Dreier srp_disconnect_target(target); 121034aa654eSBart Van Assche 121134aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 121234aa654eSBart Van Assche return -ENODEV; 121334aa654eSBart Van Assche 1214aef9ec39SRoland Dreier /* 1215c7c4e7ffSBart Van Assche * Now get a new local CM ID so that we avoid confusing the target in 1216c7c4e7ffSBart Van Assche * case things are really fouled up. Doing so also ensures that all CM 1217c7c4e7ffSBart Van Assche * callbacks will have finished before a new QP is allocated. 1218aef9ec39SRoland Dreier */ 1219d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1220d92c0da7SBart Van Assche ch = &target->ch[i]; 1221d92c0da7SBart Van Assche ret += srp_new_cm_id(ch); 1222d92c0da7SBart Van Assche } 1223d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1224d92c0da7SBart Van Assche ch = &target->ch[i]; 1225d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1226d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1227509c07bcSBart Van Assche 1228509c07bcSBart Van Assche srp_finish_req(ch, req, NULL, DID_RESET << 16); 1229536ae14eSBart Van Assche } 1230d92c0da7SBart Van Assche } 1231d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1232d92c0da7SBart Van Assche ch = &target->ch[i]; 12335cfb1782SBart Van Assche /* 12345cfb1782SBart Van Assche * Whether or not creating a new CM ID succeeded, create a new 1235d92c0da7SBart Van Assche * QP. This guarantees that all completion callback function 1236d92c0da7SBart Van Assche * invocations have finished before request resetting starts. 
12375cfb1782SBart Van Assche */ 1238509c07bcSBart Van Assche ret += srp_create_ch_ib(ch); 12395cfb1782SBart Van Assche 1240509c07bcSBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 1241d92c0da7SBart Van Assche for (j = 0; j < target->queue_size; ++j) 1242d92c0da7SBart Van Assche list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1243d92c0da7SBart Van Assche } 12448de9fe3aSBart Van Assche 12458de9fe3aSBart Van Assche target->qp_in_error = false; 12468de9fe3aSBart Van Assche 1247d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1248d92c0da7SBart Van Assche ch = &target->ch[i]; 1249bbac5ccfSBart Van Assche if (ret) 1250d92c0da7SBart Van Assche break; 1251d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 1252d92c0da7SBart Van Assche multich = true; 1253d92c0da7SBart Van Assche } 125409be70a2SBart Van Assche 1255ed9b2264SBart Van Assche if (ret == 0) 1256ed9b2264SBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 1257ed9b2264SBart Van Assche PFX "reconnect succeeded\n"); 1258aef9ec39SRoland Dreier 1259aef9ec39SRoland Dreier return ret; 1260aef9ec39SRoland Dreier } 1261aef9ec39SRoland Dreier 12628f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 12638f26c9ffSDavid Dillow unsigned int dma_len, u32 rkey) 1264f5358a17SRoland Dreier { 12658f26c9ffSDavid Dillow struct srp_direct_buf *desc = state->desc; 12668f26c9ffSDavid Dillow 12673ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 12683ae95da8SBart Van Assche 12698f26c9ffSDavid Dillow desc->va = cpu_to_be64(dma_addr); 12708f26c9ffSDavid Dillow desc->key = cpu_to_be32(rkey); 12718f26c9ffSDavid Dillow desc->len = cpu_to_be32(dma_len); 12728f26c9ffSDavid Dillow 12738f26c9ffSDavid Dillow state->total_len += dma_len; 12748f26c9ffSDavid Dillow state->desc++; 12758f26c9ffSDavid Dillow state->ndesc++; 12768f26c9ffSDavid Dillow } 12778f26c9ffSDavid Dillow 12788f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state, 1279509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12808f26c9ffSDavid Dillow { 1281186fbc66SBart Van Assche struct srp_target_port *target = ch->target; 1282186fbc66SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 12838f26c9ffSDavid Dillow struct ib_pool_fmr *fmr; 1284f5358a17SRoland Dreier u64 io_addr = 0; 12858f26c9ffSDavid Dillow 1286f731ed62SBart Van Assche if (state->fmr.next >= state->fmr.end) 1287f731ed62SBart Van Assche return -ENOMEM; 1288f731ed62SBart Van Assche 1289509c07bcSBart Van Assche fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, 12908f26c9ffSDavid Dillow state->npages, io_addr); 12918f26c9ffSDavid Dillow if (IS_ERR(fmr)) 12928f26c9ffSDavid Dillow return PTR_ERR(fmr); 12938f26c9ffSDavid Dillow 1294f731ed62SBart Van Assche *state->fmr.next++ = fmr; 129552ede08fSBart Van Assche state->nmdesc++; 12968f26c9ffSDavid Dillow 1297186fbc66SBart Van Assche srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, 1298186fbc66SBart Van Assche state->dma_len, fmr->fmr->rkey); 1299539dde6fSBart Van Assche 13008f26c9ffSDavid Dillow return 0; 13018f26c9ffSDavid Dillow } 13028f26c9ffSDavid Dillow 13035cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state, 1304509c07bcSBart Van Assche struct srp_rdma_ch *ch) 13055cfb1782SBart Van Assche { 1306509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 13075cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 13085cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 13095cfb1782SBart Van Assche struct ib_send_wr 
wr; 13105cfb1782SBart Van Assche struct srp_fr_desc *desc; 13115cfb1782SBart Van Assche u32 rkey; 13125cfb1782SBart Van Assche 1313f731ed62SBart Van Assche if (state->fr.next >= state->fr.end) 1314f731ed62SBart Van Assche return -ENOMEM; 1315f731ed62SBart Van Assche 1316509c07bcSBart Van Assche desc = srp_fr_pool_get(ch->fr_pool); 13175cfb1782SBart Van Assche if (!desc) 13185cfb1782SBart Van Assche return -ENOMEM; 13195cfb1782SBart Van Assche 13205cfb1782SBart Van Assche rkey = ib_inc_rkey(desc->mr->rkey); 13215cfb1782SBart Van Assche ib_update_fast_reg_key(desc->mr, rkey); 13225cfb1782SBart Van Assche 13235cfb1782SBart Van Assche memcpy(desc->frpl->page_list, state->pages, 13245cfb1782SBart Van Assche sizeof(state->pages[0]) * state->npages); 13255cfb1782SBart Van Assche 13265cfb1782SBart Van Assche memset(&wr, 0, sizeof(wr)); 13275cfb1782SBart Van Assche wr.opcode = IB_WR_FAST_REG_MR; 13285cfb1782SBart Van Assche wr.wr_id = FAST_REG_WR_ID_MASK; 13295cfb1782SBart Van Assche wr.wr.fast_reg.iova_start = state->base_dma_addr; 13305cfb1782SBart Van Assche wr.wr.fast_reg.page_list = desc->frpl; 13315cfb1782SBart Van Assche wr.wr.fast_reg.page_list_len = state->npages; 13325cfb1782SBart Van Assche wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); 13335cfb1782SBart Van Assche wr.wr.fast_reg.length = state->dma_len; 13345cfb1782SBart Van Assche wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | 13355cfb1782SBart Van Assche IB_ACCESS_REMOTE_READ | 13365cfb1782SBart Van Assche IB_ACCESS_REMOTE_WRITE); 13375cfb1782SBart Van Assche wr.wr.fast_reg.rkey = desc->mr->lkey; 13385cfb1782SBart Van Assche 1339f731ed62SBart Van Assche *state->fr.next++ = desc; 13405cfb1782SBart Van Assche state->nmdesc++; 13415cfb1782SBart Van Assche 13425cfb1782SBart Van Assche srp_map_desc(state, state->base_dma_addr, state->dma_len, 13435cfb1782SBart Van Assche desc->mr->rkey); 13445cfb1782SBart Van Assche 1345509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 13465cfb1782SBart Van Assche } 13475cfb1782SBart Van Assche 1348539dde6fSBart Van Assche static int srp_finish_mapping(struct srp_map_state *state, 1349509c07bcSBart Van Assche struct srp_rdma_ch *ch) 1350539dde6fSBart Van Assche { 1351509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1352002f1567SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 1353539dde6fSBart Van Assche int ret = 0; 1354539dde6fSBart Van Assche 1355002f1567SBart Van Assche WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr); 1356002f1567SBart Van Assche 1357539dde6fSBart Van Assche if (state->npages == 0) 1358539dde6fSBart Van Assche return 0; 1359539dde6fSBart Van Assche 136003f6fb93SBart Van Assche if (state->npages == 1 && target->global_mr) 136152ede08fSBart Van Assche srp_map_desc(state, state->base_dma_addr, state->dma_len, 136203f6fb93SBart Van Assche target->global_mr->rkey); 1363539dde6fSBart Van Assche else 1364002f1567SBart Van Assche ret = dev->use_fast_reg ? 
srp_map_finish_fr(state, ch) : 1365509c07bcSBart Van Assche srp_map_finish_fmr(state, ch); 1366539dde6fSBart Van Assche 1367539dde6fSBart Van Assche if (ret == 0) { 1368539dde6fSBart Van Assche state->npages = 0; 136952ede08fSBart Van Assche state->dma_len = 0; 1370539dde6fSBart Van Assche } 1371539dde6fSBart Van Assche 1372539dde6fSBart Van Assche return ret; 1373539dde6fSBart Van Assche } 1374539dde6fSBart Van Assche 13758f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state, 1376509c07bcSBart Van Assche struct srp_rdma_ch *ch, 13773ae95da8SBart Van Assche struct scatterlist *sg, int sg_index) 13788f26c9ffSDavid Dillow { 1379509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 138005321937SGreg Kroah-Hartman struct srp_device *dev = target->srp_host->srp_dev; 138185507bccSRalph Campbell struct ib_device *ibdev = dev->dev; 13828f26c9ffSDavid Dillow dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1383bb350d1dSFUJITA Tomonori unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 13843ae95da8SBart Van Assche unsigned int len = 0; 13858f26c9ffSDavid Dillow int ret; 138685507bccSRalph Campbell 13873ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 1388f5358a17SRoland Dreier 13898f26c9ffSDavid Dillow while (dma_len) { 13905cfb1782SBart Van Assche unsigned offset = dma_addr & ~dev->mr_page_mask; 13915cfb1782SBart Van Assche if (state->npages == dev->max_pages_per_mr || offset != 0) { 1392509c07bcSBart Van Assche ret = srp_finish_mapping(state, ch); 13938f26c9ffSDavid Dillow if (ret) 13948f26c9ffSDavid Dillow return ret; 139585507bccSRalph Campbell } 1396f5358a17SRoland Dreier 13975cfb1782SBart Van Assche len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); 13988f26c9ffSDavid Dillow 13998f26c9ffSDavid Dillow if (!state->npages) 14008f26c9ffSDavid Dillow state->base_dma_addr = dma_addr; 14015cfb1782SBart Van Assche state->pages[state->npages++] = dma_addr & dev->mr_page_mask; 140252ede08fSBart Van Assche state->dma_len += len; 14038f26c9ffSDavid Dillow dma_addr += len; 14048f26c9ffSDavid Dillow dma_len -= len; 1405f5358a17SRoland Dreier } 1406f5358a17SRoland Dreier 14075cfb1782SBart Van Assche /* 14085cfb1782SBart Van Assche * If the last entry of the MR wasn't a full page, then we need to 14098f26c9ffSDavid Dillow * close it out and start a new one -- we can only merge at page 14108f26c9ffSDavid Dillow * boundaries.
14118f26c9ffSDavid Dillow */ 1412f5358a17SRoland Dreier ret = 0; 14130e0d3a48SBart Van Assche if (len != dev->mr_page_size) 1414509c07bcSBart Van Assche ret = srp_finish_mapping(state, ch); 1415f5358a17SRoland Dreier return ret; 1416f5358a17SRoland Dreier } 1417f5358a17SRoland Dreier 1418509c07bcSBart Van Assche static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, 1419509c07bcSBart Van Assche struct srp_request *req, struct scatterlist *scat, 1420509c07bcSBart Van Assche int count) 142176bc1e1dSBart Van Assche { 1422509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 142376bc1e1dSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 142476bc1e1dSBart Van Assche struct scatterlist *sg; 14250e0d3a48SBart Van Assche int i, ret; 142676bc1e1dSBart Van Assche 142776bc1e1dSBart Van Assche state->desc = req->indirect_desc; 142876bc1e1dSBart Van Assche state->pages = req->map_page; 14295cfb1782SBart Van Assche if (dev->use_fast_reg) { 1430f731ed62SBart Van Assche state->fr.next = req->fr_list; 1431f731ed62SBart Van Assche state->fr.end = req->fr_list + target->cmd_sg_cnt; 1432002f1567SBart Van Assche } else if (dev->use_fmr) { 1433f731ed62SBart Van Assche state->fmr.next = req->fmr_list; 1434f731ed62SBart Van Assche state->fmr.end = req->fmr_list + target->cmd_sg_cnt; 14355cfb1782SBart Van Assche } 143676bc1e1dSBart Van Assche 1437002f1567SBart Van Assche if (dev->use_fast_reg || dev->use_fmr) { 143876bc1e1dSBart Van Assche for_each_sg(scat, sg, count, i) { 14393ae95da8SBart Van Assche ret = srp_map_sg_entry(state, ch, sg, i); 14400e0d3a48SBart Van Assche if (ret) 14410e0d3a48SBart Van Assche goto out; 144276bc1e1dSBart Van Assche } 14430e0d3a48SBart Van Assche ret = srp_finish_mapping(state, ch); 14440e0d3a48SBart Van Assche if (ret) 14450e0d3a48SBart Van Assche goto out; 14463ae95da8SBart Van Assche } else { 14473ae95da8SBart Van Assche for_each_sg(scat, sg, count, i) { 14483ae95da8SBart Van Assche srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 144903f6fb93SBart Van Assche ib_sg_dma_len(dev->dev, sg), 145003f6fb93SBart Van Assche target->global_mr->rkey); 14513ae95da8SBart Van Assche } 14520e0d3a48SBart Van Assche } 145376bc1e1dSBart Van Assche 145452ede08fSBart Van Assche req->nmdesc = state->nmdesc; 14550e0d3a48SBart Van Assche ret = 0; 14565cfb1782SBart Van Assche 14570e0d3a48SBart Van Assche out: 14580e0d3a48SBart Van Assche return ret; 145976bc1e1dSBart Van Assche } 146076bc1e1dSBart Van Assche 1461330179f2SBart Van Assche /* 1462330179f2SBart Van Assche * Register the indirect data buffer descriptor with the HCA. 1463330179f2SBart Van Assche * 1464330179f2SBart Van Assche * Note: since the indirect data buffer descriptor has been allocated with 1465330179f2SBart Van Assche * kmalloc() it is guaranteed that this buffer is a physically contiguous 1466330179f2SBart Van Assche * memory buffer. 
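 *
 * Because of that, the whole descriptor table can be registered in one go:
 * the code below builds a single-entry srp_map_state (npages = 1) around
 * req->indirect_dma_addr and reuses srp_finish_mapping() to obtain the
 * rkey that ends up in idb_desc.key.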
1467330179f2SBart Van Assche */ 1468330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, 1469330179f2SBart Van Assche void **next_mr, void **end_mr, u32 idb_len, 1470330179f2SBart Van Assche __be32 *idb_rkey) 1471330179f2SBart Van Assche { 1472330179f2SBart Van Assche struct srp_target_port *target = ch->target; 1473330179f2SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 1474330179f2SBart Van Assche struct srp_map_state state; 1475330179f2SBart Van Assche struct srp_direct_buf idb_desc; 1476330179f2SBart Van Assche u64 idb_pages[1]; 1477330179f2SBart Van Assche int ret; 1478330179f2SBart Van Assche 1479330179f2SBart Van Assche memset(&state, 0, sizeof(state)); 1480330179f2SBart Van Assche memset(&idb_desc, 0, sizeof(idb_desc)); 1481330179f2SBart Van Assche state.gen.next = next_mr; 1482330179f2SBart Van Assche state.gen.end = end_mr; 1483330179f2SBart Van Assche state.desc = &idb_desc; 1484330179f2SBart Van Assche state.pages = idb_pages; 1485330179f2SBart Van Assche state.pages[0] = (req->indirect_dma_addr & 1486330179f2SBart Van Assche dev->mr_page_mask); 1487330179f2SBart Van Assche state.npages = 1; 1488330179f2SBart Van Assche state.base_dma_addr = req->indirect_dma_addr; 1489330179f2SBart Van Assche state.dma_len = idb_len; 1490330179f2SBart Van Assche ret = srp_finish_mapping(&state, ch); 1491330179f2SBart Van Assche if (ret < 0) 1492330179f2SBart Van Assche goto out; 1493330179f2SBart Van Assche 1494330179f2SBart Van Assche *idb_rkey = idb_desc.key; 1495330179f2SBart Van Assche 1496330179f2SBart Van Assche out: 1497330179f2SBart Van Assche return ret; 1498330179f2SBart Van Assche } 1499330179f2SBart Van Assche 1500509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1501aef9ec39SRoland Dreier struct srp_request *req) 1502aef9ec39SRoland Dreier { 1503509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 150476bc1e1dSBart Van Assche struct scatterlist *scat; 1505aef9ec39SRoland Dreier struct srp_cmd *cmd = req->cmd->buf; 1506330179f2SBart Van Assche int len, nents, count, ret; 150785507bccSRalph Campbell struct srp_device *dev; 150885507bccSRalph Campbell struct ib_device *ibdev; 15098f26c9ffSDavid Dillow struct srp_map_state state; 15108f26c9ffSDavid Dillow struct srp_indirect_buf *indirect_hdr; 1511330179f2SBart Van Assche u32 idb_len, table_len; 1512330179f2SBart Van Assche __be32 idb_rkey; 15138f26c9ffSDavid Dillow u8 fmt; 1514aef9ec39SRoland Dreier 1515bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 1516aef9ec39SRoland Dreier return sizeof (struct srp_cmd); 1517aef9ec39SRoland Dreier 1518aef9ec39SRoland Dreier if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1519aef9ec39SRoland Dreier scmnd->sc_data_direction != DMA_TO_DEVICE) { 15207aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 15217aa54bd7SDavid Dillow PFX "Unhandled data direction %d\n", 1522aef9ec39SRoland Dreier scmnd->sc_data_direction); 1523aef9ec39SRoland Dreier return -EINVAL; 1524aef9ec39SRoland Dreier } 1525aef9ec39SRoland Dreier 1526bb350d1dSFUJITA Tomonori nents = scsi_sg_count(scmnd); 1527bb350d1dSFUJITA Tomonori scat = scsi_sglist(scmnd); 1528aef9ec39SRoland Dreier 152905321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev; 153085507bccSRalph Campbell ibdev = dev->dev; 153185507bccSRalph Campbell 153285507bccSRalph Campbell count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 15338f26c9ffSDavid Dillow if 
(unlikely(count == 0)) 15348f26c9ffSDavid Dillow return -EIO; 1535aef9ec39SRoland Dreier 1536aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_DIRECT; 1537f5358a17SRoland Dreier len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1538f5358a17SRoland Dreier 153903f6fb93SBart Van Assche if (count == 1 && target->global_mr) { 1540f5358a17SRoland Dreier /* 1541f5358a17SRoland Dreier * The midlayer only generated a single gather/scatter 1542f5358a17SRoland Dreier * entry, or DMA mapping coalesced everything to a 1543f5358a17SRoland Dreier * single entry. So a direct descriptor along with 1544f5358a17SRoland Dreier * the DMA MR suffices. 1545f5358a17SRoland Dreier */ 1546f5358a17SRoland Dreier struct srp_direct_buf *buf = (void *) cmd->add_data; 1547aef9ec39SRoland Dreier 154885507bccSRalph Campbell buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 154903f6fb93SBart Van Assche buf->key = cpu_to_be32(target->global_mr->rkey); 155085507bccSRalph Campbell buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 15518f26c9ffSDavid Dillow 155252ede08fSBart Van Assche req->nmdesc = 0; 15538f26c9ffSDavid Dillow goto map_complete; 15548f26c9ffSDavid Dillow } 15558f26c9ffSDavid Dillow 15565cfb1782SBart Van Assche /* 15575cfb1782SBart Van Assche * We have more than one scatter/gather entry, so build our indirect 15585cfb1782SBart Van Assche * descriptor table, trying to merge as many entries as we can. 1559f5358a17SRoland Dreier */ 15608f26c9ffSDavid Dillow indirect_hdr = (void *) cmd->add_data; 15618f26c9ffSDavid Dillow 1562c07d424dSDavid Dillow ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1563c07d424dSDavid Dillow target->indirect_size, DMA_TO_DEVICE); 1564c07d424dSDavid Dillow 15658f26c9ffSDavid Dillow memset(&state, 0, sizeof(state)); 1566509c07bcSBart Van Assche srp_map_sg(&state, ch, req, scat, count); 15678f26c9ffSDavid Dillow 1568c07d424dSDavid Dillow /* We've mapped the request, now pull as much of the indirect 1569c07d424dSDavid Dillow * descriptor table as we can into the command buffer. If this 1570c07d424dSDavid Dillow * target is not using an external indirect table, we are 1571c07d424dSDavid Dillow * guaranteed to fit into the command, as the SCSI layer won't 1572c07d424dSDavid Dillow * give us more S/G entries than we allow. 15738f26c9ffSDavid Dillow */ 15748f26c9ffSDavid Dillow if (state.ndesc == 1) { 15755cfb1782SBart Van Assche /* 15765cfb1782SBart Van Assche * Memory registration collapsed the sg-list into one entry, 15778f26c9ffSDavid Dillow * so use a direct descriptor. 
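 * req->indirect_desc[0] already holds the address, key and length that
 * srp_map_desc() produced for that single entry, so it is copied as-is
 * into the SRP_CMD below.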
15788f26c9ffSDavid Dillow */ 15798f26c9ffSDavid Dillow struct srp_direct_buf *buf = (void *) cmd->add_data; 15808f26c9ffSDavid Dillow 1581c07d424dSDavid Dillow *buf = req->indirect_desc[0]; 15828f26c9ffSDavid Dillow goto map_complete; 15838f26c9ffSDavid Dillow } 15848f26c9ffSDavid Dillow 1585c07d424dSDavid Dillow if (unlikely(target->cmd_sg_cnt < state.ndesc && 1586c07d424dSDavid Dillow !target->allow_ext_sg)) { 1587c07d424dSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1588c07d424dSDavid Dillow "Could not fit S/G list into SRP_CMD\n"); 1589c07d424dSDavid Dillow return -EIO; 1590c07d424dSDavid Dillow } 1591c07d424dSDavid Dillow 1592c07d424dSDavid Dillow count = min(state.ndesc, target->cmd_sg_cnt); 15938f26c9ffSDavid Dillow table_len = state.ndesc * sizeof (struct srp_direct_buf); 1594330179f2SBart Van Assche idb_len = sizeof(struct srp_indirect_buf) + table_len; 1595aef9ec39SRoland Dreier 1596aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_INDIRECT; 15978f26c9ffSDavid Dillow len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); 1598c07d424dSDavid Dillow len += count * sizeof (struct srp_direct_buf); 1599f5358a17SRoland Dreier 1600c07d424dSDavid Dillow memcpy(indirect_hdr->desc_list, req->indirect_desc, 1601c07d424dSDavid Dillow count * sizeof (struct srp_direct_buf)); 160285507bccSRalph Campbell 160303f6fb93SBart Van Assche if (!target->global_mr) { 1604330179f2SBart Van Assche ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1605330179f2SBart Van Assche idb_len, &idb_rkey); 1606330179f2SBart Van Assche if (ret < 0) 1607330179f2SBart Van Assche return ret; 1608330179f2SBart Van Assche req->nmdesc++; 1609330179f2SBart Van Assche } else { 161003f6fb93SBart Van Assche idb_rkey = target->global_mr->rkey; 1611330179f2SBart Van Assche } 1612330179f2SBart Van Assche 1613c07d424dSDavid Dillow indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1614330179f2SBart Van Assche indirect_hdr->table_desc.key = idb_rkey; 16158f26c9ffSDavid Dillow indirect_hdr->table_desc.len = cpu_to_be32(table_len); 16168f26c9ffSDavid Dillow indirect_hdr->len = cpu_to_be32(state.total_len); 1617aef9ec39SRoland Dreier 1618aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1619c07d424dSDavid Dillow cmd->data_out_desc_cnt = count; 1620aef9ec39SRoland Dreier else 1621c07d424dSDavid Dillow cmd->data_in_desc_cnt = count; 1622c07d424dSDavid Dillow 1623c07d424dSDavid Dillow ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1624c07d424dSDavid Dillow DMA_TO_DEVICE); 1625aef9ec39SRoland Dreier 16268f26c9ffSDavid Dillow map_complete: 1627aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1628aef9ec39SRoland Dreier cmd->buf_fmt = fmt << 4; 1629aef9ec39SRoland Dreier else 1630aef9ec39SRoland Dreier cmd->buf_fmt = fmt; 1631aef9ec39SRoland Dreier 1632aef9ec39SRoland Dreier return len; 1633aef9ec39SRoland Dreier } 1634aef9ec39SRoland Dreier 163505a1d750SDavid Dillow /* 163676c75b25SBart Van Assche * Return an IU and possible credit to the free pool 163776c75b25SBart Van Assche */ 1638509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, 163976c75b25SBart Van Assche enum srp_iu_type iu_type) 164076c75b25SBart Van Assche { 164176c75b25SBart Van Assche unsigned long flags; 164276c75b25SBart Van Assche 1643509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1644509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 164576c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) 1646509c07bcSBart 
Van Assche ++ch->req_lim; 1647509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 164876c75b25SBart Van Assche } 164976c75b25SBart Van Assche 165076c75b25SBart Van Assche /* 1651509c07bcSBart Van Assche * Must be called with ch->lock held to protect req_lim and free_tx. 1652e9684678SBart Van Assche * If IU is not sent, it must be returned using srp_put_tx_iu(). 165305a1d750SDavid Dillow * 165405a1d750SDavid Dillow * Note: 165505a1d750SDavid Dillow * An upper limit for the number of allocated information units for each 165605a1d750SDavid Dillow * request type is: 165705a1d750SDavid Dillow * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 165805a1d750SDavid Dillow * more than Scsi_Host.can_queue requests. 165905a1d750SDavid Dillow * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 166005a1d750SDavid Dillow * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 166105a1d750SDavid Dillow * one unanswered SRP request to an initiator. 166205a1d750SDavid Dillow */ 1663509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 166405a1d750SDavid Dillow enum srp_iu_type iu_type) 166505a1d750SDavid Dillow { 1666509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 166705a1d750SDavid Dillow s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 166805a1d750SDavid Dillow struct srp_iu *iu; 166905a1d750SDavid Dillow 1670509c07bcSBart Van Assche srp_send_completion(ch->send_cq, ch); 167105a1d750SDavid Dillow 1672509c07bcSBart Van Assche if (list_empty(&ch->free_tx)) 167305a1d750SDavid Dillow return NULL; 167405a1d750SDavid Dillow 167505a1d750SDavid Dillow /* Initiator responses to target requests do not consume credits */ 167676c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) { 1677509c07bcSBart Van Assche if (ch->req_lim <= rsv) { 167805a1d750SDavid Dillow ++target->zero_req_lim; 167905a1d750SDavid Dillow return NULL; 168005a1d750SDavid Dillow } 168105a1d750SDavid Dillow 1682509c07bcSBart Van Assche --ch->req_lim; 168376c75b25SBart Van Assche } 168476c75b25SBart Van Assche 1685509c07bcSBart Van Assche iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 168676c75b25SBart Van Assche list_del(&iu->list); 168705a1d750SDavid Dillow return iu; 168805a1d750SDavid Dillow } 168905a1d750SDavid Dillow 1690509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 169105a1d750SDavid Dillow { 1692509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 169305a1d750SDavid Dillow struct ib_sge list; 169405a1d750SDavid Dillow struct ib_send_wr wr, *bad_wr; 169505a1d750SDavid Dillow 169605a1d750SDavid Dillow list.addr = iu->dma; 169705a1d750SDavid Dillow list.length = len; 16989af76271SDavid Dillow list.lkey = target->lkey; 169905a1d750SDavid Dillow 170005a1d750SDavid Dillow wr.next = NULL; 1701dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 170205a1d750SDavid Dillow wr.sg_list = &list; 170305a1d750SDavid Dillow wr.num_sge = 1; 170405a1d750SDavid Dillow wr.opcode = IB_WR_SEND; 170505a1d750SDavid Dillow wr.send_flags = IB_SEND_SIGNALED; 170605a1d750SDavid Dillow 1707509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 170805a1d750SDavid Dillow } 170905a1d750SDavid Dillow 1710509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1711c996bb47SBart Van Assche { 1712509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1713c996bb47SBart Van Assche struct ib_recv_wr wr, *bad_wr; 
1714dcb4cb85SBart Van Assche struct ib_sge list; 1715c996bb47SBart Van Assche 1716c996bb47SBart Van Assche list.addr = iu->dma; 1717c996bb47SBart Van Assche list.length = iu->size; 17189af76271SDavid Dillow list.lkey = target->lkey; 1719c996bb47SBart Van Assche 1720c996bb47SBart Van Assche wr.next = NULL; 1721dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 1722c996bb47SBart Van Assche wr.sg_list = &list; 1723c996bb47SBart Van Assche wr.num_sge = 1; 1724c996bb47SBart Van Assche 1725509c07bcSBart Van Assche return ib_post_recv(ch->qp, &wr, &bad_wr); 1726c996bb47SBart Van Assche } 1727c996bb47SBart Van Assche 1728509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1729aef9ec39SRoland Dreier { 1730509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1731aef9ec39SRoland Dreier struct srp_request *req; 1732aef9ec39SRoland Dreier struct scsi_cmnd *scmnd; 1733aef9ec39SRoland Dreier unsigned long flags; 1734aef9ec39SRoland Dreier 1735aef9ec39SRoland Dreier if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1736509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1737509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1738509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 173994a9174cSBart Van Assche 1740509c07bcSBart Van Assche ch->tsk_mgmt_status = -1; 1741f8b6e31eSDavid Dillow if (be32_to_cpu(rsp->resp_data_len) >= 4) 1742509c07bcSBart Van Assche ch->tsk_mgmt_status = rsp->data[3]; 1743509c07bcSBart Van Assche complete(&ch->tsk_mgmt_done); 1744aef9ec39SRoland Dreier } else { 174577f2c1a4SBart Van Assche scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 174677f2c1a4SBart Van Assche if (scmnd) { 174777f2c1a4SBart Van Assche req = (void *)scmnd->host_scribble; 174877f2c1a4SBart Van Assche scmnd = srp_claim_req(ch, req, NULL, scmnd); 174977f2c1a4SBart Van Assche } 175022032991SBart Van Assche if (!scmnd) { 17517aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1752d92c0da7SBart Van Assche "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1753d92c0da7SBart Van Assche rsp->tag, ch - target->ch, ch->qp->qp_num); 175422032991SBart Van Assche 1755509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1756509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1757509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 175822032991SBart Van Assche 175922032991SBart Van Assche return; 176022032991SBart Van Assche } 1761aef9ec39SRoland Dreier scmnd->result = rsp->status; 1762aef9ec39SRoland Dreier 1763aef9ec39SRoland Dreier if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1764aef9ec39SRoland Dreier memcpy(scmnd->sense_buffer, rsp->data + 1765aef9ec39SRoland Dreier be32_to_cpu(rsp->resp_data_len), 1766aef9ec39SRoland Dreier min_t(int, be32_to_cpu(rsp->sense_data_len), 1767aef9ec39SRoland Dreier SCSI_SENSE_BUFFERSIZE)); 1768aef9ec39SRoland Dreier } 1769aef9ec39SRoland Dreier 1770e714531aSBart Van Assche if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1771bb350d1dSFUJITA Tomonori scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1772e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) 1773e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); 1774e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1775e714531aSBart Van Assche scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1776e714531aSBart Van Assche else if (unlikely(rsp->flags & 
SRP_RSP_FLAG_DOOVER)) 1777e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1778aef9ec39SRoland Dreier 1779509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 178022032991SBart Van Assche be32_to_cpu(rsp->req_lim_delta)); 178122032991SBart Van Assche 1782f8b6e31eSDavid Dillow scmnd->host_scribble = NULL; 1783aef9ec39SRoland Dreier scmnd->scsi_done(scmnd); 1784aef9ec39SRoland Dreier } 1785aef9ec39SRoland Dreier } 1786aef9ec39SRoland Dreier 1787509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1788bb12588aSDavid Dillow void *rsp, int len) 1789bb12588aSDavid Dillow { 1790509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 179176c75b25SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1792bb12588aSDavid Dillow unsigned long flags; 1793bb12588aSDavid Dillow struct srp_iu *iu; 179476c75b25SBart Van Assche int err; 1795bb12588aSDavid Dillow 1796509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1797509c07bcSBart Van Assche ch->req_lim += req_delta; 1798509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 1799509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 180076c75b25SBart Van Assche 1801bb12588aSDavid Dillow if (!iu) { 1802bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1803bb12588aSDavid Dillow "no IU available to send response\n"); 180476c75b25SBart Van Assche return 1; 1805bb12588aSDavid Dillow } 1806bb12588aSDavid Dillow 1807bb12588aSDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 1808bb12588aSDavid Dillow memcpy(iu->buf, rsp, len); 1809bb12588aSDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1810bb12588aSDavid Dillow 1811509c07bcSBart Van Assche err = srp_post_send(ch, iu, len); 181276c75b25SBart Van Assche if (err) { 1813bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1814bb12588aSDavid Dillow "unable to post response: %d\n", err); 1815509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_RSP); 181676c75b25SBart Van Assche } 1817bb12588aSDavid Dillow 1818bb12588aSDavid Dillow return err; 1819bb12588aSDavid Dillow } 1820bb12588aSDavid Dillow 1821509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch, 1822bb12588aSDavid Dillow struct srp_cred_req *req) 1823bb12588aSDavid Dillow { 1824bb12588aSDavid Dillow struct srp_cred_rsp rsp = { 1825bb12588aSDavid Dillow .opcode = SRP_CRED_RSP, 1826bb12588aSDavid Dillow .tag = req->tag, 1827bb12588aSDavid Dillow }; 1828bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1829bb12588aSDavid Dillow 1830509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1831509c07bcSBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, PFX 1832bb12588aSDavid Dillow "problems processing SRP_CRED_REQ\n"); 1833bb12588aSDavid Dillow } 1834bb12588aSDavid Dillow 1835509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch, 1836bb12588aSDavid Dillow struct srp_aer_req *req) 1837bb12588aSDavid Dillow { 1838509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1839bb12588aSDavid Dillow struct srp_aer_rsp rsp = { 1840bb12588aSDavid Dillow .opcode = SRP_AER_RSP, 1841bb12588aSDavid Dillow .tag = req->tag, 1842bb12588aSDavid Dillow }; 1843bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1844bb12588aSDavid Dillow 1845bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1846985aa495SBart 
Van Assche "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 1847bb12588aSDavid Dillow 1848509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1849bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1850bb12588aSDavid Dillow "problems processing SRP_AER_REQ\n"); 1851bb12588aSDavid Dillow } 1852bb12588aSDavid Dillow 1853509c07bcSBart Van Assche static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) 1854aef9ec39SRoland Dreier { 1855509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1856dcb4cb85SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1857737b94ebSRoland Dreier struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1858c996bb47SBart Van Assche int res; 1859aef9ec39SRoland Dreier u8 opcode; 1860aef9ec39SRoland Dreier 1861509c07bcSBart Van Assche ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 186285507bccSRalph Campbell DMA_FROM_DEVICE); 1863aef9ec39SRoland Dreier 1864aef9ec39SRoland Dreier opcode = *(u8 *) iu->buf; 1865aef9ec39SRoland Dreier 1866aef9ec39SRoland Dreier if (0) { 18677aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 18687aa54bd7SDavid Dillow PFX "recv completion, opcode 0x%02x\n", opcode); 18697a700811SBart Van Assche print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 18707a700811SBart Van Assche iu->buf, wc->byte_len, true); 1871aef9ec39SRoland Dreier } 1872aef9ec39SRoland Dreier 1873aef9ec39SRoland Dreier switch (opcode) { 1874aef9ec39SRoland Dreier case SRP_RSP: 1875509c07bcSBart Van Assche srp_process_rsp(ch, iu->buf); 1876aef9ec39SRoland Dreier break; 1877aef9ec39SRoland Dreier 1878bb12588aSDavid Dillow case SRP_CRED_REQ: 1879509c07bcSBart Van Assche srp_process_cred_req(ch, iu->buf); 1880bb12588aSDavid Dillow break; 1881bb12588aSDavid Dillow 1882bb12588aSDavid Dillow case SRP_AER_REQ: 1883509c07bcSBart Van Assche srp_process_aer_req(ch, iu->buf); 1884bb12588aSDavid Dillow break; 1885bb12588aSDavid Dillow 1886aef9ec39SRoland Dreier case SRP_T_LOGOUT: 1887aef9ec39SRoland Dreier /* XXX Handle target logout */ 18887aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 18897aa54bd7SDavid Dillow PFX "Got target logout request\n"); 1890aef9ec39SRoland Dreier break; 1891aef9ec39SRoland Dreier 1892aef9ec39SRoland Dreier default: 18937aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 18947aa54bd7SDavid Dillow PFX "Unhandled SRP opcode 0x%02x\n", opcode); 1895aef9ec39SRoland Dreier break; 1896aef9ec39SRoland Dreier } 1897aef9ec39SRoland Dreier 1898509c07bcSBart Van Assche ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 189985507bccSRalph Campbell DMA_FROM_DEVICE); 1900c996bb47SBart Van Assche 1901509c07bcSBart Van Assche res = srp_post_recv(ch, iu); 1902c996bb47SBart Van Assche if (res != 0) 1903c996bb47SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 1904c996bb47SBart Van Assche PFX "Recv failed with error code %d\n", res); 1905aef9ec39SRoland Dreier } 1906aef9ec39SRoland Dreier 1907c1120f89SBart Van Assche /** 1908c1120f89SBart Van Assche * srp_tl_err_work() - handle a transport layer error 1909af24663bSBart Van Assche * @work: Work structure embedded in an SRP target port. 1910c1120f89SBart Van Assche * 1911c1120f89SBart Van Assche * Note: This function may get invoked before the rport has been created, 1912c1120f89SBart Van Assche * hence the target->rport test. 
1913c1120f89SBart Van Assche */ 1914c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work) 1915c1120f89SBart Van Assche { 1916c1120f89SBart Van Assche struct srp_target_port *target; 1917c1120f89SBart Van Assche 1918c1120f89SBart Van Assche target = container_of(work, struct srp_target_port, tl_err_work); 1919c1120f89SBart Van Assche if (target->rport) 1920c1120f89SBart Van Assche srp_start_tl_fail_timers(target->rport); 1921c1120f89SBart Van Assche } 1922c1120f89SBart Van Assche 19235cfb1782SBart Van Assche static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 19247dad6b2eSBart Van Assche bool send_err, struct srp_rdma_ch *ch) 1925948d1e88SBart Van Assche { 19267dad6b2eSBart Van Assche struct srp_target_port *target = ch->target; 19277dad6b2eSBart Van Assche 19287dad6b2eSBart Van Assche if (wr_id == SRP_LAST_WR_ID) { 19297dad6b2eSBart Van Assche complete(&ch->done); 19307dad6b2eSBart Van Assche return; 19317dad6b2eSBart Van Assche } 19327dad6b2eSBart Van Assche 1933c014c8cdSBart Van Assche if (ch->connected && !target->qp_in_error) { 19345cfb1782SBart Van Assche if (wr_id & LOCAL_INV_WR_ID_MASK) { 19355cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 193657363d98SSagi Grimberg "LOCAL_INV failed with status %s (%d)\n", 193757363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19385cfb1782SBart Van Assche } else if (wr_id & FAST_REG_WR_ID_MASK) { 19395cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 194057363d98SSagi Grimberg "FAST_REG_MR failed status %s (%d)\n", 194157363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19425cfb1782SBart Van Assche } else { 19435cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 194457363d98SSagi Grimberg PFX "failed %s status %s (%d) for iu %p\n", 19455cfb1782SBart Van Assche send_err ? 
"send" : "receive", 194657363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status, 194757363d98SSagi Grimberg (void *)(uintptr_t)wr_id); 19485cfb1782SBart Van Assche } 1949c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 19504f0af697SBart Van Assche } 1951948d1e88SBart Van Assche target->qp_in_error = true; 1952948d1e88SBart Van Assche } 1953948d1e88SBart Van Assche 1954509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) 1955aef9ec39SRoland Dreier { 1956509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 1957aef9ec39SRoland Dreier struct ib_wc wc; 1958aef9ec39SRoland Dreier 1959aef9ec39SRoland Dreier ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1960aef9ec39SRoland Dreier while (ib_poll_cq(cq, 1, &wc) > 0) { 1961948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 1962509c07bcSBart Van Assche srp_handle_recv(ch, &wc); 1963948d1e88SBart Van Assche } else { 19647dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, false, ch); 1965aef9ec39SRoland Dreier } 19669c03dc9fSBart Van Assche } 19679c03dc9fSBart Van Assche } 19689c03dc9fSBart Van Assche 1969509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) 19709c03dc9fSBart Van Assche { 1971509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 19729c03dc9fSBart Van Assche struct ib_wc wc; 1973dcb4cb85SBart Van Assche struct srp_iu *iu; 19749c03dc9fSBart Van Assche 19759c03dc9fSBart Van Assche while (ib_poll_cq(cq, 1, &wc) > 0) { 1976948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 1977737b94ebSRoland Dreier iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 1978509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 1979948d1e88SBart Van Assche } else { 19807dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, true, ch); 1981948d1e88SBart Van Assche } 1982aef9ec39SRoland Dreier } 1983aef9ec39SRoland Dreier } 1984aef9ec39SRoland Dreier 198576c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 1986aef9ec39SRoland Dreier { 198776c75b25SBart Van Assche struct srp_target_port *target = host_to_target(shost); 1988a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 1989509c07bcSBart Van Assche struct srp_rdma_ch *ch; 1990aef9ec39SRoland Dreier struct srp_request *req; 1991aef9ec39SRoland Dreier struct srp_iu *iu; 1992aef9ec39SRoland Dreier struct srp_cmd *cmd; 199385507bccSRalph Campbell struct ib_device *dev; 199476c75b25SBart Van Assche unsigned long flags; 199577f2c1a4SBart Van Assche u32 tag; 199677f2c1a4SBart Van Assche u16 idx; 1997d1b4289eSBart Van Assche int len, ret; 1998a95cadb9SBart Van Assche const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 1999a95cadb9SBart Van Assche 2000a95cadb9SBart Van Assche /* 2001a95cadb9SBart Van Assche * The SCSI EH thread is the only context from which srp_queuecommand() 2002a95cadb9SBart Van Assche * can get invoked for blocked devices (SDEV_BLOCK / 2003a95cadb9SBart Van Assche * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by 2004a95cadb9SBart Van Assche * locking the rport mutex if invoked from inside the SCSI EH. 
2005a95cadb9SBart Van Assche */ 2006a95cadb9SBart Van Assche if (in_scsi_eh) 2007a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2008aef9ec39SRoland Dreier 2009d1b4289eSBart Van Assche scmnd->result = srp_chkready(target->rport); 2010d1b4289eSBart Van Assche if (unlikely(scmnd->result)) 2011d1b4289eSBart Van Assche goto err; 20122ce19e72SBart Van Assche 201377f2c1a4SBart Van Assche WARN_ON_ONCE(scmnd->request->tag < 0); 201477f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2015d92c0da7SBart Van Assche ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 201677f2c1a4SBart Van Assche idx = blk_mq_unique_tag_to_tag(tag); 201777f2c1a4SBart Van Assche WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", 201877f2c1a4SBart Van Assche dev_name(&shost->shost_gendev), tag, idx, 201977f2c1a4SBart Van Assche target->req_ring_size); 2020509c07bcSBart Van Assche 2021509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 2022509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_CMD); 2023509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 2024aef9ec39SRoland Dreier 202577f2c1a4SBart Van Assche if (!iu) 202677f2c1a4SBart Van Assche goto err; 202777f2c1a4SBart Van Assche 202877f2c1a4SBart Van Assche req = &ch->req_ring[idx]; 202905321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev->dev; 203049248644SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 203185507bccSRalph Campbell DMA_TO_DEVICE); 2032aef9ec39SRoland Dreier 2033f8b6e31eSDavid Dillow scmnd->host_scribble = (void *) req; 2034aef9ec39SRoland Dreier 2035aef9ec39SRoland Dreier cmd = iu->buf; 2036aef9ec39SRoland Dreier memset(cmd, 0, sizeof *cmd); 2037aef9ec39SRoland Dreier 2038aef9ec39SRoland Dreier cmd->opcode = SRP_CMD; 2039985aa495SBart Van Assche int_to_scsilun(scmnd->device->lun, &cmd->lun); 204077f2c1a4SBart Van Assche cmd->tag = tag; 2041aef9ec39SRoland Dreier memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2042aef9ec39SRoland Dreier 2043aef9ec39SRoland Dreier req->scmnd = scmnd; 2044aef9ec39SRoland Dreier req->cmd = iu; 2045aef9ec39SRoland Dreier 2046509c07bcSBart Van Assche len = srp_map_data(scmnd, ch, req); 2047aef9ec39SRoland Dreier if (len < 0) { 20487aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 2049d1b4289eSBart Van Assche PFX "Failed to map data (%d)\n", len); 2050d1b4289eSBart Van Assche /* 2051d1b4289eSBart Van Assche * If we ran out of memory descriptors (-ENOMEM) because an 2052d1b4289eSBart Van Assche * application is queuing many requests with more than 205352ede08fSBart Van Assche * max_pages_per_mr sg-list elements, tell the SCSI mid-layer 2054d1b4289eSBart Van Assche * to reduce queue depth temporarily. 2055d1b4289eSBart Van Assche */ 2056d1b4289eSBart Van Assche scmnd->result = len == -ENOMEM ? 
2057d1b4289eSBart Van Assche DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; 205876c75b25SBart Van Assche goto err_iu; 2059aef9ec39SRoland Dreier } 2060aef9ec39SRoland Dreier 206149248644SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 206285507bccSRalph Campbell DMA_TO_DEVICE); 2063aef9ec39SRoland Dreier 2064509c07bcSBart Van Assche if (srp_post_send(ch, iu, len)) { 20657aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2066aef9ec39SRoland Dreier goto err_unmap; 2067aef9ec39SRoland Dreier } 2068aef9ec39SRoland Dreier 2069d1b4289eSBart Van Assche ret = 0; 2070d1b4289eSBart Van Assche 2071a95cadb9SBart Van Assche unlock_rport: 2072a95cadb9SBart Van Assche if (in_scsi_eh) 2073a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2074a95cadb9SBart Van Assche 2075d1b4289eSBart Van Assche return ret; 2076aef9ec39SRoland Dreier 2077aef9ec39SRoland Dreier err_unmap: 2078509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 2079aef9ec39SRoland Dreier 208076c75b25SBart Van Assche err_iu: 2081509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_CMD); 208276c75b25SBart Van Assche 2083024ca901SBart Van Assche /* 2084024ca901SBart Van Assche * Avoid that the loops that iterate over the request ring can 2085024ca901SBart Van Assche * encounter a dangling SCSI command pointer. 2086024ca901SBart Van Assche */ 2087024ca901SBart Van Assche req->scmnd = NULL; 2088024ca901SBart Van Assche 2089d1b4289eSBart Van Assche err: 2090d1b4289eSBart Van Assche if (scmnd->result) { 2091d1b4289eSBart Van Assche scmnd->scsi_done(scmnd); 2092d1b4289eSBart Van Assche ret = 0; 2093d1b4289eSBart Van Assche } else { 2094d1b4289eSBart Van Assche ret = SCSI_MLQUEUE_HOST_BUSY; 2095d1b4289eSBart Van Assche } 2096a95cadb9SBart Van Assche 2097d1b4289eSBart Van Assche goto unlock_rport; 2098aef9ec39SRoland Dreier } 2099aef9ec39SRoland Dreier 21004d73f95fSBart Van Assche /* 21014d73f95fSBart Van Assche * Note: the resources allocated in this function are freed in 2102509c07bcSBart Van Assche * srp_free_ch_ib(). 
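 * Both rings hold target->queue_size IUs. Only the TX IUs are chained
 * onto ch->free_tx here; the RX IUs are posted to the receive queue
 * later, from srp_cm_rep_handler().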
21034d73f95fSBart Van Assche */ 2104509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2105aef9ec39SRoland Dreier { 2106509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2107aef9ec39SRoland Dreier int i; 2108aef9ec39SRoland Dreier 2109509c07bcSBart Van Assche ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), 21104d73f95fSBart Van Assche GFP_KERNEL); 2111509c07bcSBart Van Assche if (!ch->rx_ring) 21124d73f95fSBart Van Assche goto err_no_ring; 2113509c07bcSBart Van Assche ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), 21144d73f95fSBart Van Assche GFP_KERNEL); 2115509c07bcSBart Van Assche if (!ch->tx_ring) 21164d73f95fSBart Van Assche goto err_no_ring; 21174d73f95fSBart Van Assche 21184d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2119509c07bcSBart Van Assche ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2120509c07bcSBart Van Assche ch->max_ti_iu_len, 2121aef9ec39SRoland Dreier GFP_KERNEL, DMA_FROM_DEVICE); 2122509c07bcSBart Van Assche if (!ch->rx_ring[i]) 2123aef9ec39SRoland Dreier goto err; 2124aef9ec39SRoland Dreier } 2125aef9ec39SRoland Dreier 21264d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2127509c07bcSBart Van Assche ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 212849248644SDavid Dillow target->max_iu_len, 2129aef9ec39SRoland Dreier GFP_KERNEL, DMA_TO_DEVICE); 2130509c07bcSBart Van Assche if (!ch->tx_ring[i]) 2131aef9ec39SRoland Dreier goto err; 2132dcb4cb85SBart Van Assche 2133509c07bcSBart Van Assche list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2134aef9ec39SRoland Dreier } 2135aef9ec39SRoland Dreier 2136aef9ec39SRoland Dreier return 0; 2137aef9ec39SRoland Dreier 2138aef9ec39SRoland Dreier err: 21394d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2140509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 2141509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 2142aef9ec39SRoland Dreier } 2143aef9ec39SRoland Dreier 21444d73f95fSBart Van Assche 21454d73f95fSBart Van Assche err_no_ring: 2146509c07bcSBart Van Assche kfree(ch->tx_ring); 2147509c07bcSBart Van Assche ch->tx_ring = NULL; 2148509c07bcSBart Van Assche kfree(ch->rx_ring); 2149509c07bcSBart Van Assche ch->rx_ring = NULL; 2150aef9ec39SRoland Dreier 2151aef9ec39SRoland Dreier return -ENOMEM; 2152aef9ec39SRoland Dreier } 2153aef9ec39SRoland Dreier 2154c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2155c9b03c1aSBart Van Assche { 2156c9b03c1aSBart Van Assche uint64_t T_tr_ns, max_compl_time_ms; 2157c9b03c1aSBart Van Assche uint32_t rq_tmo_jiffies; 2158c9b03c1aSBart Van Assche 2159c9b03c1aSBart Van Assche /* 2160c9b03c1aSBart Van Assche * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2161c9b03c1aSBart Van Assche * table 91), both the QP timeout and the retry count have to be set 2162c9b03c1aSBart Van Assche * for RC QP's during the RTR to RTS transition. 2163c9b03c1aSBart Van Assche */ 2164c9b03c1aSBart Van Assche WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2165c9b03c1aSBart Van Assche (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2166c9b03c1aSBart Van Assche 2167c9b03c1aSBart Van Assche /* 2168c9b03c1aSBart Van Assche * Set target->rq_tmo_jiffies to one second more than the largest time 2169c9b03c1aSBart Van Assche * it can take before an error completion is generated. 
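 * With a local ACK timeout of 19 and retry_cnt 7, for example, T_tr is
 * 4096 * 2^19 ns (about 2.1 s), the worst-case completion time is
 * 7 * 4 * 2.1 s (about 60 s) and rq_tmo_jiffies corresponds to roughly 61 s.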
See also 2170c9b03c1aSBart Van Assche * C9-140..142 in the IBTA spec for more information about how to 2171c9b03c1aSBart Van Assche * convert the QP Local ACK Timeout value to nanoseconds. 2172c9b03c1aSBart Van Assche */ 2173c9b03c1aSBart Van Assche T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2174c9b03c1aSBart Van Assche max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2175c9b03c1aSBart Van Assche do_div(max_compl_time_ms, NSEC_PER_MSEC); 2176c9b03c1aSBart Van Assche rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2177c9b03c1aSBart Van Assche 2178c9b03c1aSBart Van Assche return rq_tmo_jiffies; 2179c9b03c1aSBart Van Assche } 2180c9b03c1aSBart Van Assche 2181961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2182e6300cbdSBart Van Assche const struct srp_login_rsp *lrsp, 2183509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2184961e0be8SDavid Dillow { 2185509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2186961e0be8SDavid Dillow struct ib_qp_attr *qp_attr = NULL; 2187961e0be8SDavid Dillow int attr_mask = 0; 2188961e0be8SDavid Dillow int ret; 2189961e0be8SDavid Dillow int i; 2190961e0be8SDavid Dillow 2191961e0be8SDavid Dillow if (lrsp->opcode == SRP_LOGIN_RSP) { 2192509c07bcSBart Van Assche ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2193509c07bcSBart Van Assche ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2194961e0be8SDavid Dillow 2195961e0be8SDavid Dillow /* 2196961e0be8SDavid Dillow * Reserve credits for task management so we don't 2197961e0be8SDavid Dillow * bounce requests back to the SCSI mid-layer. 2198961e0be8SDavid Dillow */ 2199961e0be8SDavid Dillow target->scsi_host->can_queue 2200509c07bcSBart Van Assche = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2201961e0be8SDavid Dillow target->scsi_host->can_queue); 22024d73f95fSBart Van Assche target->scsi_host->cmd_per_lun 22034d73f95fSBart Van Assche = min_t(int, target->scsi_host->can_queue, 22044d73f95fSBart Van Assche target->scsi_host->cmd_per_lun); 2205961e0be8SDavid Dillow } else { 2206961e0be8SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 2207961e0be8SDavid Dillow PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2208961e0be8SDavid Dillow ret = -ECONNRESET; 2209961e0be8SDavid Dillow goto error; 2210961e0be8SDavid Dillow } 2211961e0be8SDavid Dillow 2212509c07bcSBart Van Assche if (!ch->rx_ring) { 2213509c07bcSBart Van Assche ret = srp_alloc_iu_bufs(ch); 2214961e0be8SDavid Dillow if (ret) 2215961e0be8SDavid Dillow goto error; 2216961e0be8SDavid Dillow } 2217961e0be8SDavid Dillow 2218961e0be8SDavid Dillow ret = -ENOMEM; 2219961e0be8SDavid Dillow qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 2220961e0be8SDavid Dillow if (!qp_attr) 2221961e0be8SDavid Dillow goto error; 2222961e0be8SDavid Dillow 2223961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTR; 2224961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2225961e0be8SDavid Dillow if (ret) 2226961e0be8SDavid Dillow goto error_free; 2227961e0be8SDavid Dillow 2228509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2229961e0be8SDavid Dillow if (ret) 2230961e0be8SDavid Dillow goto error_free; 2231961e0be8SDavid Dillow 22324d73f95fSBart Van Assche for (i = 0; i < target->queue_size; i++) { 2233509c07bcSBart Van Assche struct srp_iu *iu = ch->rx_ring[i]; 2234509c07bcSBart Van Assche 2235509c07bcSBart Van Assche ret = srp_post_recv(ch, iu); 2236961e0be8SDavid Dillow if (ret) 2237961e0be8SDavid Dillow goto error_free; 2238961e0be8SDavid Dillow } 2239961e0be8SDavid 
Dillow 2240961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTS; 2241961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2242961e0be8SDavid Dillow if (ret) 2243961e0be8SDavid Dillow goto error_free; 2244961e0be8SDavid Dillow 2245c9b03c1aSBart Van Assche target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2246c9b03c1aSBart Van Assche 2247509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2248961e0be8SDavid Dillow if (ret) 2249961e0be8SDavid Dillow goto error_free; 2250961e0be8SDavid Dillow 2251961e0be8SDavid Dillow ret = ib_send_cm_rtu(cm_id, NULL, 0); 2252961e0be8SDavid Dillow 2253961e0be8SDavid Dillow error_free: 2254961e0be8SDavid Dillow kfree(qp_attr); 2255961e0be8SDavid Dillow 2256961e0be8SDavid Dillow error: 2257509c07bcSBart Van Assche ch->status = ret; 2258961e0be8SDavid Dillow } 2259961e0be8SDavid Dillow 2260aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2261aef9ec39SRoland Dreier struct ib_cm_event *event, 2262509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2263aef9ec39SRoland Dreier { 2264509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 22657aa54bd7SDavid Dillow struct Scsi_Host *shost = target->scsi_host; 2266aef9ec39SRoland Dreier struct ib_class_port_info *cpi; 2267aef9ec39SRoland Dreier int opcode; 2268aef9ec39SRoland Dreier 2269aef9ec39SRoland Dreier switch (event->param.rej_rcvd.reason) { 2270aef9ec39SRoland Dreier case IB_CM_REJ_PORT_CM_REDIRECT: 2271aef9ec39SRoland Dreier cpi = event->param.rej_rcvd.ari; 2272509c07bcSBart Van Assche ch->path.dlid = cpi->redirect_lid; 2273509c07bcSBart Van Assche ch->path.pkey = cpi->redirect_pkey; 2274aef9ec39SRoland Dreier cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2275509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); 2276aef9ec39SRoland Dreier 2277509c07bcSBart Van Assche ch->status = ch->path.dlid ? 2278aef9ec39SRoland Dreier SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2279aef9ec39SRoland Dreier break; 2280aef9ec39SRoland Dreier 2281aef9ec39SRoland Dreier case IB_CM_REJ_PORT_REDIRECT: 22825d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 2283aef9ec39SRoland Dreier /* 2284aef9ec39SRoland Dreier * Topspin/Cisco SRP gateways incorrectly send 2285aef9ec39SRoland Dreier * reject reason code 25 when they mean 24 2286aef9ec39SRoland Dreier * (port redirect). 
2287aef9ec39SRoland Dreier */ 2288509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, 2289aef9ec39SRoland Dreier event->param.rej_rcvd.ari, 16); 2290aef9ec39SRoland Dreier 22917aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, shost, 22927aa54bd7SDavid Dillow PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2293509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.subnet_prefix), 2294509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.interface_id)); 2295aef9ec39SRoland Dreier 2296509c07bcSBart Van Assche ch->status = SRP_PORT_REDIRECT; 2297aef9ec39SRoland Dreier } else { 22987aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 22997aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2300509c07bcSBart Van Assche ch->status = -ECONNRESET; 2301aef9ec39SRoland Dreier } 2302aef9ec39SRoland Dreier break; 2303aef9ec39SRoland Dreier 2304aef9ec39SRoland Dreier case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 23057aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23067aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2307509c07bcSBart Van Assche ch->status = -ECONNRESET; 2308aef9ec39SRoland Dreier break; 2309aef9ec39SRoland Dreier 2310aef9ec39SRoland Dreier case IB_CM_REJ_CONSUMER_DEFINED: 2311aef9ec39SRoland Dreier opcode = *(u8 *) event->private_data; 2312aef9ec39SRoland Dreier if (opcode == SRP_LOGIN_REJ) { 2313aef9ec39SRoland Dreier struct srp_login_rej *rej = event->private_data; 2314aef9ec39SRoland Dreier u32 reason = be32_to_cpu(rej->reason); 2315aef9ec39SRoland Dreier 2316aef9ec39SRoland Dreier if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 23177aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23187aa54bd7SDavid Dillow PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2319aef9ec39SRoland Dreier else 2320e7ffde01SBart Van Assche shost_printk(KERN_WARNING, shost, PFX 2321e7ffde01SBart Van Assche "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2322747fe000SBart Van Assche target->sgid.raw, 2323747fe000SBart Van Assche target->orig_dgid.raw, reason); 2324aef9ec39SRoland Dreier } else 23257aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23267aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2327aef9ec39SRoland Dreier " opcode 0x%02x\n", opcode); 2328509c07bcSBart Van Assche ch->status = -ECONNRESET; 2329aef9ec39SRoland Dreier break; 2330aef9ec39SRoland Dreier 23319fe4bcf4SDavid Dillow case IB_CM_REJ_STALE_CONN: 23329fe4bcf4SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2333509c07bcSBart Van Assche ch->status = SRP_STALE_CONN; 23349fe4bcf4SDavid Dillow break; 23359fe4bcf4SDavid Dillow 2336aef9ec39SRoland Dreier default: 23377aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2338aef9ec39SRoland Dreier event->param.rej_rcvd.reason); 2339509c07bcSBart Van Assche ch->status = -ECONNRESET; 2340aef9ec39SRoland Dreier } 2341aef9ec39SRoland Dreier } 2342aef9ec39SRoland Dreier 2343aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2344aef9ec39SRoland Dreier { 2345509c07bcSBart Van Assche struct srp_rdma_ch *ch = cm_id->context; 2346509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2347aef9ec39SRoland Dreier int comp = 0; 2348aef9ec39SRoland Dreier 2349aef9ec39SRoland Dreier switch (event->event) { 2350aef9ec39SRoland Dreier case IB_CM_REQ_ERROR: 23517aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 23527aa54bd7SDavid Dillow PFX "Sending CM REQ 
failed\n"); 2353aef9ec39SRoland Dreier comp = 1; 2354509c07bcSBart Van Assche ch->status = -ECONNRESET; 2355aef9ec39SRoland Dreier break; 2356aef9ec39SRoland Dreier 2357aef9ec39SRoland Dreier case IB_CM_REP_RECEIVED: 2358aef9ec39SRoland Dreier comp = 1; 2359509c07bcSBart Van Assche srp_cm_rep_handler(cm_id, event->private_data, ch); 2360aef9ec39SRoland Dreier break; 2361aef9ec39SRoland Dreier 2362aef9ec39SRoland Dreier case IB_CM_REJ_RECEIVED: 23637aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2364aef9ec39SRoland Dreier comp = 1; 2365aef9ec39SRoland Dreier 2366509c07bcSBart Van Assche srp_cm_rej_handler(cm_id, event, ch); 2367aef9ec39SRoland Dreier break; 2368aef9ec39SRoland Dreier 2369b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_RECEIVED: 23707aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 23717aa54bd7SDavid Dillow PFX "DREQ received - connection closed\n"); 2372c014c8cdSBart Van Assche ch->connected = false; 2373b7ac4ab4SIshai Rabinovitz if (ib_send_cm_drep(cm_id, NULL, 0)) 23747aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 23757aa54bd7SDavid Dillow PFX "Sending CM DREP failed\n"); 2376c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 2377aef9ec39SRoland Dreier break; 2378aef9ec39SRoland Dreier 2379aef9ec39SRoland Dreier case IB_CM_TIMEWAIT_EXIT: 23807aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 23817aa54bd7SDavid Dillow PFX "connection closed\n"); 2382ac72d766SBart Van Assche comp = 1; 2383aef9ec39SRoland Dreier 2384509c07bcSBart Van Assche ch->status = 0; 2385aef9ec39SRoland Dreier break; 2386aef9ec39SRoland Dreier 2387b7ac4ab4SIshai Rabinovitz case IB_CM_MRA_RECEIVED: 2388b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_ERROR: 2389b7ac4ab4SIshai Rabinovitz case IB_CM_DREP_RECEIVED: 2390b7ac4ab4SIshai Rabinovitz break; 2391b7ac4ab4SIshai Rabinovitz 2392aef9ec39SRoland Dreier default: 23937aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 23947aa54bd7SDavid Dillow PFX "Unhandled CM event %d\n", event->event); 2395aef9ec39SRoland Dreier break; 2396aef9ec39SRoland Dreier } 2397aef9ec39SRoland Dreier 2398aef9ec39SRoland Dreier if (comp) 2399509c07bcSBart Van Assche complete(&ch->done); 2400aef9ec39SRoland Dreier 2401aef9ec39SRoland Dreier return 0; 2402aef9ec39SRoland Dreier } 2403aef9ec39SRoland Dreier 240471444b97SJack Wang /** 240571444b97SJack Wang * srp_change_queue_depth - setting device queue depth 240671444b97SJack Wang * @sdev: scsi device struct 240771444b97SJack Wang * @qdepth: requested queue depth 240871444b97SJack Wang * 240971444b97SJack Wang * Returns queue depth. 
241071444b97SJack Wang */ 241171444b97SJack Wang static int 2412db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 241371444b97SJack Wang { 241471444b97SJack Wang if (!sdev->tagged_supported) 24151e6f2416SChristoph Hellwig qdepth = 1; 2416db5ed4dfSChristoph Hellwig return scsi_change_queue_depth(sdev, qdepth); 241771444b97SJack Wang } 241871444b97SJack Wang 2419985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2420985aa495SBart Van Assche u8 func) 2421aef9ec39SRoland Dreier { 2422509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2423a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 242419081f31SDavid Dillow struct ib_device *dev = target->srp_host->srp_dev->dev; 2425aef9ec39SRoland Dreier struct srp_iu *iu; 2426aef9ec39SRoland Dreier struct srp_tsk_mgmt *tsk_mgmt; 2427aef9ec39SRoland Dreier 2428c014c8cdSBart Van Assche if (!ch->connected || target->qp_in_error) 24293780d1f0SBart Van Assche return -1; 24303780d1f0SBart Van Assche 2431509c07bcSBart Van Assche init_completion(&ch->tsk_mgmt_done); 2432aef9ec39SRoland Dreier 2433a95cadb9SBart Van Assche /* 2434509c07bcSBart Van Assche * Lock the rport mutex to avoid that srp_create_ch_ib() is 2435a95cadb9SBart Van Assche * invoked while a task management function is being sent. 2436a95cadb9SBart Van Assche */ 2437a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2438509c07bcSBart Van Assche spin_lock_irq(&ch->lock); 2439509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2440509c07bcSBart Van Assche spin_unlock_irq(&ch->lock); 244176c75b25SBart Van Assche 2442a95cadb9SBart Van Assche if (!iu) { 2443a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2444a95cadb9SBart Van Assche 244576c75b25SBart Van Assche return -1; 2446a95cadb9SBart Van Assche } 2447aef9ec39SRoland Dreier 244819081f31SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 244919081f31SDavid Dillow DMA_TO_DEVICE); 2450aef9ec39SRoland Dreier tsk_mgmt = iu->buf; 2451aef9ec39SRoland Dreier memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2452aef9ec39SRoland Dreier 2453aef9ec39SRoland Dreier tsk_mgmt->opcode = SRP_TSK_MGMT; 2454985aa495SBart Van Assche int_to_scsilun(lun, &tsk_mgmt->lun); 2455f8b6e31eSDavid Dillow tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 2456aef9ec39SRoland Dreier tsk_mgmt->tsk_mgmt_func = func; 2457f8b6e31eSDavid Dillow tsk_mgmt->task_tag = req_tag; 2458aef9ec39SRoland Dreier 245919081f31SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 246019081f31SDavid Dillow DMA_TO_DEVICE); 2461509c07bcSBart Van Assche if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2462509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2463a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2464a95cadb9SBart Van Assche 246576c75b25SBart Van Assche return -1; 246676c75b25SBart Van Assche } 2467a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2468d945e1dfSRoland Dreier 2469509c07bcSBart Van Assche if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, 2470aef9ec39SRoland Dreier msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2471d945e1dfSRoland Dreier return -1; 2472aef9ec39SRoland Dreier 2473d945e1dfSRoland Dreier return 0; 2474d945e1dfSRoland Dreier } 2475d945e1dfSRoland Dreier 2476aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd) 2477aef9ec39SRoland Dreier { 2478d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2479f8b6e31eSDavid Dillow 
struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 248077f2c1a4SBart Van Assche u32 tag; 2481d92c0da7SBart Van Assche u16 ch_idx; 2482509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2483086f44f5SBart Van Assche int ret; 2484d945e1dfSRoland Dreier 24857aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2486aef9ec39SRoland Dreier 2487d92c0da7SBart Van Assche if (!req) 248899b6697aSBart Van Assche return SUCCESS; 248977f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2490d92c0da7SBart Van Assche ch_idx = blk_mq_unique_tag_to_hwq(tag); 2491d92c0da7SBart Van Assche if (WARN_ON_ONCE(ch_idx >= target->ch_count)) 2492d92c0da7SBart Van Assche return SUCCESS; 2493d92c0da7SBart Van Assche ch = &target->ch[ch_idx]; 2494d92c0da7SBart Van Assche if (!srp_claim_req(ch, req, NULL, scmnd)) 2495d92c0da7SBart Van Assche return SUCCESS; 2496d92c0da7SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 2497d92c0da7SBart Van Assche "Sending SRP abort for tag %#x\n", tag); 249877f2c1a4SBart Van Assche if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, 249980d5e8a2SBart Van Assche SRP_TSK_ABORT_TASK) == 0) 2500086f44f5SBart Van Assche ret = SUCCESS; 2501ed9b2264SBart Van Assche else if (target->rport->state == SRP_RPORT_LOST) 250299e1c139SBart Van Assche ret = FAST_IO_FAIL; 2503086f44f5SBart Van Assche else 2504086f44f5SBart Van Assche ret = FAILED; 2505509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 2506d945e1dfSRoland Dreier scmnd->result = DID_ABORT << 16; 2507d8536670SBart Van Assche scmnd->scsi_done(scmnd); 2508d945e1dfSRoland Dreier 2509086f44f5SBart Van Assche return ret; 2510aef9ec39SRoland Dreier } 2511aef9ec39SRoland Dreier 2512aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd) 2513aef9ec39SRoland Dreier { 2514d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2515d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2516536ae14eSBart Van Assche int i; 2517d945e1dfSRoland Dreier 25187aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2519aef9ec39SRoland Dreier 2520d92c0da7SBart Van Assche ch = &target->ch[0]; 2521509c07bcSBart Van Assche if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, 2522f8b6e31eSDavid Dillow SRP_TSK_LUN_RESET)) 2523d945e1dfSRoland Dreier return FAILED; 2524509c07bcSBart Van Assche if (ch->tsk_mgmt_status) 2525d945e1dfSRoland Dreier return FAILED; 2526d945e1dfSRoland Dreier 2527d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2528d92c0da7SBart Van Assche ch = &target->ch[i]; 25294d73f95fSBart Van Assche for (i = 0; i < target->req_ring_size; ++i) { 2530509c07bcSBart Van Assche struct srp_request *req = &ch->req_ring[i]; 2531509c07bcSBart Van Assche 2532509c07bcSBart Van Assche srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2533536ae14eSBart Van Assche } 2534d92c0da7SBart Van Assche } 2535d945e1dfSRoland Dreier 2536d945e1dfSRoland Dreier return SUCCESS; 2537aef9ec39SRoland Dreier } 2538aef9ec39SRoland Dreier 2539aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd) 2540aef9ec39SRoland Dreier { 2541aef9ec39SRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2542aef9ec39SRoland Dreier 25437aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 2544aef9ec39SRoland Dreier 2545ed9b2264SBart Van Assche return srp_reconnect_rport(target->rport) == 0 ? 
SUCCESS : FAILED; 2546aef9ec39SRoland Dreier } 2547aef9ec39SRoland Dreier 2548c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev) 2549c9b03c1aSBart Van Assche { 2550c9b03c1aSBart Van Assche struct Scsi_Host *shost = sdev->host; 2551c9b03c1aSBart Van Assche struct srp_target_port *target = host_to_target(shost); 2552c9b03c1aSBart Van Assche struct request_queue *q = sdev->request_queue; 2553c9b03c1aSBart Van Assche unsigned long timeout; 2554c9b03c1aSBart Van Assche 2555c9b03c1aSBart Van Assche if (sdev->type == TYPE_DISK) { 2556c9b03c1aSBart Van Assche timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2557c9b03c1aSBart Van Assche blk_queue_rq_timeout(q, timeout); 2558c9b03c1aSBart Van Assche } 2559c9b03c1aSBart Van Assche 2560c9b03c1aSBart Van Assche return 0; 2561c9b03c1aSBart Van Assche } 2562c9b03c1aSBart Van Assche 2563ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 2564ee959b00STony Jones char *buf) 25656ecb0c84SRoland Dreier { 2566ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25676ecb0c84SRoland Dreier 256845c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 25696ecb0c84SRoland Dreier } 25706ecb0c84SRoland Dreier 2571ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 2572ee959b00STony Jones char *buf) 25736ecb0c84SRoland Dreier { 2574ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25756ecb0c84SRoland Dreier 257645c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 25776ecb0c84SRoland Dreier } 25786ecb0c84SRoland Dreier 2579ee959b00STony Jones static ssize_t show_service_id(struct device *dev, 2580ee959b00STony Jones struct device_attribute *attr, char *buf) 25816ecb0c84SRoland Dreier { 2582ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25836ecb0c84SRoland Dreier 258445c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); 25856ecb0c84SRoland Dreier } 25866ecb0c84SRoland Dreier 2587ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 2588ee959b00STony Jones char *buf) 25896ecb0c84SRoland Dreier { 2590ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 25916ecb0c84SRoland Dreier 2592747fe000SBart Van Assche return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); 25936ecb0c84SRoland Dreier } 25946ecb0c84SRoland Dreier 2595848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2596848b3082SBart Van Assche char *buf) 2597848b3082SBart Van Assche { 2598848b3082SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2599848b3082SBart Van Assche 2600747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->sgid.raw); 2601848b3082SBart Van Assche } 2602848b3082SBart Van Assche 2603ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2604ee959b00STony Jones char *buf) 26056ecb0c84SRoland Dreier { 2606ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2607d92c0da7SBart Van Assche struct srp_rdma_ch *ch = &target->ch[0]; 26086ecb0c84SRoland Dreier 2609509c07bcSBart Van Assche return sprintf(buf, "%pI6\n", ch->path.dgid.raw); 26106ecb0c84SRoland Dreier } 26116ecb0c84SRoland 
Dreier 2612ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev, 2613ee959b00STony Jones struct device_attribute *attr, char *buf) 26143633b3d0SIshai Rabinovitz { 2615ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26163633b3d0SIshai Rabinovitz 2617747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->orig_dgid.raw); 26183633b3d0SIshai Rabinovitz } 26193633b3d0SIshai Rabinovitz 262089de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev, 262189de7486SBart Van Assche struct device_attribute *attr, char *buf) 262289de7486SBart Van Assche { 262389de7486SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2624d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2625d92c0da7SBart Van Assche int i, req_lim = INT_MAX; 262689de7486SBart Van Assche 2627d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2628d92c0da7SBart Van Assche ch = &target->ch[i]; 2629d92c0da7SBart Van Assche req_lim = min(req_lim, ch->req_lim); 2630d92c0da7SBart Van Assche } 2631d92c0da7SBart Van Assche return sprintf(buf, "%d\n", req_lim); 263289de7486SBart Van Assche } 263389de7486SBart Van Assche 2634ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev, 2635ee959b00STony Jones struct device_attribute *attr, char *buf) 26366bfa24faSRoland Dreier { 2637ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26386bfa24faSRoland Dreier 26396bfa24faSRoland Dreier return sprintf(buf, "%d\n", target->zero_req_lim); 26406bfa24faSRoland Dreier } 26416bfa24faSRoland Dreier 2642ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev, 2643ee959b00STony Jones struct device_attribute *attr, char *buf) 2644ded7f1a1SIshai Rabinovitz { 2645ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2646ded7f1a1SIshai Rabinovitz 2647ded7f1a1SIshai Rabinovitz return sprintf(buf, "%d\n", target->srp_host->port); 2648ded7f1a1SIshai Rabinovitz } 2649ded7f1a1SIshai Rabinovitz 2650ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev, 2651ee959b00STony Jones struct device_attribute *attr, char *buf) 2652ded7f1a1SIshai Rabinovitz { 2653ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2654ded7f1a1SIshai Rabinovitz 265505321937SGreg Kroah-Hartman return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2656ded7f1a1SIshai Rabinovitz } 2657ded7f1a1SIshai Rabinovitz 2658d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, 2659d92c0da7SBart Van Assche char *buf) 2660d92c0da7SBart Van Assche { 2661d92c0da7SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2662d92c0da7SBart Van Assche 2663d92c0da7SBart Van Assche return sprintf(buf, "%d\n", target->ch_count); 2664d92c0da7SBart Van Assche } 2665d92c0da7SBart Van Assche 26664b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev, 26674b5e5f41SBart Van Assche struct device_attribute *attr, char *buf) 26684b5e5f41SBart Van Assche { 26694b5e5f41SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26704b5e5f41SBart Van Assche 26714b5e5f41SBart Van Assche return sprintf(buf, "%d\n", target->comp_vector); 26724b5e5f41SBart Van Assche } 26734b5e5f41SBart Van Assche 26747bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev, 26757bb312e4SVu Pham struct 
device_attribute *attr, char *buf) 26767bb312e4SVu Pham { 26777bb312e4SVu Pham struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26787bb312e4SVu Pham 26797bb312e4SVu Pham return sprintf(buf, "%d\n", target->tl_retry_count); 26807bb312e4SVu Pham } 26817bb312e4SVu Pham 268249248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev, 268349248644SDavid Dillow struct device_attribute *attr, char *buf) 268449248644SDavid Dillow { 268549248644SDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 268649248644SDavid Dillow 268749248644SDavid Dillow return sprintf(buf, "%u\n", target->cmd_sg_cnt); 268849248644SDavid Dillow } 268949248644SDavid Dillow 2690c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev, 2691c07d424dSDavid Dillow struct device_attribute *attr, char *buf) 2692c07d424dSDavid Dillow { 2693c07d424dSDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2694c07d424dSDavid Dillow 2695c07d424dSDavid Dillow return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); 2696c07d424dSDavid Dillow } 2697c07d424dSDavid Dillow 2698ee959b00STony Jones static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 2699ee959b00STony Jones static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 2700ee959b00STony Jones static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 2701ee959b00STony Jones static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2702848b3082SBart Van Assche static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); 2703ee959b00STony Jones static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 2704ee959b00STony Jones static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 270589de7486SBart Van Assche static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 2706ee959b00STony Jones static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2707ee959b00STony Jones static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2708ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2709d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); 27104b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 27117bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 271249248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2713c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); 27146ecb0c84SRoland Dreier 2715ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = { 2716ee959b00STony Jones &dev_attr_id_ext, 2717ee959b00STony Jones &dev_attr_ioc_guid, 2718ee959b00STony Jones &dev_attr_service_id, 2719ee959b00STony Jones &dev_attr_pkey, 2720848b3082SBart Van Assche &dev_attr_sgid, 2721ee959b00STony Jones &dev_attr_dgid, 2722ee959b00STony Jones &dev_attr_orig_dgid, 272389de7486SBart Van Assche &dev_attr_req_lim, 2724ee959b00STony Jones &dev_attr_zero_req_lim, 2725ee959b00STony Jones &dev_attr_local_ib_port, 2726ee959b00STony Jones &dev_attr_local_ib_device, 2727d92c0da7SBart Van Assche &dev_attr_ch_count, 27284b5e5f41SBart Van Assche &dev_attr_comp_vector, 27297bb312e4SVu Pham &dev_attr_tl_retry_count, 273049248644SDavid Dillow &dev_attr_cmd_sg_entries, 2731c07d424dSDavid Dillow &dev_attr_allow_ext_sg, 27326ecb0c84SRoland Dreier NULL 27336ecb0c84SRoland Dreier }; 27346ecb0c84SRoland Dreier 2735aef9ec39SRoland Dreier static 
struct scsi_host_template srp_template = { 2736aef9ec39SRoland Dreier .module = THIS_MODULE, 2737b7f008fdSRoland Dreier .name = "InfiniBand SRP initiator", 2738b7f008fdSRoland Dreier .proc_name = DRV_NAME, 2739c9b03c1aSBart Van Assche .slave_configure = srp_slave_configure, 2740aef9ec39SRoland Dreier .info = srp_target_info, 2741aef9ec39SRoland Dreier .queuecommand = srp_queuecommand, 274271444b97SJack Wang .change_queue_depth = srp_change_queue_depth, 2743aef9ec39SRoland Dreier .eh_abort_handler = srp_abort, 2744aef9ec39SRoland Dreier .eh_device_reset_handler = srp_reset_device, 2745aef9ec39SRoland Dreier .eh_host_reset_handler = srp_reset_host, 27462742c1daSBart Van Assche .skip_settle_delay = true, 274749248644SDavid Dillow .sg_tablesize = SRP_DEF_SG_TABLESIZE, 27484d73f95fSBart Van Assche .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 2749aef9ec39SRoland Dreier .this_id = -1, 27504d73f95fSBart Van Assche .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 27516ecb0c84SRoland Dreier .use_clustering = ENABLE_CLUSTERING, 275277f2c1a4SBart Van Assche .shost_attrs = srp_host_attrs, 275377f2c1a4SBart Van Assche .use_blk_tags = 1, 2754c40ecc12SChristoph Hellwig .track_queue_depth = 1, 2755aef9ec39SRoland Dreier }; 2756aef9ec39SRoland Dreier 275734aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host) 275834aa654eSBart Van Assche { 275934aa654eSBart Van Assche struct scsi_device *sdev; 276034aa654eSBart Van Assche int c = 0; 276134aa654eSBart Van Assche 276234aa654eSBart Van Assche shost_for_each_device(sdev, host) 276334aa654eSBart Van Assche c++; 276434aa654eSBart Van Assche 276534aa654eSBart Van Assche return c; 276634aa654eSBart Van Assche } 276734aa654eSBart Van Assche 2768bc44bd1dSBart Van Assche /* 2769bc44bd1dSBart Van Assche * Return values: 2770bc44bd1dSBart Van Assche * < 0 upon failure. Caller is responsible for SRP target port cleanup. 2771bc44bd1dSBart Van Assche * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 2772bc44bd1dSBart Van Assche * removal has been scheduled. 2773bc44bd1dSBart Van Assche * 0 and target->state != SRP_TARGET_REMOVED upon success. 
2774bc44bd1dSBart Van Assche */ 2775aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2776aef9ec39SRoland Dreier { 27773236822bSFUJITA Tomonori struct srp_rport_identifiers ids; 27783236822bSFUJITA Tomonori struct srp_rport *rport; 27793236822bSFUJITA Tomonori 278034aa654eSBart Van Assche target->state = SRP_TARGET_SCANNING; 2781aef9ec39SRoland Dreier sprintf(target->target_name, "SRP.T10:%016llX", 278245c37cadSBart Van Assche be64_to_cpu(target->id_ext)); 2783aef9ec39SRoland Dreier 278405321937SGreg Kroah-Hartman if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 2785aef9ec39SRoland Dreier return -ENODEV; 2786aef9ec39SRoland Dreier 27873236822bSFUJITA Tomonori memcpy(ids.port_id, &target->id_ext, 8); 27883236822bSFUJITA Tomonori memcpy(ids.port_id + 8, &target->ioc_guid, 8); 2789aebd5e47SFUJITA Tomonori ids.roles = SRP_RPORT_ROLE_TARGET; 27903236822bSFUJITA Tomonori rport = srp_rport_add(target->scsi_host, &ids); 27913236822bSFUJITA Tomonori if (IS_ERR(rport)) { 27923236822bSFUJITA Tomonori scsi_remove_host(target->scsi_host); 27933236822bSFUJITA Tomonori return PTR_ERR(rport); 27943236822bSFUJITA Tomonori } 27953236822bSFUJITA Tomonori 2796dc1bdbd9SBart Van Assche rport->lld_data = target; 27979dd69a60SBart Van Assche target->rport = rport; 2798dc1bdbd9SBart Van Assche 2799b3589fd4SMatthew Wilcox spin_lock(&host->target_lock); 2800aef9ec39SRoland Dreier list_add_tail(&target->list, &host->target_list); 2801b3589fd4SMatthew Wilcox spin_unlock(&host->target_lock); 2802aef9ec39SRoland Dreier 2803aef9ec39SRoland Dreier scsi_scan_target(&target->scsi_host->shost_gendev, 28041962a4a1SMatthew Wilcox 0, target->scsi_id, SCAN_WILD_CARD, 0); 2805aef9ec39SRoland Dreier 2806c014c8cdSBart Van Assche if (srp_connected_ch(target) < target->ch_count || 2807c014c8cdSBart Van Assche target->qp_in_error) { 280834aa654eSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 280934aa654eSBart Van Assche PFX "SCSI scan failed - removing SCSI host\n"); 281034aa654eSBart Van Assche srp_queue_remove_work(target); 281134aa654eSBart Van Assche goto out; 281234aa654eSBart Van Assche } 281334aa654eSBart Van Assche 281434aa654eSBart Van Assche pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n", 281534aa654eSBart Van Assche dev_name(&target->scsi_host->shost_gendev), 281634aa654eSBart Van Assche srp_sdev_count(target->scsi_host)); 281734aa654eSBart Van Assche 281834aa654eSBart Van Assche spin_lock_irq(&target->lock); 281934aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 282034aa654eSBart Van Assche target->state = SRP_TARGET_LIVE; 282134aa654eSBart Van Assche spin_unlock_irq(&target->lock); 282234aa654eSBart Van Assche 282334aa654eSBart Van Assche out: 2824aef9ec39SRoland Dreier return 0; 2825aef9ec39SRoland Dreier } 2826aef9ec39SRoland Dreier 2827ee959b00STony Jones static void srp_release_dev(struct device *dev) 2828aef9ec39SRoland Dreier { 2829aef9ec39SRoland Dreier struct srp_host *host = 2830ee959b00STony Jones container_of(dev, struct srp_host, dev); 2831aef9ec39SRoland Dreier 2832aef9ec39SRoland Dreier complete(&host->released); 2833aef9ec39SRoland Dreier } 2834aef9ec39SRoland Dreier 2835aef9ec39SRoland Dreier static struct class srp_class = { 2836aef9ec39SRoland Dreier .name = "infiniband_srp", 2837ee959b00STony Jones .dev_release = srp_release_dev 2838aef9ec39SRoland Dreier }; 2839aef9ec39SRoland Dreier 284096fc248aSBart Van Assche /** 284196fc248aSBart Van Assche * srp_conn_unique() - check whether the 
connection to a target is unique 2842af24663bSBart Van Assche * @host: SRP host. 2843af24663bSBart Van Assche * @target: SRP target port. 284496fc248aSBart Van Assche */ 284596fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host, 284696fc248aSBart Van Assche struct srp_target_port *target) 284796fc248aSBart Van Assche { 284896fc248aSBart Van Assche struct srp_target_port *t; 284996fc248aSBart Van Assche bool ret = false; 285096fc248aSBart Van Assche 285196fc248aSBart Van Assche if (target->state == SRP_TARGET_REMOVED) 285296fc248aSBart Van Assche goto out; 285396fc248aSBart Van Assche 285496fc248aSBart Van Assche ret = true; 285596fc248aSBart Van Assche 285696fc248aSBart Van Assche spin_lock(&host->target_lock); 285796fc248aSBart Van Assche list_for_each_entry(t, &host->target_list, list) { 285896fc248aSBart Van Assche if (t != target && 285996fc248aSBart Van Assche target->id_ext == t->id_ext && 286096fc248aSBart Van Assche target->ioc_guid == t->ioc_guid && 286196fc248aSBart Van Assche target->initiator_ext == t->initiator_ext) { 286296fc248aSBart Van Assche ret = false; 286396fc248aSBart Van Assche break; 286496fc248aSBart Van Assche } 286596fc248aSBart Van Assche } 286696fc248aSBart Van Assche spin_unlock(&host->target_lock); 286796fc248aSBart Van Assche 286896fc248aSBart Van Assche out: 286996fc248aSBart Van Assche return ret; 287096fc248aSBart Van Assche } 287196fc248aSBart Van Assche 2872aef9ec39SRoland Dreier /* 2873aef9ec39SRoland Dreier * Target ports are added by writing 2874aef9ec39SRoland Dreier * 2875aef9ec39SRoland Dreier * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 2876aef9ec39SRoland Dreier * pkey=<P_Key>,service_id=<service ID> 2877aef9ec39SRoland Dreier * 2878aef9ec39SRoland Dreier * to the add_target sysfs attribute. 
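 *
 * A hypothetical example (the HCA name, port number and all identifier
 * values below are made up purely for illustration):
 *
 *   echo "id_ext=200400a0b8114abc,ioc_guid=0002c90300a1b2c3,dgid=fe800000000000000002c90300a1b2c4,pkey=ffff,service_id=0002c90300a1b2c3" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target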
2879aef9ec39SRoland Dreier */ 2880aef9ec39SRoland Dreier enum { 2881aef9ec39SRoland Dreier SRP_OPT_ERR = 0, 2882aef9ec39SRoland Dreier SRP_OPT_ID_EXT = 1 << 0, 2883aef9ec39SRoland Dreier SRP_OPT_IOC_GUID = 1 << 1, 2884aef9ec39SRoland Dreier SRP_OPT_DGID = 1 << 2, 2885aef9ec39SRoland Dreier SRP_OPT_PKEY = 1 << 3, 2886aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID = 1 << 4, 2887aef9ec39SRoland Dreier SRP_OPT_MAX_SECT = 1 << 5, 288852fb2b50SVu Pham SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 28890c0450dbSRamachandra K SRP_OPT_IO_CLASS = 1 << 7, 289001cb9bcbSIshai Rabinovitz SRP_OPT_INITIATOR_EXT = 1 << 8, 289149248644SDavid Dillow SRP_OPT_CMD_SG_ENTRIES = 1 << 9, 2892c07d424dSDavid Dillow SRP_OPT_ALLOW_EXT_SG = 1 << 10, 2893c07d424dSDavid Dillow SRP_OPT_SG_TABLESIZE = 1 << 11, 28944b5e5f41SBart Van Assche SRP_OPT_COMP_VECTOR = 1 << 12, 28957bb312e4SVu Pham SRP_OPT_TL_RETRY_COUNT = 1 << 13, 28964d73f95fSBart Van Assche SRP_OPT_QUEUE_SIZE = 1 << 14, 2897aef9ec39SRoland Dreier SRP_OPT_ALL = (SRP_OPT_ID_EXT | 2898aef9ec39SRoland Dreier SRP_OPT_IOC_GUID | 2899aef9ec39SRoland Dreier SRP_OPT_DGID | 2900aef9ec39SRoland Dreier SRP_OPT_PKEY | 2901aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID), 2902aef9ec39SRoland Dreier }; 2903aef9ec39SRoland Dreier 2904a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = { 2905aef9ec39SRoland Dreier { SRP_OPT_ID_EXT, "id_ext=%s" }, 2906aef9ec39SRoland Dreier { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 2907aef9ec39SRoland Dreier { SRP_OPT_DGID, "dgid=%s" }, 2908aef9ec39SRoland Dreier { SRP_OPT_PKEY, "pkey=%x" }, 2909aef9ec39SRoland Dreier { SRP_OPT_SERVICE_ID, "service_id=%s" }, 2910aef9ec39SRoland Dreier { SRP_OPT_MAX_SECT, "max_sect=%d" }, 291152fb2b50SVu Pham { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 29120c0450dbSRamachandra K { SRP_OPT_IO_CLASS, "io_class=%x" }, 291301cb9bcbSIshai Rabinovitz { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 291449248644SDavid Dillow { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, 2915c07d424dSDavid Dillow { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, 2916c07d424dSDavid Dillow { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, 29174b5e5f41SBart Van Assche { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, 29187bb312e4SVu Pham { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, 29194d73f95fSBart Van Assche { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, 2920aef9ec39SRoland Dreier { SRP_OPT_ERR, NULL } 2921aef9ec39SRoland Dreier }; 2922aef9ec39SRoland Dreier 2923aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target) 2924aef9ec39SRoland Dreier { 2925aef9ec39SRoland Dreier char *options, *sep_opt; 2926aef9ec39SRoland Dreier char *p; 2927aef9ec39SRoland Dreier char dgid[3]; 2928aef9ec39SRoland Dreier substring_t args[MAX_OPT_ARGS]; 2929aef9ec39SRoland Dreier int opt_mask = 0; 2930aef9ec39SRoland Dreier int token; 2931aef9ec39SRoland Dreier int ret = -EINVAL; 2932aef9ec39SRoland Dreier int i; 2933aef9ec39SRoland Dreier 2934aef9ec39SRoland Dreier options = kstrdup(buf, GFP_KERNEL); 2935aef9ec39SRoland Dreier if (!options) 2936aef9ec39SRoland Dreier return -ENOMEM; 2937aef9ec39SRoland Dreier 2938aef9ec39SRoland Dreier sep_opt = options; 29397dcf9c19SSagi Grimberg while ((p = strsep(&sep_opt, ",\n")) != NULL) { 2940aef9ec39SRoland Dreier if (!*p) 2941aef9ec39SRoland Dreier continue; 2942aef9ec39SRoland Dreier 2943aef9ec39SRoland Dreier token = match_token(p, srp_opt_tokens, args); 2944aef9ec39SRoland Dreier opt_mask |= token; 2945aef9ec39SRoland Dreier 2946aef9ec39SRoland Dreier switch (token) { 2947aef9ec39SRoland Dreier 
case SRP_OPT_ID_EXT: 2948aef9ec39SRoland Dreier p = match_strdup(args); 2949a20f3a6dSIshai Rabinovitz if (!p) { 2950a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 2951a20f3a6dSIshai Rabinovitz goto out; 2952a20f3a6dSIshai Rabinovitz } 2953aef9ec39SRoland Dreier target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 2954aef9ec39SRoland Dreier kfree(p); 2955aef9ec39SRoland Dreier break; 2956aef9ec39SRoland Dreier 2957aef9ec39SRoland Dreier case SRP_OPT_IOC_GUID: 2958aef9ec39SRoland Dreier p = match_strdup(args); 2959a20f3a6dSIshai Rabinovitz if (!p) { 2960a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 2961a20f3a6dSIshai Rabinovitz goto out; 2962a20f3a6dSIshai Rabinovitz } 2963aef9ec39SRoland Dreier target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 2964aef9ec39SRoland Dreier kfree(p); 2965aef9ec39SRoland Dreier break; 2966aef9ec39SRoland Dreier 2967aef9ec39SRoland Dreier case SRP_OPT_DGID: 2968aef9ec39SRoland Dreier p = match_strdup(args); 2969a20f3a6dSIshai Rabinovitz if (!p) { 2970a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 2971a20f3a6dSIshai Rabinovitz goto out; 2972a20f3a6dSIshai Rabinovitz } 2973aef9ec39SRoland Dreier if (strlen(p) != 32) { 2974e0bda7d8SBart Van Assche pr_warn("bad dest GID parameter '%s'\n", p); 2975ce1823f0SRoland Dreier kfree(p); 2976aef9ec39SRoland Dreier goto out; 2977aef9ec39SRoland Dreier } 2978aef9ec39SRoland Dreier 2979aef9ec39SRoland Dreier for (i = 0; i < 16; ++i) { 2980747fe000SBart Van Assche strlcpy(dgid, p + i * 2, sizeof(dgid)); 2981747fe000SBart Van Assche if (sscanf(dgid, "%hhx", 2982747fe000SBart Van Assche &target->orig_dgid.raw[i]) < 1) { 2983747fe000SBart Van Assche ret = -EINVAL; 2984747fe000SBart Van Assche kfree(p); 2985747fe000SBart Van Assche goto out; 2986747fe000SBart Van Assche } 2987aef9ec39SRoland Dreier } 2988bf17c1c7SRoland Dreier kfree(p); 2989aef9ec39SRoland Dreier break; 2990aef9ec39SRoland Dreier 2991aef9ec39SRoland Dreier case SRP_OPT_PKEY: 2992aef9ec39SRoland Dreier if (match_hex(args, &token)) { 2993e0bda7d8SBart Van Assche pr_warn("bad P_Key parameter '%s'\n", p); 2994aef9ec39SRoland Dreier goto out; 2995aef9ec39SRoland Dreier } 2996747fe000SBart Van Assche target->pkey = cpu_to_be16(token); 2997aef9ec39SRoland Dreier break; 2998aef9ec39SRoland Dreier 2999aef9ec39SRoland Dreier case SRP_OPT_SERVICE_ID: 3000aef9ec39SRoland Dreier p = match_strdup(args); 3001a20f3a6dSIshai Rabinovitz if (!p) { 3002a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3003a20f3a6dSIshai Rabinovitz goto out; 3004a20f3a6dSIshai Rabinovitz } 3005aef9ec39SRoland Dreier target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3006aef9ec39SRoland Dreier kfree(p); 3007aef9ec39SRoland Dreier break; 3008aef9ec39SRoland Dreier 3009aef9ec39SRoland Dreier case SRP_OPT_MAX_SECT: 3010aef9ec39SRoland Dreier if (match_int(args, &token)) { 3011e0bda7d8SBart Van Assche pr_warn("bad max sect parameter '%s'\n", p); 3012aef9ec39SRoland Dreier goto out; 3013aef9ec39SRoland Dreier } 3014aef9ec39SRoland Dreier target->scsi_host->max_sectors = token; 3015aef9ec39SRoland Dreier break; 3016aef9ec39SRoland Dreier 30174d73f95fSBart Van Assche case SRP_OPT_QUEUE_SIZE: 30184d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 30194d73f95fSBart Van Assche pr_warn("bad queue_size parameter '%s'\n", p); 30204d73f95fSBart Van Assche goto out; 30214d73f95fSBart Van Assche } 30224d73f95fSBart Van Assche target->scsi_host->can_queue = token; 30234d73f95fSBart Van Assche target->queue_size = token + SRP_RSP_SQ_SIZE + 30244d73f95fSBart Van Assche SRP_TSK_MGMT_SQ_SIZE; 
30254d73f95fSBart Van Assche if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 30264d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 30274d73f95fSBart Van Assche break; 30284d73f95fSBart Van Assche 302952fb2b50SVu Pham case SRP_OPT_MAX_CMD_PER_LUN: 30304d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 3031e0bda7d8SBart Van Assche pr_warn("bad max cmd_per_lun parameter '%s'\n", 3032e0bda7d8SBart Van Assche p); 303352fb2b50SVu Pham goto out; 303452fb2b50SVu Pham } 30354d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 303652fb2b50SVu Pham break; 303752fb2b50SVu Pham 30380c0450dbSRamachandra K case SRP_OPT_IO_CLASS: 30390c0450dbSRamachandra K if (match_hex(args, &token)) { 3040e0bda7d8SBart Van Assche pr_warn("bad IO class parameter '%s'\n", p); 30410c0450dbSRamachandra K goto out; 30420c0450dbSRamachandra K } 30430c0450dbSRamachandra K if (token != SRP_REV10_IB_IO_CLASS && 30440c0450dbSRamachandra K token != SRP_REV16A_IB_IO_CLASS) { 3045e0bda7d8SBart Van Assche pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", 3046e0bda7d8SBart Van Assche token, SRP_REV10_IB_IO_CLASS, 3047e0bda7d8SBart Van Assche SRP_REV16A_IB_IO_CLASS); 30480c0450dbSRamachandra K goto out; 30490c0450dbSRamachandra K } 30500c0450dbSRamachandra K target->io_class = token; 30510c0450dbSRamachandra K break; 30520c0450dbSRamachandra K 305301cb9bcbSIshai Rabinovitz case SRP_OPT_INITIATOR_EXT: 305401cb9bcbSIshai Rabinovitz p = match_strdup(args); 3055a20f3a6dSIshai Rabinovitz if (!p) { 3056a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3057a20f3a6dSIshai Rabinovitz goto out; 3058a20f3a6dSIshai Rabinovitz } 305901cb9bcbSIshai Rabinovitz target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 306001cb9bcbSIshai Rabinovitz kfree(p); 306101cb9bcbSIshai Rabinovitz break; 306201cb9bcbSIshai Rabinovitz 306349248644SDavid Dillow case SRP_OPT_CMD_SG_ENTRIES: 306449248644SDavid Dillow if (match_int(args, &token) || token < 1 || token > 255) { 3065e0bda7d8SBart Van Assche pr_warn("bad max cmd_sg_entries parameter '%s'\n", 3066e0bda7d8SBart Van Assche p); 306749248644SDavid Dillow goto out; 306849248644SDavid Dillow } 306949248644SDavid Dillow target->cmd_sg_cnt = token; 307049248644SDavid Dillow break; 307149248644SDavid Dillow 3072c07d424dSDavid Dillow case SRP_OPT_ALLOW_EXT_SG: 3073c07d424dSDavid Dillow if (match_int(args, &token)) { 3074e0bda7d8SBart Van Assche pr_warn("bad allow_ext_sg parameter '%s'\n", p); 3075c07d424dSDavid Dillow goto out; 3076c07d424dSDavid Dillow } 3077c07d424dSDavid Dillow target->allow_ext_sg = !!token; 3078c07d424dSDavid Dillow break; 3079c07d424dSDavid Dillow 3080c07d424dSDavid Dillow case SRP_OPT_SG_TABLESIZE: 3081c07d424dSDavid Dillow if (match_int(args, &token) || token < 1 || 3082c07d424dSDavid Dillow token > SCSI_MAX_SG_CHAIN_SEGMENTS) { 3083e0bda7d8SBart Van Assche pr_warn("bad max sg_tablesize parameter '%s'\n", 3084e0bda7d8SBart Van Assche p); 3085c07d424dSDavid Dillow goto out; 3086c07d424dSDavid Dillow } 3087c07d424dSDavid Dillow target->sg_tablesize = token; 3088c07d424dSDavid Dillow break; 3089c07d424dSDavid Dillow 30904b5e5f41SBart Van Assche case SRP_OPT_COMP_VECTOR: 30914b5e5f41SBart Van Assche if (match_int(args, &token) || token < 0) { 30924b5e5f41SBart Van Assche pr_warn("bad comp_vector parameter '%s'\n", p); 30934b5e5f41SBart Van Assche goto out; 30944b5e5f41SBart Van Assche } 30954b5e5f41SBart Van Assche target->comp_vector = token; 30964b5e5f41SBart Van Assche break; 30974b5e5f41SBart Van Assche 30987bb312e4SVu Pham 
case SRP_OPT_TL_RETRY_COUNT: 30997bb312e4SVu Pham if (match_int(args, &token) || token < 2 || token > 7) { 31007bb312e4SVu Pham pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", 31017bb312e4SVu Pham p); 31027bb312e4SVu Pham goto out; 31037bb312e4SVu Pham } 31047bb312e4SVu Pham target->tl_retry_count = token; 31057bb312e4SVu Pham break; 31067bb312e4SVu Pham 3107aef9ec39SRoland Dreier default: 3108e0bda7d8SBart Van Assche pr_warn("unknown parameter or missing value '%s' in target creation request\n", 3109e0bda7d8SBart Van Assche p); 3110aef9ec39SRoland Dreier goto out; 3111aef9ec39SRoland Dreier } 3112aef9ec39SRoland Dreier } 3113aef9ec39SRoland Dreier 3114aef9ec39SRoland Dreier if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 3115aef9ec39SRoland Dreier ret = 0; 3116aef9ec39SRoland Dreier else 3117aef9ec39SRoland Dreier for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 3118aef9ec39SRoland Dreier if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 3119aef9ec39SRoland Dreier !(srp_opt_tokens[i].token & opt_mask)) 3120e0bda7d8SBart Van Assche pr_warn("target creation request is missing parameter '%s'\n", 3121aef9ec39SRoland Dreier srp_opt_tokens[i].pattern); 3122aef9ec39SRoland Dreier 31234d73f95fSBart Van Assche if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue 31244d73f95fSBart Van Assche && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 31254d73f95fSBart Van Assche pr_warn("cmd_per_lun = %d > queue_size = %d\n", 31264d73f95fSBart Van Assche target->scsi_host->cmd_per_lun, 31274d73f95fSBart Van Assche target->scsi_host->can_queue); 31284d73f95fSBart Van Assche 3129aef9ec39SRoland Dreier out: 3130aef9ec39SRoland Dreier kfree(options); 3131aef9ec39SRoland Dreier return ret; 3132aef9ec39SRoland Dreier } 3133aef9ec39SRoland Dreier 3134ee959b00STony Jones static ssize_t srp_create_target(struct device *dev, 3135ee959b00STony Jones struct device_attribute *attr, 3136aef9ec39SRoland Dreier const char *buf, size_t count) 3137aef9ec39SRoland Dreier { 3138aef9ec39SRoland Dreier struct srp_host *host = 3139ee959b00STony Jones container_of(dev, struct srp_host, dev); 3140aef9ec39SRoland Dreier struct Scsi_Host *target_host; 3141aef9ec39SRoland Dreier struct srp_target_port *target; 3142509c07bcSBart Van Assche struct srp_rdma_ch *ch; 3143d1b4289eSBart Van Assche struct srp_device *srp_dev = host->srp_dev; 3144d1b4289eSBart Van Assche struct ib_device *ibdev = srp_dev->dev; 3145d92c0da7SBart Van Assche int ret, node_idx, node, cpu, i; 3146d92c0da7SBart Van Assche bool multich = false; 3147aef9ec39SRoland Dreier 3148aef9ec39SRoland Dreier target_host = scsi_host_alloc(&srp_template, 3149aef9ec39SRoland Dreier sizeof (struct srp_target_port)); 3150aef9ec39SRoland Dreier if (!target_host) 3151aef9ec39SRoland Dreier return -ENOMEM; 3152aef9ec39SRoland Dreier 31533236822bSFUJITA Tomonori target_host->transportt = ib_srp_transport_template; 3154fd1b6c4aSBart Van Assche target_host->max_channel = 0; 3155fd1b6c4aSBart Van Assche target_host->max_id = 1; 3156985aa495SBart Van Assche target_host->max_lun = -1LL; 31573c8edf0eSArne Redlich target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 31585f068992SRoland Dreier 3159aef9ec39SRoland Dreier target = host_to_target(target_host); 3160aef9ec39SRoland Dreier 31610c0450dbSRamachandra K target->io_class = SRP_REV16A_IB_IO_CLASS; 3162aef9ec39SRoland Dreier target->scsi_host = target_host; 3163aef9ec39SRoland Dreier target->srp_host = host; 3164e6bf5f48SJason Gunthorpe target->lkey = host->srp_dev->pd->local_dma_lkey; 
316503f6fb93SBart Van Assche target->global_mr = host->srp_dev->global_mr; 316649248644SDavid Dillow target->cmd_sg_cnt = cmd_sg_entries; 3167c07d424dSDavid Dillow target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3168c07d424dSDavid Dillow target->allow_ext_sg = allow_ext_sg; 31697bb312e4SVu Pham target->tl_retry_count = 7; 31704d73f95fSBart Van Assche target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3171aef9ec39SRoland Dreier 317234aa654eSBart Van Assche /* 317334aa654eSBart Van Assche * Avoid that the SCSI host can be removed by srp_remove_target() 317434aa654eSBart Van Assche * before this function returns. 317534aa654eSBart Van Assche */ 317634aa654eSBart Van Assche scsi_host_get(target->scsi_host); 317734aa654eSBart Van Assche 31782d7091bcSBart Van Assche mutex_lock(&host->add_target_mutex); 31792d7091bcSBart Van Assche 3180aef9ec39SRoland Dreier ret = srp_parse_options(buf, target); 3181aef9ec39SRoland Dreier if (ret) 3182fb49c8bbSBart Van Assche goto out; 3183aef9ec39SRoland Dreier 318477f2c1a4SBart Van Assche ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); 318577f2c1a4SBart Van Assche if (ret) 3186fb49c8bbSBart Van Assche goto out; 318777f2c1a4SBart Van Assche 31884d73f95fSBart Van Assche target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 31894d73f95fSBart Van Assche 319096fc248aSBart Van Assche if (!srp_conn_unique(target->srp_host, target)) { 319196fc248aSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 319296fc248aSBart Van Assche PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", 319396fc248aSBart Van Assche be64_to_cpu(target->id_ext), 319496fc248aSBart Van Assche be64_to_cpu(target->ioc_guid), 319596fc248aSBart Van Assche be64_to_cpu(target->initiator_ext)); 319696fc248aSBart Van Assche ret = -EEXIST; 3197fb49c8bbSBart Van Assche goto out; 319896fc248aSBart Van Assche } 319996fc248aSBart Van Assche 32005cfb1782SBart Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && 3201c07d424dSDavid Dillow target->cmd_sg_cnt < target->sg_tablesize) { 32025cfb1782SBart Van Assche pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3203c07d424dSDavid Dillow target->sg_tablesize = target->cmd_sg_cnt; 3204c07d424dSDavid Dillow } 3205c07d424dSDavid Dillow 3206c07d424dSDavid Dillow target_host->sg_tablesize = target->sg_tablesize; 3207c07d424dSDavid Dillow target->indirect_size = target->sg_tablesize * 3208c07d424dSDavid Dillow sizeof (struct srp_direct_buf); 320949248644SDavid Dillow target->max_iu_len = sizeof (struct srp_cmd) + 321049248644SDavid Dillow sizeof (struct srp_indirect_buf) + 321149248644SDavid Dillow target->cmd_sg_cnt * sizeof (struct srp_direct_buf); 321249248644SDavid Dillow 3213c1120f89SBart Van Assche INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3214ef6c49d8SBart Van Assche INIT_WORK(&target->remove_work, srp_remove_work); 32158f26c9ffSDavid Dillow spin_lock_init(&target->lock); 3216*55ee3ab2SMatan Barak ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL); 32172088ca66SSagi Grimberg if (ret) 3218fb49c8bbSBart Van Assche goto out; 3219d92c0da7SBart Van Assche 3220d92c0da7SBart Van Assche ret = -ENOMEM; 3221d92c0da7SBart Van Assche target->ch_count = max_t(unsigned, num_online_nodes(), 3222d92c0da7SBart Van Assche min(ch_count ? 
: 3223d92c0da7SBart Van Assche min(4 * num_online_nodes(), 3224d92c0da7SBart Van Assche ibdev->num_comp_vectors), 3225d92c0da7SBart Van Assche num_online_cpus())); 3226d92c0da7SBart Van Assche target->ch = kcalloc(target->ch_count, sizeof(*target->ch), 3227d92c0da7SBart Van Assche GFP_KERNEL); 3228d92c0da7SBart Van Assche if (!target->ch) 3229fb49c8bbSBart Van Assche goto out; 3230d92c0da7SBart Van Assche 3231d92c0da7SBart Van Assche node_idx = 0; 3232d92c0da7SBart Van Assche for_each_online_node(node) { 3233d92c0da7SBart Van Assche const int ch_start = (node_idx * target->ch_count / 3234d92c0da7SBart Van Assche num_online_nodes()); 3235d92c0da7SBart Van Assche const int ch_end = ((node_idx + 1) * target->ch_count / 3236d92c0da7SBart Van Assche num_online_nodes()); 3237d92c0da7SBart Van Assche const int cv_start = (node_idx * ibdev->num_comp_vectors / 3238d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3239d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3240d92c0da7SBart Van Assche const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / 3241d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3242d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3243d92c0da7SBart Van Assche int cpu_idx = 0; 3244d92c0da7SBart Van Assche 3245d92c0da7SBart Van Assche for_each_online_cpu(cpu) { 3246d92c0da7SBart Van Assche if (cpu_to_node(cpu) != node) 3247d92c0da7SBart Van Assche continue; 3248d92c0da7SBart Van Assche if (ch_start + cpu_idx >= ch_end) 3249d92c0da7SBart Van Assche continue; 3250d92c0da7SBart Van Assche ch = &target->ch[ch_start + cpu_idx]; 3251d92c0da7SBart Van Assche ch->target = target; 3252d92c0da7SBart Van Assche ch->comp_vector = cv_start == cv_end ? cv_start : 3253d92c0da7SBart Van Assche cv_start + cpu_idx % (cv_end - cv_start); 3254d92c0da7SBart Van Assche spin_lock_init(&ch->lock); 3255d92c0da7SBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 3256d92c0da7SBart Van Assche ret = srp_new_cm_id(ch); 3257d92c0da7SBart Van Assche if (ret) 3258d92c0da7SBart Van Assche goto err_disconnect; 3259aef9ec39SRoland Dreier 3260509c07bcSBart Van Assche ret = srp_create_ch_ib(ch); 3261aef9ec39SRoland Dreier if (ret) 3262d92c0da7SBart Van Assche goto err_disconnect; 3263aef9ec39SRoland Dreier 3264d92c0da7SBart Van Assche ret = srp_alloc_req_data(ch); 32659fe4bcf4SDavid Dillow if (ret) 3266d92c0da7SBart Van Assche goto err_disconnect; 3267aef9ec39SRoland Dreier 3268d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 3269aef9ec39SRoland Dreier if (ret) { 32707aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 3271d92c0da7SBart Van Assche PFX "Connection %d/%d failed\n", 3272d92c0da7SBart Van Assche ch_start + cpu_idx, 3273d92c0da7SBart Van Assche target->ch_count); 3274d92c0da7SBart Van Assche if (node_idx == 0 && cpu_idx == 0) { 3275d92c0da7SBart Van Assche goto err_disconnect; 3276d92c0da7SBart Van Assche } else { 3277d92c0da7SBart Van Assche srp_free_ch_ib(target, ch); 3278d92c0da7SBart Van Assche srp_free_req_data(target, ch); 3279d92c0da7SBart Van Assche target->ch_count = ch - target->ch; 3280c257ea6fSBart Van Assche goto connected; 3281aef9ec39SRoland Dreier } 3282d92c0da7SBart Van Assche } 3283d92c0da7SBart Van Assche 3284d92c0da7SBart Van Assche multich = true; 3285d92c0da7SBart Van Assche cpu_idx++; 3286d92c0da7SBart Van Assche } 3287d92c0da7SBart Van Assche node_idx++; 3288d92c0da7SBart Van Assche } 3289d92c0da7SBart Van Assche 3290c257ea6fSBart Van Assche connected: 3291d92c0da7SBart Van Assche target->scsi_host->nr_hw_queues = 
connected:
        target->scsi_host->nr_hw_queues = target->ch_count;

        ret = srp_add_target(host, target);
        if (ret)
                goto err_disconnect;

        if (target->state != SRP_TARGET_REMOVED) {
                shost_printk(KERN_DEBUG, target->scsi_host, PFX
                             "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
                             be64_to_cpu(target->id_ext),
                             be64_to_cpu(target->ioc_guid),
                             be16_to_cpu(target->pkey),
                             be64_to_cpu(target->service_id),
                             target->sgid.raw, target->orig_dgid.raw);
        }

        ret = count;

out:
        mutex_unlock(&host->add_target_mutex);

        scsi_host_put(target->scsi_host);
        if (ret < 0)
                scsi_host_put(target->scsi_host);

        return ret;

err_disconnect:
        srp_disconnect_target(target);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
                srp_free_req_data(target, ch);
        }

        kfree(target->ch);
        goto out;
}
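/*
 * Usage sketch (illustrative, not part of the driver): srp_create_target()
 * above is the store method behind the write-only "add_target" attribute
 * declared just below.  A new SRP login is started by writing the option
 * string understood by srp_parse_options() to that attribute of the
 * per-port device created in srp_add_port(), e.g.:
 *
 *   echo "id_ext=0002c90300a00001,ioc_guid=0002c90300a00001,pkey=ffff,\
 *         service_id=0002c90300a00001,\
 *         dgid=fe80:0000:0000:0000:0002:c903:00a0:0001" \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 *
 * The HCA name, port number and every identifier value here are made-up
 * placeholders; the accepted keywords are those handled by
 * srp_parse_options().
 */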
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
        struct srp_host *host;

        host = kzalloc(sizeof *host, GFP_KERNEL);
        if (!host)
                return NULL;

        INIT_LIST_HEAD(&host->target_list);
        spin_lock_init(&host->target_lock);
        init_completion(&host->released);
        mutex_init(&host->add_target_mutex);
        host->srp_dev = device;
        host->port = port;

        host->dev.class = &srp_class;
        host->dev.parent = device->dev->dma_device;
        dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

        if (device_register(&host->dev))
                goto free_host;
        if (device_create_file(&host->dev, &dev_attr_add_target))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_ibdev))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_port))
                goto err_class;

        return host;

err_class:
        device_unregister(&host->dev);

free_host:
        kfree(host);

        return NULL;
}
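/*
 * For reference (paths illustrative): each HCA port gets one device of
 * this kind under the "infiniband_srp" class, carrying the three
 * attributes created above:
 *
 *   /sys/class/infiniband_srp/srp-mlx4_0-1/add_target  - write-only login trigger
 *   /sys/class/infiniband_srp/srp-mlx4_0-1/ibdev       - prints host->srp_dev->dev->name
 *   /sys/class/infiniband_srp/srp-mlx4_0-1/port        - prints host->port
 *
 * "mlx4_0" and the port number stand in for whatever dev_set_name()
 * produced for the real HCA.
 */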
static void srp_add_one(struct ib_device *device)
{
        struct srp_device *srp_dev;
        struct ib_device_attr *dev_attr;
        struct srp_host *host;
        int mr_page_shift, p;
        u64 max_pages_per_mr;

        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
        if (!dev_attr)
                return;

        if (ib_query_device(device, dev_attr)) {
                pr_warn("Query device failed for %s\n", device->name);
                goto free_attr;
        }

        srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
        if (!srp_dev)
                goto free_attr;

        srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
                            device->map_phys_fmr && device->unmap_fmr);
        srp_dev->has_fr = (dev_attr->device_cap_flags &
                           IB_DEVICE_MEM_MGT_EXTENSIONS);
        if (!srp_dev->has_fmr && !srp_dev->has_fr)
                dev_warn(&device->dev, "neither FMR nor FR is supported\n");

        srp_dev->use_fast_reg = (srp_dev->has_fr &&
                                 (!srp_dev->has_fmr || prefer_fr));
        srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;

        /*
         * Use the smallest page size supported by the HCA, down to a
         * minimum of 4096 bytes. We're unlikely to build large sglists
         * out of smaller entries.
         */
        mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
        srp_dev->mr_page_size = 1 << mr_page_shift;
        srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
        max_pages_per_mr = dev_attr->max_mr_size;
        do_div(max_pages_per_mr, srp_dev->mr_page_size);
        srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
                                          max_pages_per_mr);
        if (srp_dev->use_fast_reg) {
                srp_dev->max_pages_per_mr =
                        min_t(u32, srp_dev->max_pages_per_mr,
                              dev_attr->max_fast_reg_page_list_len);
        }
        srp_dev->mr_max_size = srp_dev->mr_page_size *
                               srp_dev->max_pages_per_mr;
        pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
                 device->name, mr_page_shift, dev_attr->max_mr_size,
                 dev_attr->max_fast_reg_page_list_len,
                 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

        INIT_LIST_HEAD(&srp_dev->dev_list);

        srp_dev->dev = device;
        srp_dev->pd = ib_alloc_pd(device);
        if (IS_ERR(srp_dev->pd))
                goto free_dev;

        if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
                srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
                                                   IB_ACCESS_LOCAL_WRITE |
                                                   IB_ACCESS_REMOTE_READ |
                                                   IB_ACCESS_REMOTE_WRITE);
                if (IS_ERR(srp_dev->global_mr))
                        goto err_pd;
        } else {
                srp_dev->global_mr = NULL;
        }

        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
        }

        ib_set_client_data(device, &srp_client, srp_dev);

        goto free_attr;

err_pd:
        ib_dealloc_pd(srp_dev->pd);

free_dev:
        kfree(srp_dev);

free_attr:
        kfree(dev_attr);
}
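/*
 * Worked example for the MR sizing above (all numbers are assumptions):
 * with a smallest supported HCA page size of 4 KiB, mr_page_shift = 12 and
 * srp_dev->mr_page_size = 4096.  If the device also reports
 * max_fast_reg_page_list_len = 256, fast registration is in use and
 * neither max_mr_size nor SRP_MAX_PAGES_PER_MR imposes a smaller limit,
 * then max_pages_per_mr = 256 and
 *
 *   mr_max_size = 4096 * 256 = 1 MiB
 *
 * i.e. one memory region can cover at most 1 MiB of a mapped I/O buffer on
 * such a device.
 */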
static void srp_remove_one(struct ib_device *device, void *client_data)
{
        struct srp_device *srp_dev;
        struct srp_host *host, *tmp_host;
        struct srp_target_port *target;

        srp_dev = client_data;
        if (!srp_dev)
                return;

        list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
                device_unregister(&host->dev);
                /*
                 * Wait for the sysfs entry to go away, so that no new
                 * target ports can be created.
                 */
                wait_for_completion(&host->released);

                /*
                 * Remove all target ports.
                 */
                spin_lock(&host->target_lock);
                list_for_each_entry(target, &host->target_list, list)
                        srp_queue_remove_work(target);
                spin_unlock(&host->target_lock);

                /*
                 * Wait for tl_err and target port removal tasks.
                 */
                flush_workqueue(system_long_wq);
                flush_workqueue(srp_remove_wq);

                kfree(host);
        }

        if (srp_dev->global_mr)
                ib_dereg_mr(srp_dev->global_mr);
        ib_dealloc_pd(srp_dev->pd);

        kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
        .has_rport_state        = true,
        .reset_timer_if_blocked = true,
        .reconnect_delay        = &srp_reconnect_delay,
        .fast_io_fail_tmo       = &srp_fast_io_fail_tmo,
        .dev_loss_tmo           = &srp_dev_loss_tmo,
        .reconnect              = srp_rport_reconnect,
        .rport_delete           = srp_rport_delete,
        .terminate_rport_io     = srp_terminate_io,
};
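/*
 * Orientation note for the template above (it refers to code outside this
 * excerpt, so treat the details as assumptions): srp_reconnect_delay,
 * srp_fast_io_fail_tmo and srp_dev_loss_tmo are the module-parameter
 * backed defaults declared earlier in this file, and the SRP transport
 * class applies them to every rport created for this driver.  An
 * administrator could therefore load the module with, for example,
 *
 *   modprobe ib_srp fast_io_fail_tmo=15 dev_loss_tmo=600
 *
 * (arbitrary example values) to change those defaults.
 */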
static int __init srp_init_module(void)
{
        int ret;

        BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

        if (srp_sg_tablesize) {
                pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
                if (!cmd_sg_entries)
                        cmd_sg_entries = srp_sg_tablesize;
        }

        if (!cmd_sg_entries)
                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

        if (cmd_sg_entries > 255) {
                pr_warn("Clamping cmd_sg_entries to 255\n");
                cmd_sg_entries = 255;
        }

        if (!indirect_sg_entries)
                indirect_sg_entries = cmd_sg_entries;
        else if (indirect_sg_entries < cmd_sg_entries) {
                pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
                        cmd_sg_entries);
                indirect_sg_entries = cmd_sg_entries;
        }

        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
                goto out;
        }

        ret = -ENOMEM;
        ib_srp_transport_template =
                srp_attach_transport(&ib_srp_transport_functions);
        if (!ib_srp_transport_template)
                goto destroy_wq;

        ret = class_register(&srp_class);
        if (ret) {
                pr_err("couldn't register class infiniband_srp\n");
                goto release_tr;
        }

        ib_sa_register_client(&srp_sa_client);

        ret = ib_register_client(&srp_client);
        if (ret) {
                pr_err("couldn't register IB client\n");
                goto unreg_sa;
        }

out:
        return ret;

unreg_sa:
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);

release_tr:
        srp_release_transport(ib_srp_transport_template);

destroy_wq:
        destroy_workqueue(srp_remove_wq);
        goto out;
}

static void __exit srp_cleanup_module(void)
{
        ib_unregister_client(&srp_client);
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);
        srp_release_transport(ib_srp_transport_template);
        destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
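/*
 * Example module invocation (illustrative only; the values are arbitrary
 * and every other parameter keeps its default):
 *
 *   modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=128
 *
 * As srp_init_module() above shows, cmd_sg_entries is clamped to 255,
 * indirect_sg_entries is raised to at least cmd_sg_entries, and the
 * deprecated srp_sg_tablesize parameter is only honoured when
 * cmd_sg_entries itself is left unset.
 */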