/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

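/*
 * Illustrative note (not taken from the original sources): because the three
 * timeout parameters above are registered with module_param_cb() using
 * &srp_tmo_ops and mode S_IRUGO | S_IWUSR, they should show up as writable
 * files under /sys/module/ib_srp/parameters/, and every write goes through
 * srp_tmo_set() for validation before the new value is stored. An assumed
 * usage example from user space:
 *
 *   echo 15  > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   echo off > /sys/module/ib_srp/parameters/dev_loss_tmo
 *
 * srp_tmo_get() reports negative values as "off", so writing "off" disables
 * the corresponding timeout.
 */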
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler   = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions even after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist before
 * this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
110722032991SBart Van Assche * 110822032991SBart Van Assche * Return value: 110922032991SBart Van Assche * Either NULL or a pointer to the SCSI command the caller became owner of. 111022032991SBart Van Assche */ 1111509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, 111222032991SBart Van Assche struct srp_request *req, 1113b3fe628dSBart Van Assche struct scsi_device *sdev, 111422032991SBart Van Assche struct scsi_cmnd *scmnd) 1115526b4caaSIshai Rabinovitz { 111694a9174cSBart Van Assche unsigned long flags; 111794a9174cSBart Van Assche 1118509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1119b3fe628dSBart Van Assche if (req->scmnd && 1120b3fe628dSBart Van Assche (!sdev || req->scmnd->device == sdev) && 1121b3fe628dSBart Van Assche (!scmnd || req->scmnd == scmnd)) { 112222032991SBart Van Assche scmnd = req->scmnd; 112322032991SBart Van Assche req->scmnd = NULL; 112422032991SBart Van Assche } else { 112522032991SBart Van Assche scmnd = NULL; 112622032991SBart Van Assche } 1127509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 112822032991SBart Van Assche 112922032991SBart Van Assche return scmnd; 113022032991SBart Van Assche } 113122032991SBart Van Assche 113222032991SBart Van Assche /** 113322032991SBart Van Assche * srp_free_req() - Unmap data and add request to the free request list. 1134509c07bcSBart Van Assche * @ch: SRP RDMA channel. 1135af24663bSBart Van Assche * @req: Request to be freed. 1136af24663bSBart Van Assche * @scmnd: SCSI command associated with @req. 1137af24663bSBart Van Assche * @req_lim_delta: Amount to be added to @target->req_lim. 113822032991SBart Van Assche */ 1139509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1140509c07bcSBart Van Assche struct scsi_cmnd *scmnd, s32 req_lim_delta) 114122032991SBart Van Assche { 114222032991SBart Van Assche unsigned long flags; 114322032991SBart Van Assche 1144509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 114522032991SBart Van Assche 1146509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1147509c07bcSBart Van Assche ch->req_lim += req_lim_delta; 1148509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 1149526b4caaSIshai Rabinovitz } 1150526b4caaSIshai Rabinovitz 1151509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1152509c07bcSBart Van Assche struct scsi_device *sdev, int result) 1153526b4caaSIshai Rabinovitz { 1154509c07bcSBart Van Assche struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 115522032991SBart Van Assche 115622032991SBart Van Assche if (scmnd) { 1157509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 1158ed9b2264SBart Van Assche scmnd->result = result; 115922032991SBart Van Assche scmnd->scsi_done(scmnd); 116022032991SBart Van Assche } 1161526b4caaSIshai Rabinovitz } 1162526b4caaSIshai Rabinovitz 1163ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport) 1164aef9ec39SRoland Dreier { 1165ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1166d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1167b3fe628dSBart Van Assche struct Scsi_Host *shost = target->scsi_host; 1168b3fe628dSBart Van Assche struct scsi_device *sdev; 1169d92c0da7SBart Van Assche int i, j; 1170aef9ec39SRoland Dreier 1171b3fe628dSBart Van Assche /* 1172b3fe628dSBart Van Assche * Invoking srp_terminate_io() while srp_queuecommand() is running 1173b3fe628dSBart Van Assche * is not safe. 
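 * A srp_queuecommand() call racing with this function could be setting up
 * req->scmnd for a request ring entry that srp_finish_req() below is
 * completing at the same time.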
Hence the warning statement below. 1174b3fe628dSBart Van Assche */ 1175b3fe628dSBart Van Assche shost_for_each_device(sdev, shost) 1176b3fe628dSBart Van Assche WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1177b3fe628dSBart Van Assche 1178d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1179d92c0da7SBart Van Assche ch = &target->ch[i]; 1180509c07bcSBart Van Assche 1181d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1182d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1183d92c0da7SBart Van Assche 1184d92c0da7SBart Van Assche srp_finish_req(ch, req, NULL, 1185d92c0da7SBart Van Assche DID_TRANSPORT_FAILFAST << 16); 1186d92c0da7SBart Van Assche } 1187ed9b2264SBart Van Assche } 1188ed9b2264SBart Van Assche } 1189ed9b2264SBart Van Assche 1190ed9b2264SBart Van Assche /* 1191ed9b2264SBart Van Assche * It is up to the caller to ensure that srp_rport_reconnect() calls are 1192ed9b2264SBart Van Assche * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1193ed9b2264SBart Van Assche * srp_reset_device() or srp_reset_host() calls will occur while this function 1194ed9b2264SBart Van Assche * is in progress. One way to realize that is not to call this function 1195ed9b2264SBart Van Assche * directly but to call srp_reconnect_rport() instead since that last function 1196ed9b2264SBart Van Assche * serializes calls of this function via rport->mutex and also blocks 1197ed9b2264SBart Van Assche * srp_queuecommand() calls before invoking this function. 1198ed9b2264SBart Van Assche */ 1199ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport) 1200ed9b2264SBart Van Assche { 1201ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1202d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1203d92c0da7SBart Van Assche int i, j, ret = 0; 1204d92c0da7SBart Van Assche bool multich = false; 120509be70a2SBart Van Assche 1206aef9ec39SRoland Dreier srp_disconnect_target(target); 120734aa654eSBart Van Assche 120834aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 120934aa654eSBart Van Assche return -ENODEV; 121034aa654eSBart Van Assche 1211aef9ec39SRoland Dreier /* 1212c7c4e7ffSBart Van Assche * Now get a new local CM ID so that we avoid confusing the target in 1213c7c4e7ffSBart Van Assche * case things are really fouled up. Doing so also ensures that all CM 1214c7c4e7ffSBart Van Assche * callbacks will have finished before a new QP is allocated. 1215aef9ec39SRoland Dreier */ 1216d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1217d92c0da7SBart Van Assche ch = &target->ch[i]; 1218d92c0da7SBart Van Assche ret += srp_new_cm_id(ch); 1219d92c0da7SBart Van Assche } 1220d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1221d92c0da7SBart Van Assche ch = &target->ch[i]; 1222d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1223d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1224509c07bcSBart Van Assche 1225509c07bcSBart Van Assche srp_finish_req(ch, req, NULL, DID_RESET << 16); 1226536ae14eSBart Van Assche } 1227d92c0da7SBart Van Assche } 1228d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1229d92c0da7SBart Van Assche ch = &target->ch[i]; 12305cfb1782SBart Van Assche /* 12315cfb1782SBart Van Assche * Whether or not creating a new CM ID succeeded, create a new 1232d92c0da7SBart Van Assche * QP. 
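 * (srp_create_ch_ib() replaces both the channel's QP and its completion
 * queues.)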
This guarantees that all completion callback function 1233d92c0da7SBart Van Assche * invocations have finished before request resetting starts. 12345cfb1782SBart Van Assche */ 1235509c07bcSBart Van Assche ret += srp_create_ch_ib(ch); 12365cfb1782SBart Van Assche 1237509c07bcSBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 1238d92c0da7SBart Van Assche for (j = 0; j < target->queue_size; ++j) 1239d92c0da7SBart Van Assche list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1240d92c0da7SBart Van Assche } 12418de9fe3aSBart Van Assche 12428de9fe3aSBart Van Assche target->qp_in_error = false; 12438de9fe3aSBart Van Assche 1244d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1245d92c0da7SBart Van Assche ch = &target->ch[i]; 1246bbac5ccfSBart Van Assche if (ret) 1247d92c0da7SBart Van Assche break; 1248d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 1249d92c0da7SBart Van Assche multich = true; 1250d92c0da7SBart Van Assche } 125109be70a2SBart Van Assche 1252ed9b2264SBart Van Assche if (ret == 0) 1253ed9b2264SBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 1254ed9b2264SBart Van Assche PFX "reconnect succeeded\n"); 1255aef9ec39SRoland Dreier 1256aef9ec39SRoland Dreier return ret; 1257aef9ec39SRoland Dreier } 1258aef9ec39SRoland Dreier 12598f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 12608f26c9ffSDavid Dillow unsigned int dma_len, u32 rkey) 1261f5358a17SRoland Dreier { 12628f26c9ffSDavid Dillow struct srp_direct_buf *desc = state->desc; 12638f26c9ffSDavid Dillow 12643ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 12653ae95da8SBart Van Assche 12668f26c9ffSDavid Dillow desc->va = cpu_to_be64(dma_addr); 12678f26c9ffSDavid Dillow desc->key = cpu_to_be32(rkey); 12688f26c9ffSDavid Dillow desc->len = cpu_to_be32(dma_len); 12698f26c9ffSDavid Dillow 12708f26c9ffSDavid Dillow state->total_len += dma_len; 12718f26c9ffSDavid Dillow state->desc++; 12728f26c9ffSDavid Dillow state->ndesc++; 12738f26c9ffSDavid Dillow } 12748f26c9ffSDavid Dillow 12758f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state, 1276509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12778f26c9ffSDavid Dillow { 1278186fbc66SBart Van Assche struct srp_target_port *target = ch->target; 1279186fbc66SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 12808f26c9ffSDavid Dillow struct ib_pool_fmr *fmr; 1281f5358a17SRoland Dreier u64 io_addr = 0; 12828f26c9ffSDavid Dillow 1283f731ed62SBart Van Assche if (state->fmr.next >= state->fmr.end) 1284f731ed62SBart Van Assche return -ENOMEM; 1285f731ed62SBart Van Assche 128626630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fmr); 128726630e8aSSagi Grimberg 128826630e8aSSagi Grimberg if (state->npages == 0) 128926630e8aSSagi Grimberg return 0; 129026630e8aSSagi Grimberg 129126630e8aSSagi Grimberg if (state->npages == 1 && target->global_mr) { 129226630e8aSSagi Grimberg srp_map_desc(state, state->base_dma_addr, state->dma_len, 129326630e8aSSagi Grimberg target->global_mr->rkey); 129426630e8aSSagi Grimberg goto reset_state; 129526630e8aSSagi Grimberg } 129626630e8aSSagi Grimberg 1297509c07bcSBart Van Assche fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, 12988f26c9ffSDavid Dillow state->npages, io_addr); 12998f26c9ffSDavid Dillow if (IS_ERR(fmr)) 13008f26c9ffSDavid Dillow return PTR_ERR(fmr); 13018f26c9ffSDavid Dillow 1302f731ed62SBart Van Assche *state->fmr.next++ = fmr; 130352ede08fSBart Van Assche state->nmdesc++; 13048f26c9ffSDavid Dillow 1305186fbc66SBart Van Assche 
srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, 1306186fbc66SBart Van Assche state->dma_len, fmr->fmr->rkey); 1307539dde6fSBart Van Assche 130826630e8aSSagi Grimberg reset_state: 130926630e8aSSagi Grimberg state->npages = 0; 131026630e8aSSagi Grimberg state->dma_len = 0; 131126630e8aSSagi Grimberg 13128f26c9ffSDavid Dillow return 0; 13138f26c9ffSDavid Dillow } 13148f26c9ffSDavid Dillow 13155cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state, 1316509c07bcSBart Van Assche struct srp_rdma_ch *ch) 13175cfb1782SBart Van Assche { 1318509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 13195cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 13205cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 1321f7f7aab1SSagi Grimberg struct ib_reg_wr wr; 13225cfb1782SBart Van Assche struct srp_fr_desc *desc; 13235cfb1782SBart Van Assche u32 rkey; 1324f7f7aab1SSagi Grimberg int n, err; 13255cfb1782SBart Van Assche 1326f731ed62SBart Van Assche if (state->fr.next >= state->fr.end) 1327f731ed62SBart Van Assche return -ENOMEM; 1328f731ed62SBart Van Assche 132926630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fast_reg); 133026630e8aSSagi Grimberg 1331f7f7aab1SSagi Grimberg if (state->sg_nents == 0) 133226630e8aSSagi Grimberg return 0; 133326630e8aSSagi Grimberg 1334f7f7aab1SSagi Grimberg if (state->sg_nents == 1 && target->global_mr) { 1335f7f7aab1SSagi Grimberg srp_map_desc(state, sg_dma_address(state->sg), 1336f7f7aab1SSagi Grimberg sg_dma_len(state->sg), 133726630e8aSSagi Grimberg target->global_mr->rkey); 1338f7f7aab1SSagi Grimberg return 1; 133926630e8aSSagi Grimberg } 134026630e8aSSagi Grimberg 1341509c07bcSBart Van Assche desc = srp_fr_pool_get(ch->fr_pool); 13425cfb1782SBart Van Assche if (!desc) 13435cfb1782SBart Van Assche return -ENOMEM; 13445cfb1782SBart Van Assche 13455cfb1782SBart Van Assche rkey = ib_inc_rkey(desc->mr->rkey); 13465cfb1782SBart Van Assche ib_update_fast_reg_key(desc->mr, rkey); 13475cfb1782SBart Van Assche 1348f7f7aab1SSagi Grimberg n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents, 1349f7f7aab1SSagi Grimberg dev->mr_page_size); 1350f7f7aab1SSagi Grimberg if (unlikely(n < 0)) 1351f7f7aab1SSagi Grimberg return n; 13525cfb1782SBart Van Assche 1353f7f7aab1SSagi Grimberg wr.wr.next = NULL; 1354f7f7aab1SSagi Grimberg wr.wr.opcode = IB_WR_REG_MR; 1355e622f2f4SChristoph Hellwig wr.wr.wr_id = FAST_REG_WR_ID_MASK; 1356f7f7aab1SSagi Grimberg wr.wr.num_sge = 0; 1357f7f7aab1SSagi Grimberg wr.wr.send_flags = 0; 1358f7f7aab1SSagi Grimberg wr.mr = desc->mr; 1359f7f7aab1SSagi Grimberg wr.key = desc->mr->rkey; 1360f7f7aab1SSagi Grimberg wr.access = (IB_ACCESS_LOCAL_WRITE | 13615cfb1782SBart Van Assche IB_ACCESS_REMOTE_READ | 13625cfb1782SBart Van Assche IB_ACCESS_REMOTE_WRITE); 13635cfb1782SBart Van Assche 1364f731ed62SBart Van Assche *state->fr.next++ = desc; 13655cfb1782SBart Van Assche state->nmdesc++; 13665cfb1782SBart Van Assche 1367f7f7aab1SSagi Grimberg srp_map_desc(state, desc->mr->iova, 1368f7f7aab1SSagi Grimberg desc->mr->length, desc->mr->rkey); 13695cfb1782SBart Van Assche 137026630e8aSSagi Grimberg err = ib_post_send(ch->qp, &wr.wr, &bad_wr); 1371f7f7aab1SSagi Grimberg if (unlikely(err)) 137226630e8aSSagi Grimberg return err; 137326630e8aSSagi Grimberg 1374f7f7aab1SSagi Grimberg return n; 13755cfb1782SBart Van Assche } 13765cfb1782SBart Van Assche 13778f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state, 1378509c07bcSBart Van Assche struct srp_rdma_ch *ch, 13793ae95da8SBart 
Van Assche struct scatterlist *sg, int sg_index) 13808f26c9ffSDavid Dillow { 1381509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 138205321937SGreg Kroah-Hartman struct srp_device *dev = target->srp_host->srp_dev; 138385507bccSRalph Campbell struct ib_device *ibdev = dev->dev; 13848f26c9ffSDavid Dillow dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1385bb350d1dSFUJITA Tomonori unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 13863ae95da8SBart Van Assche unsigned int len = 0; 13878f26c9ffSDavid Dillow int ret; 138885507bccSRalph Campbell 13893ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 1390f5358a17SRoland Dreier 13918f26c9ffSDavid Dillow while (dma_len) { 13925cfb1782SBart Van Assche unsigned offset = dma_addr & ~dev->mr_page_mask; 13935cfb1782SBart Van Assche if (state->npages == dev->max_pages_per_mr || offset != 0) { 1394f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 13958f26c9ffSDavid Dillow if (ret) 13968f26c9ffSDavid Dillow return ret; 139785507bccSRalph Campbell } 1398f5358a17SRoland Dreier 13995cfb1782SBart Van Assche len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); 14008f26c9ffSDavid Dillow 14018f26c9ffSDavid Dillow if (!state->npages) 14028f26c9ffSDavid Dillow state->base_dma_addr = dma_addr; 14035cfb1782SBart Van Assche state->pages[state->npages++] = dma_addr & dev->mr_page_mask; 140452ede08fSBart Van Assche state->dma_len += len; 14058f26c9ffSDavid Dillow dma_addr += len; 14068f26c9ffSDavid Dillow dma_len -= len; 1407f5358a17SRoland Dreier } 1408f5358a17SRoland Dreier 14095cfb1782SBart Van Assche /* 14105cfb1782SBart Van Assche * If the last entry of the MR wasn't a full page, then we need to 14118f26c9ffSDavid Dillow * close it out and start a new one -- we can only merge at page 14128f26c9ffSDavid Dillow * boundries. 
14138f26c9ffSDavid Dillow */ 1414f5358a17SRoland Dreier ret = 0; 14150e0d3a48SBart Van Assche if (len != dev->mr_page_size) 1416f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 1417f5358a17SRoland Dreier return ret; 1418f5358a17SRoland Dreier } 1419f5358a17SRoland Dreier 142026630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch, 142126630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 142226630e8aSSagi Grimberg int count) 142326630e8aSSagi Grimberg { 142426630e8aSSagi Grimberg struct scatterlist *sg; 142526630e8aSSagi Grimberg int i, ret; 142626630e8aSSagi Grimberg 142726630e8aSSagi Grimberg state->desc = req->indirect_desc; 142826630e8aSSagi Grimberg state->pages = req->map_page; 142926630e8aSSagi Grimberg state->fmr.next = req->fmr_list; 143026630e8aSSagi Grimberg state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt; 143126630e8aSSagi Grimberg 143226630e8aSSagi Grimberg for_each_sg(scat, sg, count, i) { 143326630e8aSSagi Grimberg ret = srp_map_sg_entry(state, ch, sg, i); 143426630e8aSSagi Grimberg if (ret) 143526630e8aSSagi Grimberg return ret; 143626630e8aSSagi Grimberg } 143726630e8aSSagi Grimberg 1438f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 143926630e8aSSagi Grimberg if (ret) 144026630e8aSSagi Grimberg return ret; 144126630e8aSSagi Grimberg 144226630e8aSSagi Grimberg req->nmdesc = state->nmdesc; 144326630e8aSSagi Grimberg 144426630e8aSSagi Grimberg return 0; 144526630e8aSSagi Grimberg } 144626630e8aSSagi Grimberg 144726630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, 144826630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 144926630e8aSSagi Grimberg int count) 145026630e8aSSagi Grimberg { 145126630e8aSSagi Grimberg state->desc = req->indirect_desc; 1452f7f7aab1SSagi Grimberg state->fr.next = req->fr_list; 1453f7f7aab1SSagi Grimberg state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; 1454f7f7aab1SSagi Grimberg state->sg = scat; 1455f7f7aab1SSagi Grimberg state->sg_nents = scsi_sg_count(req->scmnd); 145626630e8aSSagi Grimberg 1457f7f7aab1SSagi Grimberg while (state->sg_nents) { 1458f7f7aab1SSagi Grimberg int i, n; 1459f7f7aab1SSagi Grimberg 1460f7f7aab1SSagi Grimberg n = srp_map_finish_fr(state, ch); 1461f7f7aab1SSagi Grimberg if (unlikely(n < 0)) 1462f7f7aab1SSagi Grimberg return n; 1463f7f7aab1SSagi Grimberg 1464f7f7aab1SSagi Grimberg state->sg_nents -= n; 1465f7f7aab1SSagi Grimberg for (i = 0; i < n; i++) 1466f7f7aab1SSagi Grimberg state->sg = sg_next(state->sg); 146726630e8aSSagi Grimberg } 146826630e8aSSagi Grimberg 146926630e8aSSagi Grimberg req->nmdesc = state->nmdesc; 147026630e8aSSagi Grimberg 147126630e8aSSagi Grimberg return 0; 147226630e8aSSagi Grimberg } 147326630e8aSSagi Grimberg 147426630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, 1475509c07bcSBart Van Assche struct srp_request *req, struct scatterlist *scat, 1476509c07bcSBart Van Assche int count) 147776bc1e1dSBart Van Assche { 1478509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 147976bc1e1dSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 148076bc1e1dSBart Van Assche struct scatterlist *sg; 148126630e8aSSagi Grimberg int i; 148276bc1e1dSBart Van Assche 148376bc1e1dSBart Van Assche state->desc = req->indirect_desc; 14843ae95da8SBart Van Assche for_each_sg(scat, sg, count, i) { 14853ae95da8SBart Van Assche srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 
148603f6fb93SBart Van Assche ib_sg_dma_len(dev->dev, sg), 148703f6fb93SBart Van Assche target->global_mr->rkey); 14883ae95da8SBart Van Assche } 148976bc1e1dSBart Van Assche 149052ede08fSBart Van Assche req->nmdesc = state->nmdesc; 14915cfb1782SBart Van Assche 149226630e8aSSagi Grimberg return 0; 149376bc1e1dSBart Van Assche } 149476bc1e1dSBart Van Assche 1495330179f2SBart Van Assche /* 1496330179f2SBart Van Assche * Register the indirect data buffer descriptor with the HCA. 1497330179f2SBart Van Assche * 1498330179f2SBart Van Assche * Note: since the indirect data buffer descriptor has been allocated with 1499330179f2SBart Van Assche * kmalloc() it is guaranteed that this buffer is a physically contiguous 1500330179f2SBart Van Assche * memory buffer. 1501330179f2SBart Van Assche */ 1502330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, 1503330179f2SBart Van Assche void **next_mr, void **end_mr, u32 idb_len, 1504330179f2SBart Van Assche __be32 *idb_rkey) 1505330179f2SBart Van Assche { 1506330179f2SBart Van Assche struct srp_target_port *target = ch->target; 1507330179f2SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 1508330179f2SBart Van Assche struct srp_map_state state; 1509330179f2SBart Van Assche struct srp_direct_buf idb_desc; 1510330179f2SBart Van Assche u64 idb_pages[1]; 1511f7f7aab1SSagi Grimberg struct scatterlist idb_sg[1]; 1512330179f2SBart Van Assche int ret; 1513330179f2SBart Van Assche 1514330179f2SBart Van Assche memset(&state, 0, sizeof(state)); 1515330179f2SBart Van Assche memset(&idb_desc, 0, sizeof(idb_desc)); 1516330179f2SBart Van Assche state.gen.next = next_mr; 1517330179f2SBart Van Assche state.gen.end = end_mr; 1518330179f2SBart Van Assche state.desc = &idb_desc; 1519f7f7aab1SSagi Grimberg state.base_dma_addr = req->indirect_dma_addr; 1520f7f7aab1SSagi Grimberg state.dma_len = idb_len; 1521f7f7aab1SSagi Grimberg 1522f7f7aab1SSagi Grimberg if (dev->use_fast_reg) { 1523f7f7aab1SSagi Grimberg state.sg = idb_sg; 1524f7f7aab1SSagi Grimberg state.sg_nents = 1; 1525f7f7aab1SSagi Grimberg sg_set_buf(idb_sg, req->indirect_desc, idb_len); 1526f7f7aab1SSagi Grimberg idb_sg->dma_address = req->indirect_dma_addr; /* hack! 
*/ 1527f7f7aab1SSagi Grimberg ret = srp_map_finish_fr(&state, ch); 1528f7f7aab1SSagi Grimberg if (ret < 0) 1529f7f7aab1SSagi Grimberg return ret; 1530f7f7aab1SSagi Grimberg } else if (dev->use_fmr) { 1531330179f2SBart Van Assche state.pages = idb_pages; 1532330179f2SBart Van Assche state.pages[0] = (req->indirect_dma_addr & 1533330179f2SBart Van Assche dev->mr_page_mask); 1534330179f2SBart Van Assche state.npages = 1; 1535f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(&state, ch); 1536330179f2SBart Van Assche if (ret < 0) 1537f7f7aab1SSagi Grimberg return ret; 1538f7f7aab1SSagi Grimberg } else { 1539f7f7aab1SSagi Grimberg return -EINVAL; 1540f7f7aab1SSagi Grimberg } 1541330179f2SBart Van Assche 1542330179f2SBart Van Assche *idb_rkey = idb_desc.key; 1543330179f2SBart Van Assche 1544f7f7aab1SSagi Grimberg return 0; 1545330179f2SBart Van Assche } 1546330179f2SBart Van Assche 1547509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1548aef9ec39SRoland Dreier struct srp_request *req) 1549aef9ec39SRoland Dreier { 1550509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 155176bc1e1dSBart Van Assche struct scatterlist *scat; 1552aef9ec39SRoland Dreier struct srp_cmd *cmd = req->cmd->buf; 1553330179f2SBart Van Assche int len, nents, count, ret; 155485507bccSRalph Campbell struct srp_device *dev; 155585507bccSRalph Campbell struct ib_device *ibdev; 15568f26c9ffSDavid Dillow struct srp_map_state state; 15578f26c9ffSDavid Dillow struct srp_indirect_buf *indirect_hdr; 1558330179f2SBart Van Assche u32 idb_len, table_len; 1559330179f2SBart Van Assche __be32 idb_rkey; 15608f26c9ffSDavid Dillow u8 fmt; 1561aef9ec39SRoland Dreier 1562bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 1563aef9ec39SRoland Dreier return sizeof (struct srp_cmd); 1564aef9ec39SRoland Dreier 1565aef9ec39SRoland Dreier if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1566aef9ec39SRoland Dreier scmnd->sc_data_direction != DMA_TO_DEVICE) { 15677aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 15687aa54bd7SDavid Dillow PFX "Unhandled data direction %d\n", 1569aef9ec39SRoland Dreier scmnd->sc_data_direction); 1570aef9ec39SRoland Dreier return -EINVAL; 1571aef9ec39SRoland Dreier } 1572aef9ec39SRoland Dreier 1573bb350d1dSFUJITA Tomonori nents = scsi_sg_count(scmnd); 1574bb350d1dSFUJITA Tomonori scat = scsi_sglist(scmnd); 1575aef9ec39SRoland Dreier 157605321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev; 157785507bccSRalph Campbell ibdev = dev->dev; 157885507bccSRalph Campbell 157985507bccSRalph Campbell count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 15808f26c9ffSDavid Dillow if (unlikely(count == 0)) 15818f26c9ffSDavid Dillow return -EIO; 1582aef9ec39SRoland Dreier 1583aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_DIRECT; 1584f5358a17SRoland Dreier len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1585f5358a17SRoland Dreier 158603f6fb93SBart Van Assche if (count == 1 && target->global_mr) { 1587f5358a17SRoland Dreier /* 1588f5358a17SRoland Dreier * The midlayer only generated a single gather/scatter 1589f5358a17SRoland Dreier * entry, or DMA mapping coalesced everything to a 1590f5358a17SRoland Dreier * single entry. So a direct descriptor along with 1591f5358a17SRoland Dreier * the DMA MR suffices. 
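 * The descriptor filled in below then simply carries the bus address and
 * DMA length of that single entry together with the global rkey, so no
 * per-command memory registration is needed in this case.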
1592f5358a17SRoland Dreier */ 1593f5358a17SRoland Dreier struct srp_direct_buf *buf = (void *) cmd->add_data; 1594aef9ec39SRoland Dreier 159585507bccSRalph Campbell buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 159603f6fb93SBart Van Assche buf->key = cpu_to_be32(target->global_mr->rkey); 159785507bccSRalph Campbell buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 15988f26c9ffSDavid Dillow 159952ede08fSBart Van Assche req->nmdesc = 0; 16008f26c9ffSDavid Dillow goto map_complete; 16018f26c9ffSDavid Dillow } 16028f26c9ffSDavid Dillow 16035cfb1782SBart Van Assche /* 16045cfb1782SBart Van Assche * We have more than one scatter/gather entry, so build our indirect 16055cfb1782SBart Van Assche * descriptor table, trying to merge as many entries as we can. 1606f5358a17SRoland Dreier */ 16078f26c9ffSDavid Dillow indirect_hdr = (void *) cmd->add_data; 16088f26c9ffSDavid Dillow 1609c07d424dSDavid Dillow ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1610c07d424dSDavid Dillow target->indirect_size, DMA_TO_DEVICE); 1611c07d424dSDavid Dillow 16128f26c9ffSDavid Dillow memset(&state, 0, sizeof(state)); 161326630e8aSSagi Grimberg if (dev->use_fast_reg) 161426630e8aSSagi Grimberg srp_map_sg_fr(&state, ch, req, scat, count); 161526630e8aSSagi Grimberg else if (dev->use_fmr) 161626630e8aSSagi Grimberg srp_map_sg_fmr(&state, ch, req, scat, count); 161726630e8aSSagi Grimberg else 161826630e8aSSagi Grimberg srp_map_sg_dma(&state, ch, req, scat, count); 16198f26c9ffSDavid Dillow 1620c07d424dSDavid Dillow /* We've mapped the request, now pull as much of the indirect 1621c07d424dSDavid Dillow * descriptor table as we can into the command buffer. If this 1622c07d424dSDavid Dillow * target is not using an external indirect table, we are 1623c07d424dSDavid Dillow * guaranteed to fit into the command, as the SCSI layer won't 1624c07d424dSDavid Dillow * give us more S/G entries than we allow. 16258f26c9ffSDavid Dillow */ 16268f26c9ffSDavid Dillow if (state.ndesc == 1) { 16275cfb1782SBart Van Assche /* 16285cfb1782SBart Van Assche * Memory registration collapsed the sg-list into one entry, 16298f26c9ffSDavid Dillow * so use a direct descriptor. 
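 * (req->indirect_desc[0] already holds the registered address, rkey and
 * length, so it is copied into the command verbatim below.)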
16308f26c9ffSDavid Dillow */ 16318f26c9ffSDavid Dillow struct srp_direct_buf *buf = (void *) cmd->add_data; 16328f26c9ffSDavid Dillow 1633c07d424dSDavid Dillow *buf = req->indirect_desc[0]; 16348f26c9ffSDavid Dillow goto map_complete; 16358f26c9ffSDavid Dillow } 16368f26c9ffSDavid Dillow 1637c07d424dSDavid Dillow if (unlikely(target->cmd_sg_cnt < state.ndesc && 1638c07d424dSDavid Dillow !target->allow_ext_sg)) { 1639c07d424dSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1640c07d424dSDavid Dillow "Could not fit S/G list into SRP_CMD\n"); 1641c07d424dSDavid Dillow return -EIO; 1642c07d424dSDavid Dillow } 1643c07d424dSDavid Dillow 1644c07d424dSDavid Dillow count = min(state.ndesc, target->cmd_sg_cnt); 16458f26c9ffSDavid Dillow table_len = state.ndesc * sizeof (struct srp_direct_buf); 1646330179f2SBart Van Assche idb_len = sizeof(struct srp_indirect_buf) + table_len; 1647aef9ec39SRoland Dreier 1648aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_INDIRECT; 16498f26c9ffSDavid Dillow len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); 1650c07d424dSDavid Dillow len += count * sizeof (struct srp_direct_buf); 1651f5358a17SRoland Dreier 1652c07d424dSDavid Dillow memcpy(indirect_hdr->desc_list, req->indirect_desc, 1653c07d424dSDavid Dillow count * sizeof (struct srp_direct_buf)); 165485507bccSRalph Campbell 165503f6fb93SBart Van Assche if (!target->global_mr) { 1656330179f2SBart Van Assche ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1657330179f2SBart Van Assche idb_len, &idb_rkey); 1658330179f2SBart Van Assche if (ret < 0) 1659330179f2SBart Van Assche return ret; 1660330179f2SBart Van Assche req->nmdesc++; 1661330179f2SBart Van Assche } else { 166203f6fb93SBart Van Assche idb_rkey = target->global_mr->rkey; 1663330179f2SBart Van Assche } 1664330179f2SBart Van Assche 1665c07d424dSDavid Dillow indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1666330179f2SBart Van Assche indirect_hdr->table_desc.key = idb_rkey; 16678f26c9ffSDavid Dillow indirect_hdr->table_desc.len = cpu_to_be32(table_len); 16688f26c9ffSDavid Dillow indirect_hdr->len = cpu_to_be32(state.total_len); 1669aef9ec39SRoland Dreier 1670aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1671c07d424dSDavid Dillow cmd->data_out_desc_cnt = count; 1672aef9ec39SRoland Dreier else 1673c07d424dSDavid Dillow cmd->data_in_desc_cnt = count; 1674c07d424dSDavid Dillow 1675c07d424dSDavid Dillow ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1676c07d424dSDavid Dillow DMA_TO_DEVICE); 1677aef9ec39SRoland Dreier 16788f26c9ffSDavid Dillow map_complete: 1679aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1680aef9ec39SRoland Dreier cmd->buf_fmt = fmt << 4; 1681aef9ec39SRoland Dreier else 1682aef9ec39SRoland Dreier cmd->buf_fmt = fmt; 1683aef9ec39SRoland Dreier 1684aef9ec39SRoland Dreier return len; 1685aef9ec39SRoland Dreier } 1686aef9ec39SRoland Dreier 168705a1d750SDavid Dillow /* 168876c75b25SBart Van Assche * Return an IU and possible credit to the free pool 168976c75b25SBart Van Assche */ 1690509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, 169176c75b25SBart Van Assche enum srp_iu_type iu_type) 169276c75b25SBart Van Assche { 169376c75b25SBart Van Assche unsigned long flags; 169476c75b25SBart Van Assche 1695509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1696509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 169776c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) 1698509c07bcSBart 
Van Assche ++ch->req_lim; 1699509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 170076c75b25SBart Van Assche } 170176c75b25SBart Van Assche 170276c75b25SBart Van Assche /* 1703509c07bcSBart Van Assche * Must be called with ch->lock held to protect req_lim and free_tx. 1704e9684678SBart Van Assche * If IU is not sent, it must be returned using srp_put_tx_iu(). 170505a1d750SDavid Dillow * 170605a1d750SDavid Dillow * Note: 170705a1d750SDavid Dillow * An upper limit for the number of allocated information units for each 170805a1d750SDavid Dillow * request type is: 170905a1d750SDavid Dillow * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 171005a1d750SDavid Dillow * more than Scsi_Host.can_queue requests. 171105a1d750SDavid Dillow * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 171205a1d750SDavid Dillow * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 171305a1d750SDavid Dillow * one unanswered SRP request to an initiator. 171405a1d750SDavid Dillow */ 1715509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 171605a1d750SDavid Dillow enum srp_iu_type iu_type) 171705a1d750SDavid Dillow { 1718509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 171905a1d750SDavid Dillow s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 172005a1d750SDavid Dillow struct srp_iu *iu; 172105a1d750SDavid Dillow 1722509c07bcSBart Van Assche srp_send_completion(ch->send_cq, ch); 172305a1d750SDavid Dillow 1724509c07bcSBart Van Assche if (list_empty(&ch->free_tx)) 172505a1d750SDavid Dillow return NULL; 172605a1d750SDavid Dillow 172705a1d750SDavid Dillow /* Initiator responses to target requests do not consume credits */ 172876c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) { 1729509c07bcSBart Van Assche if (ch->req_lim <= rsv) { 173005a1d750SDavid Dillow ++target->zero_req_lim; 173105a1d750SDavid Dillow return NULL; 173205a1d750SDavid Dillow } 173305a1d750SDavid Dillow 1734509c07bcSBart Van Assche --ch->req_lim; 173576c75b25SBart Van Assche } 173676c75b25SBart Van Assche 1737509c07bcSBart Van Assche iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 173876c75b25SBart Van Assche list_del(&iu->list); 173905a1d750SDavid Dillow return iu; 174005a1d750SDavid Dillow } 174105a1d750SDavid Dillow 1742509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 174305a1d750SDavid Dillow { 1744509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 174505a1d750SDavid Dillow struct ib_sge list; 174605a1d750SDavid Dillow struct ib_send_wr wr, *bad_wr; 174705a1d750SDavid Dillow 174805a1d750SDavid Dillow list.addr = iu->dma; 174905a1d750SDavid Dillow list.length = len; 17509af76271SDavid Dillow list.lkey = target->lkey; 175105a1d750SDavid Dillow 175205a1d750SDavid Dillow wr.next = NULL; 1753dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 175405a1d750SDavid Dillow wr.sg_list = &list; 175505a1d750SDavid Dillow wr.num_sge = 1; 175605a1d750SDavid Dillow wr.opcode = IB_WR_SEND; 175705a1d750SDavid Dillow wr.send_flags = IB_SEND_SIGNALED; 175805a1d750SDavid Dillow 1759509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 176005a1d750SDavid Dillow } 176105a1d750SDavid Dillow 1762509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1763c996bb47SBart Van Assche { 1764509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1765c996bb47SBart Van Assche struct ib_recv_wr wr, *bad_wr; 
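	/*
	 * As in srp_post_send() above, the IU pointer is stashed in wr.wr_id
	 * so that srp_handle_recv() can recover it from wc->wr_id when the
	 * receive completes.
	 */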
1766dcb4cb85SBart Van Assche struct ib_sge list; 1767c996bb47SBart Van Assche 1768c996bb47SBart Van Assche list.addr = iu->dma; 1769c996bb47SBart Van Assche list.length = iu->size; 17709af76271SDavid Dillow list.lkey = target->lkey; 1771c996bb47SBart Van Assche 1772c996bb47SBart Van Assche wr.next = NULL; 1773dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 1774c996bb47SBart Van Assche wr.sg_list = &list; 1775c996bb47SBart Van Assche wr.num_sge = 1; 1776c996bb47SBart Van Assche 1777509c07bcSBart Van Assche return ib_post_recv(ch->qp, &wr, &bad_wr); 1778c996bb47SBart Van Assche } 1779c996bb47SBart Van Assche 1780509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1781aef9ec39SRoland Dreier { 1782509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1783aef9ec39SRoland Dreier struct srp_request *req; 1784aef9ec39SRoland Dreier struct scsi_cmnd *scmnd; 1785aef9ec39SRoland Dreier unsigned long flags; 1786aef9ec39SRoland Dreier 1787aef9ec39SRoland Dreier if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1788509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1789509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1790509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 179194a9174cSBart Van Assche 1792509c07bcSBart Van Assche ch->tsk_mgmt_status = -1; 1793f8b6e31eSDavid Dillow if (be32_to_cpu(rsp->resp_data_len) >= 4) 1794509c07bcSBart Van Assche ch->tsk_mgmt_status = rsp->data[3]; 1795509c07bcSBart Van Assche complete(&ch->tsk_mgmt_done); 1796aef9ec39SRoland Dreier } else { 179777f2c1a4SBart Van Assche scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 179877f2c1a4SBart Van Assche if (scmnd) { 179977f2c1a4SBart Van Assche req = (void *)scmnd->host_scribble; 180077f2c1a4SBart Van Assche scmnd = srp_claim_req(ch, req, NULL, scmnd); 180177f2c1a4SBart Van Assche } 180222032991SBart Van Assche if (!scmnd) { 18037aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1804d92c0da7SBart Van Assche "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1805d92c0da7SBart Van Assche rsp->tag, ch - target->ch, ch->qp->qp_num); 180622032991SBart Van Assche 1807509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1808509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1809509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 181022032991SBart Van Assche 181122032991SBart Van Assche return; 181222032991SBart Van Assche } 1813aef9ec39SRoland Dreier scmnd->result = rsp->status; 1814aef9ec39SRoland Dreier 1815aef9ec39SRoland Dreier if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1816aef9ec39SRoland Dreier memcpy(scmnd->sense_buffer, rsp->data + 1817aef9ec39SRoland Dreier be32_to_cpu(rsp->resp_data_len), 1818aef9ec39SRoland Dreier min_t(int, be32_to_cpu(rsp->sense_data_len), 1819aef9ec39SRoland Dreier SCSI_SENSE_BUFFERSIZE)); 1820aef9ec39SRoland Dreier } 1821aef9ec39SRoland Dreier 1822e714531aSBart Van Assche if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1823bb350d1dSFUJITA Tomonori scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1824e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) 1825e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); 1826e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1827e714531aSBart Van Assche scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1828e714531aSBart Van Assche else if (unlikely(rsp->flags & 
SRP_RSP_FLAG_DOOVER)) 1829e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1830aef9ec39SRoland Dreier 1831509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 183222032991SBart Van Assche be32_to_cpu(rsp->req_lim_delta)); 183322032991SBart Van Assche 1834f8b6e31eSDavid Dillow scmnd->host_scribble = NULL; 1835aef9ec39SRoland Dreier scmnd->scsi_done(scmnd); 1836aef9ec39SRoland Dreier } 1837aef9ec39SRoland Dreier } 1838aef9ec39SRoland Dreier 1839509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1840bb12588aSDavid Dillow void *rsp, int len) 1841bb12588aSDavid Dillow { 1842509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 184376c75b25SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1844bb12588aSDavid Dillow unsigned long flags; 1845bb12588aSDavid Dillow struct srp_iu *iu; 184676c75b25SBart Van Assche int err; 1847bb12588aSDavid Dillow 1848509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1849509c07bcSBart Van Assche ch->req_lim += req_delta; 1850509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 1851509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 185276c75b25SBart Van Assche 1853bb12588aSDavid Dillow if (!iu) { 1854bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1855bb12588aSDavid Dillow "no IU available to send response\n"); 185676c75b25SBart Van Assche return 1; 1857bb12588aSDavid Dillow } 1858bb12588aSDavid Dillow 1859bb12588aSDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 1860bb12588aSDavid Dillow memcpy(iu->buf, rsp, len); 1861bb12588aSDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1862bb12588aSDavid Dillow 1863509c07bcSBart Van Assche err = srp_post_send(ch, iu, len); 186476c75b25SBart Van Assche if (err) { 1865bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1866bb12588aSDavid Dillow "unable to post response: %d\n", err); 1867509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_RSP); 186876c75b25SBart Van Assche } 1869bb12588aSDavid Dillow 1870bb12588aSDavid Dillow return err; 1871bb12588aSDavid Dillow } 1872bb12588aSDavid Dillow 1873509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch, 1874bb12588aSDavid Dillow struct srp_cred_req *req) 1875bb12588aSDavid Dillow { 1876bb12588aSDavid Dillow struct srp_cred_rsp rsp = { 1877bb12588aSDavid Dillow .opcode = SRP_CRED_RSP, 1878bb12588aSDavid Dillow .tag = req->tag, 1879bb12588aSDavid Dillow }; 1880bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1881bb12588aSDavid Dillow 1882509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1883509c07bcSBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, PFX 1884bb12588aSDavid Dillow "problems processing SRP_CRED_REQ\n"); 1885bb12588aSDavid Dillow } 1886bb12588aSDavid Dillow 1887509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch, 1888bb12588aSDavid Dillow struct srp_aer_req *req) 1889bb12588aSDavid Dillow { 1890509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1891bb12588aSDavid Dillow struct srp_aer_rsp rsp = { 1892bb12588aSDavid Dillow .opcode = SRP_AER_RSP, 1893bb12588aSDavid Dillow .tag = req->tag, 1894bb12588aSDavid Dillow }; 1895bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1896bb12588aSDavid Dillow 1897bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1898985aa495SBart 
Van Assche "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 1899bb12588aSDavid Dillow 1900509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1901bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1902bb12588aSDavid Dillow "problems processing SRP_AER_REQ\n"); 1903bb12588aSDavid Dillow } 1904bb12588aSDavid Dillow 1905509c07bcSBart Van Assche static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) 1906aef9ec39SRoland Dreier { 1907509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1908dcb4cb85SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1909737b94ebSRoland Dreier struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1910c996bb47SBart Van Assche int res; 1911aef9ec39SRoland Dreier u8 opcode; 1912aef9ec39SRoland Dreier 1913509c07bcSBart Van Assche ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 191485507bccSRalph Campbell DMA_FROM_DEVICE); 1915aef9ec39SRoland Dreier 1916aef9ec39SRoland Dreier opcode = *(u8 *) iu->buf; 1917aef9ec39SRoland Dreier 1918aef9ec39SRoland Dreier if (0) { 19197aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 19207aa54bd7SDavid Dillow PFX "recv completion, opcode 0x%02x\n", opcode); 19217a700811SBart Van Assche print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 19227a700811SBart Van Assche iu->buf, wc->byte_len, true); 1923aef9ec39SRoland Dreier } 1924aef9ec39SRoland Dreier 1925aef9ec39SRoland Dreier switch (opcode) { 1926aef9ec39SRoland Dreier case SRP_RSP: 1927509c07bcSBart Van Assche srp_process_rsp(ch, iu->buf); 1928aef9ec39SRoland Dreier break; 1929aef9ec39SRoland Dreier 1930bb12588aSDavid Dillow case SRP_CRED_REQ: 1931509c07bcSBart Van Assche srp_process_cred_req(ch, iu->buf); 1932bb12588aSDavid Dillow break; 1933bb12588aSDavid Dillow 1934bb12588aSDavid Dillow case SRP_AER_REQ: 1935509c07bcSBart Van Assche srp_process_aer_req(ch, iu->buf); 1936bb12588aSDavid Dillow break; 1937bb12588aSDavid Dillow 1938aef9ec39SRoland Dreier case SRP_T_LOGOUT: 1939aef9ec39SRoland Dreier /* XXX Handle target logout */ 19407aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 19417aa54bd7SDavid Dillow PFX "Got target logout request\n"); 1942aef9ec39SRoland Dreier break; 1943aef9ec39SRoland Dreier 1944aef9ec39SRoland Dreier default: 19457aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 19467aa54bd7SDavid Dillow PFX "Unhandled SRP opcode 0x%02x\n", opcode); 1947aef9ec39SRoland Dreier break; 1948aef9ec39SRoland Dreier } 1949aef9ec39SRoland Dreier 1950509c07bcSBart Van Assche ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 195185507bccSRalph Campbell DMA_FROM_DEVICE); 1952c996bb47SBart Van Assche 1953509c07bcSBart Van Assche res = srp_post_recv(ch, iu); 1954c996bb47SBart Van Assche if (res != 0) 1955c996bb47SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 1956c996bb47SBart Van Assche PFX "Recv failed with error code %d\n", res); 1957aef9ec39SRoland Dreier } 1958aef9ec39SRoland Dreier 1959c1120f89SBart Van Assche /** 1960c1120f89SBart Van Assche * srp_tl_err_work() - handle a transport layer error 1961af24663bSBart Van Assche * @work: Work structure embedded in an SRP target port. 1962c1120f89SBart Van Assche * 1963c1120f89SBart Van Assche * Note: This function may get invoked before the rport has been created, 1964c1120f89SBart Van Assche * hence the target->rport test. 
1965c1120f89SBart Van Assche */ 1966c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work) 1967c1120f89SBart Van Assche { 1968c1120f89SBart Van Assche struct srp_target_port *target; 1969c1120f89SBart Van Assche 1970c1120f89SBart Van Assche target = container_of(work, struct srp_target_port, tl_err_work); 1971c1120f89SBart Van Assche if (target->rport) 1972c1120f89SBart Van Assche srp_start_tl_fail_timers(target->rport); 1973c1120f89SBart Van Assche } 1974c1120f89SBart Van Assche 19755cfb1782SBart Van Assche static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 19767dad6b2eSBart Van Assche bool send_err, struct srp_rdma_ch *ch) 1977948d1e88SBart Van Assche { 19787dad6b2eSBart Van Assche struct srp_target_port *target = ch->target; 19797dad6b2eSBart Van Assche 19807dad6b2eSBart Van Assche if (wr_id == SRP_LAST_WR_ID) { 19817dad6b2eSBart Van Assche complete(&ch->done); 19827dad6b2eSBart Van Assche return; 19837dad6b2eSBart Van Assche } 19847dad6b2eSBart Van Assche 1985c014c8cdSBart Van Assche if (ch->connected && !target->qp_in_error) { 19865cfb1782SBart Van Assche if (wr_id & LOCAL_INV_WR_ID_MASK) { 19875cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 198857363d98SSagi Grimberg "LOCAL_INV failed with status %s (%d)\n", 198957363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19905cfb1782SBart Van Assche } else if (wr_id & FAST_REG_WR_ID_MASK) { 19915cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 199257363d98SSagi Grimberg "FAST_REG_MR failed status %s (%d)\n", 199357363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19945cfb1782SBart Van Assche } else { 19955cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 199657363d98SSagi Grimberg PFX "failed %s status %s (%d) for iu %p\n", 19975cfb1782SBart Van Assche send_err ? 
"send" : "receive", 199857363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status, 199957363d98SSagi Grimberg (void *)(uintptr_t)wr_id); 20005cfb1782SBart Van Assche } 2001c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 20024f0af697SBart Van Assche } 2003948d1e88SBart Van Assche target->qp_in_error = true; 2004948d1e88SBart Van Assche } 2005948d1e88SBart Van Assche 2006509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) 2007aef9ec39SRoland Dreier { 2008509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 2009aef9ec39SRoland Dreier struct ib_wc wc; 2010aef9ec39SRoland Dreier 2011aef9ec39SRoland Dreier ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 2012aef9ec39SRoland Dreier while (ib_poll_cq(cq, 1, &wc) > 0) { 2013948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 2014509c07bcSBart Van Assche srp_handle_recv(ch, &wc); 2015948d1e88SBart Van Assche } else { 20167dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, false, ch); 2017aef9ec39SRoland Dreier } 20189c03dc9fSBart Van Assche } 20199c03dc9fSBart Van Assche } 20209c03dc9fSBart Van Assche 2021509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) 20229c03dc9fSBart Van Assche { 2023509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 20249c03dc9fSBart Van Assche struct ib_wc wc; 2025dcb4cb85SBart Van Assche struct srp_iu *iu; 20269c03dc9fSBart Van Assche 20279c03dc9fSBart Van Assche while (ib_poll_cq(cq, 1, &wc) > 0) { 2028948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 2029737b94ebSRoland Dreier iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 2030509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 2031948d1e88SBart Van Assche } else { 20327dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, true, ch); 2033948d1e88SBart Van Assche } 2034aef9ec39SRoland Dreier } 2035aef9ec39SRoland Dreier } 2036aef9ec39SRoland Dreier 203776c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 2038aef9ec39SRoland Dreier { 203976c75b25SBart Van Assche struct srp_target_port *target = host_to_target(shost); 2040a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 2041509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2042aef9ec39SRoland Dreier struct srp_request *req; 2043aef9ec39SRoland Dreier struct srp_iu *iu; 2044aef9ec39SRoland Dreier struct srp_cmd *cmd; 204585507bccSRalph Campbell struct ib_device *dev; 204676c75b25SBart Van Assche unsigned long flags; 204777f2c1a4SBart Van Assche u32 tag; 204877f2c1a4SBart Van Assche u16 idx; 2049d1b4289eSBart Van Assche int len, ret; 2050a95cadb9SBart Van Assche const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 2051a95cadb9SBart Van Assche 2052a95cadb9SBart Van Assche /* 2053a95cadb9SBart Van Assche * The SCSI EH thread is the only context from which srp_queuecommand() 2054a95cadb9SBart Van Assche * can get invoked for blocked devices (SDEV_BLOCK / 2055a95cadb9SBart Van Assche * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by 2056a95cadb9SBart Van Assche * locking the rport mutex if invoked from inside the SCSI EH. 
2057a95cadb9SBart Van Assche */ 2058a95cadb9SBart Van Assche if (in_scsi_eh) 2059a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2060aef9ec39SRoland Dreier 2061d1b4289eSBart Van Assche scmnd->result = srp_chkready(target->rport); 2062d1b4289eSBart Van Assche if (unlikely(scmnd->result)) 2063d1b4289eSBart Van Assche goto err; 20642ce19e72SBart Van Assche 206577f2c1a4SBart Van Assche WARN_ON_ONCE(scmnd->request->tag < 0); 206677f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2067d92c0da7SBart Van Assche ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 206877f2c1a4SBart Van Assche idx = blk_mq_unique_tag_to_tag(tag); 206977f2c1a4SBart Van Assche WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", 207077f2c1a4SBart Van Assche dev_name(&shost->shost_gendev), tag, idx, 207177f2c1a4SBart Van Assche target->req_ring_size); 2072509c07bcSBart Van Assche 2073509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 2074509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_CMD); 2075509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 2076aef9ec39SRoland Dreier 207777f2c1a4SBart Van Assche if (!iu) 207877f2c1a4SBart Van Assche goto err; 207977f2c1a4SBart Van Assche 208077f2c1a4SBart Van Assche req = &ch->req_ring[idx]; 208105321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev->dev; 208249248644SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 208385507bccSRalph Campbell DMA_TO_DEVICE); 2084aef9ec39SRoland Dreier 2085f8b6e31eSDavid Dillow scmnd->host_scribble = (void *) req; 2086aef9ec39SRoland Dreier 2087aef9ec39SRoland Dreier cmd = iu->buf; 2088aef9ec39SRoland Dreier memset(cmd, 0, sizeof *cmd); 2089aef9ec39SRoland Dreier 2090aef9ec39SRoland Dreier cmd->opcode = SRP_CMD; 2091985aa495SBart Van Assche int_to_scsilun(scmnd->device->lun, &cmd->lun); 209277f2c1a4SBart Van Assche cmd->tag = tag; 2093aef9ec39SRoland Dreier memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2094aef9ec39SRoland Dreier 2095aef9ec39SRoland Dreier req->scmnd = scmnd; 2096aef9ec39SRoland Dreier req->cmd = iu; 2097aef9ec39SRoland Dreier 2098509c07bcSBart Van Assche len = srp_map_data(scmnd, ch, req); 2099aef9ec39SRoland Dreier if (len < 0) { 21007aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 2101d1b4289eSBart Van Assche PFX "Failed to map data (%d)\n", len); 2102d1b4289eSBart Van Assche /* 2103d1b4289eSBart Van Assche * If we ran out of memory descriptors (-ENOMEM) because an 2104d1b4289eSBart Van Assche * application is queuing many requests with more than 210552ede08fSBart Van Assche * max_pages_per_mr sg-list elements, tell the SCSI mid-layer 2106d1b4289eSBart Van Assche * to reduce queue depth temporarily. 2107d1b4289eSBart Van Assche */ 2108d1b4289eSBart Van Assche scmnd->result = len == -ENOMEM ? 
2109d1b4289eSBart Van Assche DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; 211076c75b25SBart Van Assche goto err_iu; 2111aef9ec39SRoland Dreier } 2112aef9ec39SRoland Dreier 211349248644SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 211485507bccSRalph Campbell DMA_TO_DEVICE); 2115aef9ec39SRoland Dreier 2116509c07bcSBart Van Assche if (srp_post_send(ch, iu, len)) { 21177aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2118aef9ec39SRoland Dreier goto err_unmap; 2119aef9ec39SRoland Dreier } 2120aef9ec39SRoland Dreier 2121d1b4289eSBart Van Assche ret = 0; 2122d1b4289eSBart Van Assche 2123a95cadb9SBart Van Assche unlock_rport: 2124a95cadb9SBart Van Assche if (in_scsi_eh) 2125a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2126a95cadb9SBart Van Assche 2127d1b4289eSBart Van Assche return ret; 2128aef9ec39SRoland Dreier 2129aef9ec39SRoland Dreier err_unmap: 2130509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 2131aef9ec39SRoland Dreier 213276c75b25SBart Van Assche err_iu: 2133509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_CMD); 213476c75b25SBart Van Assche 2135024ca901SBart Van Assche /* 2136024ca901SBart Van Assche * Avoid that the loops that iterate over the request ring can 2137024ca901SBart Van Assche * encounter a dangling SCSI command pointer. 2138024ca901SBart Van Assche */ 2139024ca901SBart Van Assche req->scmnd = NULL; 2140024ca901SBart Van Assche 2141d1b4289eSBart Van Assche err: 2142d1b4289eSBart Van Assche if (scmnd->result) { 2143d1b4289eSBart Van Assche scmnd->scsi_done(scmnd); 2144d1b4289eSBart Van Assche ret = 0; 2145d1b4289eSBart Van Assche } else { 2146d1b4289eSBart Van Assche ret = SCSI_MLQUEUE_HOST_BUSY; 2147d1b4289eSBart Van Assche } 2148a95cadb9SBart Van Assche 2149d1b4289eSBart Van Assche goto unlock_rport; 2150aef9ec39SRoland Dreier } 2151aef9ec39SRoland Dreier 21524d73f95fSBart Van Assche /* 21534d73f95fSBart Van Assche * Note: the resources allocated in this function are freed in 2154509c07bcSBart Van Assche * srp_free_ch_ib(). 
21554d73f95fSBart Van Assche */ 2156509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2157aef9ec39SRoland Dreier { 2158509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2159aef9ec39SRoland Dreier int i; 2160aef9ec39SRoland Dreier 2161509c07bcSBart Van Assche ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), 21624d73f95fSBart Van Assche GFP_KERNEL); 2163509c07bcSBart Van Assche if (!ch->rx_ring) 21644d73f95fSBart Van Assche goto err_no_ring; 2165509c07bcSBart Van Assche ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), 21664d73f95fSBart Van Assche GFP_KERNEL); 2167509c07bcSBart Van Assche if (!ch->tx_ring) 21684d73f95fSBart Van Assche goto err_no_ring; 21694d73f95fSBart Van Assche 21704d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2171509c07bcSBart Van Assche ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2172509c07bcSBart Van Assche ch->max_ti_iu_len, 2173aef9ec39SRoland Dreier GFP_KERNEL, DMA_FROM_DEVICE); 2174509c07bcSBart Van Assche if (!ch->rx_ring[i]) 2175aef9ec39SRoland Dreier goto err; 2176aef9ec39SRoland Dreier } 2177aef9ec39SRoland Dreier 21784d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2179509c07bcSBart Van Assche ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 218049248644SDavid Dillow target->max_iu_len, 2181aef9ec39SRoland Dreier GFP_KERNEL, DMA_TO_DEVICE); 2182509c07bcSBart Van Assche if (!ch->tx_ring[i]) 2183aef9ec39SRoland Dreier goto err; 2184dcb4cb85SBart Van Assche 2185509c07bcSBart Van Assche list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2186aef9ec39SRoland Dreier } 2187aef9ec39SRoland Dreier 2188aef9ec39SRoland Dreier return 0; 2189aef9ec39SRoland Dreier 2190aef9ec39SRoland Dreier err: 21914d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2192509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 2193509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 2194aef9ec39SRoland Dreier } 2195aef9ec39SRoland Dreier 21964d73f95fSBart Van Assche 21974d73f95fSBart Van Assche err_no_ring: 2198509c07bcSBart Van Assche kfree(ch->tx_ring); 2199509c07bcSBart Van Assche ch->tx_ring = NULL; 2200509c07bcSBart Van Assche kfree(ch->rx_ring); 2201509c07bcSBart Van Assche ch->rx_ring = NULL; 2202aef9ec39SRoland Dreier 2203aef9ec39SRoland Dreier return -ENOMEM; 2204aef9ec39SRoland Dreier } 2205aef9ec39SRoland Dreier 2206c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2207c9b03c1aSBart Van Assche { 2208c9b03c1aSBart Van Assche uint64_t T_tr_ns, max_compl_time_ms; 2209c9b03c1aSBart Van Assche uint32_t rq_tmo_jiffies; 2210c9b03c1aSBart Van Assche 2211c9b03c1aSBart Van Assche /* 2212c9b03c1aSBart Van Assche * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2213c9b03c1aSBart Van Assche * table 91), both the QP timeout and the retry count have to be set 2214c9b03c1aSBart Van Assche * for RC QP's during the RTR to RTS transition. 2215c9b03c1aSBart Van Assche */ 2216c9b03c1aSBart Van Assche WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2217c9b03c1aSBart Van Assche (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2218c9b03c1aSBart Van Assche 2219c9b03c1aSBart Van Assche /* 2220c9b03c1aSBart Van Assche * Set target->rq_tmo_jiffies to one second more than the largest time 2221c9b03c1aSBart Van Assche * it can take before an error completion is generated. 
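 * As an illustration (values chosen only for this example): with a QP Local
 * ACK Timeout of 14 and retry_cnt of 7, T_tr = 4096 ns * 2^14, roughly 67 ms,
 * the worst-case completion time is about 7 * 4 * 67 ms = 1.9 s, and the
 * resulting rq_tmo_jiffies corresponds to roughly 2.9 seconds.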
See also 2222c9b03c1aSBart Van Assche * C9-140..142 in the IBTA spec for more information about how to 2223c9b03c1aSBart Van Assche * convert the QP Local ACK Timeout value to nanoseconds. 2224c9b03c1aSBart Van Assche */ 2225c9b03c1aSBart Van Assche T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2226c9b03c1aSBart Van Assche max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2227c9b03c1aSBart Van Assche do_div(max_compl_time_ms, NSEC_PER_MSEC); 2228c9b03c1aSBart Van Assche rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2229c9b03c1aSBart Van Assche 2230c9b03c1aSBart Van Assche return rq_tmo_jiffies; 2231c9b03c1aSBart Van Assche } 2232c9b03c1aSBart Van Assche 2233961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2234e6300cbdSBart Van Assche const struct srp_login_rsp *lrsp, 2235509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2236961e0be8SDavid Dillow { 2237509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2238961e0be8SDavid Dillow struct ib_qp_attr *qp_attr = NULL; 2239961e0be8SDavid Dillow int attr_mask = 0; 2240961e0be8SDavid Dillow int ret; 2241961e0be8SDavid Dillow int i; 2242961e0be8SDavid Dillow 2243961e0be8SDavid Dillow if (lrsp->opcode == SRP_LOGIN_RSP) { 2244509c07bcSBart Van Assche ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2245509c07bcSBart Van Assche ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2246961e0be8SDavid Dillow 2247961e0be8SDavid Dillow /* 2248961e0be8SDavid Dillow * Reserve credits for task management so we don't 2249961e0be8SDavid Dillow * bounce requests back to the SCSI mid-layer. 2250961e0be8SDavid Dillow */ 2251961e0be8SDavid Dillow target->scsi_host->can_queue 2252509c07bcSBart Van Assche = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2253961e0be8SDavid Dillow target->scsi_host->can_queue); 22544d73f95fSBart Van Assche target->scsi_host->cmd_per_lun 22554d73f95fSBart Van Assche = min_t(int, target->scsi_host->can_queue, 22564d73f95fSBart Van Assche target->scsi_host->cmd_per_lun); 2257961e0be8SDavid Dillow } else { 2258961e0be8SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 2259961e0be8SDavid Dillow PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2260961e0be8SDavid Dillow ret = -ECONNRESET; 2261961e0be8SDavid Dillow goto error; 2262961e0be8SDavid Dillow } 2263961e0be8SDavid Dillow 2264509c07bcSBart Van Assche if (!ch->rx_ring) { 2265509c07bcSBart Van Assche ret = srp_alloc_iu_bufs(ch); 2266961e0be8SDavid Dillow if (ret) 2267961e0be8SDavid Dillow goto error; 2268961e0be8SDavid Dillow } 2269961e0be8SDavid Dillow 2270961e0be8SDavid Dillow ret = -ENOMEM; 2271961e0be8SDavid Dillow qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 2272961e0be8SDavid Dillow if (!qp_attr) 2273961e0be8SDavid Dillow goto error; 2274961e0be8SDavid Dillow 2275961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTR; 2276961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2277961e0be8SDavid Dillow if (ret) 2278961e0be8SDavid Dillow goto error_free; 2279961e0be8SDavid Dillow 2280509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2281961e0be8SDavid Dillow if (ret) 2282961e0be8SDavid Dillow goto error_free; 2283961e0be8SDavid Dillow 22844d73f95fSBart Van Assche for (i = 0; i < target->queue_size; i++) { 2285509c07bcSBart Van Assche struct srp_iu *iu = ch->rx_ring[i]; 2286509c07bcSBart Van Assche 2287509c07bcSBart Van Assche ret = srp_post_recv(ch, iu); 2288961e0be8SDavid Dillow if (ret) 2289961e0be8SDavid Dillow goto error_free; 2290961e0be8SDavid Dillow } 2291961e0be8SDavid 
Dillow 2292961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTS; 2293961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2294961e0be8SDavid Dillow if (ret) 2295961e0be8SDavid Dillow goto error_free; 2296961e0be8SDavid Dillow 2297c9b03c1aSBart Van Assche target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2298c9b03c1aSBart Van Assche 2299509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2300961e0be8SDavid Dillow if (ret) 2301961e0be8SDavid Dillow goto error_free; 2302961e0be8SDavid Dillow 2303961e0be8SDavid Dillow ret = ib_send_cm_rtu(cm_id, NULL, 0); 2304961e0be8SDavid Dillow 2305961e0be8SDavid Dillow error_free: 2306961e0be8SDavid Dillow kfree(qp_attr); 2307961e0be8SDavid Dillow 2308961e0be8SDavid Dillow error: 2309509c07bcSBart Van Assche ch->status = ret; 2310961e0be8SDavid Dillow } 2311961e0be8SDavid Dillow 2312aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2313aef9ec39SRoland Dreier struct ib_cm_event *event, 2314509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2315aef9ec39SRoland Dreier { 2316509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 23177aa54bd7SDavid Dillow struct Scsi_Host *shost = target->scsi_host; 2318aef9ec39SRoland Dreier struct ib_class_port_info *cpi; 2319aef9ec39SRoland Dreier int opcode; 2320aef9ec39SRoland Dreier 2321aef9ec39SRoland Dreier switch (event->param.rej_rcvd.reason) { 2322aef9ec39SRoland Dreier case IB_CM_REJ_PORT_CM_REDIRECT: 2323aef9ec39SRoland Dreier cpi = event->param.rej_rcvd.ari; 2324509c07bcSBart Van Assche ch->path.dlid = cpi->redirect_lid; 2325509c07bcSBart Van Assche ch->path.pkey = cpi->redirect_pkey; 2326aef9ec39SRoland Dreier cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2327509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); 2328aef9ec39SRoland Dreier 2329509c07bcSBart Van Assche ch->status = ch->path.dlid ? 2330aef9ec39SRoland Dreier SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2331aef9ec39SRoland Dreier break; 2332aef9ec39SRoland Dreier 2333aef9ec39SRoland Dreier case IB_CM_REJ_PORT_REDIRECT: 23345d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 2335aef9ec39SRoland Dreier /* 2336aef9ec39SRoland Dreier * Topspin/Cisco SRP gateways incorrectly send 2337aef9ec39SRoland Dreier * reject reason code 25 when they mean 24 2338aef9ec39SRoland Dreier * (port redirect). 
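 * Work around this by treating the additional reject information
 * (ARI) as the GID of the redirected target port, exactly as for a
 * regular port redirect.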
2339aef9ec39SRoland Dreier */ 2340509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, 2341aef9ec39SRoland Dreier event->param.rej_rcvd.ari, 16); 2342aef9ec39SRoland Dreier 23437aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, shost, 23447aa54bd7SDavid Dillow PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2345509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.subnet_prefix), 2346509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.interface_id)); 2347aef9ec39SRoland Dreier 2348509c07bcSBart Van Assche ch->status = SRP_PORT_REDIRECT; 2349aef9ec39SRoland Dreier } else { 23507aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23517aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2352509c07bcSBart Van Assche ch->status = -ECONNRESET; 2353aef9ec39SRoland Dreier } 2354aef9ec39SRoland Dreier break; 2355aef9ec39SRoland Dreier 2356aef9ec39SRoland Dreier case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 23577aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23587aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2359509c07bcSBart Van Assche ch->status = -ECONNRESET; 2360aef9ec39SRoland Dreier break; 2361aef9ec39SRoland Dreier 2362aef9ec39SRoland Dreier case IB_CM_REJ_CONSUMER_DEFINED: 2363aef9ec39SRoland Dreier opcode = *(u8 *) event->private_data; 2364aef9ec39SRoland Dreier if (opcode == SRP_LOGIN_REJ) { 2365aef9ec39SRoland Dreier struct srp_login_rej *rej = event->private_data; 2366aef9ec39SRoland Dreier u32 reason = be32_to_cpu(rej->reason); 2367aef9ec39SRoland Dreier 2368aef9ec39SRoland Dreier if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 23697aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23707aa54bd7SDavid Dillow PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2371aef9ec39SRoland Dreier else 2372e7ffde01SBart Van Assche shost_printk(KERN_WARNING, shost, PFX 2373e7ffde01SBart Van Assche "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2374747fe000SBart Van Assche target->sgid.raw, 2375747fe000SBart Van Assche target->orig_dgid.raw, reason); 2376aef9ec39SRoland Dreier } else 23777aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23787aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2379aef9ec39SRoland Dreier " opcode 0x%02x\n", opcode); 2380509c07bcSBart Van Assche ch->status = -ECONNRESET; 2381aef9ec39SRoland Dreier break; 2382aef9ec39SRoland Dreier 23839fe4bcf4SDavid Dillow case IB_CM_REJ_STALE_CONN: 23849fe4bcf4SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2385509c07bcSBart Van Assche ch->status = SRP_STALE_CONN; 23869fe4bcf4SDavid Dillow break; 23879fe4bcf4SDavid Dillow 2388aef9ec39SRoland Dreier default: 23897aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2390aef9ec39SRoland Dreier event->param.rej_rcvd.reason); 2391509c07bcSBart Van Assche ch->status = -ECONNRESET; 2392aef9ec39SRoland Dreier } 2393aef9ec39SRoland Dreier } 2394aef9ec39SRoland Dreier 2395aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2396aef9ec39SRoland Dreier { 2397509c07bcSBart Van Assche struct srp_rdma_ch *ch = cm_id->context; 2398509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2399aef9ec39SRoland Dreier int comp = 0; 2400aef9ec39SRoland Dreier 2401aef9ec39SRoland Dreier switch (event->event) { 2402aef9ec39SRoland Dreier case IB_CM_REQ_ERROR: 24037aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 24047aa54bd7SDavid Dillow PFX "Sending CM REQ 
failed\n"); 2405aef9ec39SRoland Dreier comp = 1; 2406509c07bcSBart Van Assche ch->status = -ECONNRESET; 2407aef9ec39SRoland Dreier break; 2408aef9ec39SRoland Dreier 2409aef9ec39SRoland Dreier case IB_CM_REP_RECEIVED: 2410aef9ec39SRoland Dreier comp = 1; 2411509c07bcSBart Van Assche srp_cm_rep_handler(cm_id, event->private_data, ch); 2412aef9ec39SRoland Dreier break; 2413aef9ec39SRoland Dreier 2414aef9ec39SRoland Dreier case IB_CM_REJ_RECEIVED: 24157aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2416aef9ec39SRoland Dreier comp = 1; 2417aef9ec39SRoland Dreier 2418509c07bcSBart Van Assche srp_cm_rej_handler(cm_id, event, ch); 2419aef9ec39SRoland Dreier break; 2420aef9ec39SRoland Dreier 2421b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_RECEIVED: 24227aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 24237aa54bd7SDavid Dillow PFX "DREQ received - connection closed\n"); 2424c014c8cdSBart Van Assche ch->connected = false; 2425b7ac4ab4SIshai Rabinovitz if (ib_send_cm_drep(cm_id, NULL, 0)) 24267aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24277aa54bd7SDavid Dillow PFX "Sending CM DREP failed\n"); 2428c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 2429aef9ec39SRoland Dreier break; 2430aef9ec39SRoland Dreier 2431aef9ec39SRoland Dreier case IB_CM_TIMEWAIT_EXIT: 24327aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24337aa54bd7SDavid Dillow PFX "connection closed\n"); 2434ac72d766SBart Van Assche comp = 1; 2435aef9ec39SRoland Dreier 2436509c07bcSBart Van Assche ch->status = 0; 2437aef9ec39SRoland Dreier break; 2438aef9ec39SRoland Dreier 2439b7ac4ab4SIshai Rabinovitz case IB_CM_MRA_RECEIVED: 2440b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_ERROR: 2441b7ac4ab4SIshai Rabinovitz case IB_CM_DREP_RECEIVED: 2442b7ac4ab4SIshai Rabinovitz break; 2443b7ac4ab4SIshai Rabinovitz 2444aef9ec39SRoland Dreier default: 24457aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 24467aa54bd7SDavid Dillow PFX "Unhandled CM event %d\n", event->event); 2447aef9ec39SRoland Dreier break; 2448aef9ec39SRoland Dreier } 2449aef9ec39SRoland Dreier 2450aef9ec39SRoland Dreier if (comp) 2451509c07bcSBart Van Assche complete(&ch->done); 2452aef9ec39SRoland Dreier 2453aef9ec39SRoland Dreier return 0; 2454aef9ec39SRoland Dreier } 2455aef9ec39SRoland Dreier 245671444b97SJack Wang /** 245771444b97SJack Wang * srp_change_queue_depth - setting device queue depth 245871444b97SJack Wang * @sdev: scsi device struct 245971444b97SJack Wang * @qdepth: requested queue depth 246071444b97SJack Wang * 246171444b97SJack Wang * Returns queue depth. 
246271444b97SJack Wang */ 246371444b97SJack Wang static int 2464db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 246571444b97SJack Wang { 246671444b97SJack Wang if (!sdev->tagged_supported) 24671e6f2416SChristoph Hellwig qdepth = 1; 2468db5ed4dfSChristoph Hellwig return scsi_change_queue_depth(sdev, qdepth); 246971444b97SJack Wang } 247071444b97SJack Wang 2471985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2472985aa495SBart Van Assche u8 func) 2473aef9ec39SRoland Dreier { 2474509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2475a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 247619081f31SDavid Dillow struct ib_device *dev = target->srp_host->srp_dev->dev; 2477aef9ec39SRoland Dreier struct srp_iu *iu; 2478aef9ec39SRoland Dreier struct srp_tsk_mgmt *tsk_mgmt; 2479aef9ec39SRoland Dreier 2480c014c8cdSBart Van Assche if (!ch->connected || target->qp_in_error) 24813780d1f0SBart Van Assche return -1; 24823780d1f0SBart Van Assche 2483509c07bcSBart Van Assche init_completion(&ch->tsk_mgmt_done); 2484aef9ec39SRoland Dreier 2485a95cadb9SBart Van Assche /* 2486509c07bcSBart Van Assche * Lock the rport mutex to avoid that srp_create_ch_ib() is 2487a95cadb9SBart Van Assche * invoked while a task management function is being sent. 2488a95cadb9SBart Van Assche */ 2489a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2490509c07bcSBart Van Assche spin_lock_irq(&ch->lock); 2491509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2492509c07bcSBart Van Assche spin_unlock_irq(&ch->lock); 249376c75b25SBart Van Assche 2494a95cadb9SBart Van Assche if (!iu) { 2495a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2496a95cadb9SBart Van Assche 249776c75b25SBart Van Assche return -1; 2498a95cadb9SBart Van Assche } 2499aef9ec39SRoland Dreier 250019081f31SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 250119081f31SDavid Dillow DMA_TO_DEVICE); 2502aef9ec39SRoland Dreier tsk_mgmt = iu->buf; 2503aef9ec39SRoland Dreier memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2504aef9ec39SRoland Dreier 2505aef9ec39SRoland Dreier tsk_mgmt->opcode = SRP_TSK_MGMT; 2506985aa495SBart Van Assche int_to_scsilun(lun, &tsk_mgmt->lun); 2507f8b6e31eSDavid Dillow tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 2508aef9ec39SRoland Dreier tsk_mgmt->tsk_mgmt_func = func; 2509f8b6e31eSDavid Dillow tsk_mgmt->task_tag = req_tag; 2510aef9ec39SRoland Dreier 251119081f31SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 251219081f31SDavid Dillow DMA_TO_DEVICE); 2513509c07bcSBart Van Assche if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2514509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2515a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2516a95cadb9SBart Van Assche 251776c75b25SBart Van Assche return -1; 251876c75b25SBart Van Assche } 2519a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2520d945e1dfSRoland Dreier 2521509c07bcSBart Van Assche if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, 2522aef9ec39SRoland Dreier msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2523d945e1dfSRoland Dreier return -1; 2524aef9ec39SRoland Dreier 2525d945e1dfSRoland Dreier return 0; 2526d945e1dfSRoland Dreier } 2527d945e1dfSRoland Dreier 2528aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd) 2529aef9ec39SRoland Dreier { 2530d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2531f8b6e31eSDavid Dillow 
struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 253277f2c1a4SBart Van Assche u32 tag; 2533d92c0da7SBart Van Assche u16 ch_idx; 2534509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2535086f44f5SBart Van Assche int ret; 2536d945e1dfSRoland Dreier 25377aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2538aef9ec39SRoland Dreier 2539d92c0da7SBart Van Assche if (!req) 254099b6697aSBart Van Assche return SUCCESS; 254177f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2542d92c0da7SBart Van Assche ch_idx = blk_mq_unique_tag_to_hwq(tag); 2543d92c0da7SBart Van Assche if (WARN_ON_ONCE(ch_idx >= target->ch_count)) 2544d92c0da7SBart Van Assche return SUCCESS; 2545d92c0da7SBart Van Assche ch = &target->ch[ch_idx]; 2546d92c0da7SBart Van Assche if (!srp_claim_req(ch, req, NULL, scmnd)) 2547d92c0da7SBart Van Assche return SUCCESS; 2548d92c0da7SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 2549d92c0da7SBart Van Assche "Sending SRP abort for tag %#x\n", tag); 255077f2c1a4SBart Van Assche if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, 255180d5e8a2SBart Van Assche SRP_TSK_ABORT_TASK) == 0) 2552086f44f5SBart Van Assche ret = SUCCESS; 2553ed9b2264SBart Van Assche else if (target->rport->state == SRP_RPORT_LOST) 255499e1c139SBart Van Assche ret = FAST_IO_FAIL; 2555086f44f5SBart Van Assche else 2556086f44f5SBart Van Assche ret = FAILED; 2557509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 2558d945e1dfSRoland Dreier scmnd->result = DID_ABORT << 16; 2559d8536670SBart Van Assche scmnd->scsi_done(scmnd); 2560d945e1dfSRoland Dreier 2561086f44f5SBart Van Assche return ret; 2562aef9ec39SRoland Dreier } 2563aef9ec39SRoland Dreier 2564aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd) 2565aef9ec39SRoland Dreier { 2566d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2567d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2568536ae14eSBart Van Assche int i, j; 2569d945e1dfSRoland Dreier 25707aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2571aef9ec39SRoland Dreier 2572d92c0da7SBart Van Assche ch = &target->ch[0]; 2573509c07bcSBart Van Assche if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, 2574f8b6e31eSDavid Dillow SRP_TSK_LUN_RESET)) 2575d945e1dfSRoland Dreier return FAILED; 2576509c07bcSBart Van Assche if (ch->tsk_mgmt_status) 2577d945e1dfSRoland Dreier return FAILED; 2578d945e1dfSRoland Dreier 2579d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2580d92c0da7SBart Van Assche ch = &target->ch[i]; 25814d73f95fSBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 2582509c07bcSBart Van Assche struct srp_request *req = &ch->req_ring[j]; 2583509c07bcSBart Van Assche 2584509c07bcSBart Van Assche srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2585536ae14eSBart Van Assche } 2586d92c0da7SBart Van Assche } 2587d945e1dfSRoland Dreier 2588d945e1dfSRoland Dreier return SUCCESS; 2589aef9ec39SRoland Dreier } 2590aef9ec39SRoland Dreier 2591aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd) 2592aef9ec39SRoland Dreier { 2593aef9ec39SRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2594aef9ec39SRoland Dreier 25957aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 2596aef9ec39SRoland Dreier 2597ed9b2264SBart Van Assche return srp_reconnect_rport(target->rport) == 0 ?
SUCCESS : FAILED; 2598aef9ec39SRoland Dreier } 2599aef9ec39SRoland Dreier 2600c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev) 2601c9b03c1aSBart Van Assche { 2602c9b03c1aSBart Van Assche struct Scsi_Host *shost = sdev->host; 2603c9b03c1aSBart Van Assche struct srp_target_port *target = host_to_target(shost); 2604c9b03c1aSBart Van Assche struct request_queue *q = sdev->request_queue; 2605c9b03c1aSBart Van Assche unsigned long timeout; 2606c9b03c1aSBart Van Assche 2607c9b03c1aSBart Van Assche if (sdev->type == TYPE_DISK) { 2608c9b03c1aSBart Van Assche timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2609c9b03c1aSBart Van Assche blk_queue_rq_timeout(q, timeout); 2610c9b03c1aSBart Van Assche } 2611c9b03c1aSBart Van Assche 2612c9b03c1aSBart Van Assche return 0; 2613c9b03c1aSBart Van Assche } 2614c9b03c1aSBart Van Assche 2615ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 2616ee959b00STony Jones char *buf) 26176ecb0c84SRoland Dreier { 2618ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26196ecb0c84SRoland Dreier 262045c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 26216ecb0c84SRoland Dreier } 26226ecb0c84SRoland Dreier 2623ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 2624ee959b00STony Jones char *buf) 26256ecb0c84SRoland Dreier { 2626ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26276ecb0c84SRoland Dreier 262845c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 26296ecb0c84SRoland Dreier } 26306ecb0c84SRoland Dreier 2631ee959b00STony Jones static ssize_t show_service_id(struct device *dev, 2632ee959b00STony Jones struct device_attribute *attr, char *buf) 26336ecb0c84SRoland Dreier { 2634ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26356ecb0c84SRoland Dreier 263645c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); 26376ecb0c84SRoland Dreier } 26386ecb0c84SRoland Dreier 2639ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 2640ee959b00STony Jones char *buf) 26416ecb0c84SRoland Dreier { 2642ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26436ecb0c84SRoland Dreier 2644747fe000SBart Van Assche return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); 26456ecb0c84SRoland Dreier } 26466ecb0c84SRoland Dreier 2647848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2648848b3082SBart Van Assche char *buf) 2649848b3082SBart Van Assche { 2650848b3082SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2651848b3082SBart Van Assche 2652747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->sgid.raw); 2653848b3082SBart Van Assche } 2654848b3082SBart Van Assche 2655ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2656ee959b00STony Jones char *buf) 26576ecb0c84SRoland Dreier { 2658ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2659d92c0da7SBart Van Assche struct srp_rdma_ch *ch = &target->ch[0]; 26606ecb0c84SRoland Dreier 2661509c07bcSBart Van Assche return sprintf(buf, "%pI6\n", ch->path.dgid.raw); 26626ecb0c84SRoland Dreier } 26636ecb0c84SRoland 
Dreier 2664ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev, 2665ee959b00STony Jones struct device_attribute *attr, char *buf) 26663633b3d0SIshai Rabinovitz { 2667ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26683633b3d0SIshai Rabinovitz 2669747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->orig_dgid.raw); 26703633b3d0SIshai Rabinovitz } 26713633b3d0SIshai Rabinovitz 267289de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev, 267389de7486SBart Van Assche struct device_attribute *attr, char *buf) 267489de7486SBart Van Assche { 267589de7486SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2676d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2677d92c0da7SBart Van Assche int i, req_lim = INT_MAX; 267889de7486SBart Van Assche 2679d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2680d92c0da7SBart Van Assche ch = &target->ch[i]; 2681d92c0da7SBart Van Assche req_lim = min(req_lim, ch->req_lim); 2682d92c0da7SBart Van Assche } 2683d92c0da7SBart Van Assche return sprintf(buf, "%d\n", req_lim); 268489de7486SBart Van Assche } 268589de7486SBart Van Assche 2686ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev, 2687ee959b00STony Jones struct device_attribute *attr, char *buf) 26886bfa24faSRoland Dreier { 2689ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26906bfa24faSRoland Dreier 26916bfa24faSRoland Dreier return sprintf(buf, "%d\n", target->zero_req_lim); 26926bfa24faSRoland Dreier } 26936bfa24faSRoland Dreier 2694ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev, 2695ee959b00STony Jones struct device_attribute *attr, char *buf) 2696ded7f1a1SIshai Rabinovitz { 2697ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2698ded7f1a1SIshai Rabinovitz 2699ded7f1a1SIshai Rabinovitz return sprintf(buf, "%d\n", target->srp_host->port); 2700ded7f1a1SIshai Rabinovitz } 2701ded7f1a1SIshai Rabinovitz 2702ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev, 2703ee959b00STony Jones struct device_attribute *attr, char *buf) 2704ded7f1a1SIshai Rabinovitz { 2705ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2706ded7f1a1SIshai Rabinovitz 270705321937SGreg Kroah-Hartman return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2708ded7f1a1SIshai Rabinovitz } 2709ded7f1a1SIshai Rabinovitz 2710d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, 2711d92c0da7SBart Van Assche char *buf) 2712d92c0da7SBart Van Assche { 2713d92c0da7SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2714d92c0da7SBart Van Assche 2715d92c0da7SBart Van Assche return sprintf(buf, "%d\n", target->ch_count); 2716d92c0da7SBart Van Assche } 2717d92c0da7SBart Van Assche 27184b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev, 27194b5e5f41SBart Van Assche struct device_attribute *attr, char *buf) 27204b5e5f41SBart Van Assche { 27214b5e5f41SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27224b5e5f41SBart Van Assche 27234b5e5f41SBart Van Assche return sprintf(buf, "%d\n", target->comp_vector); 27244b5e5f41SBart Van Assche } 27254b5e5f41SBart Van Assche 27267bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev, 27277bb312e4SVu Pham struct 
device_attribute *attr, char *buf) 27287bb312e4SVu Pham { 27297bb312e4SVu Pham struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27307bb312e4SVu Pham 27317bb312e4SVu Pham return sprintf(buf, "%d\n", target->tl_retry_count); 27327bb312e4SVu Pham } 27337bb312e4SVu Pham 273449248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev, 273549248644SDavid Dillow struct device_attribute *attr, char *buf) 273649248644SDavid Dillow { 273749248644SDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 273849248644SDavid Dillow 273949248644SDavid Dillow return sprintf(buf, "%u\n", target->cmd_sg_cnt); 274049248644SDavid Dillow } 274149248644SDavid Dillow 2742c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev, 2743c07d424dSDavid Dillow struct device_attribute *attr, char *buf) 2744c07d424dSDavid Dillow { 2745c07d424dSDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2746c07d424dSDavid Dillow 2747c07d424dSDavid Dillow return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); 2748c07d424dSDavid Dillow } 2749c07d424dSDavid Dillow 2750ee959b00STony Jones static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 2751ee959b00STony Jones static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 2752ee959b00STony Jones static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 2753ee959b00STony Jones static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2754848b3082SBart Van Assche static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); 2755ee959b00STony Jones static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 2756ee959b00STony Jones static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 275789de7486SBart Van Assche static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 2758ee959b00STony Jones static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2759ee959b00STony Jones static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2760ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2761d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); 27624b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 27637bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 276449248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2765c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); 27666ecb0c84SRoland Dreier 2767ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = { 2768ee959b00STony Jones &dev_attr_id_ext, 2769ee959b00STony Jones &dev_attr_ioc_guid, 2770ee959b00STony Jones &dev_attr_service_id, 2771ee959b00STony Jones &dev_attr_pkey, 2772848b3082SBart Van Assche &dev_attr_sgid, 2773ee959b00STony Jones &dev_attr_dgid, 2774ee959b00STony Jones &dev_attr_orig_dgid, 277589de7486SBart Van Assche &dev_attr_req_lim, 2776ee959b00STony Jones &dev_attr_zero_req_lim, 2777ee959b00STony Jones &dev_attr_local_ib_port, 2778ee959b00STony Jones &dev_attr_local_ib_device, 2779d92c0da7SBart Van Assche &dev_attr_ch_count, 27804b5e5f41SBart Van Assche &dev_attr_comp_vector, 27817bb312e4SVu Pham &dev_attr_tl_retry_count, 278249248644SDavid Dillow &dev_attr_cmd_sg_entries, 2783c07d424dSDavid Dillow &dev_attr_allow_ext_sg, 27846ecb0c84SRoland Dreier NULL 27856ecb0c84SRoland Dreier }; 27866ecb0c84SRoland Dreier 2787aef9ec39SRoland Dreier static 
struct scsi_host_template srp_template = { 2788aef9ec39SRoland Dreier .module = THIS_MODULE, 2789b7f008fdSRoland Dreier .name = "InfiniBand SRP initiator", 2790b7f008fdSRoland Dreier .proc_name = DRV_NAME, 2791c9b03c1aSBart Van Assche .slave_configure = srp_slave_configure, 2792aef9ec39SRoland Dreier .info = srp_target_info, 2793aef9ec39SRoland Dreier .queuecommand = srp_queuecommand, 279471444b97SJack Wang .change_queue_depth = srp_change_queue_depth, 2795aef9ec39SRoland Dreier .eh_abort_handler = srp_abort, 2796aef9ec39SRoland Dreier .eh_device_reset_handler = srp_reset_device, 2797aef9ec39SRoland Dreier .eh_host_reset_handler = srp_reset_host, 27982742c1daSBart Van Assche .skip_settle_delay = true, 279949248644SDavid Dillow .sg_tablesize = SRP_DEF_SG_TABLESIZE, 28004d73f95fSBart Van Assche .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 2801aef9ec39SRoland Dreier .this_id = -1, 28024d73f95fSBart Van Assche .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 28036ecb0c84SRoland Dreier .use_clustering = ENABLE_CLUSTERING, 280477f2c1a4SBart Van Assche .shost_attrs = srp_host_attrs, 2805c40ecc12SChristoph Hellwig .track_queue_depth = 1, 2806aef9ec39SRoland Dreier }; 2807aef9ec39SRoland Dreier 280834aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host) 280934aa654eSBart Van Assche { 281034aa654eSBart Van Assche struct scsi_device *sdev; 281134aa654eSBart Van Assche int c = 0; 281234aa654eSBart Van Assche 281334aa654eSBart Van Assche shost_for_each_device(sdev, host) 281434aa654eSBart Van Assche c++; 281534aa654eSBart Van Assche 281634aa654eSBart Van Assche return c; 281734aa654eSBart Van Assche } 281834aa654eSBart Van Assche 2819bc44bd1dSBart Van Assche /* 2820bc44bd1dSBart Van Assche * Return values: 2821bc44bd1dSBart Van Assche * < 0 upon failure. Caller is responsible for SRP target port cleanup. 2822bc44bd1dSBart Van Assche * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 2823bc44bd1dSBart Van Assche * removal has been scheduled. 2824bc44bd1dSBart Van Assche * 0 and target->state != SRP_TARGET_REMOVED upon success. 
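 * The SRP_TARGET_REMOVED case occurs when the initial SCSI scan fails,
 * e.g. because the connection was lost while scanning; in that case
 * srp_queue_remove_work() schedules asynchronous removal of the target
 * port.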
2825bc44bd1dSBart Van Assche */ 2826aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2827aef9ec39SRoland Dreier { 28283236822bSFUJITA Tomonori struct srp_rport_identifiers ids; 28293236822bSFUJITA Tomonori struct srp_rport *rport; 28303236822bSFUJITA Tomonori 283134aa654eSBart Van Assche target->state = SRP_TARGET_SCANNING; 2832aef9ec39SRoland Dreier sprintf(target->target_name, "SRP.T10:%016llX", 283345c37cadSBart Van Assche be64_to_cpu(target->id_ext)); 2834aef9ec39SRoland Dreier 283505321937SGreg Kroah-Hartman if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 2836aef9ec39SRoland Dreier return -ENODEV; 2837aef9ec39SRoland Dreier 28383236822bSFUJITA Tomonori memcpy(ids.port_id, &target->id_ext, 8); 28393236822bSFUJITA Tomonori memcpy(ids.port_id + 8, &target->ioc_guid, 8); 2840aebd5e47SFUJITA Tomonori ids.roles = SRP_RPORT_ROLE_TARGET; 28413236822bSFUJITA Tomonori rport = srp_rport_add(target->scsi_host, &ids); 28423236822bSFUJITA Tomonori if (IS_ERR(rport)) { 28433236822bSFUJITA Tomonori scsi_remove_host(target->scsi_host); 28443236822bSFUJITA Tomonori return PTR_ERR(rport); 28453236822bSFUJITA Tomonori } 28463236822bSFUJITA Tomonori 2847dc1bdbd9SBart Van Assche rport->lld_data = target; 28489dd69a60SBart Van Assche target->rport = rport; 2849dc1bdbd9SBart Van Assche 2850b3589fd4SMatthew Wilcox spin_lock(&host->target_lock); 2851aef9ec39SRoland Dreier list_add_tail(&target->list, &host->target_list); 2852b3589fd4SMatthew Wilcox spin_unlock(&host->target_lock); 2853aef9ec39SRoland Dreier 2854aef9ec39SRoland Dreier scsi_scan_target(&target->scsi_host->shost_gendev, 28551962a4a1SMatthew Wilcox 0, target->scsi_id, SCAN_WILD_CARD, 0); 2856aef9ec39SRoland Dreier 2857c014c8cdSBart Van Assche if (srp_connected_ch(target) < target->ch_count || 2858c014c8cdSBart Van Assche target->qp_in_error) { 285934aa654eSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 286034aa654eSBart Van Assche PFX "SCSI scan failed - removing SCSI host\n"); 286134aa654eSBart Van Assche srp_queue_remove_work(target); 286234aa654eSBart Van Assche goto out; 286334aa654eSBart Van Assche } 286434aa654eSBart Van Assche 286534aa654eSBart Van Assche pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n", 286634aa654eSBart Van Assche dev_name(&target->scsi_host->shost_gendev), 286734aa654eSBart Van Assche srp_sdev_count(target->scsi_host)); 286834aa654eSBart Van Assche 286934aa654eSBart Van Assche spin_lock_irq(&target->lock); 287034aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 287134aa654eSBart Van Assche target->state = SRP_TARGET_LIVE; 287234aa654eSBart Van Assche spin_unlock_irq(&target->lock); 287334aa654eSBart Van Assche 287434aa654eSBart Van Assche out: 2875aef9ec39SRoland Dreier return 0; 2876aef9ec39SRoland Dreier } 2877aef9ec39SRoland Dreier 2878ee959b00STony Jones static void srp_release_dev(struct device *dev) 2879aef9ec39SRoland Dreier { 2880aef9ec39SRoland Dreier struct srp_host *host = 2881ee959b00STony Jones container_of(dev, struct srp_host, dev); 2882aef9ec39SRoland Dreier 2883aef9ec39SRoland Dreier complete(&host->released); 2884aef9ec39SRoland Dreier } 2885aef9ec39SRoland Dreier 2886aef9ec39SRoland Dreier static struct class srp_class = { 2887aef9ec39SRoland Dreier .name = "infiniband_srp", 2888ee959b00STony Jones .dev_release = srp_release_dev 2889aef9ec39SRoland Dreier }; 2890aef9ec39SRoland Dreier 289196fc248aSBart Van Assche /** 289296fc248aSBart Van Assche * srp_conn_unique() - check whether the 
connection to a target is unique 2893af24663bSBart Van Assche * @host: SRP host. 2894af24663bSBart Van Assche * @target: SRP target port. 289596fc248aSBart Van Assche */ 289696fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host, 289796fc248aSBart Van Assche struct srp_target_port *target) 289896fc248aSBart Van Assche { 289996fc248aSBart Van Assche struct srp_target_port *t; 290096fc248aSBart Van Assche bool ret = false; 290196fc248aSBart Van Assche 290296fc248aSBart Van Assche if (target->state == SRP_TARGET_REMOVED) 290396fc248aSBart Van Assche goto out; 290496fc248aSBart Van Assche 290596fc248aSBart Van Assche ret = true; 290696fc248aSBart Van Assche 290796fc248aSBart Van Assche spin_lock(&host->target_lock); 290896fc248aSBart Van Assche list_for_each_entry(t, &host->target_list, list) { 290996fc248aSBart Van Assche if (t != target && 291096fc248aSBart Van Assche target->id_ext == t->id_ext && 291196fc248aSBart Van Assche target->ioc_guid == t->ioc_guid && 291296fc248aSBart Van Assche target->initiator_ext == t->initiator_ext) { 291396fc248aSBart Van Assche ret = false; 291496fc248aSBart Van Assche break; 291596fc248aSBart Van Assche } 291696fc248aSBart Van Assche } 291796fc248aSBart Van Assche spin_unlock(&host->target_lock); 291896fc248aSBart Van Assche 291996fc248aSBart Van Assche out: 292096fc248aSBart Van Assche return ret; 292196fc248aSBart Van Assche } 292296fc248aSBart Van Assche 2923aef9ec39SRoland Dreier /* 2924aef9ec39SRoland Dreier * Target ports are added by writing 2925aef9ec39SRoland Dreier * 2926aef9ec39SRoland Dreier * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 2927aef9ec39SRoland Dreier * pkey=<P_Key>,service_id=<service ID> 2928aef9ec39SRoland Dreier * 2929aef9ec39SRoland Dreier * to the add_target sysfs attribute. 
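 *
 * For example (all identifier values below are made up for illustration,
 * and the srp-mlx4_0-1 device name depends on the local HCA and port):
 *
 *   echo id_ext=200100a0b8120001,ioc_guid=0002c9030005f34e,dgid=fe800000000000000002c9030005f34f,pkey=ffff,service_id=0002c9030005f34e > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target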
2930aef9ec39SRoland Dreier */ 2931aef9ec39SRoland Dreier enum { 2932aef9ec39SRoland Dreier SRP_OPT_ERR = 0, 2933aef9ec39SRoland Dreier SRP_OPT_ID_EXT = 1 << 0, 2934aef9ec39SRoland Dreier SRP_OPT_IOC_GUID = 1 << 1, 2935aef9ec39SRoland Dreier SRP_OPT_DGID = 1 << 2, 2936aef9ec39SRoland Dreier SRP_OPT_PKEY = 1 << 3, 2937aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID = 1 << 4, 2938aef9ec39SRoland Dreier SRP_OPT_MAX_SECT = 1 << 5, 293952fb2b50SVu Pham SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 29400c0450dbSRamachandra K SRP_OPT_IO_CLASS = 1 << 7, 294101cb9bcbSIshai Rabinovitz SRP_OPT_INITIATOR_EXT = 1 << 8, 294249248644SDavid Dillow SRP_OPT_CMD_SG_ENTRIES = 1 << 9, 2943c07d424dSDavid Dillow SRP_OPT_ALLOW_EXT_SG = 1 << 10, 2944c07d424dSDavid Dillow SRP_OPT_SG_TABLESIZE = 1 << 11, 29454b5e5f41SBart Van Assche SRP_OPT_COMP_VECTOR = 1 << 12, 29467bb312e4SVu Pham SRP_OPT_TL_RETRY_COUNT = 1 << 13, 29474d73f95fSBart Van Assche SRP_OPT_QUEUE_SIZE = 1 << 14, 2948aef9ec39SRoland Dreier SRP_OPT_ALL = (SRP_OPT_ID_EXT | 2949aef9ec39SRoland Dreier SRP_OPT_IOC_GUID | 2950aef9ec39SRoland Dreier SRP_OPT_DGID | 2951aef9ec39SRoland Dreier SRP_OPT_PKEY | 2952aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID), 2953aef9ec39SRoland Dreier }; 2954aef9ec39SRoland Dreier 2955a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = { 2956aef9ec39SRoland Dreier { SRP_OPT_ID_EXT, "id_ext=%s" }, 2957aef9ec39SRoland Dreier { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 2958aef9ec39SRoland Dreier { SRP_OPT_DGID, "dgid=%s" }, 2959aef9ec39SRoland Dreier { SRP_OPT_PKEY, "pkey=%x" }, 2960aef9ec39SRoland Dreier { SRP_OPT_SERVICE_ID, "service_id=%s" }, 2961aef9ec39SRoland Dreier { SRP_OPT_MAX_SECT, "max_sect=%d" }, 296252fb2b50SVu Pham { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 29630c0450dbSRamachandra K { SRP_OPT_IO_CLASS, "io_class=%x" }, 296401cb9bcbSIshai Rabinovitz { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 296549248644SDavid Dillow { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, 2966c07d424dSDavid Dillow { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, 2967c07d424dSDavid Dillow { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, 29684b5e5f41SBart Van Assche { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, 29697bb312e4SVu Pham { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, 29704d73f95fSBart Van Assche { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, 2971aef9ec39SRoland Dreier { SRP_OPT_ERR, NULL } 2972aef9ec39SRoland Dreier }; 2973aef9ec39SRoland Dreier 2974aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target) 2975aef9ec39SRoland Dreier { 2976aef9ec39SRoland Dreier char *options, *sep_opt; 2977aef9ec39SRoland Dreier char *p; 2978aef9ec39SRoland Dreier char dgid[3]; 2979aef9ec39SRoland Dreier substring_t args[MAX_OPT_ARGS]; 2980aef9ec39SRoland Dreier int opt_mask = 0; 2981aef9ec39SRoland Dreier int token; 2982aef9ec39SRoland Dreier int ret = -EINVAL; 2983aef9ec39SRoland Dreier int i; 2984aef9ec39SRoland Dreier 2985aef9ec39SRoland Dreier options = kstrdup(buf, GFP_KERNEL); 2986aef9ec39SRoland Dreier if (!options) 2987aef9ec39SRoland Dreier return -ENOMEM; 2988aef9ec39SRoland Dreier 2989aef9ec39SRoland Dreier sep_opt = options; 29907dcf9c19SSagi Grimberg while ((p = strsep(&sep_opt, ",\n")) != NULL) { 2991aef9ec39SRoland Dreier if (!*p) 2992aef9ec39SRoland Dreier continue; 2993aef9ec39SRoland Dreier 2994aef9ec39SRoland Dreier token = match_token(p, srp_opt_tokens, args); 2995aef9ec39SRoland Dreier opt_mask |= token; 2996aef9ec39SRoland Dreier 2997aef9ec39SRoland Dreier switch (token) { 2998aef9ec39SRoland Dreier 
case SRP_OPT_ID_EXT: 2999aef9ec39SRoland Dreier p = match_strdup(args); 3000a20f3a6dSIshai Rabinovitz if (!p) { 3001a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3002a20f3a6dSIshai Rabinovitz goto out; 3003a20f3a6dSIshai Rabinovitz } 3004aef9ec39SRoland Dreier target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3005aef9ec39SRoland Dreier kfree(p); 3006aef9ec39SRoland Dreier break; 3007aef9ec39SRoland Dreier 3008aef9ec39SRoland Dreier case SRP_OPT_IOC_GUID: 3009aef9ec39SRoland Dreier p = match_strdup(args); 3010a20f3a6dSIshai Rabinovitz if (!p) { 3011a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3012a20f3a6dSIshai Rabinovitz goto out; 3013a20f3a6dSIshai Rabinovitz } 3014aef9ec39SRoland Dreier target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3015aef9ec39SRoland Dreier kfree(p); 3016aef9ec39SRoland Dreier break; 3017aef9ec39SRoland Dreier 3018aef9ec39SRoland Dreier case SRP_OPT_DGID: 3019aef9ec39SRoland Dreier p = match_strdup(args); 3020a20f3a6dSIshai Rabinovitz if (!p) { 3021a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3022a20f3a6dSIshai Rabinovitz goto out; 3023a20f3a6dSIshai Rabinovitz } 3024aef9ec39SRoland Dreier if (strlen(p) != 32) { 3025e0bda7d8SBart Van Assche pr_warn("bad dest GID parameter '%s'\n", p); 3026ce1823f0SRoland Dreier kfree(p); 3027aef9ec39SRoland Dreier goto out; 3028aef9ec39SRoland Dreier } 3029aef9ec39SRoland Dreier 3030aef9ec39SRoland Dreier for (i = 0; i < 16; ++i) { 3031747fe000SBart Van Assche strlcpy(dgid, p + i * 2, sizeof(dgid)); 3032747fe000SBart Van Assche if (sscanf(dgid, "%hhx", 3033747fe000SBart Van Assche &target->orig_dgid.raw[i]) < 1) { 3034747fe000SBart Van Assche ret = -EINVAL; 3035747fe000SBart Van Assche kfree(p); 3036747fe000SBart Van Assche goto out; 3037747fe000SBart Van Assche } 3038aef9ec39SRoland Dreier } 3039bf17c1c7SRoland Dreier kfree(p); 3040aef9ec39SRoland Dreier break; 3041aef9ec39SRoland Dreier 3042aef9ec39SRoland Dreier case SRP_OPT_PKEY: 3043aef9ec39SRoland Dreier if (match_hex(args, &token)) { 3044e0bda7d8SBart Van Assche pr_warn("bad P_Key parameter '%s'\n", p); 3045aef9ec39SRoland Dreier goto out; 3046aef9ec39SRoland Dreier } 3047747fe000SBart Van Assche target->pkey = cpu_to_be16(token); 3048aef9ec39SRoland Dreier break; 3049aef9ec39SRoland Dreier 3050aef9ec39SRoland Dreier case SRP_OPT_SERVICE_ID: 3051aef9ec39SRoland Dreier p = match_strdup(args); 3052a20f3a6dSIshai Rabinovitz if (!p) { 3053a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3054a20f3a6dSIshai Rabinovitz goto out; 3055a20f3a6dSIshai Rabinovitz } 3056aef9ec39SRoland Dreier target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3057aef9ec39SRoland Dreier kfree(p); 3058aef9ec39SRoland Dreier break; 3059aef9ec39SRoland Dreier 3060aef9ec39SRoland Dreier case SRP_OPT_MAX_SECT: 3061aef9ec39SRoland Dreier if (match_int(args, &token)) { 3062e0bda7d8SBart Van Assche pr_warn("bad max sect parameter '%s'\n", p); 3063aef9ec39SRoland Dreier goto out; 3064aef9ec39SRoland Dreier } 3065aef9ec39SRoland Dreier target->scsi_host->max_sectors = token; 3066aef9ec39SRoland Dreier break; 3067aef9ec39SRoland Dreier 30684d73f95fSBart Van Assche case SRP_OPT_QUEUE_SIZE: 30694d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 30704d73f95fSBart Van Assche pr_warn("bad queue_size parameter '%s'\n", p); 30714d73f95fSBart Van Assche goto out; 30724d73f95fSBart Van Assche } 30734d73f95fSBart Van Assche target->scsi_host->can_queue = token; 30744d73f95fSBart Van Assche target->queue_size = token + SRP_RSP_SQ_SIZE + 30754d73f95fSBart Van Assche SRP_TSK_MGMT_SQ_SIZE; 
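			/*
			 * The IU ring is sized larger than the requested queue
			 * size: SRP_RSP_SQ_SIZE + SRP_TSK_MGMT_SQ_SIZE extra
			 * slots are reserved for response and task management
			 * IUs on top of the SCSI commands counted by can_queue.
			 */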
30764d73f95fSBart Van Assche if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 30774d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 30784d73f95fSBart Van Assche break; 30794d73f95fSBart Van Assche 308052fb2b50SVu Pham case SRP_OPT_MAX_CMD_PER_LUN: 30814d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 3082e0bda7d8SBart Van Assche pr_warn("bad max cmd_per_lun parameter '%s'\n", 3083e0bda7d8SBart Van Assche p); 308452fb2b50SVu Pham goto out; 308552fb2b50SVu Pham } 30864d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 308752fb2b50SVu Pham break; 308852fb2b50SVu Pham 30890c0450dbSRamachandra K case SRP_OPT_IO_CLASS: 30900c0450dbSRamachandra K if (match_hex(args, &token)) { 3091e0bda7d8SBart Van Assche pr_warn("bad IO class parameter '%s'\n", p); 30920c0450dbSRamachandra K goto out; 30930c0450dbSRamachandra K } 30940c0450dbSRamachandra K if (token != SRP_REV10_IB_IO_CLASS && 30950c0450dbSRamachandra K token != SRP_REV16A_IB_IO_CLASS) { 3096e0bda7d8SBart Van Assche pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", 3097e0bda7d8SBart Van Assche token, SRP_REV10_IB_IO_CLASS, 3098e0bda7d8SBart Van Assche SRP_REV16A_IB_IO_CLASS); 30990c0450dbSRamachandra K goto out; 31000c0450dbSRamachandra K } 31010c0450dbSRamachandra K target->io_class = token; 31020c0450dbSRamachandra K break; 31030c0450dbSRamachandra K 310401cb9bcbSIshai Rabinovitz case SRP_OPT_INITIATOR_EXT: 310501cb9bcbSIshai Rabinovitz p = match_strdup(args); 3106a20f3a6dSIshai Rabinovitz if (!p) { 3107a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3108a20f3a6dSIshai Rabinovitz goto out; 3109a20f3a6dSIshai Rabinovitz } 311001cb9bcbSIshai Rabinovitz target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 311101cb9bcbSIshai Rabinovitz kfree(p); 311201cb9bcbSIshai Rabinovitz break; 311301cb9bcbSIshai Rabinovitz 311449248644SDavid Dillow case SRP_OPT_CMD_SG_ENTRIES: 311549248644SDavid Dillow if (match_int(args, &token) || token < 1 || token > 255) { 3116e0bda7d8SBart Van Assche pr_warn("bad max cmd_sg_entries parameter '%s'\n", 3117e0bda7d8SBart Van Assche p); 311849248644SDavid Dillow goto out; 311949248644SDavid Dillow } 312049248644SDavid Dillow target->cmd_sg_cnt = token; 312149248644SDavid Dillow break; 312249248644SDavid Dillow 3123c07d424dSDavid Dillow case SRP_OPT_ALLOW_EXT_SG: 3124c07d424dSDavid Dillow if (match_int(args, &token)) { 3125e0bda7d8SBart Van Assche pr_warn("bad allow_ext_sg parameter '%s'\n", p); 3126c07d424dSDavid Dillow goto out; 3127c07d424dSDavid Dillow } 3128c07d424dSDavid Dillow target->allow_ext_sg = !!token; 3129c07d424dSDavid Dillow break; 3130c07d424dSDavid Dillow 3131c07d424dSDavid Dillow case SRP_OPT_SG_TABLESIZE: 3132c07d424dSDavid Dillow if (match_int(args, &token) || token < 1 || 3133c07d424dSDavid Dillow token > SCSI_MAX_SG_CHAIN_SEGMENTS) { 3134e0bda7d8SBart Van Assche pr_warn("bad max sg_tablesize parameter '%s'\n", 3135e0bda7d8SBart Van Assche p); 3136c07d424dSDavid Dillow goto out; 3137c07d424dSDavid Dillow } 3138c07d424dSDavid Dillow target->sg_tablesize = token; 3139c07d424dSDavid Dillow break; 3140c07d424dSDavid Dillow 31414b5e5f41SBart Van Assche case SRP_OPT_COMP_VECTOR: 31424b5e5f41SBart Van Assche if (match_int(args, &token) || token < 0) { 31434b5e5f41SBart Van Assche pr_warn("bad comp_vector parameter '%s'\n", p); 31444b5e5f41SBart Van Assche goto out; 31454b5e5f41SBart Van Assche } 31464b5e5f41SBart Van Assche target->comp_vector = token; 31474b5e5f41SBart Van Assche break; 31484b5e5f41SBart Van Assche 31497bb312e4SVu Pham 
case SRP_OPT_TL_RETRY_COUNT: 31507bb312e4SVu Pham if (match_int(args, &token) || token < 2 || token > 7) { 31517bb312e4SVu Pham pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", 31527bb312e4SVu Pham p); 31537bb312e4SVu Pham goto out; 31547bb312e4SVu Pham } 31557bb312e4SVu Pham target->tl_retry_count = token; 31567bb312e4SVu Pham break; 31577bb312e4SVu Pham 3158aef9ec39SRoland Dreier default: 3159e0bda7d8SBart Van Assche pr_warn("unknown parameter or missing value '%s' in target creation request\n", 3160e0bda7d8SBart Van Assche p); 3161aef9ec39SRoland Dreier goto out; 3162aef9ec39SRoland Dreier } 3163aef9ec39SRoland Dreier } 3164aef9ec39SRoland Dreier 3165aef9ec39SRoland Dreier if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 3166aef9ec39SRoland Dreier ret = 0; 3167aef9ec39SRoland Dreier else 3168aef9ec39SRoland Dreier for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 3169aef9ec39SRoland Dreier if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 3170aef9ec39SRoland Dreier !(srp_opt_tokens[i].token & opt_mask)) 3171e0bda7d8SBart Van Assche pr_warn("target creation request is missing parameter '%s'\n", 3172aef9ec39SRoland Dreier srp_opt_tokens[i].pattern); 3173aef9ec39SRoland Dreier 31744d73f95fSBart Van Assche if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue 31754d73f95fSBart Van Assche && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 31764d73f95fSBart Van Assche pr_warn("cmd_per_lun = %d > queue_size = %d\n", 31774d73f95fSBart Van Assche target->scsi_host->cmd_per_lun, 31784d73f95fSBart Van Assche target->scsi_host->can_queue); 31794d73f95fSBart Van Assche 3180aef9ec39SRoland Dreier out: 3181aef9ec39SRoland Dreier kfree(options); 3182aef9ec39SRoland Dreier return ret; 3183aef9ec39SRoland Dreier } 3184aef9ec39SRoland Dreier 3185ee959b00STony Jones static ssize_t srp_create_target(struct device *dev, 3186ee959b00STony Jones struct device_attribute *attr, 3187aef9ec39SRoland Dreier const char *buf, size_t count) 3188aef9ec39SRoland Dreier { 3189aef9ec39SRoland Dreier struct srp_host *host = 3190ee959b00STony Jones container_of(dev, struct srp_host, dev); 3191aef9ec39SRoland Dreier struct Scsi_Host *target_host; 3192aef9ec39SRoland Dreier struct srp_target_port *target; 3193509c07bcSBart Van Assche struct srp_rdma_ch *ch; 3194d1b4289eSBart Van Assche struct srp_device *srp_dev = host->srp_dev; 3195d1b4289eSBart Van Assche struct ib_device *ibdev = srp_dev->dev; 3196d92c0da7SBart Van Assche int ret, node_idx, node, cpu, i; 3197d92c0da7SBart Van Assche bool multich = false; 3198aef9ec39SRoland Dreier 3199aef9ec39SRoland Dreier target_host = scsi_host_alloc(&srp_template, 3200aef9ec39SRoland Dreier sizeof (struct srp_target_port)); 3201aef9ec39SRoland Dreier if (!target_host) 3202aef9ec39SRoland Dreier return -ENOMEM; 3203aef9ec39SRoland Dreier 32043236822bSFUJITA Tomonori target_host->transportt = ib_srp_transport_template; 3205fd1b6c4aSBart Van Assche target_host->max_channel = 0; 3206fd1b6c4aSBart Van Assche target_host->max_id = 1; 3207985aa495SBart Van Assche target_host->max_lun = -1LL; 32083c8edf0eSArne Redlich target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 32095f068992SRoland Dreier 3210aef9ec39SRoland Dreier target = host_to_target(target_host); 3211aef9ec39SRoland Dreier 32120c0450dbSRamachandra K target->io_class = SRP_REV16A_IB_IO_CLASS; 3213aef9ec39SRoland Dreier target->scsi_host = target_host; 3214aef9ec39SRoland Dreier target->srp_host = host; 3215e6bf5f48SJason Gunthorpe target->lkey = host->srp_dev->pd->local_dma_lkey; 
321603f6fb93SBart Van Assche target->global_mr = host->srp_dev->global_mr; 321749248644SDavid Dillow target->cmd_sg_cnt = cmd_sg_entries; 3218c07d424dSDavid Dillow target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3219c07d424dSDavid Dillow target->allow_ext_sg = allow_ext_sg; 32207bb312e4SVu Pham target->tl_retry_count = 7; 32214d73f95fSBart Van Assche target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3222aef9ec39SRoland Dreier 322334aa654eSBart Van Assche /* 322434aa654eSBart Van Assche * Avoid that the SCSI host can be removed by srp_remove_target() 322534aa654eSBart Van Assche * before this function returns. 322634aa654eSBart Van Assche */ 322734aa654eSBart Van Assche scsi_host_get(target->scsi_host); 322834aa654eSBart Van Assche 32292d7091bcSBart Van Assche mutex_lock(&host->add_target_mutex); 32302d7091bcSBart Van Assche 3231aef9ec39SRoland Dreier ret = srp_parse_options(buf, target); 3232aef9ec39SRoland Dreier if (ret) 3233fb49c8bbSBart Van Assche goto out; 3234aef9ec39SRoland Dreier 32354d73f95fSBart Van Assche target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 32364d73f95fSBart Van Assche 323796fc248aSBart Van Assche if (!srp_conn_unique(target->srp_host, target)) { 323896fc248aSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 323996fc248aSBart Van Assche PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", 324096fc248aSBart Van Assche be64_to_cpu(target->id_ext), 324196fc248aSBart Van Assche be64_to_cpu(target->ioc_guid), 324296fc248aSBart Van Assche be64_to_cpu(target->initiator_ext)); 324396fc248aSBart Van Assche ret = -EEXIST; 3244fb49c8bbSBart Van Assche goto out; 324596fc248aSBart Van Assche } 324696fc248aSBart Van Assche 32475cfb1782SBart Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && 3248c07d424dSDavid Dillow target->cmd_sg_cnt < target->sg_tablesize) { 32495cfb1782SBart Van Assche pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3250c07d424dSDavid Dillow target->sg_tablesize = target->cmd_sg_cnt; 3251c07d424dSDavid Dillow } 3252c07d424dSDavid Dillow 3253c07d424dSDavid Dillow target_host->sg_tablesize = target->sg_tablesize; 3254c07d424dSDavid Dillow target->indirect_size = target->sg_tablesize * 3255c07d424dSDavid Dillow sizeof (struct srp_direct_buf); 325649248644SDavid Dillow target->max_iu_len = sizeof (struct srp_cmd) + 325749248644SDavid Dillow sizeof (struct srp_indirect_buf) + 325849248644SDavid Dillow target->cmd_sg_cnt * sizeof (struct srp_direct_buf); 325949248644SDavid Dillow 3260c1120f89SBart Van Assche INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3261ef6c49d8SBart Van Assche INIT_WORK(&target->remove_work, srp_remove_work); 32628f26c9ffSDavid Dillow spin_lock_init(&target->lock); 326355ee3ab2SMatan Barak ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL); 32642088ca66SSagi Grimberg if (ret) 3265fb49c8bbSBart Van Assche goto out; 3266d92c0da7SBart Van Assche 3267d92c0da7SBart Van Assche ret = -ENOMEM; 3268d92c0da7SBart Van Assche target->ch_count = max_t(unsigned, num_online_nodes(), 3269d92c0da7SBart Van Assche min(ch_count ? 
: 3270d92c0da7SBart Van Assche min(4 * num_online_nodes(), 3271d92c0da7SBart Van Assche ibdev->num_comp_vectors), 3272d92c0da7SBart Van Assche num_online_cpus())); 3273d92c0da7SBart Van Assche target->ch = kcalloc(target->ch_count, sizeof(*target->ch), 3274d92c0da7SBart Van Assche GFP_KERNEL); 3275d92c0da7SBart Van Assche if (!target->ch) 3276fb49c8bbSBart Van Assche goto out; 3277d92c0da7SBart Van Assche 3278d92c0da7SBart Van Assche node_idx = 0; 3279d92c0da7SBart Van Assche for_each_online_node(node) { 3280d92c0da7SBart Van Assche const int ch_start = (node_idx * target->ch_count / 3281d92c0da7SBart Van Assche num_online_nodes()); 3282d92c0da7SBart Van Assche const int ch_end = ((node_idx + 1) * target->ch_count / 3283d92c0da7SBart Van Assche num_online_nodes()); 3284d92c0da7SBart Van Assche const int cv_start = (node_idx * ibdev->num_comp_vectors / 3285d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3286d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3287d92c0da7SBart Van Assche const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / 3288d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3289d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3290d92c0da7SBart Van Assche int cpu_idx = 0; 3291d92c0da7SBart Van Assche 3292d92c0da7SBart Van Assche for_each_online_cpu(cpu) { 3293d92c0da7SBart Van Assche if (cpu_to_node(cpu) != node) 3294d92c0da7SBart Van Assche continue; 3295d92c0da7SBart Van Assche if (ch_start + cpu_idx >= ch_end) 3296d92c0da7SBart Van Assche continue; 3297d92c0da7SBart Van Assche ch = &target->ch[ch_start + cpu_idx]; 3298d92c0da7SBart Van Assche ch->target = target; 3299d92c0da7SBart Van Assche ch->comp_vector = cv_start == cv_end ? cv_start : 3300d92c0da7SBart Van Assche cv_start + cpu_idx % (cv_end - cv_start); 3301d92c0da7SBart Van Assche spin_lock_init(&ch->lock); 3302d92c0da7SBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 3303d92c0da7SBart Van Assche ret = srp_new_cm_id(ch); 3304d92c0da7SBart Van Assche if (ret) 3305d92c0da7SBart Van Assche goto err_disconnect; 3306aef9ec39SRoland Dreier 3307509c07bcSBart Van Assche ret = srp_create_ch_ib(ch); 3308aef9ec39SRoland Dreier if (ret) 3309d92c0da7SBart Van Assche goto err_disconnect; 3310aef9ec39SRoland Dreier 3311d92c0da7SBart Van Assche ret = srp_alloc_req_data(ch); 33129fe4bcf4SDavid Dillow if (ret) 3313d92c0da7SBart Van Assche goto err_disconnect; 3314aef9ec39SRoland Dreier 3315d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 3316aef9ec39SRoland Dreier if (ret) { 33177aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 3318d92c0da7SBart Van Assche PFX "Connection %d/%d failed\n", 3319d92c0da7SBart Van Assche ch_start + cpu_idx, 3320d92c0da7SBart Van Assche target->ch_count); 3321d92c0da7SBart Van Assche if (node_idx == 0 && cpu_idx == 0) { 3322d92c0da7SBart Van Assche goto err_disconnect; 3323d92c0da7SBart Van Assche } else { 3324d92c0da7SBart Van Assche srp_free_ch_ib(target, ch); 3325d92c0da7SBart Van Assche srp_free_req_data(target, ch); 3326d92c0da7SBart Van Assche target->ch_count = ch - target->ch; 3327c257ea6fSBart Van Assche goto connected; 3328aef9ec39SRoland Dreier } 3329d92c0da7SBart Van Assche } 3330d92c0da7SBart Van Assche 3331d92c0da7SBart Van Assche multich = true; 3332d92c0da7SBart Van Assche cpu_idx++; 3333d92c0da7SBart Van Assche } 3334d92c0da7SBart Van Assche node_idx++; 3335d92c0da7SBart Van Assche } 3336d92c0da7SBart Van Assche 3337c257ea6fSBart Van Assche connected: 3338d92c0da7SBart Van Assche target->scsi_host->nr_hw_queues = 
connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

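/*
 * Each srp_host registered below exposes three sysfs attributes per HCA port:
 * "add_target" (write-only), "ibdev" and "port" (read-only). A new target
 * login is triggered by writing a parameter string to add_target; the values
 * in this sketch are illustrative only and depend on the fabric:
 *
 *   echo id_ext=0x200100e08b000000,ioc_guid=0x00066a0098000000,\
 *        dgid=fe80:0000:0000:0000:0002:c903:000e:8acd,pkey=ffff,\
 *        service_id=0x200100e08b000000 \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */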
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
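
	/*
	 * Registration mode selected above: fast registration (FR) is used
	 * when the HCA advertises IB_DEVICE_MEM_MGT_EXTENSIONS and either FMR
	 * is unavailable or the prefer_fr module parameter is set; otherwise
	 * FMR is used if available. When neither is supported, only the
	 * global DMA MR allocated further down can be used for data transfers.
	 */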

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

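/*
 * Teardown order in srp_remove_one() below: unregistering the port device
 * and waiting for host->released first guarantees that no new targets can be
 * added through sysfs while the existing targets are queued for removal;
 * flushing system_long_wq and srp_remove_wq afterwards makes sure all
 * transport-error and removal work has finished before the host and device
 * structures are freed.
 */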
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

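/*
 * srp_init_module() sets things up in this order: the srp_remove workqueue,
 * the SRP transport template, the infiniband_srp device class, the SA client
 * and finally the IB client. The error labels below unwind whatever had been
 * set up, in reverse order, and srp_cleanup_module() tears everything down
 * the same way.
 */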
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);