/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

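/*
 * Parameter ops shared by the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters declared above.
 */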
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

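/*
 * Transition a newly created QP to the INIT state: look up the P_Key index
 * for the target port and set the QP access flags and port number.
 */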
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size = target->scsi_host->can_queue;
	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
	fmr_param.cache = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift = ilog2(dev->mr_page_size);
	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
	if (ret)
		goto out;

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

out:
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = dev->use_fast_reg ? 3 : 1;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

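	/*
	 * Release any resources from a previous invocation before installing
	 * the newly created QP, CQs and memory registration pool.
	 */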
	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	srp_destroy_qp(ch);
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

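/*
 * Build an SRP_LOGIN_REQ information unit and send it to the target by
 * posting an IB CM REQ via the channel's CM ID.
 */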
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &ch->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = ch->qp->qp_num;
	req->param.qp_type = ch->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
			       SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id, &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

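/*
 * Mark every channel of @target as disconnected and send a CM DREQ on each
 * channel that still has a CM ID.
 */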
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist before
 * invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

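/*
 * Work queue callback that removes a target port once its state has been set
 * to SRP_TARGET_REMOVED.
 */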
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

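/*
 * Post a local invalidate work request for @rkey on the channel's queue pair.
 */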
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
110722032991SBart Van Assche * 110822032991SBart Van Assche * Return value: 110922032991SBart Van Assche * Either NULL or a pointer to the SCSI command the caller became owner of. 111022032991SBart Van Assche */ 1111509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, 111222032991SBart Van Assche struct srp_request *req, 1113b3fe628dSBart Van Assche struct scsi_device *sdev, 111422032991SBart Van Assche struct scsi_cmnd *scmnd) 1115526b4caaSIshai Rabinovitz { 111694a9174cSBart Van Assche unsigned long flags; 111794a9174cSBart Van Assche 1118509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1119b3fe628dSBart Van Assche if (req->scmnd && 1120b3fe628dSBart Van Assche (!sdev || req->scmnd->device == sdev) && 1121b3fe628dSBart Van Assche (!scmnd || req->scmnd == scmnd)) { 112222032991SBart Van Assche scmnd = req->scmnd; 112322032991SBart Van Assche req->scmnd = NULL; 112422032991SBart Van Assche } else { 112522032991SBart Van Assche scmnd = NULL; 112622032991SBart Van Assche } 1127509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 112822032991SBart Van Assche 112922032991SBart Van Assche return scmnd; 113022032991SBart Van Assche } 113122032991SBart Van Assche 113222032991SBart Van Assche /** 113322032991SBart Van Assche * srp_free_req() - Unmap data and add request to the free request list. 1134509c07bcSBart Van Assche * @ch: SRP RDMA channel. 1135af24663bSBart Van Assche * @req: Request to be freed. 1136af24663bSBart Van Assche * @scmnd: SCSI command associated with @req. 1137af24663bSBart Van Assche * @req_lim_delta: Amount to be added to @target->req_lim. 113822032991SBart Van Assche */ 1139509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1140509c07bcSBart Van Assche struct scsi_cmnd *scmnd, s32 req_lim_delta) 114122032991SBart Van Assche { 114222032991SBart Van Assche unsigned long flags; 114322032991SBart Van Assche 1144509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 114522032991SBart Van Assche 1146509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1147509c07bcSBart Van Assche ch->req_lim += req_lim_delta; 1148509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 1149526b4caaSIshai Rabinovitz } 1150526b4caaSIshai Rabinovitz 1151509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1152509c07bcSBart Van Assche struct scsi_device *sdev, int result) 1153526b4caaSIshai Rabinovitz { 1154509c07bcSBart Van Assche struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 115522032991SBart Van Assche 115622032991SBart Van Assche if (scmnd) { 1157509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 1158ed9b2264SBart Van Assche scmnd->result = result; 115922032991SBart Van Assche scmnd->scsi_done(scmnd); 116022032991SBart Van Assche } 1161526b4caaSIshai Rabinovitz } 1162526b4caaSIshai Rabinovitz 1163ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport) 1164aef9ec39SRoland Dreier { 1165ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1166d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1167b3fe628dSBart Van Assche struct Scsi_Host *shost = target->scsi_host; 1168b3fe628dSBart Van Assche struct scsi_device *sdev; 1169d92c0da7SBart Van Assche int i, j; 1170aef9ec39SRoland Dreier 1171b3fe628dSBart Van Assche /* 1172b3fe628dSBart Van Assche * Invoking srp_terminate_io() while srp_queuecommand() is running 1173b3fe628dSBart Van Assche * is not safe. 
Hence the warning statement below. 1174b3fe628dSBart Van Assche */ 1175b3fe628dSBart Van Assche shost_for_each_device(sdev, shost) 1176b3fe628dSBart Van Assche WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1177b3fe628dSBart Van Assche 1178d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1179d92c0da7SBart Van Assche ch = &target->ch[i]; 1180509c07bcSBart Van Assche 1181d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1182d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1183d92c0da7SBart Van Assche 1184d92c0da7SBart Van Assche srp_finish_req(ch, req, NULL, 1185d92c0da7SBart Van Assche DID_TRANSPORT_FAILFAST << 16); 1186d92c0da7SBart Van Assche } 1187ed9b2264SBart Van Assche } 1188ed9b2264SBart Van Assche } 1189ed9b2264SBart Van Assche 1190ed9b2264SBart Van Assche /* 1191ed9b2264SBart Van Assche * It is up to the caller to ensure that srp_rport_reconnect() calls are 1192ed9b2264SBart Van Assche * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1193ed9b2264SBart Van Assche * srp_reset_device() or srp_reset_host() calls will occur while this function 1194ed9b2264SBart Van Assche * is in progress. One way to realize that is not to call this function 1195ed9b2264SBart Van Assche * directly but to call srp_reconnect_rport() instead since that last function 1196ed9b2264SBart Van Assche * serializes calls of this function via rport->mutex and also blocks 1197ed9b2264SBart Van Assche * srp_queuecommand() calls before invoking this function. 1198ed9b2264SBart Van Assche */ 1199ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport) 1200ed9b2264SBart Van Assche { 1201ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1202d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1203d92c0da7SBart Van Assche int i, j, ret = 0; 1204d92c0da7SBart Van Assche bool multich = false; 120509be70a2SBart Van Assche 1206aef9ec39SRoland Dreier srp_disconnect_target(target); 120734aa654eSBart Van Assche 120834aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 120934aa654eSBart Van Assche return -ENODEV; 121034aa654eSBart Van Assche 1211aef9ec39SRoland Dreier /* 1212c7c4e7ffSBart Van Assche * Now get a new local CM ID so that we avoid confusing the target in 1213c7c4e7ffSBart Van Assche * case things are really fouled up. Doing so also ensures that all CM 1214c7c4e7ffSBart Van Assche * callbacks will have finished before a new QP is allocated. 1215aef9ec39SRoland Dreier */ 1216d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1217d92c0da7SBart Van Assche ch = &target->ch[i]; 1218d92c0da7SBart Van Assche ret += srp_new_cm_id(ch); 1219d92c0da7SBart Van Assche } 1220d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1221d92c0da7SBart Van Assche ch = &target->ch[i]; 1222d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1223d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1224509c07bcSBart Van Assche 1225509c07bcSBart Van Assche srp_finish_req(ch, req, NULL, DID_RESET << 16); 1226536ae14eSBart Van Assche } 1227d92c0da7SBart Van Assche } 1228d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1229d92c0da7SBart Van Assche ch = &target->ch[i]; 12305cfb1782SBart Van Assche /* 12315cfb1782SBart Van Assche * Whether or not creating a new CM ID succeeded, create a new 1232d92c0da7SBart Van Assche * QP. 
This guarantees that all completion callback function 1233d92c0da7SBart Van Assche * invocations have finished before request resetting starts. 12345cfb1782SBart Van Assche */ 1235509c07bcSBart Van Assche ret += srp_create_ch_ib(ch); 12365cfb1782SBart Van Assche 1237509c07bcSBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 1238d92c0da7SBart Van Assche for (j = 0; j < target->queue_size; ++j) 1239d92c0da7SBart Van Assche list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1240d92c0da7SBart Van Assche } 12418de9fe3aSBart Van Assche 12428de9fe3aSBart Van Assche target->qp_in_error = false; 12438de9fe3aSBart Van Assche 1244d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1245d92c0da7SBart Van Assche ch = &target->ch[i]; 1246bbac5ccfSBart Van Assche if (ret) 1247d92c0da7SBart Van Assche break; 1248d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 1249d92c0da7SBart Van Assche multich = true; 1250d92c0da7SBart Van Assche } 125109be70a2SBart Van Assche 1252ed9b2264SBart Van Assche if (ret == 0) 1253ed9b2264SBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 1254ed9b2264SBart Van Assche PFX "reconnect succeeded\n"); 1255aef9ec39SRoland Dreier 1256aef9ec39SRoland Dreier return ret; 1257aef9ec39SRoland Dreier } 1258aef9ec39SRoland Dreier 12598f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 12608f26c9ffSDavid Dillow unsigned int dma_len, u32 rkey) 1261f5358a17SRoland Dreier { 12628f26c9ffSDavid Dillow struct srp_direct_buf *desc = state->desc; 12638f26c9ffSDavid Dillow 12643ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 12653ae95da8SBart Van Assche 12668f26c9ffSDavid Dillow desc->va = cpu_to_be64(dma_addr); 12678f26c9ffSDavid Dillow desc->key = cpu_to_be32(rkey); 12688f26c9ffSDavid Dillow desc->len = cpu_to_be32(dma_len); 12698f26c9ffSDavid Dillow 12708f26c9ffSDavid Dillow state->total_len += dma_len; 12718f26c9ffSDavid Dillow state->desc++; 12728f26c9ffSDavid Dillow state->ndesc++; 12738f26c9ffSDavid Dillow } 12748f26c9ffSDavid Dillow 12758f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state, 1276509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12778f26c9ffSDavid Dillow { 1278186fbc66SBart Van Assche struct srp_target_port *target = ch->target; 1279186fbc66SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 12808f26c9ffSDavid Dillow struct ib_pool_fmr *fmr; 1281f5358a17SRoland Dreier u64 io_addr = 0; 12828f26c9ffSDavid Dillow 1283f731ed62SBart Van Assche if (state->fmr.next >= state->fmr.end) 1284f731ed62SBart Van Assche return -ENOMEM; 1285f731ed62SBart Van Assche 128626630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fmr); 128726630e8aSSagi Grimberg 128826630e8aSSagi Grimberg if (state->npages == 0) 128926630e8aSSagi Grimberg return 0; 129026630e8aSSagi Grimberg 129126630e8aSSagi Grimberg if (state->npages == 1 && target->global_mr) { 129226630e8aSSagi Grimberg srp_map_desc(state, state->base_dma_addr, state->dma_len, 129326630e8aSSagi Grimberg target->global_mr->rkey); 129426630e8aSSagi Grimberg goto reset_state; 129526630e8aSSagi Grimberg } 129626630e8aSSagi Grimberg 1297509c07bcSBart Van Assche fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, 12988f26c9ffSDavid Dillow state->npages, io_addr); 12998f26c9ffSDavid Dillow if (IS_ERR(fmr)) 13008f26c9ffSDavid Dillow return PTR_ERR(fmr); 13018f26c9ffSDavid Dillow 1302f731ed62SBart Van Assche *state->fmr.next++ = fmr; 130352ede08fSBart Van Assche state->nmdesc++; 13048f26c9ffSDavid Dillow 1305186fbc66SBart Van Assche 
srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, 1306186fbc66SBart Van Assche state->dma_len, fmr->fmr->rkey); 1307539dde6fSBart Van Assche 130826630e8aSSagi Grimberg reset_state: 130926630e8aSSagi Grimberg state->npages = 0; 131026630e8aSSagi Grimberg state->dma_len = 0; 131126630e8aSSagi Grimberg 13128f26c9ffSDavid Dillow return 0; 13138f26c9ffSDavid Dillow } 13148f26c9ffSDavid Dillow 13155cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state, 1316509c07bcSBart Van Assche struct srp_rdma_ch *ch) 13175cfb1782SBart Van Assche { 1318509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 13195cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 13205cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 1321f7f7aab1SSagi Grimberg struct ib_reg_wr wr; 13225cfb1782SBart Van Assche struct srp_fr_desc *desc; 13235cfb1782SBart Van Assche u32 rkey; 1324f7f7aab1SSagi Grimberg int n, err; 13255cfb1782SBart Van Assche 1326f731ed62SBart Van Assche if (state->fr.next >= state->fr.end) 1327f731ed62SBart Van Assche return -ENOMEM; 1328f731ed62SBart Van Assche 132926630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fast_reg); 133026630e8aSSagi Grimberg 1331f7f7aab1SSagi Grimberg if (state->sg_nents == 0) 133226630e8aSSagi Grimberg return 0; 133326630e8aSSagi Grimberg 1334f7f7aab1SSagi Grimberg if (state->sg_nents == 1 && target->global_mr) { 1335f7f7aab1SSagi Grimberg srp_map_desc(state, sg_dma_address(state->sg), 1336f7f7aab1SSagi Grimberg sg_dma_len(state->sg), 133726630e8aSSagi Grimberg target->global_mr->rkey); 1338f7f7aab1SSagi Grimberg return 1; 133926630e8aSSagi Grimberg } 134026630e8aSSagi Grimberg 1341509c07bcSBart Van Assche desc = srp_fr_pool_get(ch->fr_pool); 13425cfb1782SBart Van Assche if (!desc) 13435cfb1782SBart Van Assche return -ENOMEM; 13445cfb1782SBart Van Assche 13455cfb1782SBart Van Assche rkey = ib_inc_rkey(desc->mr->rkey); 13465cfb1782SBart Van Assche ib_update_fast_reg_key(desc->mr, rkey); 13475cfb1782SBart Van Assche 1348f7f7aab1SSagi Grimberg n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents, 1349f7f7aab1SSagi Grimberg dev->mr_page_size); 1350f7f7aab1SSagi Grimberg if (unlikely(n < 0)) 1351f7f7aab1SSagi Grimberg return n; 13525cfb1782SBart Van Assche 1353f7f7aab1SSagi Grimberg wr.wr.next = NULL; 1354f7f7aab1SSagi Grimberg wr.wr.opcode = IB_WR_REG_MR; 1355e622f2f4SChristoph Hellwig wr.wr.wr_id = FAST_REG_WR_ID_MASK; 1356f7f7aab1SSagi Grimberg wr.wr.num_sge = 0; 1357f7f7aab1SSagi Grimberg wr.wr.send_flags = 0; 1358f7f7aab1SSagi Grimberg wr.mr = desc->mr; 1359f7f7aab1SSagi Grimberg wr.key = desc->mr->rkey; 1360f7f7aab1SSagi Grimberg wr.access = (IB_ACCESS_LOCAL_WRITE | 13615cfb1782SBart Van Assche IB_ACCESS_REMOTE_READ | 13625cfb1782SBart Van Assche IB_ACCESS_REMOTE_WRITE); 13635cfb1782SBart Van Assche 1364f731ed62SBart Van Assche *state->fr.next++ = desc; 13655cfb1782SBart Van Assche state->nmdesc++; 13665cfb1782SBart Van Assche 1367f7f7aab1SSagi Grimberg srp_map_desc(state, desc->mr->iova, 1368f7f7aab1SSagi Grimberg desc->mr->length, desc->mr->rkey); 13695cfb1782SBart Van Assche 137026630e8aSSagi Grimberg err = ib_post_send(ch->qp, &wr.wr, &bad_wr); 1371f7f7aab1SSagi Grimberg if (unlikely(err)) 137226630e8aSSagi Grimberg return err; 137326630e8aSSagi Grimberg 1374f7f7aab1SSagi Grimberg return n; 13755cfb1782SBart Van Assche } 13765cfb1782SBart Van Assche 13778f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state, 1378509c07bcSBart Van Assche struct srp_rdma_ch *ch, 13793ae95da8SBart 
Van Assche struct scatterlist *sg, int sg_index) 13808f26c9ffSDavid Dillow { 1381509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 138205321937SGreg Kroah-Hartman struct srp_device *dev = target->srp_host->srp_dev; 138385507bccSRalph Campbell struct ib_device *ibdev = dev->dev; 13848f26c9ffSDavid Dillow dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1385bb350d1dSFUJITA Tomonori unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 13863ae95da8SBart Van Assche unsigned int len = 0; 13878f26c9ffSDavid Dillow int ret; 138885507bccSRalph Campbell 13893ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 1390f5358a17SRoland Dreier 13918f26c9ffSDavid Dillow while (dma_len) { 13925cfb1782SBart Van Assche unsigned offset = dma_addr & ~dev->mr_page_mask; 13935cfb1782SBart Van Assche if (state->npages == dev->max_pages_per_mr || offset != 0) { 1394f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 13958f26c9ffSDavid Dillow if (ret) 13968f26c9ffSDavid Dillow return ret; 139785507bccSRalph Campbell } 1398f5358a17SRoland Dreier 13995cfb1782SBart Van Assche len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); 14008f26c9ffSDavid Dillow 14018f26c9ffSDavid Dillow if (!state->npages) 14028f26c9ffSDavid Dillow state->base_dma_addr = dma_addr; 14035cfb1782SBart Van Assche state->pages[state->npages++] = dma_addr & dev->mr_page_mask; 140452ede08fSBart Van Assche state->dma_len += len; 14058f26c9ffSDavid Dillow dma_addr += len; 14068f26c9ffSDavid Dillow dma_len -= len; 1407f5358a17SRoland Dreier } 1408f5358a17SRoland Dreier 14095cfb1782SBart Van Assche /* 14105cfb1782SBart Van Assche * If the last entry of the MR wasn't a full page, then we need to 14118f26c9ffSDavid Dillow * close it out and start a new one -- we can only merge at page 14128f26c9ffSDavid Dillow * boundries. 
14138f26c9ffSDavid Dillow */ 1414f5358a17SRoland Dreier ret = 0; 14150e0d3a48SBart Van Assche if (len != dev->mr_page_size) 1416f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 1417f5358a17SRoland Dreier return ret; 1418f5358a17SRoland Dreier } 1419f5358a17SRoland Dreier 142026630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch, 142126630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 142226630e8aSSagi Grimberg int count) 142326630e8aSSagi Grimberg { 142426630e8aSSagi Grimberg struct scatterlist *sg; 142526630e8aSSagi Grimberg int i, ret; 142626630e8aSSagi Grimberg 142726630e8aSSagi Grimberg state->desc = req->indirect_desc; 142826630e8aSSagi Grimberg state->pages = req->map_page; 142926630e8aSSagi Grimberg state->fmr.next = req->fmr_list; 143026630e8aSSagi Grimberg state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt; 143126630e8aSSagi Grimberg 143226630e8aSSagi Grimberg for_each_sg(scat, sg, count, i) { 143326630e8aSSagi Grimberg ret = srp_map_sg_entry(state, ch, sg, i); 143426630e8aSSagi Grimberg if (ret) 143526630e8aSSagi Grimberg return ret; 143626630e8aSSagi Grimberg } 143726630e8aSSagi Grimberg 1438f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 143926630e8aSSagi Grimberg if (ret) 144026630e8aSSagi Grimberg return ret; 144126630e8aSSagi Grimberg 144226630e8aSSagi Grimberg req->nmdesc = state->nmdesc; 144326630e8aSSagi Grimberg 144426630e8aSSagi Grimberg return 0; 144526630e8aSSagi Grimberg } 144626630e8aSSagi Grimberg 144726630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, 144826630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 144926630e8aSSagi Grimberg int count) 145026630e8aSSagi Grimberg { 145126630e8aSSagi Grimberg state->desc = req->indirect_desc; 1452f7f7aab1SSagi Grimberg state->fr.next = req->fr_list; 1453f7f7aab1SSagi Grimberg state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; 1454f7f7aab1SSagi Grimberg state->sg = scat; 1455f7f7aab1SSagi Grimberg state->sg_nents = scsi_sg_count(req->scmnd); 145626630e8aSSagi Grimberg 1457f7f7aab1SSagi Grimberg while (state->sg_nents) { 1458f7f7aab1SSagi Grimberg int i, n; 1459f7f7aab1SSagi Grimberg 1460f7f7aab1SSagi Grimberg n = srp_map_finish_fr(state, ch); 1461f7f7aab1SSagi Grimberg if (unlikely(n < 0)) 1462f7f7aab1SSagi Grimberg return n; 1463f7f7aab1SSagi Grimberg 1464f7f7aab1SSagi Grimberg state->sg_nents -= n; 1465f7f7aab1SSagi Grimberg for (i = 0; i < n; i++) 1466f7f7aab1SSagi Grimberg state->sg = sg_next(state->sg); 146726630e8aSSagi Grimberg } 146826630e8aSSagi Grimberg 146926630e8aSSagi Grimberg req->nmdesc = state->nmdesc; 147026630e8aSSagi Grimberg 147126630e8aSSagi Grimberg return 0; 147226630e8aSSagi Grimberg } 147326630e8aSSagi Grimberg 147426630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, 1475509c07bcSBart Van Assche struct srp_request *req, struct scatterlist *scat, 1476509c07bcSBart Van Assche int count) 147776bc1e1dSBart Van Assche { 1478509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 147976bc1e1dSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 148076bc1e1dSBart Van Assche struct scatterlist *sg; 148126630e8aSSagi Grimberg int i; 148276bc1e1dSBart Van Assche 148376bc1e1dSBart Van Assche state->desc = req->indirect_desc; 14843ae95da8SBart Van Assche for_each_sg(scat, sg, count, i) { 14853ae95da8SBart Van Assche srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 
148603f6fb93SBart Van Assche ib_sg_dma_len(dev->dev, sg), 148703f6fb93SBart Van Assche target->global_mr->rkey); 14883ae95da8SBart Van Assche } 148976bc1e1dSBart Van Assche 149052ede08fSBart Van Assche req->nmdesc = state->nmdesc; 14915cfb1782SBart Van Assche 149226630e8aSSagi Grimberg return 0; 149376bc1e1dSBart Van Assche } 149476bc1e1dSBart Van Assche 1495330179f2SBart Van Assche /* 1496330179f2SBart Van Assche * Register the indirect data buffer descriptor with the HCA. 1497330179f2SBart Van Assche * 1498330179f2SBart Van Assche * Note: since the indirect data buffer descriptor has been allocated with 1499330179f2SBart Van Assche * kmalloc() it is guaranteed that this buffer is a physically contiguous 1500330179f2SBart Van Assche * memory buffer. 1501330179f2SBart Van Assche */ 1502330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, 1503330179f2SBart Van Assche void **next_mr, void **end_mr, u32 idb_len, 1504330179f2SBart Van Assche __be32 *idb_rkey) 1505330179f2SBart Van Assche { 1506330179f2SBart Van Assche struct srp_target_port *target = ch->target; 1507330179f2SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 1508330179f2SBart Van Assche struct srp_map_state state; 1509330179f2SBart Van Assche struct srp_direct_buf idb_desc; 1510330179f2SBart Van Assche u64 idb_pages[1]; 1511f7f7aab1SSagi Grimberg struct scatterlist idb_sg[1]; 1512330179f2SBart Van Assche int ret; 1513330179f2SBart Van Assche 1514330179f2SBart Van Assche memset(&state, 0, sizeof(state)); 1515330179f2SBart Van Assche memset(&idb_desc, 0, sizeof(idb_desc)); 1516330179f2SBart Van Assche state.gen.next = next_mr; 1517330179f2SBart Van Assche state.gen.end = end_mr; 1518330179f2SBart Van Assche state.desc = &idb_desc; 1519f7f7aab1SSagi Grimberg state.base_dma_addr = req->indirect_dma_addr; 1520f7f7aab1SSagi Grimberg state.dma_len = idb_len; 1521f7f7aab1SSagi Grimberg 1522f7f7aab1SSagi Grimberg if (dev->use_fast_reg) { 1523f7f7aab1SSagi Grimberg state.sg = idb_sg; 1524f7f7aab1SSagi Grimberg state.sg_nents = 1; 1525f7f7aab1SSagi Grimberg sg_set_buf(idb_sg, req->indirect_desc, idb_len); 1526f7f7aab1SSagi Grimberg idb_sg->dma_address = req->indirect_dma_addr; /* hack! 
*/ 1527fc925518SChristoph Hellwig #ifdef CONFIG_NEED_SG_DMA_LENGTH 1528fc925518SChristoph Hellwig idb_sg->dma_length = idb_sg->length; /* hack^2 */ 1529fc925518SChristoph Hellwig #endif 1530f7f7aab1SSagi Grimberg ret = srp_map_finish_fr(&state, ch); 1531f7f7aab1SSagi Grimberg if (ret < 0) 1532f7f7aab1SSagi Grimberg return ret; 1533f7f7aab1SSagi Grimberg } else if (dev->use_fmr) { 1534330179f2SBart Van Assche state.pages = idb_pages; 1535330179f2SBart Van Assche state.pages[0] = (req->indirect_dma_addr & 1536330179f2SBart Van Assche dev->mr_page_mask); 1537330179f2SBart Van Assche state.npages = 1; 1538f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(&state, ch); 1539330179f2SBart Van Assche if (ret < 0) 1540f7f7aab1SSagi Grimberg return ret; 1541f7f7aab1SSagi Grimberg } else { 1542f7f7aab1SSagi Grimberg return -EINVAL; 1543f7f7aab1SSagi Grimberg } 1544330179f2SBart Van Assche 1545330179f2SBart Van Assche *idb_rkey = idb_desc.key; 1546330179f2SBart Van Assche 1547f7f7aab1SSagi Grimberg return 0; 1548330179f2SBart Van Assche } 1549330179f2SBart Van Assche 1550509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1551aef9ec39SRoland Dreier struct srp_request *req) 1552aef9ec39SRoland Dreier { 1553509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 155476bc1e1dSBart Van Assche struct scatterlist *scat; 1555aef9ec39SRoland Dreier struct srp_cmd *cmd = req->cmd->buf; 1556330179f2SBart Van Assche int len, nents, count, ret; 155785507bccSRalph Campbell struct srp_device *dev; 155885507bccSRalph Campbell struct ib_device *ibdev; 15598f26c9ffSDavid Dillow struct srp_map_state state; 15608f26c9ffSDavid Dillow struct srp_indirect_buf *indirect_hdr; 1561330179f2SBart Van Assche u32 idb_len, table_len; 1562330179f2SBart Van Assche __be32 idb_rkey; 15638f26c9ffSDavid Dillow u8 fmt; 1564aef9ec39SRoland Dreier 1565bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 1566aef9ec39SRoland Dreier return sizeof (struct srp_cmd); 1567aef9ec39SRoland Dreier 1568aef9ec39SRoland Dreier if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1569aef9ec39SRoland Dreier scmnd->sc_data_direction != DMA_TO_DEVICE) { 15707aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 15717aa54bd7SDavid Dillow PFX "Unhandled data direction %d\n", 1572aef9ec39SRoland Dreier scmnd->sc_data_direction); 1573aef9ec39SRoland Dreier return -EINVAL; 1574aef9ec39SRoland Dreier } 1575aef9ec39SRoland Dreier 1576bb350d1dSFUJITA Tomonori nents = scsi_sg_count(scmnd); 1577bb350d1dSFUJITA Tomonori scat = scsi_sglist(scmnd); 1578aef9ec39SRoland Dreier 157905321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev; 158085507bccSRalph Campbell ibdev = dev->dev; 158185507bccSRalph Campbell 158285507bccSRalph Campbell count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 15838f26c9ffSDavid Dillow if (unlikely(count == 0)) 15848f26c9ffSDavid Dillow return -EIO; 1585aef9ec39SRoland Dreier 1586aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_DIRECT; 1587f5358a17SRoland Dreier len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1588f5358a17SRoland Dreier 158903f6fb93SBart Van Assche if (count == 1 && target->global_mr) { 1590f5358a17SRoland Dreier /* 1591f5358a17SRoland Dreier * The midlayer only generated a single gather/scatter 1592f5358a17SRoland Dreier * entry, or DMA mapping coalesced everything to a 1593f5358a17SRoland Dreier * single entry. 
So a direct descriptor along with 1594f5358a17SRoland Dreier * the DMA MR suffices. 1595f5358a17SRoland Dreier */ 1596f5358a17SRoland Dreier struct srp_direct_buf *buf = (void *) cmd->add_data; 1597aef9ec39SRoland Dreier 159885507bccSRalph Campbell buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 159903f6fb93SBart Van Assche buf->key = cpu_to_be32(target->global_mr->rkey); 160085507bccSRalph Campbell buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 16018f26c9ffSDavid Dillow 160252ede08fSBart Van Assche req->nmdesc = 0; 16038f26c9ffSDavid Dillow goto map_complete; 16048f26c9ffSDavid Dillow } 16058f26c9ffSDavid Dillow 16065cfb1782SBart Van Assche /* 16075cfb1782SBart Van Assche * We have more than one scatter/gather entry, so build our indirect 16085cfb1782SBart Van Assche * descriptor table, trying to merge as many entries as we can. 1609f5358a17SRoland Dreier */ 16108f26c9ffSDavid Dillow indirect_hdr = (void *) cmd->add_data; 16118f26c9ffSDavid Dillow 1612c07d424dSDavid Dillow ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1613c07d424dSDavid Dillow target->indirect_size, DMA_TO_DEVICE); 1614c07d424dSDavid Dillow 16158f26c9ffSDavid Dillow memset(&state, 0, sizeof(state)); 161626630e8aSSagi Grimberg if (dev->use_fast_reg) 161726630e8aSSagi Grimberg srp_map_sg_fr(&state, ch, req, scat, count); 161826630e8aSSagi Grimberg else if (dev->use_fmr) 161926630e8aSSagi Grimberg srp_map_sg_fmr(&state, ch, req, scat, count); 162026630e8aSSagi Grimberg else 162126630e8aSSagi Grimberg srp_map_sg_dma(&state, ch, req, scat, count); 16228f26c9ffSDavid Dillow 1623c07d424dSDavid Dillow /* We've mapped the request, now pull as much of the indirect 1624c07d424dSDavid Dillow * descriptor table as we can into the command buffer. If this 1625c07d424dSDavid Dillow * target is not using an external indirect table, we are 1626c07d424dSDavid Dillow * guaranteed to fit into the command, as the SCSI layer won't 1627c07d424dSDavid Dillow * give us more S/G entries than we allow. 16288f26c9ffSDavid Dillow */ 16298f26c9ffSDavid Dillow if (state.ndesc == 1) { 16305cfb1782SBart Van Assche /* 16315cfb1782SBart Van Assche * Memory registration collapsed the sg-list into one entry, 16328f26c9ffSDavid Dillow * so use a direct descriptor. 
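	 * (The command then keeps the SRP_DATA_DESC_DIRECT format and the
	 * minimal length chosen above, i.e. sizeof(struct srp_cmd) plus a
	 * single struct srp_direct_buf.)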
16338f26c9ffSDavid Dillow */ 16348f26c9ffSDavid Dillow struct srp_direct_buf *buf = (void *) cmd->add_data; 16358f26c9ffSDavid Dillow 1636c07d424dSDavid Dillow *buf = req->indirect_desc[0]; 16378f26c9ffSDavid Dillow goto map_complete; 16388f26c9ffSDavid Dillow } 16398f26c9ffSDavid Dillow 1640c07d424dSDavid Dillow if (unlikely(target->cmd_sg_cnt < state.ndesc && 1641c07d424dSDavid Dillow !target->allow_ext_sg)) { 1642c07d424dSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1643c07d424dSDavid Dillow "Could not fit S/G list into SRP_CMD\n"); 1644c07d424dSDavid Dillow return -EIO; 1645c07d424dSDavid Dillow } 1646c07d424dSDavid Dillow 1647c07d424dSDavid Dillow count = min(state.ndesc, target->cmd_sg_cnt); 16488f26c9ffSDavid Dillow table_len = state.ndesc * sizeof (struct srp_direct_buf); 1649330179f2SBart Van Assche idb_len = sizeof(struct srp_indirect_buf) + table_len; 1650aef9ec39SRoland Dreier 1651aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_INDIRECT; 16528f26c9ffSDavid Dillow len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); 1653c07d424dSDavid Dillow len += count * sizeof (struct srp_direct_buf); 1654f5358a17SRoland Dreier 1655c07d424dSDavid Dillow memcpy(indirect_hdr->desc_list, req->indirect_desc, 1656c07d424dSDavid Dillow count * sizeof (struct srp_direct_buf)); 165785507bccSRalph Campbell 165803f6fb93SBart Van Assche if (!target->global_mr) { 1659330179f2SBart Van Assche ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1660330179f2SBart Van Assche idb_len, &idb_rkey); 1661330179f2SBart Van Assche if (ret < 0) 1662330179f2SBart Van Assche return ret; 1663330179f2SBart Van Assche req->nmdesc++; 1664330179f2SBart Van Assche } else { 1665*a745f4f4SBart Van Assche idb_rkey = cpu_to_be32(target->global_mr->rkey); 1666330179f2SBart Van Assche } 1667330179f2SBart Van Assche 1668c07d424dSDavid Dillow indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1669330179f2SBart Van Assche indirect_hdr->table_desc.key = idb_rkey; 16708f26c9ffSDavid Dillow indirect_hdr->table_desc.len = cpu_to_be32(table_len); 16718f26c9ffSDavid Dillow indirect_hdr->len = cpu_to_be32(state.total_len); 1672aef9ec39SRoland Dreier 1673aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1674c07d424dSDavid Dillow cmd->data_out_desc_cnt = count; 1675aef9ec39SRoland Dreier else 1676c07d424dSDavid Dillow cmd->data_in_desc_cnt = count; 1677c07d424dSDavid Dillow 1678c07d424dSDavid Dillow ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1679c07d424dSDavid Dillow DMA_TO_DEVICE); 1680aef9ec39SRoland Dreier 16818f26c9ffSDavid Dillow map_complete: 1682aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1683aef9ec39SRoland Dreier cmd->buf_fmt = fmt << 4; 1684aef9ec39SRoland Dreier else 1685aef9ec39SRoland Dreier cmd->buf_fmt = fmt; 1686aef9ec39SRoland Dreier 1687aef9ec39SRoland Dreier return len; 1688aef9ec39SRoland Dreier } 1689aef9ec39SRoland Dreier 169005a1d750SDavid Dillow /* 169176c75b25SBart Van Assche * Return an IU and possible credit to the free pool 169276c75b25SBart Van Assche */ 1693509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, 169476c75b25SBart Van Assche enum srp_iu_type iu_type) 169576c75b25SBart Van Assche { 169676c75b25SBart Van Assche unsigned long flags; 169776c75b25SBart Van Assche 1698509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1699509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 170076c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) 
1701509c07bcSBart Van Assche ++ch->req_lim; 1702509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 170376c75b25SBart Van Assche } 170476c75b25SBart Van Assche 170576c75b25SBart Van Assche /* 1706509c07bcSBart Van Assche * Must be called with ch->lock held to protect req_lim and free_tx. 1707e9684678SBart Van Assche * If IU is not sent, it must be returned using srp_put_tx_iu(). 170805a1d750SDavid Dillow * 170905a1d750SDavid Dillow * Note: 171005a1d750SDavid Dillow * An upper limit for the number of allocated information units for each 171105a1d750SDavid Dillow * request type is: 171205a1d750SDavid Dillow * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 171305a1d750SDavid Dillow * more than Scsi_Host.can_queue requests. 171405a1d750SDavid Dillow * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 171505a1d750SDavid Dillow * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 171605a1d750SDavid Dillow * one unanswered SRP request to an initiator. 171705a1d750SDavid Dillow */ 1718509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 171905a1d750SDavid Dillow enum srp_iu_type iu_type) 172005a1d750SDavid Dillow { 1721509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 172205a1d750SDavid Dillow s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 172305a1d750SDavid Dillow struct srp_iu *iu; 172405a1d750SDavid Dillow 1725509c07bcSBart Van Assche srp_send_completion(ch->send_cq, ch); 172605a1d750SDavid Dillow 1727509c07bcSBart Van Assche if (list_empty(&ch->free_tx)) 172805a1d750SDavid Dillow return NULL; 172905a1d750SDavid Dillow 173005a1d750SDavid Dillow /* Initiator responses to target requests do not consume credits */ 173176c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) { 1732509c07bcSBart Van Assche if (ch->req_lim <= rsv) { 173305a1d750SDavid Dillow ++target->zero_req_lim; 173405a1d750SDavid Dillow return NULL; 173505a1d750SDavid Dillow } 173605a1d750SDavid Dillow 1737509c07bcSBart Van Assche --ch->req_lim; 173876c75b25SBart Van Assche } 173976c75b25SBart Van Assche 1740509c07bcSBart Van Assche iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 174176c75b25SBart Van Assche list_del(&iu->list); 174205a1d750SDavid Dillow return iu; 174305a1d750SDavid Dillow } 174405a1d750SDavid Dillow 1745509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 174605a1d750SDavid Dillow { 1747509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 174805a1d750SDavid Dillow struct ib_sge list; 174905a1d750SDavid Dillow struct ib_send_wr wr, *bad_wr; 175005a1d750SDavid Dillow 175105a1d750SDavid Dillow list.addr = iu->dma; 175205a1d750SDavid Dillow list.length = len; 17539af76271SDavid Dillow list.lkey = target->lkey; 175405a1d750SDavid Dillow 175505a1d750SDavid Dillow wr.next = NULL; 1756dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 175705a1d750SDavid Dillow wr.sg_list = &list; 175805a1d750SDavid Dillow wr.num_sge = 1; 175905a1d750SDavid Dillow wr.opcode = IB_WR_SEND; 176005a1d750SDavid Dillow wr.send_flags = IB_SEND_SIGNALED; 176105a1d750SDavid Dillow 1762509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 176305a1d750SDavid Dillow } 176405a1d750SDavid Dillow 1765509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1766c996bb47SBart Van Assche { 1767509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1768c996bb47SBart Van Assche struct ib_recv_wr wr, 
*bad_wr; 1769dcb4cb85SBart Van Assche struct ib_sge list; 1770c996bb47SBart Van Assche 1771c996bb47SBart Van Assche list.addr = iu->dma; 1772c996bb47SBart Van Assche list.length = iu->size; 17739af76271SDavid Dillow list.lkey = target->lkey; 1774c996bb47SBart Van Assche 1775c996bb47SBart Van Assche wr.next = NULL; 1776dcb4cb85SBart Van Assche wr.wr_id = (uintptr_t) iu; 1777c996bb47SBart Van Assche wr.sg_list = &list; 1778c996bb47SBart Van Assche wr.num_sge = 1; 1779c996bb47SBart Van Assche 1780509c07bcSBart Van Assche return ib_post_recv(ch->qp, &wr, &bad_wr); 1781c996bb47SBart Van Assche } 1782c996bb47SBart Van Assche 1783509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1784aef9ec39SRoland Dreier { 1785509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1786aef9ec39SRoland Dreier struct srp_request *req; 1787aef9ec39SRoland Dreier struct scsi_cmnd *scmnd; 1788aef9ec39SRoland Dreier unsigned long flags; 1789aef9ec39SRoland Dreier 1790aef9ec39SRoland Dreier if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1791509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1792509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1793509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 179494a9174cSBart Van Assche 1795509c07bcSBart Van Assche ch->tsk_mgmt_status = -1; 1796f8b6e31eSDavid Dillow if (be32_to_cpu(rsp->resp_data_len) >= 4) 1797509c07bcSBart Van Assche ch->tsk_mgmt_status = rsp->data[3]; 1798509c07bcSBart Van Assche complete(&ch->tsk_mgmt_done); 1799aef9ec39SRoland Dreier } else { 180077f2c1a4SBart Van Assche scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 180177f2c1a4SBart Van Assche if (scmnd) { 180277f2c1a4SBart Van Assche req = (void *)scmnd->host_scribble; 180377f2c1a4SBart Van Assche scmnd = srp_claim_req(ch, req, NULL, scmnd); 180477f2c1a4SBart Van Assche } 180522032991SBart Van Assche if (!scmnd) { 18067aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1807d92c0da7SBart Van Assche "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1808d92c0da7SBart Van Assche rsp->tag, ch - target->ch, ch->qp->qp_num); 180922032991SBart Van Assche 1810509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1811509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1812509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 181322032991SBart Van Assche 181422032991SBart Van Assche return; 181522032991SBart Van Assche } 1816aef9ec39SRoland Dreier scmnd->result = rsp->status; 1817aef9ec39SRoland Dreier 1818aef9ec39SRoland Dreier if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1819aef9ec39SRoland Dreier memcpy(scmnd->sense_buffer, rsp->data + 1820aef9ec39SRoland Dreier be32_to_cpu(rsp->resp_data_len), 1821aef9ec39SRoland Dreier min_t(int, be32_to_cpu(rsp->sense_data_len), 1822aef9ec39SRoland Dreier SCSI_SENSE_BUFFERSIZE)); 1823aef9ec39SRoland Dreier } 1824aef9ec39SRoland Dreier 1825e714531aSBart Van Assche if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1826bb350d1dSFUJITA Tomonori scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1827e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) 1828e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); 1829e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1830e714531aSBart Van Assche scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1831e714531aSBart Van Assche else if (unlikely(rsp->flags & 
SRP_RSP_FLAG_DOOVER)) 1832e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1833aef9ec39SRoland Dreier 1834509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 183522032991SBart Van Assche be32_to_cpu(rsp->req_lim_delta)); 183622032991SBart Van Assche 1837f8b6e31eSDavid Dillow scmnd->host_scribble = NULL; 1838aef9ec39SRoland Dreier scmnd->scsi_done(scmnd); 1839aef9ec39SRoland Dreier } 1840aef9ec39SRoland Dreier } 1841aef9ec39SRoland Dreier 1842509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1843bb12588aSDavid Dillow void *rsp, int len) 1844bb12588aSDavid Dillow { 1845509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 184676c75b25SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1847bb12588aSDavid Dillow unsigned long flags; 1848bb12588aSDavid Dillow struct srp_iu *iu; 184976c75b25SBart Van Assche int err; 1850bb12588aSDavid Dillow 1851509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1852509c07bcSBart Van Assche ch->req_lim += req_delta; 1853509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 1854509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 185576c75b25SBart Van Assche 1856bb12588aSDavid Dillow if (!iu) { 1857bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1858bb12588aSDavid Dillow "no IU available to send response\n"); 185976c75b25SBart Van Assche return 1; 1860bb12588aSDavid Dillow } 1861bb12588aSDavid Dillow 1862bb12588aSDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 1863bb12588aSDavid Dillow memcpy(iu->buf, rsp, len); 1864bb12588aSDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1865bb12588aSDavid Dillow 1866509c07bcSBart Van Assche err = srp_post_send(ch, iu, len); 186776c75b25SBart Van Assche if (err) { 1868bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1869bb12588aSDavid Dillow "unable to post response: %d\n", err); 1870509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_RSP); 187176c75b25SBart Van Assche } 1872bb12588aSDavid Dillow 1873bb12588aSDavid Dillow return err; 1874bb12588aSDavid Dillow } 1875bb12588aSDavid Dillow 1876509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch, 1877bb12588aSDavid Dillow struct srp_cred_req *req) 1878bb12588aSDavid Dillow { 1879bb12588aSDavid Dillow struct srp_cred_rsp rsp = { 1880bb12588aSDavid Dillow .opcode = SRP_CRED_RSP, 1881bb12588aSDavid Dillow .tag = req->tag, 1882bb12588aSDavid Dillow }; 1883bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1884bb12588aSDavid Dillow 1885509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1886509c07bcSBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, PFX 1887bb12588aSDavid Dillow "problems processing SRP_CRED_REQ\n"); 1888bb12588aSDavid Dillow } 1889bb12588aSDavid Dillow 1890509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch, 1891bb12588aSDavid Dillow struct srp_aer_req *req) 1892bb12588aSDavid Dillow { 1893509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1894bb12588aSDavid Dillow struct srp_aer_rsp rsp = { 1895bb12588aSDavid Dillow .opcode = SRP_AER_RSP, 1896bb12588aSDavid Dillow .tag = req->tag, 1897bb12588aSDavid Dillow }; 1898bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1899bb12588aSDavid Dillow 1900bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1901985aa495SBart 
Van Assche "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 1902bb12588aSDavid Dillow 1903509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1904bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1905bb12588aSDavid Dillow "problems processing SRP_AER_REQ\n"); 1906bb12588aSDavid Dillow } 1907bb12588aSDavid Dillow 1908509c07bcSBart Van Assche static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) 1909aef9ec39SRoland Dreier { 1910509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1911dcb4cb85SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1912737b94ebSRoland Dreier struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; 1913c996bb47SBart Van Assche int res; 1914aef9ec39SRoland Dreier u8 opcode; 1915aef9ec39SRoland Dreier 1916509c07bcSBart Van Assche ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 191785507bccSRalph Campbell DMA_FROM_DEVICE); 1918aef9ec39SRoland Dreier 1919aef9ec39SRoland Dreier opcode = *(u8 *) iu->buf; 1920aef9ec39SRoland Dreier 1921aef9ec39SRoland Dreier if (0) { 19227aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 19237aa54bd7SDavid Dillow PFX "recv completion, opcode 0x%02x\n", opcode); 19247a700811SBart Van Assche print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 19257a700811SBart Van Assche iu->buf, wc->byte_len, true); 1926aef9ec39SRoland Dreier } 1927aef9ec39SRoland Dreier 1928aef9ec39SRoland Dreier switch (opcode) { 1929aef9ec39SRoland Dreier case SRP_RSP: 1930509c07bcSBart Van Assche srp_process_rsp(ch, iu->buf); 1931aef9ec39SRoland Dreier break; 1932aef9ec39SRoland Dreier 1933bb12588aSDavid Dillow case SRP_CRED_REQ: 1934509c07bcSBart Van Assche srp_process_cred_req(ch, iu->buf); 1935bb12588aSDavid Dillow break; 1936bb12588aSDavid Dillow 1937bb12588aSDavid Dillow case SRP_AER_REQ: 1938509c07bcSBart Van Assche srp_process_aer_req(ch, iu->buf); 1939bb12588aSDavid Dillow break; 1940bb12588aSDavid Dillow 1941aef9ec39SRoland Dreier case SRP_T_LOGOUT: 1942aef9ec39SRoland Dreier /* XXX Handle target logout */ 19437aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 19447aa54bd7SDavid Dillow PFX "Got target logout request\n"); 1945aef9ec39SRoland Dreier break; 1946aef9ec39SRoland Dreier 1947aef9ec39SRoland Dreier default: 19487aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 19497aa54bd7SDavid Dillow PFX "Unhandled SRP opcode 0x%02x\n", opcode); 1950aef9ec39SRoland Dreier break; 1951aef9ec39SRoland Dreier } 1952aef9ec39SRoland Dreier 1953509c07bcSBart Van Assche ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 195485507bccSRalph Campbell DMA_FROM_DEVICE); 1955c996bb47SBart Van Assche 1956509c07bcSBart Van Assche res = srp_post_recv(ch, iu); 1957c996bb47SBart Van Assche if (res != 0) 1958c996bb47SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 1959c996bb47SBart Van Assche PFX "Recv failed with error code %d\n", res); 1960aef9ec39SRoland Dreier } 1961aef9ec39SRoland Dreier 1962c1120f89SBart Van Assche /** 1963c1120f89SBart Van Assche * srp_tl_err_work() - handle a transport layer error 1964af24663bSBart Van Assche * @work: Work structure embedded in an SRP target port. 1965c1120f89SBart Van Assche * 1966c1120f89SBart Van Assche * Note: This function may get invoked before the rport has been created, 1967c1120f89SBart Van Assche * hence the target->rport test. 
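 *
 * The block below is an illustrative, self-contained sketch and is not part
 * of ib_srp: it only mimics how srp_handle_qp_err() further below tells the
 * kinds of failed work requests apart by looking at wr_id, which carries
 * either a pointer to the posted srp_iu or a flag value for a memory
 * registration/invalidation work request (the driver also checks for the
 * SRP_LAST_WR_ID sentinel first).  The EX_* names and mask values are
 * assumptions made for the example; the real masks live in ib_srp.h.
 */

#if 0	/* Example only; never compiled into the driver. */
#include <stdio.h>
#include <stdint.h>

#define EX_LOCAL_INV_WR_ID_MASK	1ULL	/* assumed flag bit */
#define EX_FAST_REG_WR_ID_MASK	2ULL	/* assumed flag bit */

/*
 * Information units are allocated with kmalloc() and therefore at least
 * word aligned, so the two least significant bits of an IU pointer are
 * always zero and cannot collide with the flag bits above.
 */
static const char *ex_classify_wr_id(uint64_t wr_id)
{
	if (wr_id & EX_LOCAL_INV_WR_ID_MASK)
		return "failed LOCAL_INV work request";
	if (wr_id & EX_FAST_REG_WR_ID_MASK)
		return "failed FAST_REG_MR work request";
	return "failed send/receive; wr_id is the srp_iu pointer";
}

int main(void)
{
	static uint64_t iu;	/* stand-in for a posted information unit */

	printf("%s\n", ex_classify_wr_id(EX_LOCAL_INV_WR_ID_MASK));
	printf("%s\n", ex_classify_wr_id((uint64_t)(uintptr_t)&iu));
	return 0;
}
#endif

/* End of the illustrative sketch; the driver code resumes below.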
1968c1120f89SBart Van Assche */ 1969c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work) 1970c1120f89SBart Van Assche { 1971c1120f89SBart Van Assche struct srp_target_port *target; 1972c1120f89SBart Van Assche 1973c1120f89SBart Van Assche target = container_of(work, struct srp_target_port, tl_err_work); 1974c1120f89SBart Van Assche if (target->rport) 1975c1120f89SBart Van Assche srp_start_tl_fail_timers(target->rport); 1976c1120f89SBart Van Assche } 1977c1120f89SBart Van Assche 19785cfb1782SBart Van Assche static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, 19797dad6b2eSBart Van Assche bool send_err, struct srp_rdma_ch *ch) 1980948d1e88SBart Van Assche { 19817dad6b2eSBart Van Assche struct srp_target_port *target = ch->target; 19827dad6b2eSBart Van Assche 19837dad6b2eSBart Van Assche if (wr_id == SRP_LAST_WR_ID) { 19847dad6b2eSBart Van Assche complete(&ch->done); 19857dad6b2eSBart Van Assche return; 19867dad6b2eSBart Van Assche } 19877dad6b2eSBart Van Assche 1988c014c8cdSBart Van Assche if (ch->connected && !target->qp_in_error) { 19895cfb1782SBart Van Assche if (wr_id & LOCAL_INV_WR_ID_MASK) { 19905cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 199157363d98SSagi Grimberg "LOCAL_INV failed with status %s (%d)\n", 199257363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19935cfb1782SBart Van Assche } else if (wr_id & FAST_REG_WR_ID_MASK) { 19945cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, PFX 199557363d98SSagi Grimberg "FAST_REG_MR failed status %s (%d)\n", 199657363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status); 19975cfb1782SBart Van Assche } else { 19985cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 199957363d98SSagi Grimberg PFX "failed %s status %s (%d) for iu %p\n", 20005cfb1782SBart Van Assche send_err ? 
"send" : "receive", 200157363d98SSagi Grimberg ib_wc_status_msg(wc_status), wc_status, 200257363d98SSagi Grimberg (void *)(uintptr_t)wr_id); 20035cfb1782SBart Van Assche } 2004c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 20054f0af697SBart Van Assche } 2006948d1e88SBart Van Assche target->qp_in_error = true; 2007948d1e88SBart Van Assche } 2008948d1e88SBart Van Assche 2009509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) 2010aef9ec39SRoland Dreier { 2011509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 2012aef9ec39SRoland Dreier struct ib_wc wc; 2013aef9ec39SRoland Dreier 2014aef9ec39SRoland Dreier ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 2015aef9ec39SRoland Dreier while (ib_poll_cq(cq, 1, &wc) > 0) { 2016948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 2017509c07bcSBart Van Assche srp_handle_recv(ch, &wc); 2018948d1e88SBart Van Assche } else { 20197dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, false, ch); 2020aef9ec39SRoland Dreier } 20219c03dc9fSBart Van Assche } 20229c03dc9fSBart Van Assche } 20239c03dc9fSBart Van Assche 2024509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) 20259c03dc9fSBart Van Assche { 2026509c07bcSBart Van Assche struct srp_rdma_ch *ch = ch_ptr; 20279c03dc9fSBart Van Assche struct ib_wc wc; 2028dcb4cb85SBart Van Assche struct srp_iu *iu; 20299c03dc9fSBart Van Assche 20309c03dc9fSBart Van Assche while (ib_poll_cq(cq, 1, &wc) > 0) { 2031948d1e88SBart Van Assche if (likely(wc.status == IB_WC_SUCCESS)) { 2032737b94ebSRoland Dreier iu = (struct srp_iu *) (uintptr_t) wc.wr_id; 2033509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 2034948d1e88SBart Van Assche } else { 20357dad6b2eSBart Van Assche srp_handle_qp_err(wc.wr_id, wc.status, true, ch); 2036948d1e88SBart Van Assche } 2037aef9ec39SRoland Dreier } 2038aef9ec39SRoland Dreier } 2039aef9ec39SRoland Dreier 204076c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 2041aef9ec39SRoland Dreier { 204276c75b25SBart Van Assche struct srp_target_port *target = host_to_target(shost); 2043a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 2044509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2045aef9ec39SRoland Dreier struct srp_request *req; 2046aef9ec39SRoland Dreier struct srp_iu *iu; 2047aef9ec39SRoland Dreier struct srp_cmd *cmd; 204885507bccSRalph Campbell struct ib_device *dev; 204976c75b25SBart Van Assche unsigned long flags; 205077f2c1a4SBart Van Assche u32 tag; 205177f2c1a4SBart Van Assche u16 idx; 2052d1b4289eSBart Van Assche int len, ret; 2053a95cadb9SBart Van Assche const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 2054a95cadb9SBart Van Assche 2055a95cadb9SBart Van Assche /* 2056a95cadb9SBart Van Assche * The SCSI EH thread is the only context from which srp_queuecommand() 2057a95cadb9SBart Van Assche * can get invoked for blocked devices (SDEV_BLOCK / 2058a95cadb9SBart Van Assche * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by 2059a95cadb9SBart Van Assche * locking the rport mutex if invoked from inside the SCSI EH. 
2060a95cadb9SBart Van Assche */ 2061a95cadb9SBart Van Assche if (in_scsi_eh) 2062a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2063aef9ec39SRoland Dreier 2064d1b4289eSBart Van Assche scmnd->result = srp_chkready(target->rport); 2065d1b4289eSBart Van Assche if (unlikely(scmnd->result)) 2066d1b4289eSBart Van Assche goto err; 20672ce19e72SBart Van Assche 206877f2c1a4SBart Van Assche WARN_ON_ONCE(scmnd->request->tag < 0); 206977f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2070d92c0da7SBart Van Assche ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 207177f2c1a4SBart Van Assche idx = blk_mq_unique_tag_to_tag(tag); 207277f2c1a4SBart Van Assche WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", 207377f2c1a4SBart Van Assche dev_name(&shost->shost_gendev), tag, idx, 207477f2c1a4SBart Van Assche target->req_ring_size); 2075509c07bcSBart Van Assche 2076509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 2077509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_CMD); 2078509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 2079aef9ec39SRoland Dreier 208077f2c1a4SBart Van Assche if (!iu) 208177f2c1a4SBart Van Assche goto err; 208277f2c1a4SBart Van Assche 208377f2c1a4SBart Van Assche req = &ch->req_ring[idx]; 208405321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev->dev; 208549248644SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 208685507bccSRalph Campbell DMA_TO_DEVICE); 2087aef9ec39SRoland Dreier 2088f8b6e31eSDavid Dillow scmnd->host_scribble = (void *) req; 2089aef9ec39SRoland Dreier 2090aef9ec39SRoland Dreier cmd = iu->buf; 2091aef9ec39SRoland Dreier memset(cmd, 0, sizeof *cmd); 2092aef9ec39SRoland Dreier 2093aef9ec39SRoland Dreier cmd->opcode = SRP_CMD; 2094985aa495SBart Van Assche int_to_scsilun(scmnd->device->lun, &cmd->lun); 209577f2c1a4SBart Van Assche cmd->tag = tag; 2096aef9ec39SRoland Dreier memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2097aef9ec39SRoland Dreier 2098aef9ec39SRoland Dreier req->scmnd = scmnd; 2099aef9ec39SRoland Dreier req->cmd = iu; 2100aef9ec39SRoland Dreier 2101509c07bcSBart Van Assche len = srp_map_data(scmnd, ch, req); 2102aef9ec39SRoland Dreier if (len < 0) { 21037aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 2104d1b4289eSBart Van Assche PFX "Failed to map data (%d)\n", len); 2105d1b4289eSBart Van Assche /* 2106d1b4289eSBart Van Assche * If we ran out of memory descriptors (-ENOMEM) because an 2107d1b4289eSBart Van Assche * application is queuing many requests with more than 210852ede08fSBart Van Assche * max_pages_per_mr sg-list elements, tell the SCSI mid-layer 2109d1b4289eSBart Van Assche * to reduce queue depth temporarily. 2110d1b4289eSBart Van Assche */ 2111d1b4289eSBart Van Assche scmnd->result = len == -ENOMEM ? 
2112d1b4289eSBart Van Assche DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; 211376c75b25SBart Van Assche goto err_iu; 2114aef9ec39SRoland Dreier } 2115aef9ec39SRoland Dreier 211649248644SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 211785507bccSRalph Campbell DMA_TO_DEVICE); 2118aef9ec39SRoland Dreier 2119509c07bcSBart Van Assche if (srp_post_send(ch, iu, len)) { 21207aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2121aef9ec39SRoland Dreier goto err_unmap; 2122aef9ec39SRoland Dreier } 2123aef9ec39SRoland Dreier 2124d1b4289eSBart Van Assche ret = 0; 2125d1b4289eSBart Van Assche 2126a95cadb9SBart Van Assche unlock_rport: 2127a95cadb9SBart Van Assche if (in_scsi_eh) 2128a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2129a95cadb9SBart Van Assche 2130d1b4289eSBart Van Assche return ret; 2131aef9ec39SRoland Dreier 2132aef9ec39SRoland Dreier err_unmap: 2133509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 2134aef9ec39SRoland Dreier 213576c75b25SBart Van Assche err_iu: 2136509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_CMD); 213776c75b25SBart Van Assche 2138024ca901SBart Van Assche /* 2139024ca901SBart Van Assche * Avoid that the loops that iterate over the request ring can 2140024ca901SBart Van Assche * encounter a dangling SCSI command pointer. 2141024ca901SBart Van Assche */ 2142024ca901SBart Van Assche req->scmnd = NULL; 2143024ca901SBart Van Assche 2144d1b4289eSBart Van Assche err: 2145d1b4289eSBart Van Assche if (scmnd->result) { 2146d1b4289eSBart Van Assche scmnd->scsi_done(scmnd); 2147d1b4289eSBart Van Assche ret = 0; 2148d1b4289eSBart Van Assche } else { 2149d1b4289eSBart Van Assche ret = SCSI_MLQUEUE_HOST_BUSY; 2150d1b4289eSBart Van Assche } 2151a95cadb9SBart Van Assche 2152d1b4289eSBart Van Assche goto unlock_rport; 2153aef9ec39SRoland Dreier } 2154aef9ec39SRoland Dreier 21554d73f95fSBart Van Assche /* 21564d73f95fSBart Van Assche * Note: the resources allocated in this function are freed in 2157509c07bcSBart Van Assche * srp_free_ch_ib(). 
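 *
 * The block below is an illustrative, self-contained sketch and is not part
 * of ib_srp: it redoes, as a plain user-space program, the worst-case
 * completion-time arithmetic that srp_compute_rq_tmo() performs a little
 * further below.  An IB local ACK timeout value t stands for a delay of
 * 4.096 us * 2^t, the worst case before an error completion is roughly
 * retry_cnt * 4 times that delay, and the driver adds one extra second of
 * slack.  The function name and the timeout/retry_cnt inputs used in main()
 * are arbitrary example choices, not values taken from this driver.
 */

#if 0	/* Example only; never compiled into the driver. */
#include <stdio.h>
#include <stdint.h>

static unsigned long long ex_rq_tmo_ms(unsigned int timeout, unsigned int retry_cnt)
{
	uint64_t t_tr_ns = 4096ULL * (1ULL << timeout);	/* 4.096 us * 2^timeout */
	uint64_t max_compl_ns = (uint64_t)retry_cnt * 4 * t_tr_ns;

	return max_compl_ns / 1000000 + 1000;	/* worst case + 1 s of slack */
}

int main(void)
{
	printf("timeout=14, retry_cnt=7 -> %llu ms\n", ex_rq_tmo_ms(14, 7));
	return 0;
}
#endif

/* End of the illustrative sketch; the driver code resumes below.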
21584d73f95fSBart Van Assche */ 2159509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2160aef9ec39SRoland Dreier { 2161509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2162aef9ec39SRoland Dreier int i; 2163aef9ec39SRoland Dreier 2164509c07bcSBart Van Assche ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), 21654d73f95fSBart Van Assche GFP_KERNEL); 2166509c07bcSBart Van Assche if (!ch->rx_ring) 21674d73f95fSBart Van Assche goto err_no_ring; 2168509c07bcSBart Van Assche ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), 21694d73f95fSBart Van Assche GFP_KERNEL); 2170509c07bcSBart Van Assche if (!ch->tx_ring) 21714d73f95fSBart Van Assche goto err_no_ring; 21724d73f95fSBart Van Assche 21734d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2174509c07bcSBart Van Assche ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2175509c07bcSBart Van Assche ch->max_ti_iu_len, 2176aef9ec39SRoland Dreier GFP_KERNEL, DMA_FROM_DEVICE); 2177509c07bcSBart Van Assche if (!ch->rx_ring[i]) 2178aef9ec39SRoland Dreier goto err; 2179aef9ec39SRoland Dreier } 2180aef9ec39SRoland Dreier 21814d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2182509c07bcSBart Van Assche ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 218349248644SDavid Dillow target->max_iu_len, 2184aef9ec39SRoland Dreier GFP_KERNEL, DMA_TO_DEVICE); 2185509c07bcSBart Van Assche if (!ch->tx_ring[i]) 2186aef9ec39SRoland Dreier goto err; 2187dcb4cb85SBart Van Assche 2188509c07bcSBart Van Assche list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2189aef9ec39SRoland Dreier } 2190aef9ec39SRoland Dreier 2191aef9ec39SRoland Dreier return 0; 2192aef9ec39SRoland Dreier 2193aef9ec39SRoland Dreier err: 21944d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2195509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 2196509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 2197aef9ec39SRoland Dreier } 2198aef9ec39SRoland Dreier 21994d73f95fSBart Van Assche 22004d73f95fSBart Van Assche err_no_ring: 2201509c07bcSBart Van Assche kfree(ch->tx_ring); 2202509c07bcSBart Van Assche ch->tx_ring = NULL; 2203509c07bcSBart Van Assche kfree(ch->rx_ring); 2204509c07bcSBart Van Assche ch->rx_ring = NULL; 2205aef9ec39SRoland Dreier 2206aef9ec39SRoland Dreier return -ENOMEM; 2207aef9ec39SRoland Dreier } 2208aef9ec39SRoland Dreier 2209c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2210c9b03c1aSBart Van Assche { 2211c9b03c1aSBart Van Assche uint64_t T_tr_ns, max_compl_time_ms; 2212c9b03c1aSBart Van Assche uint32_t rq_tmo_jiffies; 2213c9b03c1aSBart Van Assche 2214c9b03c1aSBart Van Assche /* 2215c9b03c1aSBart Van Assche * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2216c9b03c1aSBart Van Assche * table 91), both the QP timeout and the retry count have to be set 2217c9b03c1aSBart Van Assche * for RC QP's during the RTR to RTS transition. 2218c9b03c1aSBart Van Assche */ 2219c9b03c1aSBart Van Assche WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2220c9b03c1aSBart Van Assche (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2221c9b03c1aSBart Van Assche 2222c9b03c1aSBart Van Assche /* 2223c9b03c1aSBart Van Assche * Set target->rq_tmo_jiffies to one second more than the largest time 2224c9b03c1aSBart Van Assche * it can take before an error completion is generated. 
See also 2225c9b03c1aSBart Van Assche * C9-140..142 in the IBTA spec for more information about how to 2226c9b03c1aSBart Van Assche * convert the QP Local ACK Timeout value to nanoseconds. 2227c9b03c1aSBart Van Assche */ 2228c9b03c1aSBart Van Assche T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2229c9b03c1aSBart Van Assche max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2230c9b03c1aSBart Van Assche do_div(max_compl_time_ms, NSEC_PER_MSEC); 2231c9b03c1aSBart Van Assche rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2232c9b03c1aSBart Van Assche 2233c9b03c1aSBart Van Assche return rq_tmo_jiffies; 2234c9b03c1aSBart Van Assche } 2235c9b03c1aSBart Van Assche 2236961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2237e6300cbdSBart Van Assche const struct srp_login_rsp *lrsp, 2238509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2239961e0be8SDavid Dillow { 2240509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2241961e0be8SDavid Dillow struct ib_qp_attr *qp_attr = NULL; 2242961e0be8SDavid Dillow int attr_mask = 0; 2243961e0be8SDavid Dillow int ret; 2244961e0be8SDavid Dillow int i; 2245961e0be8SDavid Dillow 2246961e0be8SDavid Dillow if (lrsp->opcode == SRP_LOGIN_RSP) { 2247509c07bcSBart Van Assche ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2248509c07bcSBart Van Assche ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2249961e0be8SDavid Dillow 2250961e0be8SDavid Dillow /* 2251961e0be8SDavid Dillow * Reserve credits for task management so we don't 2252961e0be8SDavid Dillow * bounce requests back to the SCSI mid-layer. 2253961e0be8SDavid Dillow */ 2254961e0be8SDavid Dillow target->scsi_host->can_queue 2255509c07bcSBart Van Assche = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2256961e0be8SDavid Dillow target->scsi_host->can_queue); 22574d73f95fSBart Van Assche target->scsi_host->cmd_per_lun 22584d73f95fSBart Van Assche = min_t(int, target->scsi_host->can_queue, 22594d73f95fSBart Van Assche target->scsi_host->cmd_per_lun); 2260961e0be8SDavid Dillow } else { 2261961e0be8SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 2262961e0be8SDavid Dillow PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2263961e0be8SDavid Dillow ret = -ECONNRESET; 2264961e0be8SDavid Dillow goto error; 2265961e0be8SDavid Dillow } 2266961e0be8SDavid Dillow 2267509c07bcSBart Van Assche if (!ch->rx_ring) { 2268509c07bcSBart Van Assche ret = srp_alloc_iu_bufs(ch); 2269961e0be8SDavid Dillow if (ret) 2270961e0be8SDavid Dillow goto error; 2271961e0be8SDavid Dillow } 2272961e0be8SDavid Dillow 2273961e0be8SDavid Dillow ret = -ENOMEM; 2274961e0be8SDavid Dillow qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 2275961e0be8SDavid Dillow if (!qp_attr) 2276961e0be8SDavid Dillow goto error; 2277961e0be8SDavid Dillow 2278961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTR; 2279961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2280961e0be8SDavid Dillow if (ret) 2281961e0be8SDavid Dillow goto error_free; 2282961e0be8SDavid Dillow 2283509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2284961e0be8SDavid Dillow if (ret) 2285961e0be8SDavid Dillow goto error_free; 2286961e0be8SDavid Dillow 22874d73f95fSBart Van Assche for (i = 0; i < target->queue_size; i++) { 2288509c07bcSBart Van Assche struct srp_iu *iu = ch->rx_ring[i]; 2289509c07bcSBart Van Assche 2290509c07bcSBart Van Assche ret = srp_post_recv(ch, iu); 2291961e0be8SDavid Dillow if (ret) 2292961e0be8SDavid Dillow goto error_free; 2293961e0be8SDavid Dillow } 2294961e0be8SDavid 
Dillow 2295961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTS; 2296961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2297961e0be8SDavid Dillow if (ret) 2298961e0be8SDavid Dillow goto error_free; 2299961e0be8SDavid Dillow 2300c9b03c1aSBart Van Assche target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2301c9b03c1aSBart Van Assche 2302509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2303961e0be8SDavid Dillow if (ret) 2304961e0be8SDavid Dillow goto error_free; 2305961e0be8SDavid Dillow 2306961e0be8SDavid Dillow ret = ib_send_cm_rtu(cm_id, NULL, 0); 2307961e0be8SDavid Dillow 2308961e0be8SDavid Dillow error_free: 2309961e0be8SDavid Dillow kfree(qp_attr); 2310961e0be8SDavid Dillow 2311961e0be8SDavid Dillow error: 2312509c07bcSBart Van Assche ch->status = ret; 2313961e0be8SDavid Dillow } 2314961e0be8SDavid Dillow 2315aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2316aef9ec39SRoland Dreier struct ib_cm_event *event, 2317509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2318aef9ec39SRoland Dreier { 2319509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 23207aa54bd7SDavid Dillow struct Scsi_Host *shost = target->scsi_host; 2321aef9ec39SRoland Dreier struct ib_class_port_info *cpi; 2322aef9ec39SRoland Dreier int opcode; 2323aef9ec39SRoland Dreier 2324aef9ec39SRoland Dreier switch (event->param.rej_rcvd.reason) { 2325aef9ec39SRoland Dreier case IB_CM_REJ_PORT_CM_REDIRECT: 2326aef9ec39SRoland Dreier cpi = event->param.rej_rcvd.ari; 2327509c07bcSBart Van Assche ch->path.dlid = cpi->redirect_lid; 2328509c07bcSBart Van Assche ch->path.pkey = cpi->redirect_pkey; 2329aef9ec39SRoland Dreier cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2330509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); 2331aef9ec39SRoland Dreier 2332509c07bcSBart Van Assche ch->status = ch->path.dlid ? 2333aef9ec39SRoland Dreier SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2334aef9ec39SRoland Dreier break; 2335aef9ec39SRoland Dreier 2336aef9ec39SRoland Dreier case IB_CM_REJ_PORT_REDIRECT: 23375d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 2338aef9ec39SRoland Dreier /* 2339aef9ec39SRoland Dreier * Topspin/Cisco SRP gateways incorrectly send 2340aef9ec39SRoland Dreier * reject reason code 25 when they mean 24 2341aef9ec39SRoland Dreier * (port redirect). 
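 * Treat such a reject as a port redirect: the ARI field holds the GID
 * of the target port to which the initiator is being redirected.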
2342aef9ec39SRoland Dreier */ 2343509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, 2344aef9ec39SRoland Dreier event->param.rej_rcvd.ari, 16); 2345aef9ec39SRoland Dreier 23467aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, shost, 23477aa54bd7SDavid Dillow PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2348509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.subnet_prefix), 2349509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.interface_id)); 2350aef9ec39SRoland Dreier 2351509c07bcSBart Van Assche ch->status = SRP_PORT_REDIRECT; 2352aef9ec39SRoland Dreier } else { 23537aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23547aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2355509c07bcSBart Van Assche ch->status = -ECONNRESET; 2356aef9ec39SRoland Dreier } 2357aef9ec39SRoland Dreier break; 2358aef9ec39SRoland Dreier 2359aef9ec39SRoland Dreier case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 23607aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23617aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2362509c07bcSBart Van Assche ch->status = -ECONNRESET; 2363aef9ec39SRoland Dreier break; 2364aef9ec39SRoland Dreier 2365aef9ec39SRoland Dreier case IB_CM_REJ_CONSUMER_DEFINED: 2366aef9ec39SRoland Dreier opcode = *(u8 *) event->private_data; 2367aef9ec39SRoland Dreier if (opcode == SRP_LOGIN_REJ) { 2368aef9ec39SRoland Dreier struct srp_login_rej *rej = event->private_data; 2369aef9ec39SRoland Dreier u32 reason = be32_to_cpu(rej->reason); 2370aef9ec39SRoland Dreier 2371aef9ec39SRoland Dreier if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 23727aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23737aa54bd7SDavid Dillow PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2374aef9ec39SRoland Dreier else 2375e7ffde01SBart Van Assche shost_printk(KERN_WARNING, shost, PFX 2376e7ffde01SBart Van Assche "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2377747fe000SBart Van Assche target->sgid.raw, 2378747fe000SBart Van Assche target->orig_dgid.raw, reason); 2379aef9ec39SRoland Dreier } else 23807aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 23817aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2382aef9ec39SRoland Dreier " opcode 0x%02x\n", opcode); 2383509c07bcSBart Van Assche ch->status = -ECONNRESET; 2384aef9ec39SRoland Dreier break; 2385aef9ec39SRoland Dreier 23869fe4bcf4SDavid Dillow case IB_CM_REJ_STALE_CONN: 23879fe4bcf4SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2388509c07bcSBart Van Assche ch->status = SRP_STALE_CONN; 23899fe4bcf4SDavid Dillow break; 23909fe4bcf4SDavid Dillow 2391aef9ec39SRoland Dreier default: 23927aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2393aef9ec39SRoland Dreier event->param.rej_rcvd.reason); 2394509c07bcSBart Van Assche ch->status = -ECONNRESET; 2395aef9ec39SRoland Dreier } 2396aef9ec39SRoland Dreier } 2397aef9ec39SRoland Dreier 2398aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2399aef9ec39SRoland Dreier { 2400509c07bcSBart Van Assche struct srp_rdma_ch *ch = cm_id->context; 2401509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2402aef9ec39SRoland Dreier int comp = 0; 2403aef9ec39SRoland Dreier 2404aef9ec39SRoland Dreier switch (event->event) { 2405aef9ec39SRoland Dreier case IB_CM_REQ_ERROR: 24067aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 24077aa54bd7SDavid Dillow PFX "Sending CM REQ 
failed\n"); 2408aef9ec39SRoland Dreier comp = 1; 2409509c07bcSBart Van Assche ch->status = -ECONNRESET; 2410aef9ec39SRoland Dreier break; 2411aef9ec39SRoland Dreier 2412aef9ec39SRoland Dreier case IB_CM_REP_RECEIVED: 2413aef9ec39SRoland Dreier comp = 1; 2414509c07bcSBart Van Assche srp_cm_rep_handler(cm_id, event->private_data, ch); 2415aef9ec39SRoland Dreier break; 2416aef9ec39SRoland Dreier 2417aef9ec39SRoland Dreier case IB_CM_REJ_RECEIVED: 24187aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2419aef9ec39SRoland Dreier comp = 1; 2420aef9ec39SRoland Dreier 2421509c07bcSBart Van Assche srp_cm_rej_handler(cm_id, event, ch); 2422aef9ec39SRoland Dreier break; 2423aef9ec39SRoland Dreier 2424b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_RECEIVED: 24257aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 24267aa54bd7SDavid Dillow PFX "DREQ received - connection closed\n"); 2427c014c8cdSBart Van Assche ch->connected = false; 2428b7ac4ab4SIshai Rabinovitz if (ib_send_cm_drep(cm_id, NULL, 0)) 24297aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24307aa54bd7SDavid Dillow PFX "Sending CM DREP failed\n"); 2431c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 2432aef9ec39SRoland Dreier break; 2433aef9ec39SRoland Dreier 2434aef9ec39SRoland Dreier case IB_CM_TIMEWAIT_EXIT: 24357aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24367aa54bd7SDavid Dillow PFX "connection closed\n"); 2437ac72d766SBart Van Assche comp = 1; 2438aef9ec39SRoland Dreier 2439509c07bcSBart Van Assche ch->status = 0; 2440aef9ec39SRoland Dreier break; 2441aef9ec39SRoland Dreier 2442b7ac4ab4SIshai Rabinovitz case IB_CM_MRA_RECEIVED: 2443b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_ERROR: 2444b7ac4ab4SIshai Rabinovitz case IB_CM_DREP_RECEIVED: 2445b7ac4ab4SIshai Rabinovitz break; 2446b7ac4ab4SIshai Rabinovitz 2447aef9ec39SRoland Dreier default: 24487aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 24497aa54bd7SDavid Dillow PFX "Unhandled CM event %d\n", event->event); 2450aef9ec39SRoland Dreier break; 2451aef9ec39SRoland Dreier } 2452aef9ec39SRoland Dreier 2453aef9ec39SRoland Dreier if (comp) 2454509c07bcSBart Van Assche complete(&ch->done); 2455aef9ec39SRoland Dreier 2456aef9ec39SRoland Dreier return 0; 2457aef9ec39SRoland Dreier } 2458aef9ec39SRoland Dreier 245971444b97SJack Wang /** 246071444b97SJack Wang * srp_change_queue_depth - setting device queue depth 246171444b97SJack Wang * @sdev: scsi device struct 246271444b97SJack Wang * @qdepth: requested queue depth 246371444b97SJack Wang * 246471444b97SJack Wang * Returns queue depth. 
246571444b97SJack Wang */ 246671444b97SJack Wang static int 2467db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 246871444b97SJack Wang { 246971444b97SJack Wang if (!sdev->tagged_supported) 24701e6f2416SChristoph Hellwig qdepth = 1; 2471db5ed4dfSChristoph Hellwig return scsi_change_queue_depth(sdev, qdepth); 247271444b97SJack Wang } 247371444b97SJack Wang 2474985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2475985aa495SBart Van Assche u8 func) 2476aef9ec39SRoland Dreier { 2477509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2478a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 247919081f31SDavid Dillow struct ib_device *dev = target->srp_host->srp_dev->dev; 2480aef9ec39SRoland Dreier struct srp_iu *iu; 2481aef9ec39SRoland Dreier struct srp_tsk_mgmt *tsk_mgmt; 2482aef9ec39SRoland Dreier 2483c014c8cdSBart Van Assche if (!ch->connected || target->qp_in_error) 24843780d1f0SBart Van Assche return -1; 24853780d1f0SBart Van Assche 2486509c07bcSBart Van Assche init_completion(&ch->tsk_mgmt_done); 2487aef9ec39SRoland Dreier 2488a95cadb9SBart Van Assche /* 2489509c07bcSBart Van Assche * Lock the rport mutex to avoid that srp_create_ch_ib() is 2490a95cadb9SBart Van Assche * invoked while a task management function is being sent. 2491a95cadb9SBart Van Assche */ 2492a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2493509c07bcSBart Van Assche spin_lock_irq(&ch->lock); 2494509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2495509c07bcSBart Van Assche spin_unlock_irq(&ch->lock); 249676c75b25SBart Van Assche 2497a95cadb9SBart Van Assche if (!iu) { 2498a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2499a95cadb9SBart Van Assche 250076c75b25SBart Van Assche return -1; 2501a95cadb9SBart Van Assche } 2502aef9ec39SRoland Dreier 250319081f31SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 250419081f31SDavid Dillow DMA_TO_DEVICE); 2505aef9ec39SRoland Dreier tsk_mgmt = iu->buf; 2506aef9ec39SRoland Dreier memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2507aef9ec39SRoland Dreier 2508aef9ec39SRoland Dreier tsk_mgmt->opcode = SRP_TSK_MGMT; 2509985aa495SBart Van Assche int_to_scsilun(lun, &tsk_mgmt->lun); 2510f8b6e31eSDavid Dillow tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 2511aef9ec39SRoland Dreier tsk_mgmt->tsk_mgmt_func = func; 2512f8b6e31eSDavid Dillow tsk_mgmt->task_tag = req_tag; 2513aef9ec39SRoland Dreier 251419081f31SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 251519081f31SDavid Dillow DMA_TO_DEVICE); 2516509c07bcSBart Van Assche if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2517509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2518a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2519a95cadb9SBart Van Assche 252076c75b25SBart Van Assche return -1; 252176c75b25SBart Van Assche } 2522a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2523d945e1dfSRoland Dreier 2524509c07bcSBart Van Assche if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, 2525aef9ec39SRoland Dreier msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2526d945e1dfSRoland Dreier return -1; 2527aef9ec39SRoland Dreier 2528d945e1dfSRoland Dreier return 0; 2529d945e1dfSRoland Dreier } 2530d945e1dfSRoland Dreier 2531aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd) 2532aef9ec39SRoland Dreier { 2533d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2534f8b6e31eSDavid Dillow 
struct srp_request *req = (struct srp_request *) scmnd->host_scribble; 253577f2c1a4SBart Van Assche u32 tag; 2536d92c0da7SBart Van Assche u16 ch_idx; 2537509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2538086f44f5SBart Van Assche int ret; 2539d945e1dfSRoland Dreier 25407aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2541aef9ec39SRoland Dreier 2542d92c0da7SBart Van Assche if (!req) 254399b6697aSBart Van Assche return SUCCESS; 254477f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2545d92c0da7SBart Van Assche ch_idx = blk_mq_unique_tag_to_hwq(tag); 2546d92c0da7SBart Van Assche if (WARN_ON_ONCE(ch_idx >= target->ch_count)) 2547d92c0da7SBart Van Assche return SUCCESS; 2548d92c0da7SBart Van Assche ch = &target->ch[ch_idx]; 2549d92c0da7SBart Van Assche if (!srp_claim_req(ch, req, NULL, scmnd)) 2550d92c0da7SBart Van Assche return SUCCESS; 2551d92c0da7SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 2552d92c0da7SBart Van Assche "Sending SRP abort for tag %#x\n", tag); 255377f2c1a4SBart Van Assche if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, 255480d5e8a2SBart Van Assche SRP_TSK_ABORT_TASK) == 0) 2555086f44f5SBart Van Assche ret = SUCCESS; 2556ed9b2264SBart Van Assche else if (target->rport->state == SRP_RPORT_LOST) 255799e1c139SBart Van Assche ret = FAST_IO_FAIL; 2558086f44f5SBart Van Assche else 2559086f44f5SBart Van Assche ret = FAILED; 2560509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 2561d945e1dfSRoland Dreier scmnd->result = DID_ABORT << 16; 2562d8536670SBart Van Assche scmnd->scsi_done(scmnd); 2563d945e1dfSRoland Dreier 2564086f44f5SBart Van Assche return ret; 2565aef9ec39SRoland Dreier } 2566aef9ec39SRoland Dreier 2567aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd) 2568aef9ec39SRoland Dreier { 2569d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2570d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2571536ae14eSBart Van Assche int i, j; 2572d945e1dfSRoland Dreier 25737aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2574aef9ec39SRoland Dreier 2575d92c0da7SBart Van Assche ch = &target->ch[0]; 2576509c07bcSBart Van Assche if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, 2577f8b6e31eSDavid Dillow SRP_TSK_LUN_RESET)) 2578d945e1dfSRoland Dreier return FAILED; 2579509c07bcSBart Van Assche if (ch->tsk_mgmt_status) 2580d945e1dfSRoland Dreier return FAILED; 2581d945e1dfSRoland Dreier 2582d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2583d92c0da7SBart Van Assche ch = &target->ch[i]; 25844d73f95fSBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 2585509c07bcSBart Van Assche struct srp_request *req = &ch->req_ring[j]; 2586509c07bcSBart Van Assche 2587509c07bcSBart Van Assche srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2588536ae14eSBart Van Assche } 2589d92c0da7SBart Van Assche } 2590d945e1dfSRoland Dreier 2591d945e1dfSRoland Dreier return SUCCESS; 2592aef9ec39SRoland Dreier } 2593aef9ec39SRoland Dreier 2594aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd) 2595aef9ec39SRoland Dreier { 2596aef9ec39SRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2597aef9ec39SRoland Dreier 25987aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 2599aef9ec39SRoland Dreier 2600ed9b2264SBart Van Assche return srp_reconnect_rport(target->rport) == 0 ?
SUCCESS : FAILED; 2601aef9ec39SRoland Dreier } 2602aef9ec39SRoland Dreier 2603c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev) 2604c9b03c1aSBart Van Assche { 2605c9b03c1aSBart Van Assche struct Scsi_Host *shost = sdev->host; 2606c9b03c1aSBart Van Assche struct srp_target_port *target = host_to_target(shost); 2607c9b03c1aSBart Van Assche struct request_queue *q = sdev->request_queue; 2608c9b03c1aSBart Van Assche unsigned long timeout; 2609c9b03c1aSBart Van Assche 2610c9b03c1aSBart Van Assche if (sdev->type == TYPE_DISK) { 2611c9b03c1aSBart Van Assche timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2612c9b03c1aSBart Van Assche blk_queue_rq_timeout(q, timeout); 2613c9b03c1aSBart Van Assche } 2614c9b03c1aSBart Van Assche 2615c9b03c1aSBart Van Assche return 0; 2616c9b03c1aSBart Van Assche } 2617c9b03c1aSBart Van Assche 2618ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 2619ee959b00STony Jones char *buf) 26206ecb0c84SRoland Dreier { 2621ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26226ecb0c84SRoland Dreier 262345c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 26246ecb0c84SRoland Dreier } 26256ecb0c84SRoland Dreier 2626ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 2627ee959b00STony Jones char *buf) 26286ecb0c84SRoland Dreier { 2629ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26306ecb0c84SRoland Dreier 263145c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 26326ecb0c84SRoland Dreier } 26336ecb0c84SRoland Dreier 2634ee959b00STony Jones static ssize_t show_service_id(struct device *dev, 2635ee959b00STony Jones struct device_attribute *attr, char *buf) 26366ecb0c84SRoland Dreier { 2637ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26386ecb0c84SRoland Dreier 263945c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); 26406ecb0c84SRoland Dreier } 26416ecb0c84SRoland Dreier 2642ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 2643ee959b00STony Jones char *buf) 26446ecb0c84SRoland Dreier { 2645ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26466ecb0c84SRoland Dreier 2647747fe000SBart Van Assche return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); 26486ecb0c84SRoland Dreier } 26496ecb0c84SRoland Dreier 2650848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2651848b3082SBart Van Assche char *buf) 2652848b3082SBart Van Assche { 2653848b3082SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2654848b3082SBart Van Assche 2655747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->sgid.raw); 2656848b3082SBart Van Assche } 2657848b3082SBart Van Assche 2658ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2659ee959b00STony Jones char *buf) 26606ecb0c84SRoland Dreier { 2661ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2662d92c0da7SBart Van Assche struct srp_rdma_ch *ch = &target->ch[0]; 26636ecb0c84SRoland Dreier 2664509c07bcSBart Van Assche return sprintf(buf, "%pI6\n", ch->path.dgid.raw); 26656ecb0c84SRoland Dreier } 26666ecb0c84SRoland 
Dreier 2667ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev, 2668ee959b00STony Jones struct device_attribute *attr, char *buf) 26693633b3d0SIshai Rabinovitz { 2670ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26713633b3d0SIshai Rabinovitz 2672747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->orig_dgid.raw); 26733633b3d0SIshai Rabinovitz } 26743633b3d0SIshai Rabinovitz 267589de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev, 267689de7486SBart Van Assche struct device_attribute *attr, char *buf) 267789de7486SBart Van Assche { 267889de7486SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2679d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2680d92c0da7SBart Van Assche int i, req_lim = INT_MAX; 268189de7486SBart Van Assche 2682d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2683d92c0da7SBart Van Assche ch = &target->ch[i]; 2684d92c0da7SBart Van Assche req_lim = min(req_lim, ch->req_lim); 2685d92c0da7SBart Van Assche } 2686d92c0da7SBart Van Assche return sprintf(buf, "%d\n", req_lim); 268789de7486SBart Van Assche } 268889de7486SBart Van Assche 2689ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev, 2690ee959b00STony Jones struct device_attribute *attr, char *buf) 26916bfa24faSRoland Dreier { 2692ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26936bfa24faSRoland Dreier 26946bfa24faSRoland Dreier return sprintf(buf, "%d\n", target->zero_req_lim); 26956bfa24faSRoland Dreier } 26966bfa24faSRoland Dreier 2697ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev, 2698ee959b00STony Jones struct device_attribute *attr, char *buf) 2699ded7f1a1SIshai Rabinovitz { 2700ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2701ded7f1a1SIshai Rabinovitz 2702ded7f1a1SIshai Rabinovitz return sprintf(buf, "%d\n", target->srp_host->port); 2703ded7f1a1SIshai Rabinovitz } 2704ded7f1a1SIshai Rabinovitz 2705ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev, 2706ee959b00STony Jones struct device_attribute *attr, char *buf) 2707ded7f1a1SIshai Rabinovitz { 2708ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2709ded7f1a1SIshai Rabinovitz 271005321937SGreg Kroah-Hartman return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2711ded7f1a1SIshai Rabinovitz } 2712ded7f1a1SIshai Rabinovitz 2713d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, 2714d92c0da7SBart Van Assche char *buf) 2715d92c0da7SBart Van Assche { 2716d92c0da7SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2717d92c0da7SBart Van Assche 2718d92c0da7SBart Van Assche return sprintf(buf, "%d\n", target->ch_count); 2719d92c0da7SBart Van Assche } 2720d92c0da7SBart Van Assche 27214b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev, 27224b5e5f41SBart Van Assche struct device_attribute *attr, char *buf) 27234b5e5f41SBart Van Assche { 27244b5e5f41SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27254b5e5f41SBart Van Assche 27264b5e5f41SBart Van Assche return sprintf(buf, "%d\n", target->comp_vector); 27274b5e5f41SBart Van Assche } 27284b5e5f41SBart Van Assche 27297bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev, 27307bb312e4SVu Pham struct 
device_attribute *attr, char *buf) 27317bb312e4SVu Pham { 27327bb312e4SVu Pham struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27337bb312e4SVu Pham 27347bb312e4SVu Pham return sprintf(buf, "%d\n", target->tl_retry_count); 27357bb312e4SVu Pham } 27367bb312e4SVu Pham 273749248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev, 273849248644SDavid Dillow struct device_attribute *attr, char *buf) 273949248644SDavid Dillow { 274049248644SDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 274149248644SDavid Dillow 274249248644SDavid Dillow return sprintf(buf, "%u\n", target->cmd_sg_cnt); 274349248644SDavid Dillow } 274449248644SDavid Dillow 2745c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev, 2746c07d424dSDavid Dillow struct device_attribute *attr, char *buf) 2747c07d424dSDavid Dillow { 2748c07d424dSDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2749c07d424dSDavid Dillow 2750c07d424dSDavid Dillow return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); 2751c07d424dSDavid Dillow } 2752c07d424dSDavid Dillow 2753ee959b00STony Jones static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 2754ee959b00STony Jones static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 2755ee959b00STony Jones static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 2756ee959b00STony Jones static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2757848b3082SBart Van Assche static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); 2758ee959b00STony Jones static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 2759ee959b00STony Jones static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 276089de7486SBart Van Assche static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 2761ee959b00STony Jones static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2762ee959b00STony Jones static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2763ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2764d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); 27654b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 27667bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 276749248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2768c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); 27696ecb0c84SRoland Dreier 2770ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = { 2771ee959b00STony Jones &dev_attr_id_ext, 2772ee959b00STony Jones &dev_attr_ioc_guid, 2773ee959b00STony Jones &dev_attr_service_id, 2774ee959b00STony Jones &dev_attr_pkey, 2775848b3082SBart Van Assche &dev_attr_sgid, 2776ee959b00STony Jones &dev_attr_dgid, 2777ee959b00STony Jones &dev_attr_orig_dgid, 277889de7486SBart Van Assche &dev_attr_req_lim, 2779ee959b00STony Jones &dev_attr_zero_req_lim, 2780ee959b00STony Jones &dev_attr_local_ib_port, 2781ee959b00STony Jones &dev_attr_local_ib_device, 2782d92c0da7SBart Van Assche &dev_attr_ch_count, 27834b5e5f41SBart Van Assche &dev_attr_comp_vector, 27847bb312e4SVu Pham &dev_attr_tl_retry_count, 278549248644SDavid Dillow &dev_attr_cmd_sg_entries, 2786c07d424dSDavid Dillow &dev_attr_allow_ext_sg, 27876ecb0c84SRoland Dreier NULL 27886ecb0c84SRoland Dreier }; 27896ecb0c84SRoland Dreier 2790aef9ec39SRoland Dreier static 
struct scsi_host_template srp_template = { 2791aef9ec39SRoland Dreier .module = THIS_MODULE, 2792b7f008fdSRoland Dreier .name = "InfiniBand SRP initiator", 2793b7f008fdSRoland Dreier .proc_name = DRV_NAME, 2794c9b03c1aSBart Van Assche .slave_configure = srp_slave_configure, 2795aef9ec39SRoland Dreier .info = srp_target_info, 2796aef9ec39SRoland Dreier .queuecommand = srp_queuecommand, 279771444b97SJack Wang .change_queue_depth = srp_change_queue_depth, 2798aef9ec39SRoland Dreier .eh_abort_handler = srp_abort, 2799aef9ec39SRoland Dreier .eh_device_reset_handler = srp_reset_device, 2800aef9ec39SRoland Dreier .eh_host_reset_handler = srp_reset_host, 28012742c1daSBart Van Assche .skip_settle_delay = true, 280249248644SDavid Dillow .sg_tablesize = SRP_DEF_SG_TABLESIZE, 28034d73f95fSBart Van Assche .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 2804aef9ec39SRoland Dreier .this_id = -1, 28054d73f95fSBart Van Assche .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 28066ecb0c84SRoland Dreier .use_clustering = ENABLE_CLUSTERING, 280777f2c1a4SBart Van Assche .shost_attrs = srp_host_attrs, 2808c40ecc12SChristoph Hellwig .track_queue_depth = 1, 2809aef9ec39SRoland Dreier }; 2810aef9ec39SRoland Dreier 281134aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host) 281234aa654eSBart Van Assche { 281334aa654eSBart Van Assche struct scsi_device *sdev; 281434aa654eSBart Van Assche int c = 0; 281534aa654eSBart Van Assche 281634aa654eSBart Van Assche shost_for_each_device(sdev, host) 281734aa654eSBart Van Assche c++; 281834aa654eSBart Van Assche 281934aa654eSBart Van Assche return c; 282034aa654eSBart Van Assche } 282134aa654eSBart Van Assche 2822bc44bd1dSBart Van Assche /* 2823bc44bd1dSBart Van Assche * Return values: 2824bc44bd1dSBart Van Assche * < 0 upon failure. Caller is responsible for SRP target port cleanup. 2825bc44bd1dSBart Van Assche * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 2826bc44bd1dSBart Van Assche * removal has been scheduled. 2827bc44bd1dSBart Van Assche * 0 and target->state != SRP_TARGET_REMOVED upon success. 
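 * In both cases in which 0 is returned the target port has been added to
 * host->target_list and a SCSI scan of the new host has been initiated.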
2828bc44bd1dSBart Van Assche */ 2829aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2830aef9ec39SRoland Dreier { 28313236822bSFUJITA Tomonori struct srp_rport_identifiers ids; 28323236822bSFUJITA Tomonori struct srp_rport *rport; 28333236822bSFUJITA Tomonori 283434aa654eSBart Van Assche target->state = SRP_TARGET_SCANNING; 2835aef9ec39SRoland Dreier sprintf(target->target_name, "SRP.T10:%016llX", 283645c37cadSBart Van Assche be64_to_cpu(target->id_ext)); 2837aef9ec39SRoland Dreier 283805321937SGreg Kroah-Hartman if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 2839aef9ec39SRoland Dreier return -ENODEV; 2840aef9ec39SRoland Dreier 28413236822bSFUJITA Tomonori memcpy(ids.port_id, &target->id_ext, 8); 28423236822bSFUJITA Tomonori memcpy(ids.port_id + 8, &target->ioc_guid, 8); 2843aebd5e47SFUJITA Tomonori ids.roles = SRP_RPORT_ROLE_TARGET; 28443236822bSFUJITA Tomonori rport = srp_rport_add(target->scsi_host, &ids); 28453236822bSFUJITA Tomonori if (IS_ERR(rport)) { 28463236822bSFUJITA Tomonori scsi_remove_host(target->scsi_host); 28473236822bSFUJITA Tomonori return PTR_ERR(rport); 28483236822bSFUJITA Tomonori } 28493236822bSFUJITA Tomonori 2850dc1bdbd9SBart Van Assche rport->lld_data = target; 28519dd69a60SBart Van Assche target->rport = rport; 2852dc1bdbd9SBart Van Assche 2853b3589fd4SMatthew Wilcox spin_lock(&host->target_lock); 2854aef9ec39SRoland Dreier list_add_tail(&target->list, &host->target_list); 2855b3589fd4SMatthew Wilcox spin_unlock(&host->target_lock); 2856aef9ec39SRoland Dreier 2857aef9ec39SRoland Dreier scsi_scan_target(&target->scsi_host->shost_gendev, 28581962a4a1SMatthew Wilcox 0, target->scsi_id, SCAN_WILD_CARD, 0); 2859aef9ec39SRoland Dreier 2860c014c8cdSBart Van Assche if (srp_connected_ch(target) < target->ch_count || 2861c014c8cdSBart Van Assche target->qp_in_error) { 286234aa654eSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 286334aa654eSBart Van Assche PFX "SCSI scan failed - removing SCSI host\n"); 286434aa654eSBart Van Assche srp_queue_remove_work(target); 286534aa654eSBart Van Assche goto out; 286634aa654eSBart Van Assche } 286734aa654eSBart Van Assche 286834aa654eSBart Van Assche pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n", 286934aa654eSBart Van Assche dev_name(&target->scsi_host->shost_gendev), 287034aa654eSBart Van Assche srp_sdev_count(target->scsi_host)); 287134aa654eSBart Van Assche 287234aa654eSBart Van Assche spin_lock_irq(&target->lock); 287334aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 287434aa654eSBart Van Assche target->state = SRP_TARGET_LIVE; 287534aa654eSBart Van Assche spin_unlock_irq(&target->lock); 287634aa654eSBart Van Assche 287734aa654eSBart Van Assche out: 2878aef9ec39SRoland Dreier return 0; 2879aef9ec39SRoland Dreier } 2880aef9ec39SRoland Dreier 2881ee959b00STony Jones static void srp_release_dev(struct device *dev) 2882aef9ec39SRoland Dreier { 2883aef9ec39SRoland Dreier struct srp_host *host = 2884ee959b00STony Jones container_of(dev, struct srp_host, dev); 2885aef9ec39SRoland Dreier 2886aef9ec39SRoland Dreier complete(&host->released); 2887aef9ec39SRoland Dreier } 2888aef9ec39SRoland Dreier 2889aef9ec39SRoland Dreier static struct class srp_class = { 2890aef9ec39SRoland Dreier .name = "infiniband_srp", 2891ee959b00STony Jones .dev_release = srp_release_dev 2892aef9ec39SRoland Dreier }; 2893aef9ec39SRoland Dreier 289496fc248aSBart Van Assche /** 289596fc248aSBart Van Assche * srp_conn_unique() - check whether the 
connection to a target is unique 2896af24663bSBart Van Assche * @host: SRP host. 2897af24663bSBart Van Assche * @target: SRP target port. 289896fc248aSBart Van Assche */ 289996fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host, 290096fc248aSBart Van Assche struct srp_target_port *target) 290196fc248aSBart Van Assche { 290296fc248aSBart Van Assche struct srp_target_port *t; 290396fc248aSBart Van Assche bool ret = false; 290496fc248aSBart Van Assche 290596fc248aSBart Van Assche if (target->state == SRP_TARGET_REMOVED) 290696fc248aSBart Van Assche goto out; 290796fc248aSBart Van Assche 290896fc248aSBart Van Assche ret = true; 290996fc248aSBart Van Assche 291096fc248aSBart Van Assche spin_lock(&host->target_lock); 291196fc248aSBart Van Assche list_for_each_entry(t, &host->target_list, list) { 291296fc248aSBart Van Assche if (t != target && 291396fc248aSBart Van Assche target->id_ext == t->id_ext && 291496fc248aSBart Van Assche target->ioc_guid == t->ioc_guid && 291596fc248aSBart Van Assche target->initiator_ext == t->initiator_ext) { 291696fc248aSBart Van Assche ret = false; 291796fc248aSBart Van Assche break; 291896fc248aSBart Van Assche } 291996fc248aSBart Van Assche } 292096fc248aSBart Van Assche spin_unlock(&host->target_lock); 292196fc248aSBart Van Assche 292296fc248aSBart Van Assche out: 292396fc248aSBart Van Assche return ret; 292496fc248aSBart Van Assche } 292596fc248aSBart Van Assche 2926aef9ec39SRoland Dreier /* 2927aef9ec39SRoland Dreier * Target ports are added by writing 2928aef9ec39SRoland Dreier * 2929aef9ec39SRoland Dreier * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 2930aef9ec39SRoland Dreier * pkey=<P_Key>,service_id=<service ID> 2931aef9ec39SRoland Dreier * 2932aef9ec39SRoland Dreier * to the add_target sysfs attribute. 
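 *
 * For example, with placeholder identifiers (all values are hexadecimal and
 * the dgid must consist of exactly 32 hex digits):
 *
 * echo id_ext=200100a0b8000000,ioc_guid=0002c90200402bd8,dgid=fe800000000000000002c90200402bd9,pkey=ffff,service_id=200100a0b8000000 > /sys/class/infiniband_srp/srp-<hca>-<port>/add_target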
2933aef9ec39SRoland Dreier */ 2934aef9ec39SRoland Dreier enum { 2935aef9ec39SRoland Dreier SRP_OPT_ERR = 0, 2936aef9ec39SRoland Dreier SRP_OPT_ID_EXT = 1 << 0, 2937aef9ec39SRoland Dreier SRP_OPT_IOC_GUID = 1 << 1, 2938aef9ec39SRoland Dreier SRP_OPT_DGID = 1 << 2, 2939aef9ec39SRoland Dreier SRP_OPT_PKEY = 1 << 3, 2940aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID = 1 << 4, 2941aef9ec39SRoland Dreier SRP_OPT_MAX_SECT = 1 << 5, 294252fb2b50SVu Pham SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 29430c0450dbSRamachandra K SRP_OPT_IO_CLASS = 1 << 7, 294401cb9bcbSIshai Rabinovitz SRP_OPT_INITIATOR_EXT = 1 << 8, 294549248644SDavid Dillow SRP_OPT_CMD_SG_ENTRIES = 1 << 9, 2946c07d424dSDavid Dillow SRP_OPT_ALLOW_EXT_SG = 1 << 10, 2947c07d424dSDavid Dillow SRP_OPT_SG_TABLESIZE = 1 << 11, 29484b5e5f41SBart Van Assche SRP_OPT_COMP_VECTOR = 1 << 12, 29497bb312e4SVu Pham SRP_OPT_TL_RETRY_COUNT = 1 << 13, 29504d73f95fSBart Van Assche SRP_OPT_QUEUE_SIZE = 1 << 14, 2951aef9ec39SRoland Dreier SRP_OPT_ALL = (SRP_OPT_ID_EXT | 2952aef9ec39SRoland Dreier SRP_OPT_IOC_GUID | 2953aef9ec39SRoland Dreier SRP_OPT_DGID | 2954aef9ec39SRoland Dreier SRP_OPT_PKEY | 2955aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID), 2956aef9ec39SRoland Dreier }; 2957aef9ec39SRoland Dreier 2958a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = { 2959aef9ec39SRoland Dreier { SRP_OPT_ID_EXT, "id_ext=%s" }, 2960aef9ec39SRoland Dreier { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 2961aef9ec39SRoland Dreier { SRP_OPT_DGID, "dgid=%s" }, 2962aef9ec39SRoland Dreier { SRP_OPT_PKEY, "pkey=%x" }, 2963aef9ec39SRoland Dreier { SRP_OPT_SERVICE_ID, "service_id=%s" }, 2964aef9ec39SRoland Dreier { SRP_OPT_MAX_SECT, "max_sect=%d" }, 296552fb2b50SVu Pham { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 29660c0450dbSRamachandra K { SRP_OPT_IO_CLASS, "io_class=%x" }, 296701cb9bcbSIshai Rabinovitz { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 296849248644SDavid Dillow { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, 2969c07d424dSDavid Dillow { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, 2970c07d424dSDavid Dillow { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, 29714b5e5f41SBart Van Assche { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, 29727bb312e4SVu Pham { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, 29734d73f95fSBart Van Assche { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, 2974aef9ec39SRoland Dreier { SRP_OPT_ERR, NULL } 2975aef9ec39SRoland Dreier }; 2976aef9ec39SRoland Dreier 2977aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target) 2978aef9ec39SRoland Dreier { 2979aef9ec39SRoland Dreier char *options, *sep_opt; 2980aef9ec39SRoland Dreier char *p; 2981aef9ec39SRoland Dreier char dgid[3]; 2982aef9ec39SRoland Dreier substring_t args[MAX_OPT_ARGS]; 2983aef9ec39SRoland Dreier int opt_mask = 0; 2984aef9ec39SRoland Dreier int token; 2985aef9ec39SRoland Dreier int ret = -EINVAL; 2986aef9ec39SRoland Dreier int i; 2987aef9ec39SRoland Dreier 2988aef9ec39SRoland Dreier options = kstrdup(buf, GFP_KERNEL); 2989aef9ec39SRoland Dreier if (!options) 2990aef9ec39SRoland Dreier return -ENOMEM; 2991aef9ec39SRoland Dreier 2992aef9ec39SRoland Dreier sep_opt = options; 29937dcf9c19SSagi Grimberg while ((p = strsep(&sep_opt, ",\n")) != NULL) { 2994aef9ec39SRoland Dreier if (!*p) 2995aef9ec39SRoland Dreier continue; 2996aef9ec39SRoland Dreier 2997aef9ec39SRoland Dreier token = match_token(p, srp_opt_tokens, args); 2998aef9ec39SRoland Dreier opt_mask |= token; 2999aef9ec39SRoland Dreier 3000aef9ec39SRoland Dreier switch (token) { 3001aef9ec39SRoland Dreier 
case SRP_OPT_ID_EXT: 3002aef9ec39SRoland Dreier p = match_strdup(args); 3003a20f3a6dSIshai Rabinovitz if (!p) { 3004a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3005a20f3a6dSIshai Rabinovitz goto out; 3006a20f3a6dSIshai Rabinovitz } 3007aef9ec39SRoland Dreier target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3008aef9ec39SRoland Dreier kfree(p); 3009aef9ec39SRoland Dreier break; 3010aef9ec39SRoland Dreier 3011aef9ec39SRoland Dreier case SRP_OPT_IOC_GUID: 3012aef9ec39SRoland Dreier p = match_strdup(args); 3013a20f3a6dSIshai Rabinovitz if (!p) { 3014a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3015a20f3a6dSIshai Rabinovitz goto out; 3016a20f3a6dSIshai Rabinovitz } 3017aef9ec39SRoland Dreier target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3018aef9ec39SRoland Dreier kfree(p); 3019aef9ec39SRoland Dreier break; 3020aef9ec39SRoland Dreier 3021aef9ec39SRoland Dreier case SRP_OPT_DGID: 3022aef9ec39SRoland Dreier p = match_strdup(args); 3023a20f3a6dSIshai Rabinovitz if (!p) { 3024a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3025a20f3a6dSIshai Rabinovitz goto out; 3026a20f3a6dSIshai Rabinovitz } 3027aef9ec39SRoland Dreier if (strlen(p) != 32) { 3028e0bda7d8SBart Van Assche pr_warn("bad dest GID parameter '%s'\n", p); 3029ce1823f0SRoland Dreier kfree(p); 3030aef9ec39SRoland Dreier goto out; 3031aef9ec39SRoland Dreier } 3032aef9ec39SRoland Dreier 3033aef9ec39SRoland Dreier for (i = 0; i < 16; ++i) { 3034747fe000SBart Van Assche strlcpy(dgid, p + i * 2, sizeof(dgid)); 3035747fe000SBart Van Assche if (sscanf(dgid, "%hhx", 3036747fe000SBart Van Assche &target->orig_dgid.raw[i]) < 1) { 3037747fe000SBart Van Assche ret = -EINVAL; 3038747fe000SBart Van Assche kfree(p); 3039747fe000SBart Van Assche goto out; 3040747fe000SBart Van Assche } 3041aef9ec39SRoland Dreier } 3042bf17c1c7SRoland Dreier kfree(p); 3043aef9ec39SRoland Dreier break; 3044aef9ec39SRoland Dreier 3045aef9ec39SRoland Dreier case SRP_OPT_PKEY: 3046aef9ec39SRoland Dreier if (match_hex(args, &token)) { 3047e0bda7d8SBart Van Assche pr_warn("bad P_Key parameter '%s'\n", p); 3048aef9ec39SRoland Dreier goto out; 3049aef9ec39SRoland Dreier } 3050747fe000SBart Van Assche target->pkey = cpu_to_be16(token); 3051aef9ec39SRoland Dreier break; 3052aef9ec39SRoland Dreier 3053aef9ec39SRoland Dreier case SRP_OPT_SERVICE_ID: 3054aef9ec39SRoland Dreier p = match_strdup(args); 3055a20f3a6dSIshai Rabinovitz if (!p) { 3056a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3057a20f3a6dSIshai Rabinovitz goto out; 3058a20f3a6dSIshai Rabinovitz } 3059aef9ec39SRoland Dreier target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3060aef9ec39SRoland Dreier kfree(p); 3061aef9ec39SRoland Dreier break; 3062aef9ec39SRoland Dreier 3063aef9ec39SRoland Dreier case SRP_OPT_MAX_SECT: 3064aef9ec39SRoland Dreier if (match_int(args, &token)) { 3065e0bda7d8SBart Van Assche pr_warn("bad max sect parameter '%s'\n", p); 3066aef9ec39SRoland Dreier goto out; 3067aef9ec39SRoland Dreier } 3068aef9ec39SRoland Dreier target->scsi_host->max_sectors = token; 3069aef9ec39SRoland Dreier break; 3070aef9ec39SRoland Dreier 30714d73f95fSBart Van Assche case SRP_OPT_QUEUE_SIZE: 30724d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 30734d73f95fSBart Van Assche pr_warn("bad queue_size parameter '%s'\n", p); 30744d73f95fSBart Van Assche goto out; 30754d73f95fSBart Van Assche } 30764d73f95fSBart Van Assche target->scsi_host->can_queue = token; 30774d73f95fSBart Van Assche target->queue_size = token + SRP_RSP_SQ_SIZE + 30784d73f95fSBart Van Assche SRP_TSK_MGMT_SQ_SIZE; 
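/*
 * Unless max_cmd_per_lun has also been specified, let cmd_per_lun
 * follow the requested queue size.
 */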
30794d73f95fSBart Van Assche if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 30804d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 30814d73f95fSBart Van Assche break; 30824d73f95fSBart Van Assche 308352fb2b50SVu Pham case SRP_OPT_MAX_CMD_PER_LUN: 30844d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 3085e0bda7d8SBart Van Assche pr_warn("bad max cmd_per_lun parameter '%s'\n", 3086e0bda7d8SBart Van Assche p); 308752fb2b50SVu Pham goto out; 308852fb2b50SVu Pham } 30894d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 309052fb2b50SVu Pham break; 309152fb2b50SVu Pham 30920c0450dbSRamachandra K case SRP_OPT_IO_CLASS: 30930c0450dbSRamachandra K if (match_hex(args, &token)) { 3094e0bda7d8SBart Van Assche pr_warn("bad IO class parameter '%s'\n", p); 30950c0450dbSRamachandra K goto out; 30960c0450dbSRamachandra K } 30970c0450dbSRamachandra K if (token != SRP_REV10_IB_IO_CLASS && 30980c0450dbSRamachandra K token != SRP_REV16A_IB_IO_CLASS) { 3099e0bda7d8SBart Van Assche pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", 3100e0bda7d8SBart Van Assche token, SRP_REV10_IB_IO_CLASS, 3101e0bda7d8SBart Van Assche SRP_REV16A_IB_IO_CLASS); 31020c0450dbSRamachandra K goto out; 31030c0450dbSRamachandra K } 31040c0450dbSRamachandra K target->io_class = token; 31050c0450dbSRamachandra K break; 31060c0450dbSRamachandra K 310701cb9bcbSIshai Rabinovitz case SRP_OPT_INITIATOR_EXT: 310801cb9bcbSIshai Rabinovitz p = match_strdup(args); 3109a20f3a6dSIshai Rabinovitz if (!p) { 3110a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3111a20f3a6dSIshai Rabinovitz goto out; 3112a20f3a6dSIshai Rabinovitz } 311301cb9bcbSIshai Rabinovitz target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 311401cb9bcbSIshai Rabinovitz kfree(p); 311501cb9bcbSIshai Rabinovitz break; 311601cb9bcbSIshai Rabinovitz 311749248644SDavid Dillow case SRP_OPT_CMD_SG_ENTRIES: 311849248644SDavid Dillow if (match_int(args, &token) || token < 1 || token > 255) { 3119e0bda7d8SBart Van Assche pr_warn("bad max cmd_sg_entries parameter '%s'\n", 3120e0bda7d8SBart Van Assche p); 312149248644SDavid Dillow goto out; 312249248644SDavid Dillow } 312349248644SDavid Dillow target->cmd_sg_cnt = token; 312449248644SDavid Dillow break; 312549248644SDavid Dillow 3126c07d424dSDavid Dillow case SRP_OPT_ALLOW_EXT_SG: 3127c07d424dSDavid Dillow if (match_int(args, &token)) { 3128e0bda7d8SBart Van Assche pr_warn("bad allow_ext_sg parameter '%s'\n", p); 3129c07d424dSDavid Dillow goto out; 3130c07d424dSDavid Dillow } 3131c07d424dSDavid Dillow target->allow_ext_sg = !!token; 3132c07d424dSDavid Dillow break; 3133c07d424dSDavid Dillow 3134c07d424dSDavid Dillow case SRP_OPT_SG_TABLESIZE: 3135c07d424dSDavid Dillow if (match_int(args, &token) || token < 1 || 3136c07d424dSDavid Dillow token > SCSI_MAX_SG_CHAIN_SEGMENTS) { 3137e0bda7d8SBart Van Assche pr_warn("bad max sg_tablesize parameter '%s'\n", 3138e0bda7d8SBart Van Assche p); 3139c07d424dSDavid Dillow goto out; 3140c07d424dSDavid Dillow } 3141c07d424dSDavid Dillow target->sg_tablesize = token; 3142c07d424dSDavid Dillow break; 3143c07d424dSDavid Dillow 31444b5e5f41SBart Van Assche case SRP_OPT_COMP_VECTOR: 31454b5e5f41SBart Van Assche if (match_int(args, &token) || token < 0) { 31464b5e5f41SBart Van Assche pr_warn("bad comp_vector parameter '%s'\n", p); 31474b5e5f41SBart Van Assche goto out; 31484b5e5f41SBart Van Assche } 31494b5e5f41SBart Van Assche target->comp_vector = token; 31504b5e5f41SBart Van Assche break; 31514b5e5f41SBart Van Assche 31527bb312e4SVu Pham 
case SRP_OPT_TL_RETRY_COUNT: 31537bb312e4SVu Pham if (match_int(args, &token) || token < 2 || token > 7) { 31547bb312e4SVu Pham pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", 31557bb312e4SVu Pham p); 31567bb312e4SVu Pham goto out; 31577bb312e4SVu Pham } 31587bb312e4SVu Pham target->tl_retry_count = token; 31597bb312e4SVu Pham break; 31607bb312e4SVu Pham 3161aef9ec39SRoland Dreier default: 3162e0bda7d8SBart Van Assche pr_warn("unknown parameter or missing value '%s' in target creation request\n", 3163e0bda7d8SBart Van Assche p); 3164aef9ec39SRoland Dreier goto out; 3165aef9ec39SRoland Dreier } 3166aef9ec39SRoland Dreier } 3167aef9ec39SRoland Dreier 3168aef9ec39SRoland Dreier if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 3169aef9ec39SRoland Dreier ret = 0; 3170aef9ec39SRoland Dreier else 3171aef9ec39SRoland Dreier for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 3172aef9ec39SRoland Dreier if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 3173aef9ec39SRoland Dreier !(srp_opt_tokens[i].token & opt_mask)) 3174e0bda7d8SBart Van Assche pr_warn("target creation request is missing parameter '%s'\n", 3175aef9ec39SRoland Dreier srp_opt_tokens[i].pattern); 3176aef9ec39SRoland Dreier 31774d73f95fSBart Van Assche if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue 31784d73f95fSBart Van Assche && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 31794d73f95fSBart Van Assche pr_warn("cmd_per_lun = %d > queue_size = %d\n", 31804d73f95fSBart Van Assche target->scsi_host->cmd_per_lun, 31814d73f95fSBart Van Assche target->scsi_host->can_queue); 31824d73f95fSBart Van Assche 3183aef9ec39SRoland Dreier out: 3184aef9ec39SRoland Dreier kfree(options); 3185aef9ec39SRoland Dreier return ret; 3186aef9ec39SRoland Dreier } 3187aef9ec39SRoland Dreier 3188ee959b00STony Jones static ssize_t srp_create_target(struct device *dev, 3189ee959b00STony Jones struct device_attribute *attr, 3190aef9ec39SRoland Dreier const char *buf, size_t count) 3191aef9ec39SRoland Dreier { 3192aef9ec39SRoland Dreier struct srp_host *host = 3193ee959b00STony Jones container_of(dev, struct srp_host, dev); 3194aef9ec39SRoland Dreier struct Scsi_Host *target_host; 3195aef9ec39SRoland Dreier struct srp_target_port *target; 3196509c07bcSBart Van Assche struct srp_rdma_ch *ch; 3197d1b4289eSBart Van Assche struct srp_device *srp_dev = host->srp_dev; 3198d1b4289eSBart Van Assche struct ib_device *ibdev = srp_dev->dev; 3199d92c0da7SBart Van Assche int ret, node_idx, node, cpu, i; 3200d92c0da7SBart Van Assche bool multich = false; 3201aef9ec39SRoland Dreier 3202aef9ec39SRoland Dreier target_host = scsi_host_alloc(&srp_template, 3203aef9ec39SRoland Dreier sizeof (struct srp_target_port)); 3204aef9ec39SRoland Dreier if (!target_host) 3205aef9ec39SRoland Dreier return -ENOMEM; 3206aef9ec39SRoland Dreier 32073236822bSFUJITA Tomonori target_host->transportt = ib_srp_transport_template; 3208fd1b6c4aSBart Van Assche target_host->max_channel = 0; 3209fd1b6c4aSBart Van Assche target_host->max_id = 1; 3210985aa495SBart Van Assche target_host->max_lun = -1LL; 32113c8edf0eSArne Redlich target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 32125f068992SRoland Dreier 3213aef9ec39SRoland Dreier target = host_to_target(target_host); 3214aef9ec39SRoland Dreier 32150c0450dbSRamachandra K target->io_class = SRP_REV16A_IB_IO_CLASS; 3216aef9ec39SRoland Dreier target->scsi_host = target_host; 3217aef9ec39SRoland Dreier target->srp_host = host; 3218e6bf5f48SJason Gunthorpe target->lkey = host->srp_dev->pd->local_dma_lkey; 
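/*
 * The values assigned below are defaults; srp_parse_options() may
 * override cmd_sg_cnt, sg_tablesize, allow_ext_sg, tl_retry_count and
 * queue_size.
 */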
321903f6fb93SBart Van Assche target->global_mr = host->srp_dev->global_mr; 322049248644SDavid Dillow target->cmd_sg_cnt = cmd_sg_entries; 3221c07d424dSDavid Dillow target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3222c07d424dSDavid Dillow target->allow_ext_sg = allow_ext_sg; 32237bb312e4SVu Pham target->tl_retry_count = 7; 32244d73f95fSBart Van Assche target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3225aef9ec39SRoland Dreier 322634aa654eSBart Van Assche /* 322734aa654eSBart Van Assche * Avoid that the SCSI host can be removed by srp_remove_target() 322834aa654eSBart Van Assche * before this function returns. 322934aa654eSBart Van Assche */ 323034aa654eSBart Van Assche scsi_host_get(target->scsi_host); 323134aa654eSBart Van Assche 32322d7091bcSBart Van Assche mutex_lock(&host->add_target_mutex); 32332d7091bcSBart Van Assche 3234aef9ec39SRoland Dreier ret = srp_parse_options(buf, target); 3235aef9ec39SRoland Dreier if (ret) 3236fb49c8bbSBart Van Assche goto out; 3237aef9ec39SRoland Dreier 32384d73f95fSBart Van Assche target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 32394d73f95fSBart Van Assche 324096fc248aSBart Van Assche if (!srp_conn_unique(target->srp_host, target)) { 324196fc248aSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 324296fc248aSBart Van Assche PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", 324396fc248aSBart Van Assche be64_to_cpu(target->id_ext), 324496fc248aSBart Van Assche be64_to_cpu(target->ioc_guid), 324596fc248aSBart Van Assche be64_to_cpu(target->initiator_ext)); 324696fc248aSBart Van Assche ret = -EEXIST; 3247fb49c8bbSBart Van Assche goto out; 324896fc248aSBart Van Assche } 324996fc248aSBart Van Assche 32505cfb1782SBart Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && 3251c07d424dSDavid Dillow target->cmd_sg_cnt < target->sg_tablesize) { 32525cfb1782SBart Van Assche pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3253c07d424dSDavid Dillow target->sg_tablesize = target->cmd_sg_cnt; 3254c07d424dSDavid Dillow } 3255c07d424dSDavid Dillow 3256c07d424dSDavid Dillow target_host->sg_tablesize = target->sg_tablesize; 3257c07d424dSDavid Dillow target->indirect_size = target->sg_tablesize * 3258c07d424dSDavid Dillow sizeof (struct srp_direct_buf); 325949248644SDavid Dillow target->max_iu_len = sizeof (struct srp_cmd) + 326049248644SDavid Dillow sizeof (struct srp_indirect_buf) + 326149248644SDavid Dillow target->cmd_sg_cnt * sizeof (struct srp_direct_buf); 326249248644SDavid Dillow 3263c1120f89SBart Van Assche INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3264ef6c49d8SBart Van Assche INIT_WORK(&target->remove_work, srp_remove_work); 32658f26c9ffSDavid Dillow spin_lock_init(&target->lock); 326655ee3ab2SMatan Barak ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL); 32672088ca66SSagi Grimberg if (ret) 3268fb49c8bbSBart Van Assche goto out; 3269d92c0da7SBart Van Assche 3270d92c0da7SBart Van Assche ret = -ENOMEM; 3271d92c0da7SBart Van Assche target->ch_count = max_t(unsigned, num_online_nodes(), 3272d92c0da7SBart Van Assche min(ch_count ? 
: 3273d92c0da7SBart Van Assche min(4 * num_online_nodes(), 3274d92c0da7SBart Van Assche ibdev->num_comp_vectors), 3275d92c0da7SBart Van Assche num_online_cpus())); 3276d92c0da7SBart Van Assche target->ch = kcalloc(target->ch_count, sizeof(*target->ch), 3277d92c0da7SBart Van Assche GFP_KERNEL); 3278d92c0da7SBart Van Assche if (!target->ch) 3279fb49c8bbSBart Van Assche goto out; 3280d92c0da7SBart Van Assche 3281d92c0da7SBart Van Assche node_idx = 0; 3282d92c0da7SBart Van Assche for_each_online_node(node) { 3283d92c0da7SBart Van Assche const int ch_start = (node_idx * target->ch_count / 3284d92c0da7SBart Van Assche num_online_nodes()); 3285d92c0da7SBart Van Assche const int ch_end = ((node_idx + 1) * target->ch_count / 3286d92c0da7SBart Van Assche num_online_nodes()); 3287d92c0da7SBart Van Assche const int cv_start = (node_idx * ibdev->num_comp_vectors / 3288d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3289d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3290d92c0da7SBart Van Assche const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / 3291d92c0da7SBart Van Assche num_online_nodes() + target->comp_vector) 3292d92c0da7SBart Van Assche % ibdev->num_comp_vectors; 3293d92c0da7SBart Van Assche int cpu_idx = 0; 3294d92c0da7SBart Van Assche 3295d92c0da7SBart Van Assche for_each_online_cpu(cpu) { 3296d92c0da7SBart Van Assche if (cpu_to_node(cpu) != node) 3297d92c0da7SBart Van Assche continue; 3298d92c0da7SBart Van Assche if (ch_start + cpu_idx >= ch_end) 3299d92c0da7SBart Van Assche continue; 3300d92c0da7SBart Van Assche ch = &target->ch[ch_start + cpu_idx]; 3301d92c0da7SBart Van Assche ch->target = target; 3302d92c0da7SBart Van Assche ch->comp_vector = cv_start == cv_end ? cv_start : 3303d92c0da7SBart Van Assche cv_start + cpu_idx % (cv_end - cv_start); 3304d92c0da7SBart Van Assche spin_lock_init(&ch->lock); 3305d92c0da7SBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 3306d92c0da7SBart Van Assche ret = srp_new_cm_id(ch); 3307d92c0da7SBart Van Assche if (ret) 3308d92c0da7SBart Van Assche goto err_disconnect; 3309aef9ec39SRoland Dreier 3310509c07bcSBart Van Assche ret = srp_create_ch_ib(ch); 3311aef9ec39SRoland Dreier if (ret) 3312d92c0da7SBart Van Assche goto err_disconnect; 3313aef9ec39SRoland Dreier 3314d92c0da7SBart Van Assche ret = srp_alloc_req_data(ch); 33159fe4bcf4SDavid Dillow if (ret) 3316d92c0da7SBart Van Assche goto err_disconnect; 3317aef9ec39SRoland Dreier 3318d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 3319aef9ec39SRoland Dreier if (ret) { 33207aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 3321d92c0da7SBart Van Assche PFX "Connection %d/%d failed\n", 3322d92c0da7SBart Van Assche ch_start + cpu_idx, 3323d92c0da7SBart Van Assche target->ch_count); 3324d92c0da7SBart Van Assche if (node_idx == 0 && cpu_idx == 0) { 3325d92c0da7SBart Van Assche goto err_disconnect; 3326d92c0da7SBart Van Assche } else { 3327d92c0da7SBart Van Assche srp_free_ch_ib(target, ch); 3328d92c0da7SBart Van Assche srp_free_req_data(target, ch); 3329d92c0da7SBart Van Assche target->ch_count = ch - target->ch; 3330c257ea6fSBart Van Assche goto connected; 3331aef9ec39SRoland Dreier } 3332d92c0da7SBart Van Assche } 3333d92c0da7SBart Van Assche 3334d92c0da7SBart Van Assche multich = true; 3335d92c0da7SBart Van Assche cpu_idx++; 3336d92c0da7SBart Van Assche } 3337d92c0da7SBart Van Assche node_idx++; 3338d92c0da7SBart Van Assche } 3339d92c0da7SBart Van Assche 3340c257ea6fSBart Van Assche connected: 3341d92c0da7SBart Van Assche target->scsi_host->nr_hw_queues = 
connected:
        target->scsi_host->nr_hw_queues = target->ch_count;

        ret = srp_add_target(host, target);
        if (ret)
                goto err_disconnect;

        if (target->state != SRP_TARGET_REMOVED) {
                shost_printk(KERN_DEBUG, target->scsi_host, PFX
                             "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
                             be64_to_cpu(target->id_ext),
                             be64_to_cpu(target->ioc_guid),
                             be16_to_cpu(target->pkey),
                             be64_to_cpu(target->service_id),
                             target->sgid.raw, target->orig_dgid.raw);
        }

        ret = count;

out:
        mutex_unlock(&host->add_target_mutex);

        scsi_host_put(target->scsi_host);
        if (ret < 0)
                scsi_host_put(target->scsi_host);

        return ret;

err_disconnect:
        srp_disconnect_target(target);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
                srp_free_req_data(target, ch);
        }

        kfree(target->ch);
        goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct srp_host *host = container_of(dev, struct srp_host, dev);

        return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

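/*
 * srp_add_port() sets up one srp_host per HCA port: it registers a device
 * named "srp-<ibdev>-<port>" in the infiniband_srp class and creates the
 * add_target, ibdev and port sysfs attributes defined above.
 */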
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
        struct srp_host *host;

        host = kzalloc(sizeof *host, GFP_KERNEL);
        if (!host)
                return NULL;

        INIT_LIST_HEAD(&host->target_list);
        spin_lock_init(&host->target_lock);
        init_completion(&host->released);
        mutex_init(&host->add_target_mutex);
        host->srp_dev = device;
        host->port = port;

        host->dev.class = &srp_class;
        host->dev.parent = device->dev->dma_device;
        dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

        if (device_register(&host->dev))
                goto free_host;
        if (device_create_file(&host->dev, &dev_attr_add_target))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_ibdev))
                goto err_class;
        if (device_create_file(&host->dev, &dev_attr_port))
                goto err_class;

        return host;

err_class:
        device_unregister(&host->dev);

free_host:
        kfree(host);

        return NULL;
}

static void srp_add_one(struct ib_device *device)
{
        struct srp_device *srp_dev;
        struct ib_device_attr *dev_attr;
        struct srp_host *host;
        int mr_page_shift, p;
        u64 max_pages_per_mr;

        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
        if (!dev_attr)
                return;

        if (ib_query_device(device, dev_attr)) {
                pr_warn("Query device failed for %s\n", device->name);
                goto free_attr;
        }

        srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
        if (!srp_dev)
                goto free_attr;

        srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
                            device->map_phys_fmr && device->unmap_fmr);
        srp_dev->has_fr = (dev_attr->device_cap_flags &
                           IB_DEVICE_MEM_MGT_EXTENSIONS);
        if (!srp_dev->has_fmr && !srp_dev->has_fr)
                dev_warn(&device->dev, "neither FMR nor FR is supported\n");

        srp_dev->use_fast_reg = (srp_dev->has_fr &&
                                 (!srp_dev->has_fmr || prefer_fr));
        srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
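        /*
         * In other words: a device that supports both FMR and fast
         * registration uses fast registration with the default prefer_fr
         * setting; FMR is only used if fast registration is unavailable or
         * has been disabled via the prefer_fr module parameter.
         */
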
        /*
         * Use the smallest page size supported by the HCA, down to a
         * minimum of 4096 bytes. We're unlikely to build large sglists
         * out of smaller entries.
         */
        mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
        srp_dev->mr_page_size = 1 << mr_page_shift;
        srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
        max_pages_per_mr = dev_attr->max_mr_size;
        do_div(max_pages_per_mr, srp_dev->mr_page_size);
        srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
                                          max_pages_per_mr);
        if (srp_dev->use_fast_reg) {
                srp_dev->max_pages_per_mr =
                        min_t(u32, srp_dev->max_pages_per_mr,
                              dev_attr->max_fast_reg_page_list_len);
        }
        srp_dev->mr_max_size = srp_dev->mr_page_size *
                               srp_dev->max_pages_per_mr;
        pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
                 device->name, mr_page_shift, dev_attr->max_mr_size,
                 dev_attr->max_fast_reg_page_list_len,
                 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

        INIT_LIST_HEAD(&srp_dev->dev_list);

        srp_dev->dev = device;
        srp_dev->pd = ib_alloc_pd(device);
        if (IS_ERR(srp_dev->pd))
                goto free_dev;

        if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
                srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
                                                   IB_ACCESS_LOCAL_WRITE |
                                                   IB_ACCESS_REMOTE_READ |
                                                   IB_ACCESS_REMOTE_WRITE);
                if (IS_ERR(srp_dev->global_mr))
                        goto err_pd;
        } else {
                srp_dev->global_mr = NULL;
        }

        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
        }

        ib_set_client_data(device, &srp_client, srp_dev);

        goto free_attr;

err_pd:
        ib_dealloc_pd(srp_dev->pd);

free_dev:
        kfree(srp_dev);

free_attr:
        kfree(dev_attr);
}

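/*
 * srp_remove_one() undoes srp_add_one(): it unregisters the per-port sysfs
 * devices, waits until no new target ports can be created, queues removal of
 * all existing target ports and waits for that work to finish, and finally
 * releases the global MR (if one was created) and the protection domain.
 */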
static void srp_remove_one(struct ib_device *device, void *client_data)
{
        struct srp_device *srp_dev;
        struct srp_host *host, *tmp_host;
        struct srp_target_port *target;

        srp_dev = client_data;
        if (!srp_dev)
                return;

        list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
                device_unregister(&host->dev);
                /*
                 * Wait for the sysfs entry to go away, so that no new
                 * target ports can be created.
                 */
                wait_for_completion(&host->released);

                /*
                 * Remove all target ports.
                 */
                spin_lock(&host->target_lock);
                list_for_each_entry(target, &host->target_list, list)
                        srp_queue_remove_work(target);
                spin_unlock(&host->target_lock);

                /*
                 * Wait for tl_err and target port removal tasks.
                 */
                flush_workqueue(system_long_wq);
                flush_workqueue(srp_remove_wq);

                kfree(host);
        }

        if (srp_dev->global_mr)
                ib_dereg_mr(srp_dev->global_mr);
        ib_dealloc_pd(srp_dev->pd);

        kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
        .has_rport_state = true,
        .reset_timer_if_blocked = true,
        .reconnect_delay = &srp_reconnect_delay,
        .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
        .dev_loss_tmo = &srp_dev_loss_tmo,
        .reconnect = srp_rport_reconnect,
        .rport_delete = srp_rport_delete,
        .terminate_rport_io = srp_terminate_io,
};

static int __init srp_init_module(void)
{
        int ret;

        BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

        if (srp_sg_tablesize) {
                pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
                if (!cmd_sg_entries)
                        cmd_sg_entries = srp_sg_tablesize;
        }

        if (!cmd_sg_entries)
                cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

        if (cmd_sg_entries > 255) {
                pr_warn("Clamping cmd_sg_entries to 255\n");
                cmd_sg_entries = 255;
        }

        if (!indirect_sg_entries)
                indirect_sg_entries = cmd_sg_entries;
        else if (indirect_sg_entries < cmd_sg_entries) {
                pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
                        cmd_sg_entries);
                indirect_sg_entries = cmd_sg_entries;
        }

        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
                goto out;
        }

        ret = -ENOMEM;
        ib_srp_transport_template =
                srp_attach_transport(&ib_srp_transport_functions);
        if (!ib_srp_transport_template)
                goto destroy_wq;

        ret = class_register(&srp_class);
        if (ret) {
                pr_err("couldn't register class infiniband_srp\n");
                goto release_tr;
        }

        ib_sa_register_client(&srp_sa_client);

        ret = ib_register_client(&srp_client);
        if (ret) {
                pr_err("couldn't register IB client\n");
                goto unreg_sa;
        }

out:
        return ret;

unreg_sa:
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);

release_tr:
        srp_release_transport(ib_srp_transport_template);

destroy_wq:
        destroy_workqueue(srp_remove_wq);
        goto out;
}

static void __exit srp_cleanup_module(void)
{
        ib_unregister_client(&srp_client);
        ib_sa_unregister_client(&srp_sa_client);
        class_unregister(&srp_class);
        srp_release_transport(ib_srp_transport_template);
        destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
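/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * once srp_add_port() has created the add_target attribute, a target port
 * is typically logged in by writing an option string to it.  All identifier
 * values, the HCA name and the port number below are placeholders; real
 * values come from the SRP target, e.g. via ibsrpdm or srp_daemon:
 *
 *   echo "id_ext=0x0002c90300a0b0c0,ioc_guid=0x0002c90300a0b0c0,dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,service_id=0x0002c90300a0b0c0" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */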