/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

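/*
 * Forward declarations for the ib_client callbacks, the completion and CM
 * event handlers and the transport glue that are defined later in this file.
 */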
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

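/*
 * srp_tmo_ops wires the get/set handlers above to the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo module parameters; srp_tmo_set() rejects
 * combinations that srp_tmo_valid() considers invalid.
 */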
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

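/*
 * Move a freshly created queue pair to the INIT state and set the P_Key
 * index, the remote read/write access flags and the port number that will be
 * used for the SRP connection.
 */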
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This prevents the receive completion
 * handler from accessing the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr  = m * target->queue_size;
	init_attr->cap.max_recv_wr  = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_REQ_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = send_cq;
	init_attr->recv_cq          = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

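/*
 * srp_free_ch_ib() below tears down the per-channel IB resources: the CM ID
 * first (so that no CM callbacks can race with the teardown), then the FR/FMR
 * pools, the QP and the CQs, and finally the receive and transmit IU rings.
 */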
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed. The SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

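/*
 * Build and send an SRP_LOGIN_REQ for this channel through the IB CM. The
 * multich argument selects between SRP_MULTICHAN_SINGLE and
 * SRP_MULTICHAN_MULTI in the request flags.
 */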
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &ch->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = ch->qp->qp_num;
	req->param.qp_type = ch->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = target->tl_retry_count;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
			       SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist before
 * this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

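/*
 * The two helpers below post an IB_WR_LOCAL_INV work request to invalidate a
 * fast registration rkey; errors on that work request are reported through
 * srp_handle_qp_err() by srp_inv_rkey_err_done().
 */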
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch: SRP RDMA channel.
 * @req: Request to be freed.
 * @scmnd: SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
113122032991SBart Van Assche */ 1132509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1133509c07bcSBart Van Assche struct scsi_cmnd *scmnd, s32 req_lim_delta) 113422032991SBart Van Assche { 113522032991SBart Van Assche unsigned long flags; 113622032991SBart Van Assche 1137509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 113822032991SBart Van Assche 1139509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1140509c07bcSBart Van Assche ch->req_lim += req_lim_delta; 1141509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 1142526b4caaSIshai Rabinovitz } 1143526b4caaSIshai Rabinovitz 1144509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1145509c07bcSBart Van Assche struct scsi_device *sdev, int result) 1146526b4caaSIshai Rabinovitz { 1147509c07bcSBart Van Assche struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 114822032991SBart Van Assche 114922032991SBart Van Assche if (scmnd) { 1150509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0); 1151ed9b2264SBart Van Assche scmnd->result = result; 115222032991SBart Van Assche scmnd->scsi_done(scmnd); 115322032991SBart Van Assche } 1154526b4caaSIshai Rabinovitz } 1155526b4caaSIshai Rabinovitz 1156ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport) 1157aef9ec39SRoland Dreier { 1158ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1159d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1160b3fe628dSBart Van Assche struct Scsi_Host *shost = target->scsi_host; 1161b3fe628dSBart Van Assche struct scsi_device *sdev; 1162d92c0da7SBart Van Assche int i, j; 1163aef9ec39SRoland Dreier 1164b3fe628dSBart Van Assche /* 1165b3fe628dSBart Van Assche * Invoking srp_terminate_io() while srp_queuecommand() is running 1166b3fe628dSBart Van Assche * is not safe. Hence the warning statement below. 1167b3fe628dSBart Van Assche */ 1168b3fe628dSBart Van Assche shost_for_each_device(sdev, shost) 1169b3fe628dSBart Van Assche WARN_ON_ONCE(sdev->request_queue->request_fn_active); 1170b3fe628dSBart Van Assche 1171d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1172d92c0da7SBart Van Assche ch = &target->ch[i]; 1173509c07bcSBart Van Assche 1174d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1175d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1176d92c0da7SBart Van Assche 1177d92c0da7SBart Van Assche srp_finish_req(ch, req, NULL, 1178d92c0da7SBart Van Assche DID_TRANSPORT_FAILFAST << 16); 1179d92c0da7SBart Van Assche } 1180ed9b2264SBart Van Assche } 1181ed9b2264SBart Van Assche } 1182ed9b2264SBart Van Assche 1183ed9b2264SBart Van Assche /* 1184ed9b2264SBart Van Assche * It is up to the caller to ensure that srp_rport_reconnect() calls are 1185ed9b2264SBart Van Assche * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1186ed9b2264SBart Van Assche * srp_reset_device() or srp_reset_host() calls will occur while this function 1187ed9b2264SBart Van Assche * is in progress. One way to realize that is not to call this function 1188ed9b2264SBart Van Assche * directly but to call srp_reconnect_rport() instead since that last function 1189ed9b2264SBart Van Assche * serializes calls of this function via rport->mutex and also blocks 1190ed9b2264SBart Van Assche * srp_queuecommand() calls before invoking this function. 
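 *
 * Illustrative call pattern (a sketch of what srp_reconnect_rport() in the
 * SRP transport class provides, not a literal copy of that code):
 *
 *	mutex_lock(&rport->mutex);
 *	...srp_queuecommand() is blocked...
 *	srp_rport_reconnect(rport);
 *	...srp_queuecommand() is unblocked...
 *	mutex_unlock(&rport->mutex);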
1191ed9b2264SBart Van Assche */ 1192ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport) 1193ed9b2264SBart Van Assche { 1194ed9b2264SBart Van Assche struct srp_target_port *target = rport->lld_data; 1195d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 1196d92c0da7SBart Van Assche int i, j, ret = 0; 1197d92c0da7SBart Van Assche bool multich = false; 119809be70a2SBart Van Assche 1199aef9ec39SRoland Dreier srp_disconnect_target(target); 120034aa654eSBart Van Assche 120134aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 120234aa654eSBart Van Assche return -ENODEV; 120334aa654eSBart Van Assche 1204aef9ec39SRoland Dreier /* 1205c7c4e7ffSBart Van Assche * Now get a new local CM ID so that we avoid confusing the target in 1206c7c4e7ffSBart Van Assche * case things are really fouled up. Doing so also ensures that all CM 1207c7c4e7ffSBart Van Assche * callbacks will have finished before a new QP is allocated. 1208aef9ec39SRoland Dreier */ 1209d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1210d92c0da7SBart Van Assche ch = &target->ch[i]; 1211d92c0da7SBart Van Assche ret += srp_new_cm_id(ch); 1212d92c0da7SBart Van Assche } 1213d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1214d92c0da7SBart Van Assche ch = &target->ch[i]; 1215d92c0da7SBart Van Assche for (j = 0; j < target->req_ring_size; ++j) { 1216d92c0da7SBart Van Assche struct srp_request *req = &ch->req_ring[j]; 1217509c07bcSBart Van Assche 1218509c07bcSBart Van Assche srp_finish_req(ch, req, NULL, DID_RESET << 16); 1219536ae14eSBart Van Assche } 1220d92c0da7SBart Van Assche } 1221d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1222d92c0da7SBart Van Assche ch = &target->ch[i]; 12235cfb1782SBart Van Assche /* 12245cfb1782SBart Van Assche * Whether or not creating a new CM ID succeeded, create a new 1225d92c0da7SBart Van Assche * QP. This guarantees that all completion callback function 1226d92c0da7SBart Van Assche * invocations have finished before request resetting starts. 
12275cfb1782SBart Van Assche */ 1228509c07bcSBart Van Assche ret += srp_create_ch_ib(ch); 12295cfb1782SBart Van Assche 1230509c07bcSBart Van Assche INIT_LIST_HEAD(&ch->free_tx); 1231d92c0da7SBart Van Assche for (j = 0; j < target->queue_size; ++j) 1232d92c0da7SBart Van Assche list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1233d92c0da7SBart Van Assche } 12348de9fe3aSBart Van Assche 12358de9fe3aSBart Van Assche target->qp_in_error = false; 12368de9fe3aSBart Van Assche 1237d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 1238d92c0da7SBart Van Assche ch = &target->ch[i]; 1239bbac5ccfSBart Van Assche if (ret) 1240d92c0da7SBart Van Assche break; 1241d92c0da7SBart Van Assche ret = srp_connect_ch(ch, multich); 1242d92c0da7SBart Van Assche multich = true; 1243d92c0da7SBart Van Assche } 124409be70a2SBart Van Assche 1245ed9b2264SBart Van Assche if (ret == 0) 1246ed9b2264SBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 1247ed9b2264SBart Van Assche PFX "reconnect succeeded\n"); 1248aef9ec39SRoland Dreier 1249aef9ec39SRoland Dreier return ret; 1250aef9ec39SRoland Dreier } 1251aef9ec39SRoland Dreier 12528f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 12538f26c9ffSDavid Dillow unsigned int dma_len, u32 rkey) 1254f5358a17SRoland Dreier { 12558f26c9ffSDavid Dillow struct srp_direct_buf *desc = state->desc; 12568f26c9ffSDavid Dillow 12573ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 12583ae95da8SBart Van Assche 12598f26c9ffSDavid Dillow desc->va = cpu_to_be64(dma_addr); 12608f26c9ffSDavid Dillow desc->key = cpu_to_be32(rkey); 12618f26c9ffSDavid Dillow desc->len = cpu_to_be32(dma_len); 12628f26c9ffSDavid Dillow 12638f26c9ffSDavid Dillow state->total_len += dma_len; 12648f26c9ffSDavid Dillow state->desc++; 12658f26c9ffSDavid Dillow state->ndesc++; 12668f26c9ffSDavid Dillow } 12678f26c9ffSDavid Dillow 12688f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state, 1269509c07bcSBart Van Assche struct srp_rdma_ch *ch) 12708f26c9ffSDavid Dillow { 1271186fbc66SBart Van Assche struct srp_target_port *target = ch->target; 1272186fbc66SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 12735f071777SChristoph Hellwig struct ib_pd *pd = target->pd; 12748f26c9ffSDavid Dillow struct ib_pool_fmr *fmr; 1275f5358a17SRoland Dreier u64 io_addr = 0; 12768f26c9ffSDavid Dillow 1277290081b4SBart Van Assche if (state->fmr.next >= state->fmr.end) { 1278290081b4SBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, 1279290081b4SBart Van Assche PFX "Out of MRs (mr_per_cmd = %d)\n", 1280290081b4SBart Van Assche ch->target->mr_per_cmd); 1281f731ed62SBart Van Assche return -ENOMEM; 1282290081b4SBart Van Assche } 1283f731ed62SBart Van Assche 128426630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fmr); 128526630e8aSSagi Grimberg 128626630e8aSSagi Grimberg if (state->npages == 0) 128726630e8aSSagi Grimberg return 0; 128826630e8aSSagi Grimberg 12895f071777SChristoph Hellwig if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) { 129026630e8aSSagi Grimberg srp_map_desc(state, state->base_dma_addr, state->dma_len, 12915f071777SChristoph Hellwig pd->unsafe_global_rkey); 129226630e8aSSagi Grimberg goto reset_state; 129326630e8aSSagi Grimberg } 129426630e8aSSagi Grimberg 1295509c07bcSBart Van Assche fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, 12968f26c9ffSDavid Dillow state->npages, io_addr); 12978f26c9ffSDavid Dillow if (IS_ERR(fmr)) 12988f26c9ffSDavid Dillow return PTR_ERR(fmr); 12998f26c9ffSDavid Dillow 
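	/*
	 * Illustrative example (values invented for illustration only): with
	 * a 4 KiB mr_page_size and state->base_dma_addr == 0x12345678, the
	 * FMR maps the page-aligned addresses in state->pages[] starting at
	 * I/O virtual address 0, so the descriptor built below keeps only the
	 * offset within the first page:
	 *
	 *	srp_map_desc(state, 0x678, state->dma_len, fmr->fmr->rkey);
	 */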
1300f731ed62SBart Van Assche *state->fmr.next++ = fmr; 130152ede08fSBart Van Assche state->nmdesc++; 13028f26c9ffSDavid Dillow 1303186fbc66SBart Van Assche srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask, 1304186fbc66SBart Van Assche state->dma_len, fmr->fmr->rkey); 1305539dde6fSBart Van Assche 130626630e8aSSagi Grimberg reset_state: 130726630e8aSSagi Grimberg state->npages = 0; 130826630e8aSSagi Grimberg state->dma_len = 0; 130926630e8aSSagi Grimberg 13108f26c9ffSDavid Dillow return 0; 13118f26c9ffSDavid Dillow } 13128f26c9ffSDavid Dillow 13131dc7b1f1SChristoph Hellwig static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc) 13141dc7b1f1SChristoph Hellwig { 13151dc7b1f1SChristoph Hellwig srp_handle_qp_err(cq, wc, "FAST REG"); 13161dc7b1f1SChristoph Hellwig } 13171dc7b1f1SChristoph Hellwig 1318509c5f33SBart Van Assche /* 1319509c5f33SBart Van Assche * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset 1320509c5f33SBart Van Assche * where to start in the first element. If sg_offset_p != NULL then 1321509c5f33SBart Van Assche * *sg_offset_p is updated to the offset in state->sg[retval] of the first 1322509c5f33SBart Van Assche * byte that has not yet been mapped. 1323509c5f33SBart Van Assche */ 13245cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state, 13251dc7b1f1SChristoph Hellwig struct srp_request *req, 1326509c5f33SBart Van Assche struct srp_rdma_ch *ch, int sg_nents, 1327509c5f33SBart Van Assche unsigned int *sg_offset_p) 13285cfb1782SBart Van Assche { 1329509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 13305cfb1782SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 13315f071777SChristoph Hellwig struct ib_pd *pd = target->pd; 13325cfb1782SBart Van Assche struct ib_send_wr *bad_wr; 1333f7f7aab1SSagi Grimberg struct ib_reg_wr wr; 13345cfb1782SBart Van Assche struct srp_fr_desc *desc; 13355cfb1782SBart Van Assche u32 rkey; 1336f7f7aab1SSagi Grimberg int n, err; 13375cfb1782SBart Van Assche 1338290081b4SBart Van Assche if (state->fr.next >= state->fr.end) { 1339290081b4SBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, 1340290081b4SBart Van Assche PFX "Out of MRs (mr_per_cmd = %d)\n", 1341290081b4SBart Van Assche ch->target->mr_per_cmd); 1342f731ed62SBart Van Assche return -ENOMEM; 1343290081b4SBart Van Assche } 1344f731ed62SBart Van Assche 134526630e8aSSagi Grimberg WARN_ON_ONCE(!dev->use_fast_reg); 134626630e8aSSagi Grimberg 13475f071777SChristoph Hellwig if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) { 1348509c5f33SBart Van Assche unsigned int sg_offset = sg_offset_p ? 
*sg_offset_p : 0; 1349509c5f33SBart Van Assche 1350509c5f33SBart Van Assche srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, 1351509c5f33SBart Van Assche sg_dma_len(state->sg) - sg_offset, 13525f071777SChristoph Hellwig pd->unsafe_global_rkey); 1353509c5f33SBart Van Assche if (sg_offset_p) 1354509c5f33SBart Van Assche *sg_offset_p = 0; 1355f7f7aab1SSagi Grimberg return 1; 135626630e8aSSagi Grimberg } 135726630e8aSSagi Grimberg 1358509c07bcSBart Van Assche desc = srp_fr_pool_get(ch->fr_pool); 13595cfb1782SBart Van Assche if (!desc) 13605cfb1782SBart Van Assche return -ENOMEM; 13615cfb1782SBart Van Assche 13625cfb1782SBart Van Assche rkey = ib_inc_rkey(desc->mr->rkey); 13635cfb1782SBart Van Assche ib_update_fast_reg_key(desc->mr, rkey); 13645cfb1782SBart Van Assche 1365509c5f33SBart Van Assche n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, 1366509c5f33SBart Van Assche dev->mr_page_size); 13679d8e7d0dSBart Van Assche if (unlikely(n < 0)) { 13689d8e7d0dSBart Van Assche srp_fr_pool_put(ch->fr_pool, &desc, 1); 1369509c5f33SBart Van Assche pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n", 13709d8e7d0dSBart Van Assche dev_name(&req->scmnd->device->sdev_gendev), sg_nents, 1371509c5f33SBart Van Assche sg_offset_p ? *sg_offset_p : -1, n); 1372f7f7aab1SSagi Grimberg return n; 13739d8e7d0dSBart Van Assche } 13745cfb1782SBart Van Assche 1375509c5f33SBart Van Assche WARN_ON_ONCE(desc->mr->length == 0); 13765cfb1782SBart Van Assche 13771dc7b1f1SChristoph Hellwig req->reg_cqe.done = srp_reg_mr_err_done; 13781dc7b1f1SChristoph Hellwig 1379f7f7aab1SSagi Grimberg wr.wr.next = NULL; 1380f7f7aab1SSagi Grimberg wr.wr.opcode = IB_WR_REG_MR; 13811dc7b1f1SChristoph Hellwig wr.wr.wr_cqe = &req->reg_cqe; 1382f7f7aab1SSagi Grimberg wr.wr.num_sge = 0; 1383f7f7aab1SSagi Grimberg wr.wr.send_flags = 0; 1384f7f7aab1SSagi Grimberg wr.mr = desc->mr; 1385f7f7aab1SSagi Grimberg wr.key = desc->mr->rkey; 1386f7f7aab1SSagi Grimberg wr.access = (IB_ACCESS_LOCAL_WRITE | 13875cfb1782SBart Van Assche IB_ACCESS_REMOTE_READ | 13885cfb1782SBart Van Assche IB_ACCESS_REMOTE_WRITE); 13895cfb1782SBart Van Assche 1390f731ed62SBart Van Assche *state->fr.next++ = desc; 13915cfb1782SBart Van Assche state->nmdesc++; 13925cfb1782SBart Van Assche 1393f7f7aab1SSagi Grimberg srp_map_desc(state, desc->mr->iova, 1394f7f7aab1SSagi Grimberg desc->mr->length, desc->mr->rkey); 13955cfb1782SBart Van Assche 139626630e8aSSagi Grimberg err = ib_post_send(ch->qp, &wr.wr, &bad_wr); 1397509c5f33SBart Van Assche if (unlikely(err)) { 1398509c5f33SBart Van Assche WARN_ON_ONCE(err == -ENOMEM); 139926630e8aSSagi Grimberg return err; 1400509c5f33SBart Van Assche } 140126630e8aSSagi Grimberg 1402f7f7aab1SSagi Grimberg return n; 14035cfb1782SBart Van Assche } 14045cfb1782SBart Van Assche 14058f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state, 1406509c07bcSBart Van Assche struct srp_rdma_ch *ch, 140752bb8c62SBart Van Assche struct scatterlist *sg) 14088f26c9ffSDavid Dillow { 1409509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 141005321937SGreg Kroah-Hartman struct srp_device *dev = target->srp_host->srp_dev; 141185507bccSRalph Campbell struct ib_device *ibdev = dev->dev; 14128f26c9ffSDavid Dillow dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); 1413bb350d1dSFUJITA Tomonori unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 14143ae95da8SBart Van Assche unsigned int len = 0; 14158f26c9ffSDavid Dillow int ret; 141685507bccSRalph Campbell 14173ae95da8SBart Van Assche WARN_ON_ONCE(!dma_len); 
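	/*
	 * The loop below splits this S/G element into mr_page_size chunks.
	 * Illustrative example (values invented for illustration): with
	 * 4 KiB pages, dma_addr == 0x10000 and dma_len == 0x2800, the pages
	 * 0x10000, 0x11000 and 0x12000 are added to the current region with
	 * lengths 0x1000, 0x1000 and 0x800; because the element ends in the
	 * middle of a page, srp_map_finish_fmr() is called right after the
	 * loop, since regions can only be merged at page boundaries.
	 */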
1418f5358a17SRoland Dreier 14198f26c9ffSDavid Dillow while (dma_len) { 14205cfb1782SBart Van Assche unsigned offset = dma_addr & ~dev->mr_page_mask; 1421681cc360SBart Van Assche 1422681cc360SBart Van Assche if (state->npages == dev->max_pages_per_mr || 1423681cc360SBart Van Assche (state->npages > 0 && offset != 0)) { 1424f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 14258f26c9ffSDavid Dillow if (ret) 14268f26c9ffSDavid Dillow return ret; 142785507bccSRalph Campbell } 1428f5358a17SRoland Dreier 14295cfb1782SBart Van Assche len = min_t(unsigned int, dma_len, dev->mr_page_size - offset); 14308f26c9ffSDavid Dillow 14318f26c9ffSDavid Dillow if (!state->npages) 14328f26c9ffSDavid Dillow state->base_dma_addr = dma_addr; 14335cfb1782SBart Van Assche state->pages[state->npages++] = dma_addr & dev->mr_page_mask; 143452ede08fSBart Van Assche state->dma_len += len; 14358f26c9ffSDavid Dillow dma_addr += len; 14368f26c9ffSDavid Dillow dma_len -= len; 1437f5358a17SRoland Dreier } 1438f5358a17SRoland Dreier 14395cfb1782SBart Van Assche /* 1440681cc360SBart Van Assche * If the end of the MR is not on a page boundary then we need to 14418f26c9ffSDavid Dillow * close it out and start a new one -- we can only merge at page 14421d3d98c4SBart Van Assche * boundaries. 14438f26c9ffSDavid Dillow */ 1444f5358a17SRoland Dreier ret = 0; 1445681cc360SBart Van Assche if ((dma_addr & ~dev->mr_page_mask) != 0) 1446f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 1447f5358a17SRoland Dreier return ret; 1448f5358a17SRoland Dreier } 1449f5358a17SRoland Dreier 145026630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch, 145126630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 145226630e8aSSagi Grimberg int count) 145326630e8aSSagi Grimberg { 145426630e8aSSagi Grimberg struct scatterlist *sg; 145526630e8aSSagi Grimberg int i, ret; 145626630e8aSSagi Grimberg 145726630e8aSSagi Grimberg state->pages = req->map_page; 145826630e8aSSagi Grimberg state->fmr.next = req->fmr_list; 1459509c5f33SBart Van Assche state->fmr.end = req->fmr_list + ch->target->mr_per_cmd; 146026630e8aSSagi Grimberg 146126630e8aSSagi Grimberg for_each_sg(scat, sg, count, i) { 146252bb8c62SBart Van Assche ret = srp_map_sg_entry(state, ch, sg); 146326630e8aSSagi Grimberg if (ret) 146426630e8aSSagi Grimberg return ret; 146526630e8aSSagi Grimberg } 146626630e8aSSagi Grimberg 1467f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(state, ch); 146826630e8aSSagi Grimberg if (ret) 146926630e8aSSagi Grimberg return ret; 147026630e8aSSagi Grimberg 147126630e8aSSagi Grimberg return 0; 147226630e8aSSagi Grimberg } 147326630e8aSSagi Grimberg 147426630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, 147526630e8aSSagi Grimberg struct srp_request *req, struct scatterlist *scat, 147626630e8aSSagi Grimberg int count) 147726630e8aSSagi Grimberg { 1478509c5f33SBart Van Assche unsigned int sg_offset = 0; 1479509c5f33SBart Van Assche 1480f7f7aab1SSagi Grimberg state->fr.next = req->fr_list; 1481509c5f33SBart Van Assche state->fr.end = req->fr_list + ch->target->mr_per_cmd; 1482f7f7aab1SSagi Grimberg state->sg = scat; 148326630e8aSSagi Grimberg 14843b59b7a6SBart Van Assche if (count == 0) 14853b59b7a6SBart Van Assche return 0; 14863b59b7a6SBart Van Assche 148757b0be9cSBart Van Assche while (count) { 1488f7f7aab1SSagi Grimberg int i, n; 1489f7f7aab1SSagi Grimberg 1490509c5f33SBart Van Assche n = srp_map_finish_fr(state, req, ch, count, &sg_offset); 
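		/*
		 * n is (roughly) the number of scatterlist entries consumed
		 * by this registration -- or 1 when the unsafe global rkey
		 * shortcut was taken -- so the code below advances state->sg
		 * by n entries and repeats until the whole list is mapped.
		 */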
1491f7f7aab1SSagi Grimberg if (unlikely(n < 0)) 1492f7f7aab1SSagi Grimberg return n; 1493f7f7aab1SSagi Grimberg 149457b0be9cSBart Van Assche count -= n; 1495f7f7aab1SSagi Grimberg for (i = 0; i < n; i++) 1496f7f7aab1SSagi Grimberg state->sg = sg_next(state->sg); 149726630e8aSSagi Grimberg } 149826630e8aSSagi Grimberg 149926630e8aSSagi Grimberg return 0; 150026630e8aSSagi Grimberg } 150126630e8aSSagi Grimberg 150226630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, 1503509c07bcSBart Van Assche struct srp_request *req, struct scatterlist *scat, 1504509c07bcSBart Van Assche int count) 150576bc1e1dSBart Van Assche { 1506509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 150776bc1e1dSBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 150876bc1e1dSBart Van Assche struct scatterlist *sg; 150926630e8aSSagi Grimberg int i; 151076bc1e1dSBart Van Assche 15113ae95da8SBart Van Assche for_each_sg(scat, sg, count, i) { 15123ae95da8SBart Van Assche srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 151303f6fb93SBart Van Assche ib_sg_dma_len(dev->dev, sg), 15145f071777SChristoph Hellwig target->pd->unsafe_global_rkey); 15153ae95da8SBart Van Assche } 151676bc1e1dSBart Van Assche 151726630e8aSSagi Grimberg return 0; 151876bc1e1dSBart Van Assche } 151976bc1e1dSBart Van Assche 1520330179f2SBart Van Assche /* 1521330179f2SBart Van Assche * Register the indirect data buffer descriptor with the HCA. 1522330179f2SBart Van Assche * 1523330179f2SBart Van Assche * Note: since the indirect data buffer descriptor has been allocated with 1524330179f2SBart Van Assche * kmalloc() it is guaranteed that this buffer is a physically contiguous 1525330179f2SBart Van Assche * memory buffer. 1526330179f2SBart Van Assche */ 1527330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, 1528330179f2SBart Van Assche void **next_mr, void **end_mr, u32 idb_len, 1529330179f2SBart Van Assche __be32 *idb_rkey) 1530330179f2SBart Van Assche { 1531330179f2SBart Van Assche struct srp_target_port *target = ch->target; 1532330179f2SBart Van Assche struct srp_device *dev = target->srp_host->srp_dev; 1533330179f2SBart Van Assche struct srp_map_state state; 1534330179f2SBart Van Assche struct srp_direct_buf idb_desc; 1535330179f2SBart Van Assche u64 idb_pages[1]; 1536f7f7aab1SSagi Grimberg struct scatterlist idb_sg[1]; 1537330179f2SBart Van Assche int ret; 1538330179f2SBart Van Assche 1539330179f2SBart Van Assche memset(&state, 0, sizeof(state)); 1540330179f2SBart Van Assche memset(&idb_desc, 0, sizeof(idb_desc)); 1541330179f2SBart Van Assche state.gen.next = next_mr; 1542330179f2SBart Van Assche state.gen.end = end_mr; 1543330179f2SBart Van Assche state.desc = &idb_desc; 1544f7f7aab1SSagi Grimberg state.base_dma_addr = req->indirect_dma_addr; 1545f7f7aab1SSagi Grimberg state.dma_len = idb_len; 1546f7f7aab1SSagi Grimberg 1547f7f7aab1SSagi Grimberg if (dev->use_fast_reg) { 1548f7f7aab1SSagi Grimberg state.sg = idb_sg; 154954f5c9c5SBart Van Assche sg_init_one(idb_sg, req->indirect_desc, idb_len); 1550f7f7aab1SSagi Grimberg idb_sg->dma_address = req->indirect_dma_addr; /* hack! 
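	 * -- the indirect descriptor is kmalloc()ed and already DMA mapped
	 * (req->indirect_dma_addr), so there is no mapped scatterlist to take
	 * this address from; setting ->dma_address by hand makes
	 * ib_map_mr_sg() treat idb_sg as a one-entry, already-mapped S/G list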
*/ 1551fc925518SChristoph Hellwig #ifdef CONFIG_NEED_SG_DMA_LENGTH 1552fc925518SChristoph Hellwig idb_sg->dma_length = idb_sg->length; /* hack^2 */ 1553fc925518SChristoph Hellwig #endif 1554509c5f33SBart Van Assche ret = srp_map_finish_fr(&state, req, ch, 1, NULL); 1555f7f7aab1SSagi Grimberg if (ret < 0) 1556f7f7aab1SSagi Grimberg return ret; 1557509c5f33SBart Van Assche WARN_ON_ONCE(ret < 1); 1558f7f7aab1SSagi Grimberg } else if (dev->use_fmr) { 1559330179f2SBart Van Assche state.pages = idb_pages; 1560330179f2SBart Van Assche state.pages[0] = (req->indirect_dma_addr & 1561330179f2SBart Van Assche dev->mr_page_mask); 1562330179f2SBart Van Assche state.npages = 1; 1563f7f7aab1SSagi Grimberg ret = srp_map_finish_fmr(&state, ch); 1564330179f2SBart Van Assche if (ret < 0) 1565f7f7aab1SSagi Grimberg return ret; 1566f7f7aab1SSagi Grimberg } else { 1567f7f7aab1SSagi Grimberg return -EINVAL; 1568f7f7aab1SSagi Grimberg } 1569330179f2SBart Van Assche 1570330179f2SBart Van Assche *idb_rkey = idb_desc.key; 1571330179f2SBart Van Assche 1572f7f7aab1SSagi Grimberg return 0; 1573330179f2SBart Van Assche } 1574330179f2SBart Van Assche 1575509c5f33SBart Van Assche static void srp_check_mapping(struct srp_map_state *state, 1576509c5f33SBart Van Assche struct srp_rdma_ch *ch, struct srp_request *req, 1577509c5f33SBart Van Assche struct scatterlist *scat, int count) 1578509c5f33SBart Van Assche { 1579509c5f33SBart Van Assche struct srp_device *dev = ch->target->srp_host->srp_dev; 1580509c5f33SBart Van Assche struct srp_fr_desc **pfr; 1581509c5f33SBart Van Assche u64 desc_len = 0, mr_len = 0; 1582509c5f33SBart Van Assche int i; 1583509c5f33SBart Van Assche 1584509c5f33SBart Van Assche for (i = 0; i < state->ndesc; i++) 1585509c5f33SBart Van Assche desc_len += be32_to_cpu(req->indirect_desc[i].len); 1586509c5f33SBart Van Assche if (dev->use_fast_reg) 1587509c5f33SBart Van Assche for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) 1588509c5f33SBart Van Assche mr_len += (*pfr)->mr->length; 1589509c5f33SBart Van Assche else if (dev->use_fmr) 1590509c5f33SBart Van Assche for (i = 0; i < state->nmdesc; i++) 1591509c5f33SBart Van Assche mr_len += be32_to_cpu(req->indirect_desc[i].len); 1592509c5f33SBart Van Assche if (desc_len != scsi_bufflen(req->scmnd) || 1593509c5f33SBart Van Assche mr_len > scsi_bufflen(req->scmnd)) 1594509c5f33SBart Van Assche pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n", 1595509c5f33SBart Van Assche scsi_bufflen(req->scmnd), desc_len, mr_len, 1596509c5f33SBart Van Assche state->ndesc, state->nmdesc); 1597509c5f33SBart Van Assche } 1598509c5f33SBart Van Assche 159977269cdfSBart Van Assche /** 160077269cdfSBart Van Assche * srp_map_data() - map SCSI data buffer onto an SRP request 160177269cdfSBart Van Assche * @scmnd: SCSI command to map 160277269cdfSBart Van Assche * @ch: SRP RDMA channel 160377269cdfSBart Van Assche * @req: SRP request 160477269cdfSBart Van Assche * 160577269cdfSBart Van Assche * Returns the length in bytes of the SRP_CMD IU or a negative value if 160677269cdfSBart Van Assche * mapping failed. 
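 *
 * Depending on the device capabilities, the S/G list is mapped using fast
 * registration (srp_map_sg_fr()), FMRs (srp_map_sg_fmr()) or, when neither
 * is used, the unsafe global rkey without registration (srp_map_sg_dma());
 * the resulting descriptors end up either directly in the SRP_CMD IU or in
 * an indirect descriptor table (see srp_map_idb()).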
160777269cdfSBart Van Assche */ 1608509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1609aef9ec39SRoland Dreier struct srp_request *req) 1610aef9ec39SRoland Dreier { 1611509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 16125f071777SChristoph Hellwig struct ib_pd *pd = target->pd; 161376bc1e1dSBart Van Assche struct scatterlist *scat; 1614aef9ec39SRoland Dreier struct srp_cmd *cmd = req->cmd->buf; 1615330179f2SBart Van Assche int len, nents, count, ret; 161685507bccSRalph Campbell struct srp_device *dev; 161785507bccSRalph Campbell struct ib_device *ibdev; 16188f26c9ffSDavid Dillow struct srp_map_state state; 16198f26c9ffSDavid Dillow struct srp_indirect_buf *indirect_hdr; 1620330179f2SBart Van Assche u32 idb_len, table_len; 1621330179f2SBart Van Assche __be32 idb_rkey; 16228f26c9ffSDavid Dillow u8 fmt; 1623aef9ec39SRoland Dreier 1624bb350d1dSFUJITA Tomonori if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 1625aef9ec39SRoland Dreier return sizeof (struct srp_cmd); 1626aef9ec39SRoland Dreier 1627aef9ec39SRoland Dreier if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1628aef9ec39SRoland Dreier scmnd->sc_data_direction != DMA_TO_DEVICE) { 16297aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 16307aa54bd7SDavid Dillow PFX "Unhandled data direction %d\n", 1631aef9ec39SRoland Dreier scmnd->sc_data_direction); 1632aef9ec39SRoland Dreier return -EINVAL; 1633aef9ec39SRoland Dreier } 1634aef9ec39SRoland Dreier 1635bb350d1dSFUJITA Tomonori nents = scsi_sg_count(scmnd); 1636bb350d1dSFUJITA Tomonori scat = scsi_sglist(scmnd); 1637aef9ec39SRoland Dreier 163805321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev; 163985507bccSRalph Campbell ibdev = dev->dev; 164085507bccSRalph Campbell 164185507bccSRalph Campbell count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 16428f26c9ffSDavid Dillow if (unlikely(count == 0)) 16438f26c9ffSDavid Dillow return -EIO; 1644aef9ec39SRoland Dreier 1645aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_DIRECT; 1646f5358a17SRoland Dreier len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1647f5358a17SRoland Dreier 16485f071777SChristoph Hellwig if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) { 1649f5358a17SRoland Dreier /* 1650f5358a17SRoland Dreier * The midlayer only generated a single gather/scatter 1651f5358a17SRoland Dreier * entry, or DMA mapping coalesced everything to a 1652f5358a17SRoland Dreier * single entry. So a direct descriptor along with 1653f5358a17SRoland Dreier * the DMA MR suffices. 1654f5358a17SRoland Dreier */ 1655f5358a17SRoland Dreier struct srp_direct_buf *buf = (void *) cmd->add_data; 1656aef9ec39SRoland Dreier 165785507bccSRalph Campbell buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 16585f071777SChristoph Hellwig buf->key = cpu_to_be32(pd->unsafe_global_rkey); 165985507bccSRalph Campbell buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 16608f26c9ffSDavid Dillow 166152ede08fSBart Van Assche req->nmdesc = 0; 16628f26c9ffSDavid Dillow goto map_complete; 16638f26c9ffSDavid Dillow } 16648f26c9ffSDavid Dillow 16655cfb1782SBart Van Assche /* 16665cfb1782SBart Van Assche * We have more than one scatter/gather entry, so build our indirect 16675cfb1782SBart Van Assche * descriptor table, trying to merge as many entries as we can. 
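	 *
	 * The resulting SRP_CMD IU layout is (sizes follow from the length
	 * computations below):
	 *
	 *	direct:   [ srp_cmd | srp_direct_buf ]
	 *	indirect: [ srp_cmd | srp_indirect_buf | count * srp_direct_buf ]
	 *
	 * where count = min(state.ndesc, target->cmd_sg_cnt); descriptors
	 * beyond count are only reachable through the externally registered
	 * table at req->indirect_dma_addr.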
1668f5358a17SRoland Dreier */ 16698f26c9ffSDavid Dillow indirect_hdr = (void *) cmd->add_data; 16708f26c9ffSDavid Dillow 1671c07d424dSDavid Dillow ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1672c07d424dSDavid Dillow target->indirect_size, DMA_TO_DEVICE); 1673c07d424dSDavid Dillow 16748f26c9ffSDavid Dillow memset(&state, 0, sizeof(state)); 16759edba790SBart Van Assche state.desc = req->indirect_desc; 167626630e8aSSagi Grimberg if (dev->use_fast_reg) 1677e012f363SBart Van Assche ret = srp_map_sg_fr(&state, ch, req, scat, count); 167826630e8aSSagi Grimberg else if (dev->use_fmr) 1679e012f363SBart Van Assche ret = srp_map_sg_fmr(&state, ch, req, scat, count); 168026630e8aSSagi Grimberg else 1681e012f363SBart Van Assche ret = srp_map_sg_dma(&state, ch, req, scat, count); 1682e012f363SBart Van Assche req->nmdesc = state.nmdesc; 1683e012f363SBart Van Assche if (ret < 0) 1684e012f363SBart Van Assche goto unmap; 16858f26c9ffSDavid Dillow 1686509c5f33SBart Van Assche { 1687509c5f33SBart Van Assche DEFINE_DYNAMIC_DEBUG_METADATA(ddm, 1688509c5f33SBart Van Assche "Memory mapping consistency check"); 16891a1faf7aSBart Van Assche if (DYNAMIC_DEBUG_BRANCH(ddm)) 1690509c5f33SBart Van Assche srp_check_mapping(&state, ch, req, scat, count); 1691509c5f33SBart Van Assche } 16928f26c9ffSDavid Dillow 1693c07d424dSDavid Dillow /* We've mapped the request, now pull as much of the indirect 1694c07d424dSDavid Dillow * descriptor table as we can into the command buffer. If this 1695c07d424dSDavid Dillow * target is not using an external indirect table, we are 1696c07d424dSDavid Dillow * guaranteed to fit into the command, as the SCSI layer won't 1697c07d424dSDavid Dillow * give us more S/G entries than we allow. 16988f26c9ffSDavid Dillow */ 16998f26c9ffSDavid Dillow if (state.ndesc == 1) { 17005cfb1782SBart Van Assche /* 17015cfb1782SBart Van Assche * Memory registration collapsed the sg-list into one entry, 17028f26c9ffSDavid Dillow * so use a direct descriptor. 
17038f26c9ffSDavid Dillow */ 17048f26c9ffSDavid Dillow struct srp_direct_buf *buf = (void *) cmd->add_data; 17058f26c9ffSDavid Dillow 1706c07d424dSDavid Dillow *buf = req->indirect_desc[0]; 17078f26c9ffSDavid Dillow goto map_complete; 17088f26c9ffSDavid Dillow } 17098f26c9ffSDavid Dillow 1710c07d424dSDavid Dillow if (unlikely(target->cmd_sg_cnt < state.ndesc && 1711c07d424dSDavid Dillow !target->allow_ext_sg)) { 1712c07d424dSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1713c07d424dSDavid Dillow "Could not fit S/G list into SRP_CMD\n"); 1714e012f363SBart Van Assche ret = -EIO; 1715e012f363SBart Van Assche goto unmap; 1716c07d424dSDavid Dillow } 1717c07d424dSDavid Dillow 1718c07d424dSDavid Dillow count = min(state.ndesc, target->cmd_sg_cnt); 17198f26c9ffSDavid Dillow table_len = state.ndesc * sizeof (struct srp_direct_buf); 1720330179f2SBart Van Assche idb_len = sizeof(struct srp_indirect_buf) + table_len; 1721aef9ec39SRoland Dreier 1722aef9ec39SRoland Dreier fmt = SRP_DATA_DESC_INDIRECT; 17238f26c9ffSDavid Dillow len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf); 1724c07d424dSDavid Dillow len += count * sizeof (struct srp_direct_buf); 1725f5358a17SRoland Dreier 1726c07d424dSDavid Dillow memcpy(indirect_hdr->desc_list, req->indirect_desc, 1727c07d424dSDavid Dillow count * sizeof (struct srp_direct_buf)); 172885507bccSRalph Campbell 17295f071777SChristoph Hellwig if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) { 1730330179f2SBart Van Assche ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1731330179f2SBart Van Assche idb_len, &idb_rkey); 1732330179f2SBart Van Assche if (ret < 0) 1733e012f363SBart Van Assche goto unmap; 1734330179f2SBart Van Assche req->nmdesc++; 1735330179f2SBart Van Assche } else { 17365f071777SChristoph Hellwig idb_rkey = cpu_to_be32(pd->unsafe_global_rkey); 1737330179f2SBart Van Assche } 1738330179f2SBart Van Assche 1739c07d424dSDavid Dillow indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1740330179f2SBart Van Assche indirect_hdr->table_desc.key = idb_rkey; 17418f26c9ffSDavid Dillow indirect_hdr->table_desc.len = cpu_to_be32(table_len); 17428f26c9ffSDavid Dillow indirect_hdr->len = cpu_to_be32(state.total_len); 1743aef9ec39SRoland Dreier 1744aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1745c07d424dSDavid Dillow cmd->data_out_desc_cnt = count; 1746aef9ec39SRoland Dreier else 1747c07d424dSDavid Dillow cmd->data_in_desc_cnt = count; 1748c07d424dSDavid Dillow 1749c07d424dSDavid Dillow ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1750c07d424dSDavid Dillow DMA_TO_DEVICE); 1751aef9ec39SRoland Dreier 17528f26c9ffSDavid Dillow map_complete: 1753aef9ec39SRoland Dreier if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1754aef9ec39SRoland Dreier cmd->buf_fmt = fmt << 4; 1755aef9ec39SRoland Dreier else 1756aef9ec39SRoland Dreier cmd->buf_fmt = fmt; 1757aef9ec39SRoland Dreier 1758aef9ec39SRoland Dreier return len; 1759e012f363SBart Van Assche 1760e012f363SBart Van Assche unmap: 1761e012f363SBart Van Assche srp_unmap_data(scmnd, ch, req); 1762ffc548bbSBart Van Assche if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) 1763ffc548bbSBart Van Assche ret = -E2BIG; 1764e012f363SBart Van Assche return ret; 1765aef9ec39SRoland Dreier } 1766aef9ec39SRoland Dreier 176705a1d750SDavid Dillow /* 176876c75b25SBart Van Assche * Return an IU and possible credit to the free pool 176976c75b25SBart Van Assche */ 1770509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct 
srp_iu *iu, 177176c75b25SBart Van Assche enum srp_iu_type iu_type) 177276c75b25SBart Van Assche { 177376c75b25SBart Van Assche unsigned long flags; 177476c75b25SBart Van Assche 1775509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1776509c07bcSBart Van Assche list_add(&iu->list, &ch->free_tx); 177776c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) 1778509c07bcSBart Van Assche ++ch->req_lim; 1779509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 178076c75b25SBart Van Assche } 178176c75b25SBart Van Assche 178276c75b25SBart Van Assche /* 1783509c07bcSBart Van Assche * Must be called with ch->lock held to protect req_lim and free_tx. 1784e9684678SBart Van Assche * If IU is not sent, it must be returned using srp_put_tx_iu(). 178505a1d750SDavid Dillow * 178605a1d750SDavid Dillow * Note: 178705a1d750SDavid Dillow * An upper limit for the number of allocated information units for each 178805a1d750SDavid Dillow * request type is: 178905a1d750SDavid Dillow * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 179005a1d750SDavid Dillow * more than Scsi_Host.can_queue requests. 179105a1d750SDavid Dillow * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 179205a1d750SDavid Dillow * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 179305a1d750SDavid Dillow * one unanswered SRP request to an initiator. 179405a1d750SDavid Dillow */ 1795509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 179605a1d750SDavid Dillow enum srp_iu_type iu_type) 179705a1d750SDavid Dillow { 1798509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 179905a1d750SDavid Dillow s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 180005a1d750SDavid Dillow struct srp_iu *iu; 180105a1d750SDavid Dillow 18021dc7b1f1SChristoph Hellwig ib_process_cq_direct(ch->send_cq, -1); 180305a1d750SDavid Dillow 1804509c07bcSBart Van Assche if (list_empty(&ch->free_tx)) 180505a1d750SDavid Dillow return NULL; 180605a1d750SDavid Dillow 180705a1d750SDavid Dillow /* Initiator responses to target requests do not consume credits */ 180876c75b25SBart Van Assche if (iu_type != SRP_IU_RSP) { 1809509c07bcSBart Van Assche if (ch->req_lim <= rsv) { 181005a1d750SDavid Dillow ++target->zero_req_lim; 181105a1d750SDavid Dillow return NULL; 181205a1d750SDavid Dillow } 181305a1d750SDavid Dillow 1814509c07bcSBart Van Assche --ch->req_lim; 181576c75b25SBart Van Assche } 181676c75b25SBart Van Assche 1817509c07bcSBart Van Assche iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 181876c75b25SBart Van Assche list_del(&iu->list); 181905a1d750SDavid Dillow return iu; 182005a1d750SDavid Dillow } 182105a1d750SDavid Dillow 18221dc7b1f1SChristoph Hellwig static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc) 18231dc7b1f1SChristoph Hellwig { 18241dc7b1f1SChristoph Hellwig struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); 18251dc7b1f1SChristoph Hellwig struct srp_rdma_ch *ch = cq->cq_context; 18261dc7b1f1SChristoph Hellwig 18271dc7b1f1SChristoph Hellwig if (unlikely(wc->status != IB_WC_SUCCESS)) { 18281dc7b1f1SChristoph Hellwig srp_handle_qp_err(cq, wc, "SEND"); 18291dc7b1f1SChristoph Hellwig return; 18301dc7b1f1SChristoph Hellwig } 18311dc7b1f1SChristoph Hellwig 18321dc7b1f1SChristoph Hellwig list_add(&iu->list, &ch->free_tx); 18331dc7b1f1SChristoph Hellwig } 18341dc7b1f1SChristoph Hellwig 1835509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 183605a1d750SDavid Dillow { 
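	/*
	 * Note: @iu was obtained from __srp_get_tx_iu().  The work request is
	 * posted IB_SEND_SIGNALED, so on success srp_send_done() returns the
	 * IU to ch->free_tx; if posting fails the caller must return it with
	 * srp_put_tx_iu().
	 */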
1837509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 183805a1d750SDavid Dillow struct ib_sge list; 183905a1d750SDavid Dillow struct ib_send_wr wr, *bad_wr; 184005a1d750SDavid Dillow 184105a1d750SDavid Dillow list.addr = iu->dma; 184205a1d750SDavid Dillow list.length = len; 18439af76271SDavid Dillow list.lkey = target->lkey; 184405a1d750SDavid Dillow 18451dc7b1f1SChristoph Hellwig iu->cqe.done = srp_send_done; 18461dc7b1f1SChristoph Hellwig 184705a1d750SDavid Dillow wr.next = NULL; 18481dc7b1f1SChristoph Hellwig wr.wr_cqe = &iu->cqe; 184905a1d750SDavid Dillow wr.sg_list = &list; 185005a1d750SDavid Dillow wr.num_sge = 1; 185105a1d750SDavid Dillow wr.opcode = IB_WR_SEND; 185205a1d750SDavid Dillow wr.send_flags = IB_SEND_SIGNALED; 185305a1d750SDavid Dillow 1854509c07bcSBart Van Assche return ib_post_send(ch->qp, &wr, &bad_wr); 185505a1d750SDavid Dillow } 185605a1d750SDavid Dillow 1857509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1858c996bb47SBart Van Assche { 1859509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1860c996bb47SBart Van Assche struct ib_recv_wr wr, *bad_wr; 1861dcb4cb85SBart Van Assche struct ib_sge list; 1862c996bb47SBart Van Assche 1863c996bb47SBart Van Assche list.addr = iu->dma; 1864c996bb47SBart Van Assche list.length = iu->size; 18659af76271SDavid Dillow list.lkey = target->lkey; 1866c996bb47SBart Van Assche 18671dc7b1f1SChristoph Hellwig iu->cqe.done = srp_recv_done; 18681dc7b1f1SChristoph Hellwig 1869c996bb47SBart Van Assche wr.next = NULL; 18701dc7b1f1SChristoph Hellwig wr.wr_cqe = &iu->cqe; 1871c996bb47SBart Van Assche wr.sg_list = &list; 1872c996bb47SBart Van Assche wr.num_sge = 1; 1873c996bb47SBart Van Assche 1874509c07bcSBart Van Assche return ib_post_recv(ch->qp, &wr, &bad_wr); 1875c996bb47SBart Van Assche } 1876c996bb47SBart Van Assche 1877509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1878aef9ec39SRoland Dreier { 1879509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1880aef9ec39SRoland Dreier struct srp_request *req; 1881aef9ec39SRoland Dreier struct scsi_cmnd *scmnd; 1882aef9ec39SRoland Dreier unsigned long flags; 1883aef9ec39SRoland Dreier 1884aef9ec39SRoland Dreier if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1885509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1886509c07bcSBart Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1887509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 188894a9174cSBart Van Assche 1889509c07bcSBart Van Assche ch->tsk_mgmt_status = -1; 1890f8b6e31eSDavid Dillow if (be32_to_cpu(rsp->resp_data_len) >= 4) 1891509c07bcSBart Van Assche ch->tsk_mgmt_status = rsp->data[3]; 1892509c07bcSBart Van Assche complete(&ch->tsk_mgmt_done); 1893aef9ec39SRoland Dreier } else { 189477f2c1a4SBart Van Assche scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 189577f2c1a4SBart Van Assche if (scmnd) { 189677f2c1a4SBart Van Assche req = (void *)scmnd->host_scribble; 189777f2c1a4SBart Van Assche scmnd = srp_claim_req(ch, req, NULL, scmnd); 189877f2c1a4SBart Van Assche } 189922032991SBart Van Assche if (!scmnd) { 19007aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 1901d92c0da7SBart Van Assche "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1902d92c0da7SBart Van Assche rsp->tag, ch - target->ch, ch->qp->qp_num); 190322032991SBart Van Assche 1904509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1905509c07bcSBart 
Van Assche ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1906509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 190722032991SBart Van Assche 190822032991SBart Van Assche return; 190922032991SBart Van Assche } 1910aef9ec39SRoland Dreier scmnd->result = rsp->status; 1911aef9ec39SRoland Dreier 1912aef9ec39SRoland Dreier if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1913aef9ec39SRoland Dreier memcpy(scmnd->sense_buffer, rsp->data + 1914aef9ec39SRoland Dreier be32_to_cpu(rsp->resp_data_len), 1915aef9ec39SRoland Dreier min_t(int, be32_to_cpu(rsp->sense_data_len), 1916aef9ec39SRoland Dreier SCSI_SENSE_BUFFERSIZE)); 1917aef9ec39SRoland Dreier } 1918aef9ec39SRoland Dreier 1919e714531aSBart Van Assche if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1920bb350d1dSFUJITA Tomonori scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1921e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) 1922e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); 1923e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1924e714531aSBart Van Assche scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1925e714531aSBart Van Assche else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) 1926e714531aSBart Van Assche scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); 1927aef9ec39SRoland Dreier 1928509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 192922032991SBart Van Assche be32_to_cpu(rsp->req_lim_delta)); 193022032991SBart Van Assche 1931f8b6e31eSDavid Dillow scmnd->host_scribble = NULL; 1932aef9ec39SRoland Dreier scmnd->scsi_done(scmnd); 1933aef9ec39SRoland Dreier } 1934aef9ec39SRoland Dreier } 1935aef9ec39SRoland Dreier 1936509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1937bb12588aSDavid Dillow void *rsp, int len) 1938bb12588aSDavid Dillow { 1939509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 194076c75b25SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 1941bb12588aSDavid Dillow unsigned long flags; 1942bb12588aSDavid Dillow struct srp_iu *iu; 194376c75b25SBart Van Assche int err; 1944bb12588aSDavid Dillow 1945509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 1946509c07bcSBart Van Assche ch->req_lim += req_delta; 1947509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 1948509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 194976c75b25SBart Van Assche 1950bb12588aSDavid Dillow if (!iu) { 1951bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1952bb12588aSDavid Dillow "no IU available to send response\n"); 195376c75b25SBart Van Assche return 1; 1954bb12588aSDavid Dillow } 1955bb12588aSDavid Dillow 1956bb12588aSDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 1957bb12588aSDavid Dillow memcpy(iu->buf, rsp, len); 1958bb12588aSDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 1959bb12588aSDavid Dillow 1960509c07bcSBart Van Assche err = srp_post_send(ch, iu, len); 196176c75b25SBart Van Assche if (err) { 1962bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1963bb12588aSDavid Dillow "unable to post response: %d\n", err); 1964509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_RSP); 196576c75b25SBart Van Assche } 1966bb12588aSDavid Dillow 1967bb12588aSDavid Dillow return err; 1968bb12588aSDavid Dillow } 1969bb12588aSDavid Dillow 1970509c07bcSBart Van Assche static void srp_process_cred_req(struct 
srp_rdma_ch *ch, 1971bb12588aSDavid Dillow struct srp_cred_req *req) 1972bb12588aSDavid Dillow { 1973bb12588aSDavid Dillow struct srp_cred_rsp rsp = { 1974bb12588aSDavid Dillow .opcode = SRP_CRED_RSP, 1975bb12588aSDavid Dillow .tag = req->tag, 1976bb12588aSDavid Dillow }; 1977bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1978bb12588aSDavid Dillow 1979509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1980509c07bcSBart Van Assche shost_printk(KERN_ERR, ch->target->scsi_host, PFX 1981bb12588aSDavid Dillow "problems processing SRP_CRED_REQ\n"); 1982bb12588aSDavid Dillow } 1983bb12588aSDavid Dillow 1984509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch, 1985bb12588aSDavid Dillow struct srp_aer_req *req) 1986bb12588aSDavid Dillow { 1987509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 1988bb12588aSDavid Dillow struct srp_aer_rsp rsp = { 1989bb12588aSDavid Dillow .opcode = SRP_AER_RSP, 1990bb12588aSDavid Dillow .tag = req->tag, 1991bb12588aSDavid Dillow }; 1992bb12588aSDavid Dillow s32 delta = be32_to_cpu(req->req_lim_delta); 1993bb12588aSDavid Dillow 1994bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1995985aa495SBart Van Assche "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 1996bb12588aSDavid Dillow 1997509c07bcSBart Van Assche if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 1998bb12588aSDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX 1999bb12588aSDavid Dillow "problems processing SRP_AER_REQ\n"); 2000bb12588aSDavid Dillow } 2001bb12588aSDavid Dillow 20021dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2003aef9ec39SRoland Dreier { 20041dc7b1f1SChristoph Hellwig struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); 20051dc7b1f1SChristoph Hellwig struct srp_rdma_ch *ch = cq->cq_context; 2006509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2007dcb4cb85SBart Van Assche struct ib_device *dev = target->srp_host->srp_dev->dev; 2008c996bb47SBart Van Assche int res; 2009aef9ec39SRoland Dreier u8 opcode; 2010aef9ec39SRoland Dreier 20111dc7b1f1SChristoph Hellwig if (unlikely(wc->status != IB_WC_SUCCESS)) { 20121dc7b1f1SChristoph Hellwig srp_handle_qp_err(cq, wc, "RECV"); 20131dc7b1f1SChristoph Hellwig return; 20141dc7b1f1SChristoph Hellwig } 20151dc7b1f1SChristoph Hellwig 2016509c07bcSBart Van Assche ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 201785507bccSRalph Campbell DMA_FROM_DEVICE); 2018aef9ec39SRoland Dreier 2019aef9ec39SRoland Dreier opcode = *(u8 *) iu->buf; 2020aef9ec39SRoland Dreier 2021aef9ec39SRoland Dreier if (0) { 20227aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 20237aa54bd7SDavid Dillow PFX "recv completion, opcode 0x%02x\n", opcode); 20247a700811SBart Van Assche print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 20257a700811SBart Van Assche iu->buf, wc->byte_len, true); 2026aef9ec39SRoland Dreier } 2027aef9ec39SRoland Dreier 2028aef9ec39SRoland Dreier switch (opcode) { 2029aef9ec39SRoland Dreier case SRP_RSP: 2030509c07bcSBart Van Assche srp_process_rsp(ch, iu->buf); 2031aef9ec39SRoland Dreier break; 2032aef9ec39SRoland Dreier 2033bb12588aSDavid Dillow case SRP_CRED_REQ: 2034509c07bcSBart Van Assche srp_process_cred_req(ch, iu->buf); 2035bb12588aSDavid Dillow break; 2036bb12588aSDavid Dillow 2037bb12588aSDavid Dillow case SRP_AER_REQ: 2038509c07bcSBart Van Assche srp_process_aer_req(ch, iu->buf); 2039bb12588aSDavid Dillow break; 
2040bb12588aSDavid Dillow 2041aef9ec39SRoland Dreier case SRP_T_LOGOUT: 2042aef9ec39SRoland Dreier /* XXX Handle target logout */ 20437aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 20447aa54bd7SDavid Dillow PFX "Got target logout request\n"); 2045aef9ec39SRoland Dreier break; 2046aef9ec39SRoland Dreier 2047aef9ec39SRoland Dreier default: 20487aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 20497aa54bd7SDavid Dillow PFX "Unhandled SRP opcode 0x%02x\n", opcode); 2050aef9ec39SRoland Dreier break; 2051aef9ec39SRoland Dreier } 2052aef9ec39SRoland Dreier 2053509c07bcSBart Van Assche ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 205485507bccSRalph Campbell DMA_FROM_DEVICE); 2055c996bb47SBart Van Assche 2056509c07bcSBart Van Assche res = srp_post_recv(ch, iu); 2057c996bb47SBart Van Assche if (res != 0) 2058c996bb47SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 2059c996bb47SBart Van Assche PFX "Recv failed with error code %d\n", res); 2060aef9ec39SRoland Dreier } 2061aef9ec39SRoland Dreier 2062c1120f89SBart Van Assche /** 2063c1120f89SBart Van Assche * srp_tl_err_work() - handle a transport layer error 2064af24663bSBart Van Assche * @work: Work structure embedded in an SRP target port. 2065c1120f89SBart Van Assche * 2066c1120f89SBart Van Assche * Note: This function may get invoked before the rport has been created, 2067c1120f89SBart Van Assche * hence the target->rport test. 2068c1120f89SBart Van Assche */ 2069c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work) 2070c1120f89SBart Van Assche { 2071c1120f89SBart Van Assche struct srp_target_port *target; 2072c1120f89SBart Van Assche 2073c1120f89SBart Van Assche target = container_of(work, struct srp_target_port, tl_err_work); 2074c1120f89SBart Van Assche if (target->rport) 2075c1120f89SBart Van Assche srp_start_tl_fail_timers(target->rport); 2076c1120f89SBart Van Assche } 2077c1120f89SBart Van Assche 20781dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc, 20791dc7b1f1SChristoph Hellwig const char *opname) 2080948d1e88SBart Van Assche { 20811dc7b1f1SChristoph Hellwig struct srp_rdma_ch *ch = cq->cq_context; 20827dad6b2eSBart Van Assche struct srp_target_port *target = ch->target; 20837dad6b2eSBart Van Assche 2084c014c8cdSBart Van Assche if (ch->connected && !target->qp_in_error) { 20855cfb1782SBart Van Assche shost_printk(KERN_ERR, target->scsi_host, 20861dc7b1f1SChristoph Hellwig PFX "failed %s status %s (%d) for CQE %p\n", 20871dc7b1f1SChristoph Hellwig opname, ib_wc_status_msg(wc->status), wc->status, 20881dc7b1f1SChristoph Hellwig wc->wr_cqe); 2089c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 20904f0af697SBart Van Assche } 2091948d1e88SBart Van Assche target->qp_in_error = true; 2092948d1e88SBart Van Assche } 2093948d1e88SBart Van Assche 209476c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) 2095aef9ec39SRoland Dreier { 209676c75b25SBart Van Assche struct srp_target_port *target = host_to_target(shost); 2097a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 2098509c07bcSBart Van Assche struct srp_rdma_ch *ch; 2099aef9ec39SRoland Dreier struct srp_request *req; 2100aef9ec39SRoland Dreier struct srp_iu *iu; 2101aef9ec39SRoland Dreier struct srp_cmd *cmd; 210285507bccSRalph Campbell struct ib_device *dev; 210376c75b25SBart Van Assche unsigned long flags; 210477f2c1a4SBart Van Assche u32 tag; 210577f2c1a4SBart Van Assche 
u16 idx; 2106d1b4289eSBart Van Assche int len, ret; 2107a95cadb9SBart Van Assche const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; 2108a95cadb9SBart Van Assche 2109a95cadb9SBart Van Assche /* 2110a95cadb9SBart Van Assche * The SCSI EH thread is the only context from which srp_queuecommand() 2111a95cadb9SBart Van Assche * can get invoked for blocked devices (SDEV_BLOCK / 2112a95cadb9SBart Van Assche * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by 2113a95cadb9SBart Van Assche * locking the rport mutex if invoked from inside the SCSI EH. 2114a95cadb9SBart Van Assche */ 2115a95cadb9SBart Van Assche if (in_scsi_eh) 2116a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2117aef9ec39SRoland Dreier 2118d1b4289eSBart Van Assche scmnd->result = srp_chkready(target->rport); 2119d1b4289eSBart Van Assche if (unlikely(scmnd->result)) 2120d1b4289eSBart Van Assche goto err; 21212ce19e72SBart Van Assche 212277f2c1a4SBart Van Assche WARN_ON_ONCE(scmnd->request->tag < 0); 212377f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request); 2124d92c0da7SBart Van Assche ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 212577f2c1a4SBart Van Assche idx = blk_mq_unique_tag_to_tag(tag); 212677f2c1a4SBart Van Assche WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", 212777f2c1a4SBart Van Assche dev_name(&shost->shost_gendev), tag, idx, 212877f2c1a4SBart Van Assche target->req_ring_size); 2129509c07bcSBart Van Assche 2130509c07bcSBart Van Assche spin_lock_irqsave(&ch->lock, flags); 2131509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_CMD); 2132509c07bcSBart Van Assche spin_unlock_irqrestore(&ch->lock, flags); 2133aef9ec39SRoland Dreier 213477f2c1a4SBart Van Assche if (!iu) 213577f2c1a4SBart Van Assche goto err; 213677f2c1a4SBart Van Assche 213777f2c1a4SBart Van Assche req = &ch->req_ring[idx]; 213805321937SGreg Kroah-Hartman dev = target->srp_host->srp_dev->dev; 213949248644SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, 214085507bccSRalph Campbell DMA_TO_DEVICE); 2141aef9ec39SRoland Dreier 2142f8b6e31eSDavid Dillow scmnd->host_scribble = (void *) req; 2143aef9ec39SRoland Dreier 2144aef9ec39SRoland Dreier cmd = iu->buf; 2145aef9ec39SRoland Dreier memset(cmd, 0, sizeof *cmd); 2146aef9ec39SRoland Dreier 2147aef9ec39SRoland Dreier cmd->opcode = SRP_CMD; 2148985aa495SBart Van Assche int_to_scsilun(scmnd->device->lun, &cmd->lun); 214977f2c1a4SBart Van Assche cmd->tag = tag; 2150aef9ec39SRoland Dreier memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 2151aef9ec39SRoland Dreier 2152aef9ec39SRoland Dreier req->scmnd = scmnd; 2153aef9ec39SRoland Dreier req->cmd = iu; 2154aef9ec39SRoland Dreier 2155509c07bcSBart Van Assche len = srp_map_data(scmnd, ch, req); 2156aef9ec39SRoland Dreier if (len < 0) { 21577aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 2158d1b4289eSBart Van Assche PFX "Failed to map data (%d)\n", len); 2159d1b4289eSBart Van Assche /* 2160d1b4289eSBart Van Assche * If we ran out of memory descriptors (-ENOMEM) because an 2161d1b4289eSBart Van Assche * application is queuing many requests with more than 216252ede08fSBart Van Assche * max_pages_per_mr sg-list elements, tell the SCSI mid-layer 2163d1b4289eSBart Van Assche * to reduce queue depth temporarily. 2164d1b4289eSBart Van Assche */ 2165d1b4289eSBart Van Assche scmnd->result = len == -ENOMEM ? 
2166d1b4289eSBart Van Assche DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16; 216776c75b25SBart Van Assche goto err_iu; 2168aef9ec39SRoland Dreier } 2169aef9ec39SRoland Dreier 217049248644SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, 217185507bccSRalph Campbell DMA_TO_DEVICE); 2172aef9ec39SRoland Dreier 2173509c07bcSBart Van Assche if (srp_post_send(ch, iu, len)) { 21747aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 2175aef9ec39SRoland Dreier goto err_unmap; 2176aef9ec39SRoland Dreier } 2177aef9ec39SRoland Dreier 2178d1b4289eSBart Van Assche ret = 0; 2179d1b4289eSBart Van Assche 2180a95cadb9SBart Van Assche unlock_rport: 2181a95cadb9SBart Van Assche if (in_scsi_eh) 2182a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2183a95cadb9SBart Van Assche 2184d1b4289eSBart Van Assche return ret; 2185aef9ec39SRoland Dreier 2186aef9ec39SRoland Dreier err_unmap: 2187509c07bcSBart Van Assche srp_unmap_data(scmnd, ch, req); 2188aef9ec39SRoland Dreier 218976c75b25SBart Van Assche err_iu: 2190509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_CMD); 219176c75b25SBart Van Assche 2192024ca901SBart Van Assche /* 2193024ca901SBart Van Assche * Avoid that the loops that iterate over the request ring can 2194024ca901SBart Van Assche * encounter a dangling SCSI command pointer. 2195024ca901SBart Van Assche */ 2196024ca901SBart Van Assche req->scmnd = NULL; 2197024ca901SBart Van Assche 2198d1b4289eSBart Van Assche err: 2199d1b4289eSBart Van Assche if (scmnd->result) { 2200d1b4289eSBart Van Assche scmnd->scsi_done(scmnd); 2201d1b4289eSBart Van Assche ret = 0; 2202d1b4289eSBart Van Assche } else { 2203d1b4289eSBart Van Assche ret = SCSI_MLQUEUE_HOST_BUSY; 2204d1b4289eSBart Van Assche } 2205a95cadb9SBart Van Assche 2206d1b4289eSBart Van Assche goto unlock_rport; 2207aef9ec39SRoland Dreier } 2208aef9ec39SRoland Dreier 22094d73f95fSBart Van Assche /* 22104d73f95fSBart Van Assche * Note: the resources allocated in this function are freed in 2211509c07bcSBart Van Assche * srp_free_ch_ib(). 
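 *
 * Each ring holds target->queue_size IUs: rx_ring IUs are sized for the
 * largest IU the target can send (ch->max_ti_iu_len), tx_ring IUs for the
 * largest IU we can send (target->max_iu_len), and every tx IU starts out
 * on ch->free_tx.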
22124d73f95fSBart Van Assche */ 2213509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2214aef9ec39SRoland Dreier { 2215509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2216aef9ec39SRoland Dreier int i; 2217aef9ec39SRoland Dreier 2218509c07bcSBart Van Assche ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), 22194d73f95fSBart Van Assche GFP_KERNEL); 2220509c07bcSBart Van Assche if (!ch->rx_ring) 22214d73f95fSBart Van Assche goto err_no_ring; 2222509c07bcSBart Van Assche ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), 22234d73f95fSBart Van Assche GFP_KERNEL); 2224509c07bcSBart Van Assche if (!ch->tx_ring) 22254d73f95fSBart Van Assche goto err_no_ring; 22264d73f95fSBart Van Assche 22274d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2228509c07bcSBart Van Assche ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2229509c07bcSBart Van Assche ch->max_ti_iu_len, 2230aef9ec39SRoland Dreier GFP_KERNEL, DMA_FROM_DEVICE); 2231509c07bcSBart Van Assche if (!ch->rx_ring[i]) 2232aef9ec39SRoland Dreier goto err; 2233aef9ec39SRoland Dreier } 2234aef9ec39SRoland Dreier 22354d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2236509c07bcSBart Van Assche ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 223749248644SDavid Dillow target->max_iu_len, 2238aef9ec39SRoland Dreier GFP_KERNEL, DMA_TO_DEVICE); 2239509c07bcSBart Van Assche if (!ch->tx_ring[i]) 2240aef9ec39SRoland Dreier goto err; 2241dcb4cb85SBart Van Assche 2242509c07bcSBart Van Assche list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2243aef9ec39SRoland Dreier } 2244aef9ec39SRoland Dreier 2245aef9ec39SRoland Dreier return 0; 2246aef9ec39SRoland Dreier 2247aef9ec39SRoland Dreier err: 22484d73f95fSBart Van Assche for (i = 0; i < target->queue_size; ++i) { 2249509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->rx_ring[i]); 2250509c07bcSBart Van Assche srp_free_iu(target->srp_host, ch->tx_ring[i]); 2251aef9ec39SRoland Dreier } 2252aef9ec39SRoland Dreier 22534d73f95fSBart Van Assche 22544d73f95fSBart Van Assche err_no_ring: 2255509c07bcSBart Van Assche kfree(ch->tx_ring); 2256509c07bcSBart Van Assche ch->tx_ring = NULL; 2257509c07bcSBart Van Assche kfree(ch->rx_ring); 2258509c07bcSBart Van Assche ch->rx_ring = NULL; 2259aef9ec39SRoland Dreier 2260aef9ec39SRoland Dreier return -ENOMEM; 2261aef9ec39SRoland Dreier } 2262aef9ec39SRoland Dreier 2263c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2264c9b03c1aSBart Van Assche { 2265c9b03c1aSBart Van Assche uint64_t T_tr_ns, max_compl_time_ms; 2266c9b03c1aSBart Van Assche uint32_t rq_tmo_jiffies; 2267c9b03c1aSBart Van Assche 2268c9b03c1aSBart Van Assche /* 2269c9b03c1aSBart Van Assche * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2270c9b03c1aSBart Van Assche * table 91), both the QP timeout and the retry count have to be set 2271c9b03c1aSBart Van Assche * for RC QP's during the RTR to RTS transition. 2272c9b03c1aSBart Van Assche */ 2273c9b03c1aSBart Van Assche WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2274c9b03c1aSBart Van Assche (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2275c9b03c1aSBart Van Assche 2276c9b03c1aSBart Van Assche /* 2277c9b03c1aSBart Van Assche * Set target->rq_tmo_jiffies to one second more than the largest time 2278c9b03c1aSBart Van Assche * it can take before an error completion is generated. 
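 * As a purely illustrative example (values picked arbitrarily, not taken
 * from any particular HCA): with a Local ACK Timeout of 18 and a retry
 * count of 7, T_tr = 4096 ns * 2^18 ~= 1.07 s, so up to 7 * 4 * 1.07 s
 * ~= 30 s may pass before an error completion is generated, and
 * rq_tmo_jiffies then corresponds to roughly 31 seconds.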
See also 2279c9b03c1aSBart Van Assche * C9-140..142 in the IBTA spec for more information about how to 2280c9b03c1aSBart Van Assche * convert the QP Local ACK Timeout value to nanoseconds. 2281c9b03c1aSBart Van Assche */ 2282c9b03c1aSBart Van Assche T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2283c9b03c1aSBart Van Assche max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2284c9b03c1aSBart Van Assche do_div(max_compl_time_ms, NSEC_PER_MSEC); 2285c9b03c1aSBart Van Assche rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2286c9b03c1aSBart Van Assche 2287c9b03c1aSBart Van Assche return rq_tmo_jiffies; 2288c9b03c1aSBart Van Assche } 2289c9b03c1aSBart Van Assche 2290961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2291e6300cbdSBart Van Assche const struct srp_login_rsp *lrsp, 2292509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2293961e0be8SDavid Dillow { 2294509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2295961e0be8SDavid Dillow struct ib_qp_attr *qp_attr = NULL; 2296961e0be8SDavid Dillow int attr_mask = 0; 2297961e0be8SDavid Dillow int ret; 2298961e0be8SDavid Dillow int i; 2299961e0be8SDavid Dillow 2300961e0be8SDavid Dillow if (lrsp->opcode == SRP_LOGIN_RSP) { 2301509c07bcSBart Van Assche ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2302509c07bcSBart Van Assche ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2303961e0be8SDavid Dillow 2304961e0be8SDavid Dillow /* 2305961e0be8SDavid Dillow * Reserve credits for task management so we don't 2306961e0be8SDavid Dillow * bounce requests back to the SCSI mid-layer. 2307961e0be8SDavid Dillow */ 2308961e0be8SDavid Dillow target->scsi_host->can_queue 2309509c07bcSBart Van Assche = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2310961e0be8SDavid Dillow target->scsi_host->can_queue); 23114d73f95fSBart Van Assche target->scsi_host->cmd_per_lun 23124d73f95fSBart Van Assche = min_t(int, target->scsi_host->can_queue, 23134d73f95fSBart Van Assche target->scsi_host->cmd_per_lun); 2314961e0be8SDavid Dillow } else { 2315961e0be8SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 2316961e0be8SDavid Dillow PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2317961e0be8SDavid Dillow ret = -ECONNRESET; 2318961e0be8SDavid Dillow goto error; 2319961e0be8SDavid Dillow } 2320961e0be8SDavid Dillow 2321509c07bcSBart Van Assche if (!ch->rx_ring) { 2322509c07bcSBart Van Assche ret = srp_alloc_iu_bufs(ch); 2323961e0be8SDavid Dillow if (ret) 2324961e0be8SDavid Dillow goto error; 2325961e0be8SDavid Dillow } 2326961e0be8SDavid Dillow 2327961e0be8SDavid Dillow ret = -ENOMEM; 2328961e0be8SDavid Dillow qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 2329961e0be8SDavid Dillow if (!qp_attr) 2330961e0be8SDavid Dillow goto error; 2331961e0be8SDavid Dillow 2332961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTR; 2333961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2334961e0be8SDavid Dillow if (ret) 2335961e0be8SDavid Dillow goto error_free; 2336961e0be8SDavid Dillow 2337509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2338961e0be8SDavid Dillow if (ret) 2339961e0be8SDavid Dillow goto error_free; 2340961e0be8SDavid Dillow 23414d73f95fSBart Van Assche for (i = 0; i < target->queue_size; i++) { 2342509c07bcSBart Van Assche struct srp_iu *iu = ch->rx_ring[i]; 2343509c07bcSBart Van Assche 2344509c07bcSBart Van Assche ret = srp_post_recv(ch, iu); 2345961e0be8SDavid Dillow if (ret) 2346961e0be8SDavid Dillow goto error_free; 2347961e0be8SDavid Dillow } 2348961e0be8SDavid 
Dillow 2349961e0be8SDavid Dillow qp_attr->qp_state = IB_QPS_RTS; 2350961e0be8SDavid Dillow ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2351961e0be8SDavid Dillow if (ret) 2352961e0be8SDavid Dillow goto error_free; 2353961e0be8SDavid Dillow 2354c9b03c1aSBart Van Assche target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2355c9b03c1aSBart Van Assche 2356509c07bcSBart Van Assche ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2357961e0be8SDavid Dillow if (ret) 2358961e0be8SDavid Dillow goto error_free; 2359961e0be8SDavid Dillow 2360961e0be8SDavid Dillow ret = ib_send_cm_rtu(cm_id, NULL, 0); 2361961e0be8SDavid Dillow 2362961e0be8SDavid Dillow error_free: 2363961e0be8SDavid Dillow kfree(qp_attr); 2364961e0be8SDavid Dillow 2365961e0be8SDavid Dillow error: 2366509c07bcSBart Van Assche ch->status = ret; 2367961e0be8SDavid Dillow } 2368961e0be8SDavid Dillow 2369aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 2370aef9ec39SRoland Dreier struct ib_cm_event *event, 2371509c07bcSBart Van Assche struct srp_rdma_ch *ch) 2372aef9ec39SRoland Dreier { 2373509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 23747aa54bd7SDavid Dillow struct Scsi_Host *shost = target->scsi_host; 2375aef9ec39SRoland Dreier struct ib_class_port_info *cpi; 2376aef9ec39SRoland Dreier int opcode; 2377aef9ec39SRoland Dreier 2378aef9ec39SRoland Dreier switch (event->param.rej_rcvd.reason) { 2379aef9ec39SRoland Dreier case IB_CM_REJ_PORT_CM_REDIRECT: 2380aef9ec39SRoland Dreier cpi = event->param.rej_rcvd.ari; 2381509c07bcSBart Van Assche ch->path.dlid = cpi->redirect_lid; 2382509c07bcSBart Van Assche ch->path.pkey = cpi->redirect_pkey; 2383aef9ec39SRoland Dreier cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2384509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); 2385aef9ec39SRoland Dreier 2386509c07bcSBart Van Assche ch->status = ch->path.dlid ? 2387aef9ec39SRoland Dreier SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2388aef9ec39SRoland Dreier break; 2389aef9ec39SRoland Dreier 2390aef9ec39SRoland Dreier case IB_CM_REJ_PORT_REDIRECT: 23915d7cbfd6SRoland Dreier if (srp_target_is_topspin(target)) { 2392aef9ec39SRoland Dreier /* 2393aef9ec39SRoland Dreier * Topspin/Cisco SRP gateways incorrectly send 2394aef9ec39SRoland Dreier * reject reason code 25 when they mean 24 2395aef9ec39SRoland Dreier * (port redirect). 
2396aef9ec39SRoland Dreier */ 2397509c07bcSBart Van Assche memcpy(ch->path.dgid.raw, 2398aef9ec39SRoland Dreier event->param.rej_rcvd.ari, 16); 2399aef9ec39SRoland Dreier 24007aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, shost, 24017aa54bd7SDavid Dillow PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2402509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.subnet_prefix), 2403509c07bcSBart Van Assche be64_to_cpu(ch->path.dgid.global.interface_id)); 2404aef9ec39SRoland Dreier 2405509c07bcSBart Van Assche ch->status = SRP_PORT_REDIRECT; 2406aef9ec39SRoland Dreier } else { 24077aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 24087aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2409509c07bcSBart Van Assche ch->status = -ECONNRESET; 2410aef9ec39SRoland Dreier } 2411aef9ec39SRoland Dreier break; 2412aef9ec39SRoland Dreier 2413aef9ec39SRoland Dreier case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 24147aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 24157aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2416509c07bcSBart Van Assche ch->status = -ECONNRESET; 2417aef9ec39SRoland Dreier break; 2418aef9ec39SRoland Dreier 2419aef9ec39SRoland Dreier case IB_CM_REJ_CONSUMER_DEFINED: 2420aef9ec39SRoland Dreier opcode = *(u8 *) event->private_data; 2421aef9ec39SRoland Dreier if (opcode == SRP_LOGIN_REJ) { 2422aef9ec39SRoland Dreier struct srp_login_rej *rej = event->private_data; 2423aef9ec39SRoland Dreier u32 reason = be32_to_cpu(rej->reason); 2424aef9ec39SRoland Dreier 2425aef9ec39SRoland Dreier if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 24267aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 24277aa54bd7SDavid Dillow PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2428aef9ec39SRoland Dreier else 2429e7ffde01SBart Van Assche shost_printk(KERN_WARNING, shost, PFX 2430e7ffde01SBart Van Assche "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2431747fe000SBart Van Assche target->sgid.raw, 2432747fe000SBart Van Assche target->orig_dgid.raw, reason); 2433aef9ec39SRoland Dreier } else 24347aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, 24357aa54bd7SDavid Dillow " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2436aef9ec39SRoland Dreier " opcode 0x%02x\n", opcode); 2437509c07bcSBart Van Assche ch->status = -ECONNRESET; 2438aef9ec39SRoland Dreier break; 2439aef9ec39SRoland Dreier 24409fe4bcf4SDavid Dillow case IB_CM_REJ_STALE_CONN: 24419fe4bcf4SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2442509c07bcSBart Van Assche ch->status = SRP_STALE_CONN; 24439fe4bcf4SDavid Dillow break; 24449fe4bcf4SDavid Dillow 2445aef9ec39SRoland Dreier default: 24467aa54bd7SDavid Dillow shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2447aef9ec39SRoland Dreier event->param.rej_rcvd.reason); 2448509c07bcSBart Van Assche ch->status = -ECONNRESET; 2449aef9ec39SRoland Dreier } 2450aef9ec39SRoland Dreier } 2451aef9ec39SRoland Dreier 2452aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2453aef9ec39SRoland Dreier { 2454509c07bcSBart Van Assche struct srp_rdma_ch *ch = cm_id->context; 2455509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2456aef9ec39SRoland Dreier int comp = 0; 2457aef9ec39SRoland Dreier 2458aef9ec39SRoland Dreier switch (event->event) { 2459aef9ec39SRoland Dreier case IB_CM_REQ_ERROR: 24607aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, 24617aa54bd7SDavid Dillow PFX "Sending CM REQ 
failed\n"); 2462aef9ec39SRoland Dreier comp = 1; 2463509c07bcSBart Van Assche ch->status = -ECONNRESET; 2464aef9ec39SRoland Dreier break; 2465aef9ec39SRoland Dreier 2466aef9ec39SRoland Dreier case IB_CM_REP_RECEIVED: 2467aef9ec39SRoland Dreier comp = 1; 2468509c07bcSBart Van Assche srp_cm_rep_handler(cm_id, event->private_data, ch); 2469aef9ec39SRoland Dreier break; 2470aef9ec39SRoland Dreier 2471aef9ec39SRoland Dreier case IB_CM_REJ_RECEIVED: 24727aa54bd7SDavid Dillow shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2473aef9ec39SRoland Dreier comp = 1; 2474aef9ec39SRoland Dreier 2475509c07bcSBart Van Assche srp_cm_rej_handler(cm_id, event, ch); 2476aef9ec39SRoland Dreier break; 2477aef9ec39SRoland Dreier 2478b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_RECEIVED: 24797aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 24807aa54bd7SDavid Dillow PFX "DREQ received - connection closed\n"); 2481c014c8cdSBart Van Assche ch->connected = false; 2482b7ac4ab4SIshai Rabinovitz if (ib_send_cm_drep(cm_id, NULL, 0)) 24837aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24847aa54bd7SDavid Dillow PFX "Sending CM DREP failed\n"); 2485c1120f89SBart Van Assche queue_work(system_long_wq, &target->tl_err_work); 2486aef9ec39SRoland Dreier break; 2487aef9ec39SRoland Dreier 2488aef9ec39SRoland Dreier case IB_CM_TIMEWAIT_EXIT: 24897aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, 24907aa54bd7SDavid Dillow PFX "connection closed\n"); 2491ac72d766SBart Van Assche comp = 1; 2492aef9ec39SRoland Dreier 2493509c07bcSBart Van Assche ch->status = 0; 2494aef9ec39SRoland Dreier break; 2495aef9ec39SRoland Dreier 2496b7ac4ab4SIshai Rabinovitz case IB_CM_MRA_RECEIVED: 2497b7ac4ab4SIshai Rabinovitz case IB_CM_DREQ_ERROR: 2498b7ac4ab4SIshai Rabinovitz case IB_CM_DREP_RECEIVED: 2499b7ac4ab4SIshai Rabinovitz break; 2500b7ac4ab4SIshai Rabinovitz 2501aef9ec39SRoland Dreier default: 25027aa54bd7SDavid Dillow shost_printk(KERN_WARNING, target->scsi_host, 25037aa54bd7SDavid Dillow PFX "Unhandled CM event %d\n", event->event); 2504aef9ec39SRoland Dreier break; 2505aef9ec39SRoland Dreier } 2506aef9ec39SRoland Dreier 2507aef9ec39SRoland Dreier if (comp) 2508509c07bcSBart Van Assche complete(&ch->done); 2509aef9ec39SRoland Dreier 2510aef9ec39SRoland Dreier return 0; 2511aef9ec39SRoland Dreier } 2512aef9ec39SRoland Dreier 251371444b97SJack Wang /** 251471444b97SJack Wang * srp_change_queue_depth - setting device queue depth 251571444b97SJack Wang * @sdev: scsi device struct 251671444b97SJack Wang * @qdepth: requested queue depth 251771444b97SJack Wang * 251871444b97SJack Wang * Returns queue depth. 
251971444b97SJack Wang */ 252071444b97SJack Wang static int 2521db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 252271444b97SJack Wang { 252371444b97SJack Wang if (!sdev->tagged_supported) 25241e6f2416SChristoph Hellwig qdepth = 1; 2525db5ed4dfSChristoph Hellwig return scsi_change_queue_depth(sdev, qdepth); 252671444b97SJack Wang } 252771444b97SJack Wang 2528985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2529985aa495SBart Van Assche u8 func) 2530aef9ec39SRoland Dreier { 2531509c07bcSBart Van Assche struct srp_target_port *target = ch->target; 2532a95cadb9SBart Van Assche struct srp_rport *rport = target->rport; 253319081f31SDavid Dillow struct ib_device *dev = target->srp_host->srp_dev->dev; 2534aef9ec39SRoland Dreier struct srp_iu *iu; 2535aef9ec39SRoland Dreier struct srp_tsk_mgmt *tsk_mgmt; 2536aef9ec39SRoland Dreier 2537c014c8cdSBart Van Assche if (!ch->connected || target->qp_in_error) 25383780d1f0SBart Van Assche return -1; 25393780d1f0SBart Van Assche 2540509c07bcSBart Van Assche init_completion(&ch->tsk_mgmt_done); 2541aef9ec39SRoland Dreier 2542a95cadb9SBart Van Assche /* 2543509c07bcSBart Van Assche * Lock the rport mutex to avoid that srp_create_ch_ib() is 2544a95cadb9SBart Van Assche * invoked while a task management function is being sent. 2545a95cadb9SBart Van Assche */ 2546a95cadb9SBart Van Assche mutex_lock(&rport->mutex); 2547509c07bcSBart Van Assche spin_lock_irq(&ch->lock); 2548509c07bcSBart Van Assche iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2549509c07bcSBart Van Assche spin_unlock_irq(&ch->lock); 255076c75b25SBart Van Assche 2551a95cadb9SBart Van Assche if (!iu) { 2552a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2553a95cadb9SBart Van Assche 255476c75b25SBart Van Assche return -1; 2555a95cadb9SBart Van Assche } 2556aef9ec39SRoland Dreier 255719081f31SDavid Dillow ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 255819081f31SDavid Dillow DMA_TO_DEVICE); 2559aef9ec39SRoland Dreier tsk_mgmt = iu->buf; 2560aef9ec39SRoland Dreier memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2561aef9ec39SRoland Dreier 2562aef9ec39SRoland Dreier tsk_mgmt->opcode = SRP_TSK_MGMT; 2563985aa495SBart Van Assche int_to_scsilun(lun, &tsk_mgmt->lun); 2564f8b6e31eSDavid Dillow tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; 2565aef9ec39SRoland Dreier tsk_mgmt->tsk_mgmt_func = func; 2566f8b6e31eSDavid Dillow tsk_mgmt->task_tag = req_tag; 2567aef9ec39SRoland Dreier 256819081f31SDavid Dillow ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 256919081f31SDavid Dillow DMA_TO_DEVICE); 2570509c07bcSBart Van Assche if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2571509c07bcSBart Van Assche srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2572a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2573a95cadb9SBart Van Assche 257476c75b25SBart Van Assche return -1; 257576c75b25SBart Van Assche } 2576a95cadb9SBart Van Assche mutex_unlock(&rport->mutex); 2577d945e1dfSRoland Dreier 2578509c07bcSBart Van Assche if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, 2579aef9ec39SRoland Dreier msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 2580d945e1dfSRoland Dreier return -1; 2581aef9ec39SRoland Dreier 2582d945e1dfSRoland Dreier return 0; 2583d945e1dfSRoland Dreier } 2584d945e1dfSRoland Dreier 2585aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd) 2586aef9ec39SRoland Dreier { 2587d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host); 2588f8b6e31eSDavid Dillow 
struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
258977f2c1a4SBart Van Assche u32 tag;
2590d92c0da7SBart Van Assche u16 ch_idx;
2591509c07bcSBart Van Assche struct srp_rdma_ch *ch;
2592086f44f5SBart Van Assche int ret;
2593d945e1dfSRoland Dreier
25947aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2595aef9ec39SRoland Dreier
2596d92c0da7SBart Van Assche if (!req)
259799b6697aSBart Van Assche return SUCCESS;
259877f2c1a4SBart Van Assche tag = blk_mq_unique_tag(scmnd->request);
2599d92c0da7SBart Van Assche ch_idx = blk_mq_unique_tag_to_hwq(tag);
2600d92c0da7SBart Van Assche if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2601d92c0da7SBart Van Assche return SUCCESS;
2602d92c0da7SBart Van Assche ch = &target->ch[ch_idx];
2603d92c0da7SBart Van Assche if (!srp_claim_req(ch, req, NULL, scmnd))
2604d92c0da7SBart Van Assche return SUCCESS;
2605d92c0da7SBart Van Assche shost_printk(KERN_ERR, target->scsi_host,
2606d92c0da7SBart Van Assche "Sending SRP abort for tag %#x\n", tag);
260777f2c1a4SBart Van Assche if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
260880d5e8a2SBart Van Assche SRP_TSK_ABORT_TASK) == 0)
2609086f44f5SBart Van Assche ret = SUCCESS;
2610ed9b2264SBart Van Assche else if (target->rport->state == SRP_RPORT_LOST)
261199e1c139SBart Van Assche ret = FAST_IO_FAIL;
2612086f44f5SBart Van Assche else
2613086f44f5SBart Van Assche ret = FAILED;
2614509c07bcSBart Van Assche srp_free_req(ch, req, scmnd, 0);
2615d945e1dfSRoland Dreier scmnd->result = DID_ABORT << 16;
2616d8536670SBart Van Assche scmnd->scsi_done(scmnd);
2617d945e1dfSRoland Dreier
2618086f44f5SBart Van Assche return ret;
2619aef9ec39SRoland Dreier }
2620aef9ec39SRoland Dreier
2621aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2622aef9ec39SRoland Dreier {
2623d945e1dfSRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host);
2624d92c0da7SBart Van Assche struct srp_rdma_ch *ch;
2625536ae14eSBart Van Assche int i, j;
2626d945e1dfSRoland Dreier
26277aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2628aef9ec39SRoland Dreier
2629d92c0da7SBart Van Assche ch = &target->ch[0];
2630509c07bcSBart Van Assche if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2631f8b6e31eSDavid Dillow SRP_TSK_LUN_RESET))
2632d945e1dfSRoland Dreier return FAILED;
2633509c07bcSBart Van Assche if (ch->tsk_mgmt_status)
2634d945e1dfSRoland Dreier return FAILED;
2635d945e1dfSRoland Dreier
2636d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) {
2637d92c0da7SBart Van Assche ch = &target->ch[i];
/* Use a separate index for the request ring so the channel loop index is not clobbered. */
26384d73f95fSBart Van Assche for (j = 0; j < target->req_ring_size; ++j) {
2639509c07bcSBart Van Assche struct srp_request *req = &ch->req_ring[j];
2640509c07bcSBart Van Assche
2641509c07bcSBart Van Assche srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2642536ae14eSBart Van Assche }
2643d92c0da7SBart Van Assche }
2644d945e1dfSRoland Dreier
2645d945e1dfSRoland Dreier return SUCCESS;
2646aef9ec39SRoland Dreier }
2647aef9ec39SRoland Dreier
2648aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2649aef9ec39SRoland Dreier {
2650aef9ec39SRoland Dreier struct srp_target_port *target = host_to_target(scmnd->device->host);
2651aef9ec39SRoland Dreier
26527aa54bd7SDavid Dillow shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2653aef9ec39SRoland Dreier
2654ed9b2264SBart Van Assche return srp_reconnect_rport(target->rport) == 0 ?
SUCCESS : FAILED; 2655aef9ec39SRoland Dreier } 2656aef9ec39SRoland Dreier 2657509c5f33SBart Van Assche static int srp_slave_alloc(struct scsi_device *sdev) 2658509c5f33SBart Van Assche { 2659509c5f33SBart Van Assche struct Scsi_Host *shost = sdev->host; 2660509c5f33SBart Van Assche struct srp_target_port *target = host_to_target(shost); 2661509c5f33SBart Van Assche struct srp_device *srp_dev = target->srp_host->srp_dev; 2662509c5f33SBart Van Assche struct ib_device *ibdev = srp_dev->dev; 2663509c5f33SBart Van Assche 2664509c5f33SBart Van Assche if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) 2665509c5f33SBart Van Assche blk_queue_virt_boundary(sdev->request_queue, 2666509c5f33SBart Van Assche ~srp_dev->mr_page_mask); 2667509c5f33SBart Van Assche 2668509c5f33SBart Van Assche return 0; 2669509c5f33SBart Van Assche } 2670509c5f33SBart Van Assche 2671c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev) 2672c9b03c1aSBart Van Assche { 2673c9b03c1aSBart Van Assche struct Scsi_Host *shost = sdev->host; 2674c9b03c1aSBart Van Assche struct srp_target_port *target = host_to_target(shost); 2675c9b03c1aSBart Van Assche struct request_queue *q = sdev->request_queue; 2676c9b03c1aSBart Van Assche unsigned long timeout; 2677c9b03c1aSBart Van Assche 2678c9b03c1aSBart Van Assche if (sdev->type == TYPE_DISK) { 2679c9b03c1aSBart Van Assche timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2680c9b03c1aSBart Van Assche blk_queue_rq_timeout(q, timeout); 2681c9b03c1aSBart Van Assche } 2682c9b03c1aSBart Van Assche 2683c9b03c1aSBart Van Assche return 0; 2684c9b03c1aSBart Van Assche } 2685c9b03c1aSBart Van Assche 2686ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 2687ee959b00STony Jones char *buf) 26886ecb0c84SRoland Dreier { 2689ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26906ecb0c84SRoland Dreier 269145c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 26926ecb0c84SRoland Dreier } 26936ecb0c84SRoland Dreier 2694ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 2695ee959b00STony Jones char *buf) 26966ecb0c84SRoland Dreier { 2697ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 26986ecb0c84SRoland Dreier 269945c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 27006ecb0c84SRoland Dreier } 27016ecb0c84SRoland Dreier 2702ee959b00STony Jones static ssize_t show_service_id(struct device *dev, 2703ee959b00STony Jones struct device_attribute *attr, char *buf) 27046ecb0c84SRoland Dreier { 2705ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27066ecb0c84SRoland Dreier 270745c37cadSBart Van Assche return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); 27086ecb0c84SRoland Dreier } 27096ecb0c84SRoland Dreier 2710ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 2711ee959b00STony Jones char *buf) 27126ecb0c84SRoland Dreier { 2713ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27146ecb0c84SRoland Dreier 2715747fe000SBart Van Assche return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); 27166ecb0c84SRoland Dreier } 27176ecb0c84SRoland Dreier 2718848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, 2719848b3082SBart Van Assche 
char *buf) 2720848b3082SBart Van Assche { 2721848b3082SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2722848b3082SBart Van Assche 2723747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->sgid.raw); 2724848b3082SBart Van Assche } 2725848b3082SBart Van Assche 2726ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 2727ee959b00STony Jones char *buf) 27286ecb0c84SRoland Dreier { 2729ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2730d92c0da7SBart Van Assche struct srp_rdma_ch *ch = &target->ch[0]; 27316ecb0c84SRoland Dreier 2732509c07bcSBart Van Assche return sprintf(buf, "%pI6\n", ch->path.dgid.raw); 27336ecb0c84SRoland Dreier } 27346ecb0c84SRoland Dreier 2735ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev, 2736ee959b00STony Jones struct device_attribute *attr, char *buf) 27373633b3d0SIshai Rabinovitz { 2738ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27393633b3d0SIshai Rabinovitz 2740747fe000SBart Van Assche return sprintf(buf, "%pI6\n", target->orig_dgid.raw); 27413633b3d0SIshai Rabinovitz } 27423633b3d0SIshai Rabinovitz 274389de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev, 274489de7486SBart Van Assche struct device_attribute *attr, char *buf) 274589de7486SBart Van Assche { 274689de7486SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2747d92c0da7SBart Van Assche struct srp_rdma_ch *ch; 2748d92c0da7SBart Van Assche int i, req_lim = INT_MAX; 274989de7486SBart Van Assche 2750d92c0da7SBart Van Assche for (i = 0; i < target->ch_count; i++) { 2751d92c0da7SBart Van Assche ch = &target->ch[i]; 2752d92c0da7SBart Van Assche req_lim = min(req_lim, ch->req_lim); 2753d92c0da7SBart Van Assche } 2754d92c0da7SBart Van Assche return sprintf(buf, "%d\n", req_lim); 275589de7486SBart Van Assche } 275689de7486SBart Van Assche 2757ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev, 2758ee959b00STony Jones struct device_attribute *attr, char *buf) 27596bfa24faSRoland Dreier { 2760ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27616bfa24faSRoland Dreier 27626bfa24faSRoland Dreier return sprintf(buf, "%d\n", target->zero_req_lim); 27636bfa24faSRoland Dreier } 27646bfa24faSRoland Dreier 2765ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev, 2766ee959b00STony Jones struct device_attribute *attr, char *buf) 2767ded7f1a1SIshai Rabinovitz { 2768ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2769ded7f1a1SIshai Rabinovitz 2770ded7f1a1SIshai Rabinovitz return sprintf(buf, "%d\n", target->srp_host->port); 2771ded7f1a1SIshai Rabinovitz } 2772ded7f1a1SIshai Rabinovitz 2773ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev, 2774ee959b00STony Jones struct device_attribute *attr, char *buf) 2775ded7f1a1SIshai Rabinovitz { 2776ee959b00STony Jones struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2777ded7f1a1SIshai Rabinovitz 277805321937SGreg Kroah-Hartman return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 2779ded7f1a1SIshai Rabinovitz } 2780ded7f1a1SIshai Rabinovitz 2781d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, 2782d92c0da7SBart Van Assche char *buf) 2783d92c0da7SBart Van Assche { 2784d92c0da7SBart Van Assche struct 
srp_target_port *target = host_to_target(class_to_shost(dev)); 2785d92c0da7SBart Van Assche 2786d92c0da7SBart Van Assche return sprintf(buf, "%d\n", target->ch_count); 2787d92c0da7SBart Van Assche } 2788d92c0da7SBart Van Assche 27894b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev, 27904b5e5f41SBart Van Assche struct device_attribute *attr, char *buf) 27914b5e5f41SBart Van Assche { 27924b5e5f41SBart Van Assche struct srp_target_port *target = host_to_target(class_to_shost(dev)); 27934b5e5f41SBart Van Assche 27944b5e5f41SBart Van Assche return sprintf(buf, "%d\n", target->comp_vector); 27954b5e5f41SBart Van Assche } 27964b5e5f41SBart Van Assche 27977bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev, 27987bb312e4SVu Pham struct device_attribute *attr, char *buf) 27997bb312e4SVu Pham { 28007bb312e4SVu Pham struct srp_target_port *target = host_to_target(class_to_shost(dev)); 28017bb312e4SVu Pham 28027bb312e4SVu Pham return sprintf(buf, "%d\n", target->tl_retry_count); 28037bb312e4SVu Pham } 28047bb312e4SVu Pham 280549248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev, 280649248644SDavid Dillow struct device_attribute *attr, char *buf) 280749248644SDavid Dillow { 280849248644SDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 280949248644SDavid Dillow 281049248644SDavid Dillow return sprintf(buf, "%u\n", target->cmd_sg_cnt); 281149248644SDavid Dillow } 281249248644SDavid Dillow 2813c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev, 2814c07d424dSDavid Dillow struct device_attribute *attr, char *buf) 2815c07d424dSDavid Dillow { 2816c07d424dSDavid Dillow struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2817c07d424dSDavid Dillow 2818c07d424dSDavid Dillow return sprintf(buf, "%s\n", target->allow_ext_sg ? 
"true" : "false"); 2819c07d424dSDavid Dillow } 2820c07d424dSDavid Dillow 2821ee959b00STony Jones static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 2822ee959b00STony Jones static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 2823ee959b00STony Jones static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 2824ee959b00STony Jones static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2825848b3082SBart Van Assche static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL); 2826ee959b00STony Jones static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 2827ee959b00STony Jones static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 282889de7486SBart Van Assche static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 2829ee959b00STony Jones static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 2830ee959b00STony Jones static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 2831ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 2832d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); 28334b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); 28347bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); 283549248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); 2836c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL); 28376ecb0c84SRoland Dreier 2838ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = { 2839ee959b00STony Jones &dev_attr_id_ext, 2840ee959b00STony Jones &dev_attr_ioc_guid, 2841ee959b00STony Jones &dev_attr_service_id, 2842ee959b00STony Jones &dev_attr_pkey, 2843848b3082SBart Van Assche &dev_attr_sgid, 2844ee959b00STony Jones &dev_attr_dgid, 2845ee959b00STony Jones &dev_attr_orig_dgid, 284689de7486SBart Van Assche &dev_attr_req_lim, 2847ee959b00STony Jones &dev_attr_zero_req_lim, 2848ee959b00STony Jones &dev_attr_local_ib_port, 2849ee959b00STony Jones &dev_attr_local_ib_device, 2850d92c0da7SBart Van Assche &dev_attr_ch_count, 28514b5e5f41SBart Van Assche &dev_attr_comp_vector, 28527bb312e4SVu Pham &dev_attr_tl_retry_count, 285349248644SDavid Dillow &dev_attr_cmd_sg_entries, 2854c07d424dSDavid Dillow &dev_attr_allow_ext_sg, 28556ecb0c84SRoland Dreier NULL 28566ecb0c84SRoland Dreier }; 28576ecb0c84SRoland Dreier 2858aef9ec39SRoland Dreier static struct scsi_host_template srp_template = { 2859aef9ec39SRoland Dreier .module = THIS_MODULE, 2860b7f008fdSRoland Dreier .name = "InfiniBand SRP initiator", 2861b7f008fdSRoland Dreier .proc_name = DRV_NAME, 2862509c5f33SBart Van Assche .slave_alloc = srp_slave_alloc, 2863c9b03c1aSBart Van Assche .slave_configure = srp_slave_configure, 2864aef9ec39SRoland Dreier .info = srp_target_info, 2865aef9ec39SRoland Dreier .queuecommand = srp_queuecommand, 286671444b97SJack Wang .change_queue_depth = srp_change_queue_depth, 2867aef9ec39SRoland Dreier .eh_abort_handler = srp_abort, 2868aef9ec39SRoland Dreier .eh_device_reset_handler = srp_reset_device, 2869aef9ec39SRoland Dreier .eh_host_reset_handler = srp_reset_host, 28702742c1daSBart Van Assche .skip_settle_delay = true, 287149248644SDavid Dillow .sg_tablesize = SRP_DEF_SG_TABLESIZE, 28724d73f95fSBart Van Assche .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 2873aef9ec39SRoland Dreier .this_id = -1, 28744d73f95fSBart Van Assche .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 28756ecb0c84SRoland Dreier .use_clustering = ENABLE_CLUSTERING, 287677f2c1a4SBart 
Van Assche .shost_attrs = srp_host_attrs, 2877c40ecc12SChristoph Hellwig .track_queue_depth = 1, 2878aef9ec39SRoland Dreier }; 2879aef9ec39SRoland Dreier 288034aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host) 288134aa654eSBart Van Assche { 288234aa654eSBart Van Assche struct scsi_device *sdev; 288334aa654eSBart Van Assche int c = 0; 288434aa654eSBart Van Assche 288534aa654eSBart Van Assche shost_for_each_device(sdev, host) 288634aa654eSBart Van Assche c++; 288734aa654eSBart Van Assche 288834aa654eSBart Van Assche return c; 288934aa654eSBart Van Assche } 289034aa654eSBart Van Assche 2891bc44bd1dSBart Van Assche /* 2892bc44bd1dSBart Van Assche * Return values: 2893bc44bd1dSBart Van Assche * < 0 upon failure. Caller is responsible for SRP target port cleanup. 2894bc44bd1dSBart Van Assche * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 2895bc44bd1dSBart Van Assche * removal has been scheduled. 2896bc44bd1dSBart Van Assche * 0 and target->state != SRP_TARGET_REMOVED upon success. 2897bc44bd1dSBart Van Assche */ 2898aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 2899aef9ec39SRoland Dreier { 29003236822bSFUJITA Tomonori struct srp_rport_identifiers ids; 29013236822bSFUJITA Tomonori struct srp_rport *rport; 29023236822bSFUJITA Tomonori 290334aa654eSBart Van Assche target->state = SRP_TARGET_SCANNING; 2904aef9ec39SRoland Dreier sprintf(target->target_name, "SRP.T10:%016llX", 290545c37cadSBart Van Assche be64_to_cpu(target->id_ext)); 2906aef9ec39SRoland Dreier 2907*dee2b82aSBart Van Assche if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) 2908aef9ec39SRoland Dreier return -ENODEV; 2909aef9ec39SRoland Dreier 29103236822bSFUJITA Tomonori memcpy(ids.port_id, &target->id_ext, 8); 29113236822bSFUJITA Tomonori memcpy(ids.port_id + 8, &target->ioc_guid, 8); 2912aebd5e47SFUJITA Tomonori ids.roles = SRP_RPORT_ROLE_TARGET; 29133236822bSFUJITA Tomonori rport = srp_rport_add(target->scsi_host, &ids); 29143236822bSFUJITA Tomonori if (IS_ERR(rport)) { 29153236822bSFUJITA Tomonori scsi_remove_host(target->scsi_host); 29163236822bSFUJITA Tomonori return PTR_ERR(rport); 29173236822bSFUJITA Tomonori } 29183236822bSFUJITA Tomonori 2919dc1bdbd9SBart Van Assche rport->lld_data = target; 29209dd69a60SBart Van Assche target->rport = rport; 2921dc1bdbd9SBart Van Assche 2922b3589fd4SMatthew Wilcox spin_lock(&host->target_lock); 2923aef9ec39SRoland Dreier list_add_tail(&target->list, &host->target_list); 2924b3589fd4SMatthew Wilcox spin_unlock(&host->target_lock); 2925aef9ec39SRoland Dreier 2926aef9ec39SRoland Dreier scsi_scan_target(&target->scsi_host->shost_gendev, 29271d645088SHannes Reinecke 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 2928aef9ec39SRoland Dreier 2929c014c8cdSBart Van Assche if (srp_connected_ch(target) < target->ch_count || 2930c014c8cdSBart Van Assche target->qp_in_error) { 293134aa654eSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 293234aa654eSBart Van Assche PFX "SCSI scan failed - removing SCSI host\n"); 293334aa654eSBart Van Assche srp_queue_remove_work(target); 293434aa654eSBart Van Assche goto out; 293534aa654eSBart Van Assche } 293634aa654eSBart Van Assche 2937cf1acab7SBart Van Assche pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", 293834aa654eSBart Van Assche dev_name(&target->scsi_host->shost_gendev), 293934aa654eSBart Van Assche srp_sdev_count(target->scsi_host)); 294034aa654eSBart Van Assche 294134aa654eSBart Van Assche 
spin_lock_irq(&target->lock); 294234aa654eSBart Van Assche if (target->state == SRP_TARGET_SCANNING) 294334aa654eSBart Van Assche target->state = SRP_TARGET_LIVE; 294434aa654eSBart Van Assche spin_unlock_irq(&target->lock); 294534aa654eSBart Van Assche 294634aa654eSBart Van Assche out: 2947aef9ec39SRoland Dreier return 0; 2948aef9ec39SRoland Dreier } 2949aef9ec39SRoland Dreier 2950ee959b00STony Jones static void srp_release_dev(struct device *dev) 2951aef9ec39SRoland Dreier { 2952aef9ec39SRoland Dreier struct srp_host *host = 2953ee959b00STony Jones container_of(dev, struct srp_host, dev); 2954aef9ec39SRoland Dreier 2955aef9ec39SRoland Dreier complete(&host->released); 2956aef9ec39SRoland Dreier } 2957aef9ec39SRoland Dreier 2958aef9ec39SRoland Dreier static struct class srp_class = { 2959aef9ec39SRoland Dreier .name = "infiniband_srp", 2960ee959b00STony Jones .dev_release = srp_release_dev 2961aef9ec39SRoland Dreier }; 2962aef9ec39SRoland Dreier 296396fc248aSBart Van Assche /** 296496fc248aSBart Van Assche * srp_conn_unique() - check whether the connection to a target is unique 2965af24663bSBart Van Assche * @host: SRP host. 2966af24663bSBart Van Assche * @target: SRP target port. 296796fc248aSBart Van Assche */ 296896fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host, 296996fc248aSBart Van Assche struct srp_target_port *target) 297096fc248aSBart Van Assche { 297196fc248aSBart Van Assche struct srp_target_port *t; 297296fc248aSBart Van Assche bool ret = false; 297396fc248aSBart Van Assche 297496fc248aSBart Van Assche if (target->state == SRP_TARGET_REMOVED) 297596fc248aSBart Van Assche goto out; 297696fc248aSBart Van Assche 297796fc248aSBart Van Assche ret = true; 297896fc248aSBart Van Assche 297996fc248aSBart Van Assche spin_lock(&host->target_lock); 298096fc248aSBart Van Assche list_for_each_entry(t, &host->target_list, list) { 298196fc248aSBart Van Assche if (t != target && 298296fc248aSBart Van Assche target->id_ext == t->id_ext && 298396fc248aSBart Van Assche target->ioc_guid == t->ioc_guid && 298496fc248aSBart Van Assche target->initiator_ext == t->initiator_ext) { 298596fc248aSBart Van Assche ret = false; 298696fc248aSBart Van Assche break; 298796fc248aSBart Van Assche } 298896fc248aSBart Van Assche } 298996fc248aSBart Van Assche spin_unlock(&host->target_lock); 299096fc248aSBart Van Assche 299196fc248aSBart Van Assche out: 299296fc248aSBart Van Assche return ret; 299396fc248aSBart Van Assche } 299496fc248aSBart Van Assche 2995aef9ec39SRoland Dreier /* 2996aef9ec39SRoland Dreier * Target ports are added by writing 2997aef9ec39SRoland Dreier * 2998aef9ec39SRoland Dreier * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 2999aef9ec39SRoland Dreier * pkey=<P_Key>,service_id=<service ID> 3000aef9ec39SRoland Dreier * 3001aef9ec39SRoland Dreier * to the add_target sysfs attribute. 
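 * For example, with purely hypothetical identifiers and HCA name (shown
 * only to illustrate the format):
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90300a0b0c0,dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,service_id=0002c90300a0b0c0" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target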
3002aef9ec39SRoland Dreier */ 3003aef9ec39SRoland Dreier enum { 3004aef9ec39SRoland Dreier SRP_OPT_ERR = 0, 3005aef9ec39SRoland Dreier SRP_OPT_ID_EXT = 1 << 0, 3006aef9ec39SRoland Dreier SRP_OPT_IOC_GUID = 1 << 1, 3007aef9ec39SRoland Dreier SRP_OPT_DGID = 1 << 2, 3008aef9ec39SRoland Dreier SRP_OPT_PKEY = 1 << 3, 3009aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID = 1 << 4, 3010aef9ec39SRoland Dreier SRP_OPT_MAX_SECT = 1 << 5, 301152fb2b50SVu Pham SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 30120c0450dbSRamachandra K SRP_OPT_IO_CLASS = 1 << 7, 301301cb9bcbSIshai Rabinovitz SRP_OPT_INITIATOR_EXT = 1 << 8, 301449248644SDavid Dillow SRP_OPT_CMD_SG_ENTRIES = 1 << 9, 3015c07d424dSDavid Dillow SRP_OPT_ALLOW_EXT_SG = 1 << 10, 3016c07d424dSDavid Dillow SRP_OPT_SG_TABLESIZE = 1 << 11, 30174b5e5f41SBart Van Assche SRP_OPT_COMP_VECTOR = 1 << 12, 30187bb312e4SVu Pham SRP_OPT_TL_RETRY_COUNT = 1 << 13, 30194d73f95fSBart Van Assche SRP_OPT_QUEUE_SIZE = 1 << 14, 3020aef9ec39SRoland Dreier SRP_OPT_ALL = (SRP_OPT_ID_EXT | 3021aef9ec39SRoland Dreier SRP_OPT_IOC_GUID | 3022aef9ec39SRoland Dreier SRP_OPT_DGID | 3023aef9ec39SRoland Dreier SRP_OPT_PKEY | 3024aef9ec39SRoland Dreier SRP_OPT_SERVICE_ID), 3025aef9ec39SRoland Dreier }; 3026aef9ec39SRoland Dreier 3027a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = { 3028aef9ec39SRoland Dreier { SRP_OPT_ID_EXT, "id_ext=%s" }, 3029aef9ec39SRoland Dreier { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 3030aef9ec39SRoland Dreier { SRP_OPT_DGID, "dgid=%s" }, 3031aef9ec39SRoland Dreier { SRP_OPT_PKEY, "pkey=%x" }, 3032aef9ec39SRoland Dreier { SRP_OPT_SERVICE_ID, "service_id=%s" }, 3033aef9ec39SRoland Dreier { SRP_OPT_MAX_SECT, "max_sect=%d" }, 303452fb2b50SVu Pham { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 30350c0450dbSRamachandra K { SRP_OPT_IO_CLASS, "io_class=%x" }, 303601cb9bcbSIshai Rabinovitz { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 303749248644SDavid Dillow { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, 3038c07d424dSDavid Dillow { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, 3039c07d424dSDavid Dillow { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, 30404b5e5f41SBart Van Assche { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, 30417bb312e4SVu Pham { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, 30424d73f95fSBart Van Assche { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, 3043aef9ec39SRoland Dreier { SRP_OPT_ERR, NULL } 3044aef9ec39SRoland Dreier }; 3045aef9ec39SRoland Dreier 3046aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target) 3047aef9ec39SRoland Dreier { 3048aef9ec39SRoland Dreier char *options, *sep_opt; 3049aef9ec39SRoland Dreier char *p; 3050aef9ec39SRoland Dreier char dgid[3]; 3051aef9ec39SRoland Dreier substring_t args[MAX_OPT_ARGS]; 3052aef9ec39SRoland Dreier int opt_mask = 0; 3053aef9ec39SRoland Dreier int token; 3054aef9ec39SRoland Dreier int ret = -EINVAL; 3055aef9ec39SRoland Dreier int i; 3056aef9ec39SRoland Dreier 3057aef9ec39SRoland Dreier options = kstrdup(buf, GFP_KERNEL); 3058aef9ec39SRoland Dreier if (!options) 3059aef9ec39SRoland Dreier return -ENOMEM; 3060aef9ec39SRoland Dreier 3061aef9ec39SRoland Dreier sep_opt = options; 30627dcf9c19SSagi Grimberg while ((p = strsep(&sep_opt, ",\n")) != NULL) { 3063aef9ec39SRoland Dreier if (!*p) 3064aef9ec39SRoland Dreier continue; 3065aef9ec39SRoland Dreier 3066aef9ec39SRoland Dreier token = match_token(p, srp_opt_tokens, args); 3067aef9ec39SRoland Dreier opt_mask |= token; 3068aef9ec39SRoland Dreier 3069aef9ec39SRoland Dreier switch (token) { 3070aef9ec39SRoland Dreier 
case SRP_OPT_ID_EXT: 3071aef9ec39SRoland Dreier p = match_strdup(args); 3072a20f3a6dSIshai Rabinovitz if (!p) { 3073a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3074a20f3a6dSIshai Rabinovitz goto out; 3075a20f3a6dSIshai Rabinovitz } 3076aef9ec39SRoland Dreier target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3077aef9ec39SRoland Dreier kfree(p); 3078aef9ec39SRoland Dreier break; 3079aef9ec39SRoland Dreier 3080aef9ec39SRoland Dreier case SRP_OPT_IOC_GUID: 3081aef9ec39SRoland Dreier p = match_strdup(args); 3082a20f3a6dSIshai Rabinovitz if (!p) { 3083a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3084a20f3a6dSIshai Rabinovitz goto out; 3085a20f3a6dSIshai Rabinovitz } 3086aef9ec39SRoland Dreier target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3087aef9ec39SRoland Dreier kfree(p); 3088aef9ec39SRoland Dreier break; 3089aef9ec39SRoland Dreier 3090aef9ec39SRoland Dreier case SRP_OPT_DGID: 3091aef9ec39SRoland Dreier p = match_strdup(args); 3092a20f3a6dSIshai Rabinovitz if (!p) { 3093a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3094a20f3a6dSIshai Rabinovitz goto out; 3095a20f3a6dSIshai Rabinovitz } 3096aef9ec39SRoland Dreier if (strlen(p) != 32) { 3097e0bda7d8SBart Van Assche pr_warn("bad dest GID parameter '%s'\n", p); 3098ce1823f0SRoland Dreier kfree(p); 3099aef9ec39SRoland Dreier goto out; 3100aef9ec39SRoland Dreier } 3101aef9ec39SRoland Dreier 3102aef9ec39SRoland Dreier for (i = 0; i < 16; ++i) { 3103747fe000SBart Van Assche strlcpy(dgid, p + i * 2, sizeof(dgid)); 3104747fe000SBart Van Assche if (sscanf(dgid, "%hhx", 3105747fe000SBart Van Assche &target->orig_dgid.raw[i]) < 1) { 3106747fe000SBart Van Assche ret = -EINVAL; 3107747fe000SBart Van Assche kfree(p); 3108747fe000SBart Van Assche goto out; 3109747fe000SBart Van Assche } 3110aef9ec39SRoland Dreier } 3111bf17c1c7SRoland Dreier kfree(p); 3112aef9ec39SRoland Dreier break; 3113aef9ec39SRoland Dreier 3114aef9ec39SRoland Dreier case SRP_OPT_PKEY: 3115aef9ec39SRoland Dreier if (match_hex(args, &token)) { 3116e0bda7d8SBart Van Assche pr_warn("bad P_Key parameter '%s'\n", p); 3117aef9ec39SRoland Dreier goto out; 3118aef9ec39SRoland Dreier } 3119747fe000SBart Van Assche target->pkey = cpu_to_be16(token); 3120aef9ec39SRoland Dreier break; 3121aef9ec39SRoland Dreier 3122aef9ec39SRoland Dreier case SRP_OPT_SERVICE_ID: 3123aef9ec39SRoland Dreier p = match_strdup(args); 3124a20f3a6dSIshai Rabinovitz if (!p) { 3125a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3126a20f3a6dSIshai Rabinovitz goto out; 3127a20f3a6dSIshai Rabinovitz } 3128aef9ec39SRoland Dreier target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 3129aef9ec39SRoland Dreier kfree(p); 3130aef9ec39SRoland Dreier break; 3131aef9ec39SRoland Dreier 3132aef9ec39SRoland Dreier case SRP_OPT_MAX_SECT: 3133aef9ec39SRoland Dreier if (match_int(args, &token)) { 3134e0bda7d8SBart Van Assche pr_warn("bad max sect parameter '%s'\n", p); 3135aef9ec39SRoland Dreier goto out; 3136aef9ec39SRoland Dreier } 3137aef9ec39SRoland Dreier target->scsi_host->max_sectors = token; 3138aef9ec39SRoland Dreier break; 3139aef9ec39SRoland Dreier 31404d73f95fSBart Van Assche case SRP_OPT_QUEUE_SIZE: 31414d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 31424d73f95fSBart Van Assche pr_warn("bad queue_size parameter '%s'\n", p); 31434d73f95fSBart Van Assche goto out; 31444d73f95fSBart Van Assche } 31454d73f95fSBart Van Assche target->scsi_host->can_queue = token; 31464d73f95fSBart Van Assche target->queue_size = token + SRP_RSP_SQ_SIZE + 31474d73f95fSBart Van Assche SRP_TSK_MGMT_SQ_SIZE; 
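/*
 * The internal queue_size reserves extra slots for SRP_RSP and
 * task-management IUs on top of the can_queue value requested by the
 * user; unless max_cmd_per_lun is also specified, cmd_per_lun defaults
 * to the requested can_queue value.
 */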
31484d73f95fSBart Van Assche if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 31494d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 31504d73f95fSBart Van Assche break; 31514d73f95fSBart Van Assche 315252fb2b50SVu Pham case SRP_OPT_MAX_CMD_PER_LUN: 31534d73f95fSBart Van Assche if (match_int(args, &token) || token < 1) { 3154e0bda7d8SBart Van Assche pr_warn("bad max cmd_per_lun parameter '%s'\n", 3155e0bda7d8SBart Van Assche p); 315652fb2b50SVu Pham goto out; 315752fb2b50SVu Pham } 31584d73f95fSBart Van Assche target->scsi_host->cmd_per_lun = token; 315952fb2b50SVu Pham break; 316052fb2b50SVu Pham 31610c0450dbSRamachandra K case SRP_OPT_IO_CLASS: 31620c0450dbSRamachandra K if (match_hex(args, &token)) { 3163e0bda7d8SBart Van Assche pr_warn("bad IO class parameter '%s'\n", p); 31640c0450dbSRamachandra K goto out; 31650c0450dbSRamachandra K } 31660c0450dbSRamachandra K if (token != SRP_REV10_IB_IO_CLASS && 31670c0450dbSRamachandra K token != SRP_REV16A_IB_IO_CLASS) { 3168e0bda7d8SBart Van Assche pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", 3169e0bda7d8SBart Van Assche token, SRP_REV10_IB_IO_CLASS, 3170e0bda7d8SBart Van Assche SRP_REV16A_IB_IO_CLASS); 31710c0450dbSRamachandra K goto out; 31720c0450dbSRamachandra K } 31730c0450dbSRamachandra K target->io_class = token; 31740c0450dbSRamachandra K break; 31750c0450dbSRamachandra K 317601cb9bcbSIshai Rabinovitz case SRP_OPT_INITIATOR_EXT: 317701cb9bcbSIshai Rabinovitz p = match_strdup(args); 3178a20f3a6dSIshai Rabinovitz if (!p) { 3179a20f3a6dSIshai Rabinovitz ret = -ENOMEM; 3180a20f3a6dSIshai Rabinovitz goto out; 3181a20f3a6dSIshai Rabinovitz } 318201cb9bcbSIshai Rabinovitz target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 318301cb9bcbSIshai Rabinovitz kfree(p); 318401cb9bcbSIshai Rabinovitz break; 318501cb9bcbSIshai Rabinovitz 318649248644SDavid Dillow case SRP_OPT_CMD_SG_ENTRIES: 318749248644SDavid Dillow if (match_int(args, &token) || token < 1 || token > 255) { 3188e0bda7d8SBart Van Assche pr_warn("bad max cmd_sg_entries parameter '%s'\n", 3189e0bda7d8SBart Van Assche p); 319049248644SDavid Dillow goto out; 319149248644SDavid Dillow } 319249248644SDavid Dillow target->cmd_sg_cnt = token; 319349248644SDavid Dillow break; 319449248644SDavid Dillow 3195c07d424dSDavid Dillow case SRP_OPT_ALLOW_EXT_SG: 3196c07d424dSDavid Dillow if (match_int(args, &token)) { 3197e0bda7d8SBart Van Assche pr_warn("bad allow_ext_sg parameter '%s'\n", p); 3198c07d424dSDavid Dillow goto out; 3199c07d424dSDavid Dillow } 3200c07d424dSDavid Dillow target->allow_ext_sg = !!token; 3201c07d424dSDavid Dillow break; 3202c07d424dSDavid Dillow 3203c07d424dSDavid Dillow case SRP_OPT_SG_TABLESIZE: 3204c07d424dSDavid Dillow if (match_int(args, &token) || token < 1 || 320565e8617fSMing Lin token > SG_MAX_SEGMENTS) { 3206e0bda7d8SBart Van Assche pr_warn("bad max sg_tablesize parameter '%s'\n", 3207e0bda7d8SBart Van Assche p); 3208c07d424dSDavid Dillow goto out; 3209c07d424dSDavid Dillow } 3210c07d424dSDavid Dillow target->sg_tablesize = token; 3211c07d424dSDavid Dillow break; 3212c07d424dSDavid Dillow 32134b5e5f41SBart Van Assche case SRP_OPT_COMP_VECTOR: 32144b5e5f41SBart Van Assche if (match_int(args, &token) || token < 0) { 32154b5e5f41SBart Van Assche pr_warn("bad comp_vector parameter '%s'\n", p); 32164b5e5f41SBart Van Assche goto out; 32174b5e5f41SBart Van Assche } 32184b5e5f41SBart Van Assche target->comp_vector = token; 32194b5e5f41SBart Van Assche break; 32204b5e5f41SBart Van Assche 32217bb312e4SVu Pham case 
SRP_OPT_TL_RETRY_COUNT: 32227bb312e4SVu Pham if (match_int(args, &token) || token < 2 || token > 7) { 32237bb312e4SVu Pham pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", 32247bb312e4SVu Pham p); 32257bb312e4SVu Pham goto out; 32267bb312e4SVu Pham } 32277bb312e4SVu Pham target->tl_retry_count = token; 32287bb312e4SVu Pham break; 32297bb312e4SVu Pham 3230aef9ec39SRoland Dreier default: 3231e0bda7d8SBart Van Assche pr_warn("unknown parameter or missing value '%s' in target creation request\n", 3232e0bda7d8SBart Van Assche p); 3233aef9ec39SRoland Dreier goto out; 3234aef9ec39SRoland Dreier } 3235aef9ec39SRoland Dreier } 3236aef9ec39SRoland Dreier 3237aef9ec39SRoland Dreier if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 3238aef9ec39SRoland Dreier ret = 0; 3239aef9ec39SRoland Dreier else 3240aef9ec39SRoland Dreier for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 3241aef9ec39SRoland Dreier if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 3242aef9ec39SRoland Dreier !(srp_opt_tokens[i].token & opt_mask)) 3243e0bda7d8SBart Van Assche pr_warn("target creation request is missing parameter '%s'\n", 3244aef9ec39SRoland Dreier srp_opt_tokens[i].pattern); 3245aef9ec39SRoland Dreier 32464d73f95fSBart Van Assche if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue 32474d73f95fSBart Van Assche && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 32484d73f95fSBart Van Assche pr_warn("cmd_per_lun = %d > queue_size = %d\n", 32494d73f95fSBart Van Assche target->scsi_host->cmd_per_lun, 32504d73f95fSBart Van Assche target->scsi_host->can_queue); 32514d73f95fSBart Van Assche 3252aef9ec39SRoland Dreier out: 3253aef9ec39SRoland Dreier kfree(options); 3254aef9ec39SRoland Dreier return ret; 3255aef9ec39SRoland Dreier } 3256aef9ec39SRoland Dreier 3257ee959b00STony Jones static ssize_t srp_create_target(struct device *dev, 3258ee959b00STony Jones struct device_attribute *attr, 3259aef9ec39SRoland Dreier const char *buf, size_t count) 3260aef9ec39SRoland Dreier { 3261aef9ec39SRoland Dreier struct srp_host *host = 3262ee959b00STony Jones container_of(dev, struct srp_host, dev); 3263aef9ec39SRoland Dreier struct Scsi_Host *target_host; 3264aef9ec39SRoland Dreier struct srp_target_port *target; 3265509c07bcSBart Van Assche struct srp_rdma_ch *ch; 3266d1b4289eSBart Van Assche struct srp_device *srp_dev = host->srp_dev; 3267d1b4289eSBart Van Assche struct ib_device *ibdev = srp_dev->dev; 3268d92c0da7SBart Van Assche int ret, node_idx, node, cpu, i; 3269509c5f33SBart Van Assche unsigned int max_sectors_per_mr, mr_per_cmd = 0; 3270d92c0da7SBart Van Assche bool multich = false; 3271aef9ec39SRoland Dreier 3272aef9ec39SRoland Dreier target_host = scsi_host_alloc(&srp_template, 3273aef9ec39SRoland Dreier sizeof (struct srp_target_port)); 3274aef9ec39SRoland Dreier if (!target_host) 3275aef9ec39SRoland Dreier return -ENOMEM; 3276aef9ec39SRoland Dreier 32773236822bSFUJITA Tomonori target_host->transportt = ib_srp_transport_template; 3278fd1b6c4aSBart Van Assche target_host->max_channel = 0; 3279fd1b6c4aSBart Van Assche target_host->max_id = 1; 3280985aa495SBart Van Assche target_host->max_lun = -1LL; 32813c8edf0eSArne Redlich target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 32825f068992SRoland Dreier 3283aef9ec39SRoland Dreier target = host_to_target(target_host); 3284aef9ec39SRoland Dreier 32850c0450dbSRamachandra K target->io_class = SRP_REV16A_IB_IO_CLASS; 3286aef9ec39SRoland Dreier target->scsi_host = target_host; 3287aef9ec39SRoland Dreier target->srp_host = host; 
32885f071777SChristoph Hellwig target->pd = host->srp_dev->pd; 3289e6bf5f48SJason Gunthorpe target->lkey = host->srp_dev->pd->local_dma_lkey; 329049248644SDavid Dillow target->cmd_sg_cnt = cmd_sg_entries; 3291c07d424dSDavid Dillow target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3292c07d424dSDavid Dillow target->allow_ext_sg = allow_ext_sg; 32937bb312e4SVu Pham target->tl_retry_count = 7; 32944d73f95fSBart Van Assche target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3295aef9ec39SRoland Dreier 329634aa654eSBart Van Assche /* 329734aa654eSBart Van Assche * Avoid that the SCSI host can be removed by srp_remove_target() 329834aa654eSBart Van Assche * before this function returns. 329934aa654eSBart Van Assche */ 330034aa654eSBart Van Assche scsi_host_get(target->scsi_host); 330134aa654eSBart Van Assche 33024fa354c9SBart Van Assche ret = mutex_lock_interruptible(&host->add_target_mutex); 33034fa354c9SBart Van Assche if (ret < 0) 33044fa354c9SBart Van Assche goto put; 33052d7091bcSBart Van Assche 3306aef9ec39SRoland Dreier ret = srp_parse_options(buf, target); 3307aef9ec39SRoland Dreier if (ret) 3308fb49c8bbSBart Van Assche goto out; 3309aef9ec39SRoland Dreier 33104d73f95fSBart Van Assche target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; 33114d73f95fSBart Van Assche 331296fc248aSBart Van Assche if (!srp_conn_unique(target->srp_host, target)) { 331396fc248aSBart Van Assche shost_printk(KERN_INFO, target->scsi_host, 331496fc248aSBart Van Assche PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", 331596fc248aSBart Van Assche be64_to_cpu(target->id_ext), 331696fc248aSBart Van Assche be64_to_cpu(target->ioc_guid), 331796fc248aSBart Van Assche be64_to_cpu(target->initiator_ext)); 331896fc248aSBart Van Assche ret = -EEXIST; 3319fb49c8bbSBart Van Assche goto out; 332096fc248aSBart Van Assche } 332196fc248aSBart Van Assche 33225cfb1782SBart Van Assche if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && 3323c07d424dSDavid Dillow target->cmd_sg_cnt < target->sg_tablesize) { 33245cfb1782SBart Van Assche pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3325c07d424dSDavid Dillow target->sg_tablesize = target->cmd_sg_cnt; 3326c07d424dSDavid Dillow } 3327c07d424dSDavid Dillow 3328509c5f33SBart Van Assche if (srp_dev->use_fast_reg || srp_dev->use_fmr) { 3329509c5f33SBart Van Assche /* 3330509c5f33SBart Van Assche * FR and FMR can only map one HCA page per entry. If the 3331509c5f33SBart Van Assche * start address is not aligned on a HCA page boundary two 3332509c5f33SBart Van Assche * entries will be used for the head and the tail although 3333509c5f33SBart Van Assche * these two entries combined contain at most one HCA page of 3334509c5f33SBart Van Assche * data. Hence the "+ 1" in the calculation below. 3335509c5f33SBart Van Assche * 3336509c5f33SBart Van Assche * The indirect data buffer descriptor is contiguous so the 3337509c5f33SBart Van Assche * memory for that buffer will only be registered if 3338509c5f33SBart Van Assche * register_always is true. Hence add one to mr_per_cmd if 3339509c5f33SBart Van Assche * register_always has been set. 
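 * A purely illustrative example (these numbers are not taken from any
 * particular HCA): with max_pages_per_mr = 256 and mr_page_size = 4096,
 * max_sectors_per_mr = 256 << (12 - 9) = 2048 sectors; for
 * max_sectors = 1024 with register_always enabled this yields
 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.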
		 */
		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
			(ilog2(srp_dev->mr_page_size) - 9);
		mr_per_cmd = register_always +
			(target->scsi_host->max_sectors + 1 +
			 max_sectors_per_mr - 1) / max_sectors_per_mr;
		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
			 target->scsi_host->max_sectors,
			 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
			 max_sectors_per_mr, mr_per_cmd);
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues =
		target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	/*
	 * The first scsi_host_put() below balances the scsi_host_get() taken
	 * earlier in this function; the second one, reached only on failure,
	 * drops the reference obtained from scsi_host_alloc() so that the
	 * SCSI host is freed.
	 */
put:
	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
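	/*
	 * host->target_lock serializes accesses to host->target_list, e.g.
	 * the walk over all target ports in srp_remove_one().
	 */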
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
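	 *
	 * For example, an HCA whose page_size_cap has the 4 KB bit as its
	 * lowest supported page size yields mr_page_shift = 12 and
	 * mr_page_size = 4096; a smaller supported page size would still be
	 * rounded up to 4096 by the max(12, ...) below.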
	 */
	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 attr->max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
	} else if (!never_register &&
		   attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
		srp_dev->use_fast_reg = (srp_dev->has_fr &&
					 (!srp_dev->has_fmr || prefer_fr));
		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
	}

	if (never_register || !register_always ||
	    (!srp_dev->has_fmr && !srp_dev->has_fr))
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
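	/* srp_remove_one() retrieves srp_dev through the client data set above. */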
	return;

free_dev:
	kfree(srp_dev);
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
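		/*
		 * The data-out and data-in descriptor counts in the SRP_CMD
		 * information unit are single-byte fields, which is why
		 * cmd_sg_entries cannot exceed 255.
		 */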
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);
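/*
 * Illustrative example (all identifiers and addresses below are made up) of
 * creating a target port once this module is loaded, by writing a parameter
 * string to the add_target attribute registered in srp_add_port():
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c903000012f1,dgid=fe800000000000000002c903000012f3,pkey=ffff,service_id=0002c903000012f1" > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */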