xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision c8b09f6fb67df7fc1b51ced1037fa9b677428149)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
43aef9ec39SRoland Dreier 
4460063497SArun Sharma #include <linux/atomic.h>
45aef9ec39SRoland Dreier 
46aef9ec39SRoland Dreier #include <scsi/scsi.h>
47aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
4971444b97SJack Wang #include <scsi/scsi_tcq.h>
50aef9ec39SRoland Dreier #include <scsi/srp.h>
513236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
52aef9ec39SRoland Dreier 
53aef9ec39SRoland Dreier #include "ib_srp.h"
54aef9ec39SRoland Dreier 
55aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
56aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
57e8ca4135SVu Pham #define DRV_VERSION	"1.0"
58e8ca4135SVu Pham #define DRV_RELDATE	"July 1, 2013"
59aef9ec39SRoland Dreier 
60aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
61aef9ec39SRoland Dreier MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62aef9ec39SRoland Dreier 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
64aef9ec39SRoland Dreier 
6549248644SDavid Dillow static unsigned int srp_sg_tablesize;
6649248644SDavid Dillow static unsigned int cmd_sg_entries;
67c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
68c07d424dSDavid Dillow static bool allow_ext_sg;
695cfb1782SBart Van Assche static bool prefer_fr;
70b1b8854dSBart Van Assche static bool register_always;
71aef9ec39SRoland Dreier static int topspin_workarounds = 1;
72aef9ec39SRoland Dreier 
7349248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7449248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7549248644SDavid Dillow 
7649248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
7749248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
7849248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
7949248644SDavid Dillow 
80c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
81c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
82c07d424dSDavid Dillow 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
83c07d424dSDavid Dillow 
84c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
85c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
86c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
87c07d424dSDavid Dillow 
88aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
89aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
90aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
91aef9ec39SRoland Dreier 
925cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
935cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
945cfb1782SBart Van Assche "Whether to use fast registration if both FMR and fast registration are supported");
955cfb1782SBart Van Assche 
96b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
97b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
98b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
99b1b8854dSBart Van Assche 
100ed9b2264SBart Van Assche static struct kernel_param_ops srp_tmo_ops;
101ed9b2264SBart Van Assche 
102a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
103a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
104a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
105a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
106a95cadb9SBart Van Assche 
107ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
108ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
109ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
110ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
111ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
112ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
113ed9b2264SBart Van Assche 		 " functionality is disabled.");
114ed9b2264SBart Van Assche 
115a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
116ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
117ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
118ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
119ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
120ed9b2264SBart Van Assche 		 " insulate transport layer errors. After this time has been"
121ed9b2264SBart Van Assche 		 " exceeded the SCSI host is removed. Should be"
122ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
124ed9b2264SBart Van Assche 		 " this functionality is disabled.");
125ed9b2264SBart Van Assche 
126aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
127aef9ec39SRoland Dreier static void srp_remove_one(struct ib_device *device);
1289c03dc9fSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
1299c03dc9fSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
130aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
131aef9ec39SRoland Dreier 
1323236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
133bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1343236822bSFUJITA Tomonori 
135aef9ec39SRoland Dreier static struct ib_client srp_client = {
136aef9ec39SRoland Dreier 	.name   = "srp",
137aef9ec39SRoland Dreier 	.add    = srp_add_one,
138aef9ec39SRoland Dreier 	.remove = srp_remove_one
139aef9ec39SRoland Dreier };
140aef9ec39SRoland Dreier 
141c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
142c1a0b23bSMichael S. Tsirkin 
143ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
144ed9b2264SBart Van Assche {
145ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
146ed9b2264SBart Van Assche 
147ed9b2264SBart Van Assche 	if (tmo >= 0)
148ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
149ed9b2264SBart Van Assche 	else
150ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
151ed9b2264SBart Van Assche }
152ed9b2264SBart Van Assche 
/*
 * Parse and validate a timeout module parameter.  A value starting with
 * "off" maps to -1 (functionality disabled); anything else must parse as
 * an integer.  The candidate value is cross-checked against the other
 * two SRP timeouts via srp_tmo_valid() before being stored, so an
 * inconsistent combination of reconnect_delay / fast_io_fail_tmo /
 * dev_loss_tmo is rejected atomically.
 */
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	/* NOTE(review): strncmp() accepts any prefix match, e.g. "offline"
	 * would also be treated as "off" — confirm whether that is intended. */
	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	/* Identify which of the three parameters is being set and validate
	 * the prospective value against the current values of the others. */
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	/* Commit the new value only after validation has succeeded. */
	*(int *)kp->arg = tmo;

out:
	return res;
}
179ed9b2264SBart Van Assche 
/* Get/set callbacks shared by the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters (registered via module_param_cb() above). */
static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
184ed9b2264SBart Van Assche 
185aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
186aef9ec39SRoland Dreier {
187aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
188aef9ec39SRoland Dreier }
189aef9ec39SRoland Dreier 
190aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
191aef9ec39SRoland Dreier {
192aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
193aef9ec39SRoland Dreier }
194aef9ec39SRoland Dreier 
1955d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
1965d7cbfd6SRoland Dreier {
1975d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
1983d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
1995d7cbfd6SRoland Dreier 
2005d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2013d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2023d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2035d7cbfd6SRoland Dreier }
2045d7cbfd6SRoland Dreier 
205aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
206aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
207aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
208aef9ec39SRoland Dreier {
209aef9ec39SRoland Dreier 	struct srp_iu *iu;
210aef9ec39SRoland Dreier 
211aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
212aef9ec39SRoland Dreier 	if (!iu)
213aef9ec39SRoland Dreier 		goto out;
214aef9ec39SRoland Dreier 
215aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
216aef9ec39SRoland Dreier 	if (!iu->buf)
217aef9ec39SRoland Dreier 		goto out_free_iu;
218aef9ec39SRoland Dreier 
21905321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22005321937SGreg Kroah-Hartman 				    direction);
22105321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
222aef9ec39SRoland Dreier 		goto out_free_buf;
223aef9ec39SRoland Dreier 
224aef9ec39SRoland Dreier 	iu->size      = size;
225aef9ec39SRoland Dreier 	iu->direction = direction;
226aef9ec39SRoland Dreier 
227aef9ec39SRoland Dreier 	return iu;
228aef9ec39SRoland Dreier 
229aef9ec39SRoland Dreier out_free_buf:
230aef9ec39SRoland Dreier 	kfree(iu->buf);
231aef9ec39SRoland Dreier out_free_iu:
232aef9ec39SRoland Dreier 	kfree(iu);
233aef9ec39SRoland Dreier out:
234aef9ec39SRoland Dreier 	return NULL;
235aef9ec39SRoland Dreier }
236aef9ec39SRoland Dreier 
237aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
238aef9ec39SRoland Dreier {
239aef9ec39SRoland Dreier 	if (!iu)
240aef9ec39SRoland Dreier 		return;
241aef9ec39SRoland Dreier 
24205321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
24305321937SGreg Kroah-Hartman 			    iu->direction);
244aef9ec39SRoland Dreier 	kfree(iu->buf);
245aef9ec39SRoland Dreier 	kfree(iu);
246aef9ec39SRoland Dreier }
247aef9ec39SRoland Dreier 
/* QP asynchronous event handler: events are only logged, not acted upon. */
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
252aef9ec39SRoland Dreier 
253aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target,
254aef9ec39SRoland Dreier 		       struct ib_qp *qp)
255aef9ec39SRoland Dreier {
256aef9ec39SRoland Dreier 	struct ib_qp_attr *attr;
257aef9ec39SRoland Dreier 	int ret;
258aef9ec39SRoland Dreier 
259aef9ec39SRoland Dreier 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
260aef9ec39SRoland Dreier 	if (!attr)
261aef9ec39SRoland Dreier 		return -ENOMEM;
262aef9ec39SRoland Dreier 
263969a60f9SRoland Dreier 	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
264aef9ec39SRoland Dreier 			   target->srp_host->port,
265aef9ec39SRoland Dreier 			   be16_to_cpu(target->path.pkey),
266aef9ec39SRoland Dreier 			   &attr->pkey_index);
267aef9ec39SRoland Dreier 	if (ret)
268aef9ec39SRoland Dreier 		goto out;
269aef9ec39SRoland Dreier 
270aef9ec39SRoland Dreier 	attr->qp_state        = IB_QPS_INIT;
271aef9ec39SRoland Dreier 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
272aef9ec39SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
273aef9ec39SRoland Dreier 	attr->port_num        = target->srp_host->port;
274aef9ec39SRoland Dreier 
275aef9ec39SRoland Dreier 	ret = ib_modify_qp(qp, attr,
276aef9ec39SRoland Dreier 			   IB_QP_STATE		|
277aef9ec39SRoland Dreier 			   IB_QP_PKEY_INDEX	|
278aef9ec39SRoland Dreier 			   IB_QP_ACCESS_FLAGS	|
279aef9ec39SRoland Dreier 			   IB_QP_PORT);
280aef9ec39SRoland Dreier 
281aef9ec39SRoland Dreier out:
282aef9ec39SRoland Dreier 	kfree(attr);
283aef9ec39SRoland Dreier 	return ret;
284aef9ec39SRoland Dreier }
285aef9ec39SRoland Dreier 
2869fe4bcf4SDavid Dillow static int srp_new_cm_id(struct srp_target_port *target)
2879fe4bcf4SDavid Dillow {
2889fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
2899fe4bcf4SDavid Dillow 
29005321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
2919fe4bcf4SDavid Dillow 				    srp_cm_handler, target);
2929fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
2939fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
2949fe4bcf4SDavid Dillow 
2959fe4bcf4SDavid Dillow 	if (target->cm_id)
2969fe4bcf4SDavid Dillow 		ib_destroy_cm_id(target->cm_id);
2979fe4bcf4SDavid Dillow 	target->cm_id = new_cm_id;
2989fe4bcf4SDavid Dillow 
2999fe4bcf4SDavid Dillow 	return 0;
3009fe4bcf4SDavid Dillow }
3019fe4bcf4SDavid Dillow 
302d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
303d1b4289eSBart Van Assche {
304d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
305d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
306d1b4289eSBart Van Assche 
307d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
308d1b4289eSBart Van Assche 	fmr_param.pool_size	    = target->scsi_host->can_queue;
309d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
310d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
31152ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
31252ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
313d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
314d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
315d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
316d1b4289eSBart Van Assche 
317d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
318d1b4289eSBart Van Assche }
319d1b4289eSBart Van Assche 
3205cfb1782SBart Van Assche /**
3215cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3225cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3235cfb1782SBart Van Assche  */
3245cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3255cfb1782SBart Van Assche {
3265cfb1782SBart Van Assche 	int i;
3275cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3285cfb1782SBart Van Assche 
3295cfb1782SBart Van Assche 	if (!pool)
3305cfb1782SBart Van Assche 		return;
3315cfb1782SBart Van Assche 
3325cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3335cfb1782SBart Van Assche 		if (d->frpl)
3345cfb1782SBart Van Assche 			ib_free_fast_reg_page_list(d->frpl);
3355cfb1782SBart Van Assche 		if (d->mr)
3365cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3375cfb1782SBart Van Assche 	}
3385cfb1782SBart Van Assche 	kfree(pool);
3395cfb1782SBart Van Assche }
3405cfb1782SBart Van Assche 
3415cfb1782SBart Van Assche /**
3425cfb1782SBart Van Assche  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
3435cfb1782SBart Van Assche  * @device:            IB device to allocate fast registration descriptors for.
3445cfb1782SBart Van Assche  * @pd:                Protection domain associated with the FR descriptors.
3455cfb1782SBart Van Assche  * @pool_size:         Number of descriptors to allocate.
3465cfb1782SBart Van Assche  * @max_page_list_len: Maximum fast registration work request page list length.
3475cfb1782SBart Van Assche  */
3485cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
3495cfb1782SBart Van Assche 					      struct ib_pd *pd, int pool_size,
3505cfb1782SBart Van Assche 					      int max_page_list_len)
3515cfb1782SBart Van Assche {
3525cfb1782SBart Van Assche 	struct srp_fr_pool *pool;
3535cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3545cfb1782SBart Van Assche 	struct ib_mr *mr;
3555cfb1782SBart Van Assche 	struct ib_fast_reg_page_list *frpl;
3565cfb1782SBart Van Assche 	int i, ret = -EINVAL;
3575cfb1782SBart Van Assche 
3585cfb1782SBart Van Assche 	if (pool_size <= 0)
3595cfb1782SBart Van Assche 		goto err;
3605cfb1782SBart Van Assche 	ret = -ENOMEM;
3615cfb1782SBart Van Assche 	pool = kzalloc(sizeof(struct srp_fr_pool) +
3625cfb1782SBart Van Assche 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
3635cfb1782SBart Van Assche 	if (!pool)
3645cfb1782SBart Van Assche 		goto err;
3655cfb1782SBart Van Assche 	pool->size = pool_size;
3665cfb1782SBart Van Assche 	pool->max_page_list_len = max_page_list_len;
3675cfb1782SBart Van Assche 	spin_lock_init(&pool->lock);
3685cfb1782SBart Van Assche 	INIT_LIST_HEAD(&pool->free_list);
3695cfb1782SBart Van Assche 
3705cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3715cfb1782SBart Van Assche 		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
3725cfb1782SBart Van Assche 		if (IS_ERR(mr)) {
3735cfb1782SBart Van Assche 			ret = PTR_ERR(mr);
3745cfb1782SBart Van Assche 			goto destroy_pool;
3755cfb1782SBart Van Assche 		}
3765cfb1782SBart Van Assche 		d->mr = mr;
3775cfb1782SBart Van Assche 		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
3785cfb1782SBart Van Assche 		if (IS_ERR(frpl)) {
3795cfb1782SBart Van Assche 			ret = PTR_ERR(frpl);
3805cfb1782SBart Van Assche 			goto destroy_pool;
3815cfb1782SBart Van Assche 		}
3825cfb1782SBart Van Assche 		d->frpl = frpl;
3835cfb1782SBart Van Assche 		list_add_tail(&d->entry, &pool->free_list);
3845cfb1782SBart Van Assche 	}
3855cfb1782SBart Van Assche 
3865cfb1782SBart Van Assche out:
3875cfb1782SBart Van Assche 	return pool;
3885cfb1782SBart Van Assche 
3895cfb1782SBart Van Assche destroy_pool:
3905cfb1782SBart Van Assche 	srp_destroy_fr_pool(pool);
3915cfb1782SBart Van Assche 
3925cfb1782SBart Van Assche err:
3935cfb1782SBart Van Assche 	pool = ERR_PTR(ret);
3945cfb1782SBart Van Assche 	goto out;
3955cfb1782SBart Van Assche }
3965cfb1782SBart Van Assche 
3975cfb1782SBart Van Assche /**
3985cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
3995cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4005cfb1782SBart Van Assche  */
4015cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4025cfb1782SBart Van Assche {
4035cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4045cfb1782SBart Van Assche 	unsigned long flags;
4055cfb1782SBart Van Assche 
4065cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4075cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4085cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4095cfb1782SBart Van Assche 		list_del(&d->entry);
4105cfb1782SBart Van Assche 	}
4115cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4125cfb1782SBart Van Assche 
4135cfb1782SBart Van Assche 	return d;
4145cfb1782SBart Van Assche }
4155cfb1782SBart Van Assche 
4165cfb1782SBart Van Assche /**
4175cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4185cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4195cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4205cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4215cfb1782SBart Van Assche  *
4225cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4235cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4245cfb1782SBart Van Assche  */
4255cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4265cfb1782SBart Van Assche 			    int n)
4275cfb1782SBart Van Assche {
4285cfb1782SBart Van Assche 	unsigned long flags;
4295cfb1782SBart Van Assche 	int i;
4305cfb1782SBart Van Assche 
4315cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4325cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4335cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4345cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4355cfb1782SBart Van Assche }
4365cfb1782SBart Van Assche 
4375cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
4385cfb1782SBart Van Assche {
4395cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
4405cfb1782SBart Van Assche 
4415cfb1782SBart Van Assche 	return srp_create_fr_pool(dev->dev, dev->pd,
4425cfb1782SBart Van Assche 				  target->scsi_host->can_queue,
4435cfb1782SBart Van Assche 				  dev->max_pages_per_mr);
4445cfb1782SBart Van Assche }
4455cfb1782SBart Van Assche 
/*
 * Create the IB resources for @target: receive and send CQs, an RC QP
 * and (depending on device capabilities) an FMR or fast registration
 * pool.  All new resources are created first; only after everything has
 * succeeded are any previously installed resources destroyed and the
 * new ones swapped in — so on failure the target's existing resources
 * are left untouched.  The non-NULL checks before the swap suggest this
 * is also used to replace resources on an existing target (e.g. during
 * reconnect) — NOTE(review): confirm against the callers.
 */
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	/* With fast registration, each command may post an extra work
	 * request, hence the doubled send queue / send CQ sizing below. */
	const int m = 1 + dev->use_fast_reg;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target,
			       m * target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	/* Arm the receive CQ so srp_recv_completion() fires. */
	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	/* Allocate the memory registration pool matching the device mode:
	 * fast registration when available and selected, else FMR. */
	if (dev->use_fast_reg && dev->has_fr) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (target->fr_pool)
			srp_destroy_fr_pool(target->fr_pool);
		target->fr_pool = fr_pool;
	} else if (!dev->use_fast_reg && dev->has_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (target->fmr_pool)
			ib_destroy_fmr_pool(target->fmr_pool);
		target->fmr_pool = fmr_pool;
	}

	/* Everything succeeded: retire the old QP/CQs (if any) and install
	 * the new ones.  The QP is destroyed before the CQs it uses. */
	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
548aef9ec39SRoland Dreier 
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	/* Free the memory registration pool matching the mode in use. */
	if (dev->use_fast_reg) {
		if (target->fr_pool)
			srp_destroy_fr_pool(target->fr_pool);
	} else {
		if (target->fmr_pool)
			ib_destroy_fmr_pool(target->fmr_pool);
	}
	/* Destroy the QP before the CQs it is attached to. */
	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	/* Clear the pointers so stale handles cannot be reused. */
	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	/* Free the IU rings if srp_alloc_iu_bufs() populated them. */
	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}
585aef9ec39SRoland Dreier 
586aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
587aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
588aef9ec39SRoland Dreier 				    void *target_ptr)
589aef9ec39SRoland Dreier {
590aef9ec39SRoland Dreier 	struct srp_target_port *target = target_ptr;
591aef9ec39SRoland Dreier 
592aef9ec39SRoland Dreier 	target->status = status;
593aef9ec39SRoland Dreier 	if (status)
5947aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
5957aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
596aef9ec39SRoland Dreier 	else
597aef9ec39SRoland Dreier 		target->path = *pathrec;
598aef9ec39SRoland Dreier 	complete(&target->done);
599aef9ec39SRoland Dreier }
600aef9ec39SRoland Dreier 
/*
 * Synchronously query the subnet administrator for a path record describing
 * the route to the target port. Blocks until srp_path_rec_completion() runs
 * or the wait is interrupted by a signal.
 *
 * Returns 0 on success, a negative errno if query submission or the wait
 * failed, or the negative status reported via the query completion.
 */
static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	/* Interruptible wait so a signal can abort a stuck path query. */
	ret = wait_for_completion_interruptible(&target->done);
	if (ret < 0)
		return ret;

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
635aef9ec39SRoland Dreier 
/*
 * Build and send the IB CM REQ whose private data carries the SRP
 * SRP_LOGIN_REQ IU. Returns 0 on success or a negative errno from
 * allocation or ib_send_cm_req().
 */
static int srp_send_req(struct srp_target_port *target)
{
	/*
	 * The CM parameters and the login request private data are
	 * allocated together so one kzalloc()/kfree() pair covers both.
	 */
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	/* Random initial PSN, truncated to the 24 bits the field holds. */
	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		/* Obsolete draft layout: GUID first, ID extension second. */
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		/* Rev. 16a layout: ID extension first, GUID second. */
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
722aef9ec39SRoland Dreier 
723ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
724ef6c49d8SBart Van Assche {
725ef6c49d8SBart Van Assche 	bool changed = false;
726ef6c49d8SBart Van Assche 
727ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
728ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
729ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
730ef6c49d8SBart Van Assche 		changed = true;
731ef6c49d8SBart Van Assche 	}
732ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
733ef6c49d8SBart Van Assche 
734ef6c49d8SBart Van Assche 	if (changed)
735bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
736ef6c49d8SBart Van Assche 
737ef6c49d8SBart Van Assche 	return changed;
738ef6c49d8SBart Van Assche }
739ef6c49d8SBart Van Assche 
740294c875aSBart Van Assche static bool srp_change_conn_state(struct srp_target_port *target,
741294c875aSBart Van Assche 				  bool connected)
742294c875aSBart Van Assche {
743294c875aSBart Van Assche 	bool changed = false;
744294c875aSBart Van Assche 
745294c875aSBart Van Assche 	spin_lock_irq(&target->lock);
746294c875aSBart Van Assche 	if (target->connected != connected) {
747294c875aSBart Van Assche 		target->connected = connected;
748294c875aSBart Van Assche 		changed = true;
749294c875aSBart Van Assche 	}
750294c875aSBart Van Assche 	spin_unlock_irq(&target->lock);
751294c875aSBart Van Assche 
752294c875aSBart Van Assche 	return changed;
753294c875aSBart Van Assche }
754294c875aSBart Van Assche 
755aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
756aef9ec39SRoland Dreier {
757294c875aSBart Van Assche 	if (srp_change_conn_state(target, false)) {
758aef9ec39SRoland Dreier 		/* XXX should send SRP_I_LOGOUT request */
759aef9ec39SRoland Dreier 
760e6581056SRoland Dreier 		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
7617aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
7627aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
763aef9ec39SRoland Dreier 		}
764294c875aSBart Van Assche 	}
765294c875aSBart Van Assche }
766aef9ec39SRoland Dreier 
7678f26c9ffSDavid Dillow static void srp_free_req_data(struct srp_target_port *target)
7688f26c9ffSDavid Dillow {
7695cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
7705cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
7718f26c9ffSDavid Dillow 	struct srp_request *req;
7728f26c9ffSDavid Dillow 	int i;
7738f26c9ffSDavid Dillow 
7744d73f95fSBart Van Assche 	if (!target->req_ring)
7754d73f95fSBart Van Assche 		return;
7764d73f95fSBart Van Assche 
7774d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
7784d73f95fSBart Van Assche 		req = &target->req_ring[i];
7795cfb1782SBart Van Assche 		if (dev->use_fast_reg)
7805cfb1782SBart Van Assche 			kfree(req->fr_list);
7815cfb1782SBart Van Assche 		else
7828f26c9ffSDavid Dillow 			kfree(req->fmr_list);
7838f26c9ffSDavid Dillow 		kfree(req->map_page);
784c07d424dSDavid Dillow 		if (req->indirect_dma_addr) {
785c07d424dSDavid Dillow 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
786c07d424dSDavid Dillow 					    target->indirect_size,
787c07d424dSDavid Dillow 					    DMA_TO_DEVICE);
788c07d424dSDavid Dillow 		}
789c07d424dSDavid Dillow 		kfree(req->indirect_desc);
7908f26c9ffSDavid Dillow 	}
7914d73f95fSBart Van Assche 
7924d73f95fSBart Van Assche 	kfree(target->req_ring);
7934d73f95fSBart Van Assche 	target->req_ring = NULL;
7948f26c9ffSDavid Dillow }
7958f26c9ffSDavid Dillow 
/*
 * Allocate the request ring plus, for each request, the memory-registration
 * pointer list, the page list and the indirect descriptor table, and
 * DMA-map the latter for the lifetime of the request.
 *
 * Returns 0 on success or -ENOMEM on any allocation or mapping failure.
 * NOTE(review): on failure, buffers allocated so far are left in place;
 * presumably the caller cleans up via srp_free_req_data() — confirm.
 */
static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		/* One pointer array, stored as either fr_list or fmr_list. */
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		if (!req->map_page)
			goto out;
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		/* Map the indirect descriptor table once, up front. */
		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}
845b81d00bdSBart Van Assche 
846683b159aSBart Van Assche /**
847683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
848683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
849683b159aSBart Van Assche  *
850683b159aSBart Van Assche  * Note: Any attributes defined in the host template and that did not exist
851683b159aSBart Van Assche  * before invocation of this function will be ignored.
852683b159aSBart Van Assche  */
853683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
854683b159aSBart Van Assche {
855683b159aSBart Van Assche 	struct device_attribute **attr;
856683b159aSBart Van Assche 
857683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
858683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
859683b159aSBart Van Assche }
860683b159aSBart Van Assche 
/*
 * Tear down a target port whose state has already been moved to
 * SRP_TARGET_REMOVED (see srp_queue_remove_work()). The teardown order
 * below is significant — do not reorder these calls casually.
 */
static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	/* Hold an extra rport reference across the teardown sequence. */
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	/* tl_err_work may still be pending; flush it before freeing state. */
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	/* Drop our scsi_host reference; the host is freed on the last put. */
	scsi_host_put(target->scsi_host);
}
883ee12d6a8SBart Van Assche 
884c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
885aef9ec39SRoland Dreier {
886c4028958SDavid Howells 	struct srp_target_port *target =
887ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
888aef9ec39SRoland Dreier 
889ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
890aef9ec39SRoland Dreier 
89196fc248aSBart Van Assche 	srp_remove_target(target);
892aef9ec39SRoland Dreier }
893aef9ec39SRoland Dreier 
/* SRP transport callback: schedule asynchronous removal of @rport. */
static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
900dc1bdbd9SBart Van Assche 
/*
 * Establish the SRP login: resolve a path, then keep (re)sending the CM REQ
 * while following port/DLID redirect REJs and retrying stale connections up
 * to three times.
 *
 * Returns 0 on success, or a negative errno/status value on failure.
 */
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		/* Wait for the CM event handler to fill in target->status. */
		ret = wait_for_completion_interruptible(&target->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			/* Redirected elsewhere: resolve a fresh path first. */
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
963aef9ec39SRoland Dreier 
9645cfb1782SBart Van Assche static int srp_inv_rkey(struct srp_target_port *target, u32 rkey)
9655cfb1782SBart Van Assche {
9665cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
9675cfb1782SBart Van Assche 	struct ib_send_wr wr = {
9685cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
9695cfb1782SBart Van Assche 		.wr_id		    = LOCAL_INV_WR_ID_MASK,
9705cfb1782SBart Van Assche 		.next		    = NULL,
9715cfb1782SBart Van Assche 		.num_sge	    = 0,
9725cfb1782SBart Van Assche 		.send_flags	    = 0,
9735cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
9745cfb1782SBart Van Assche 	};
9755cfb1782SBart Van Assche 
9765cfb1782SBart Van Assche 	return ib_post_send(target->qp, &wr, &bad_wr);
9775cfb1782SBart Van Assche }
9785cfb1782SBart Van Assche 
/*
 * Release the memory registrations and DMA mappings that were set up for
 * @scmnd. Does nothing for commands without a scatterlist or whose data
 * direction is neither DMA_TO_DEVICE nor DMA_FROM_DEVICE.
 */
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		/* Invalidate each fast registration used by this request. */
		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(target, (*pfr)->mr->rkey);
			if (res < 0) {
				/*
				 * Posting the INV WR failed; hand the problem
				 * to the transport-layer error worker.
				 */
				shost_printk(KERN_ERR, target->scsi_host, PFX
				  "Queueing INV WR for rkey %#x failed (%d)\n",
				  (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(target->fr_pool, req->fr_list,
					req->nmdesc);
	} else {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
1018d945e1dfSRoland Dreier 
101922032991SBart Van Assche /**
102022032991SBart Van Assche  * srp_claim_req - Take ownership of the scmnd associated with a request.
102122032991SBart Van Assche  * @target: SRP target port.
102222032991SBart Van Assche  * @req: SRP request.
1023b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership for this SCSI device.
102422032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
102522032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
102622032991SBart Van Assche  *
102722032991SBart Van Assche  * Return value:
102822032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
102922032991SBart Van Assche  */
103022032991SBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
103122032991SBart Van Assche 				       struct srp_request *req,
1032b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
103322032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1034526b4caaSIshai Rabinovitz {
103594a9174cSBart Van Assche 	unsigned long flags;
103694a9174cSBart Van Assche 
103722032991SBart Van Assche 	spin_lock_irqsave(&target->lock, flags);
1038b3fe628dSBart Van Assche 	if (req->scmnd &&
1039b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1040b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
104122032991SBart Van Assche 		scmnd = req->scmnd;
104222032991SBart Van Assche 		req->scmnd = NULL;
104322032991SBart Van Assche 	} else {
104422032991SBart Van Assche 		scmnd = NULL;
104522032991SBart Van Assche 	}
104622032991SBart Van Assche 	spin_unlock_irqrestore(&target->lock, flags);
104722032991SBart Van Assche 
104822032991SBart Van Assche 	return scmnd;
104922032991SBart Van Assche }
105022032991SBart Van Assche 
105122032991SBart Van Assche /**
105222032991SBart Van Assche  * srp_free_req() - Unmap data and add request to the free request list.
1053af24663bSBart Van Assche  * @target: SRP target port.
1054af24663bSBart Van Assche  * @req:    Request to be freed.
1055af24663bSBart Van Assche  * @scmnd:  SCSI command associated with @req.
1056af24663bSBart Van Assche  * @req_lim_delta: Amount to be added to @target->req_lim.
105722032991SBart Van Assche  */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	/* Credit the request-limit delta reported by the target. */
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}
1071526b4caaSIshai Rabinovitz 
1072ed9b2264SBart Van Assche static void srp_finish_req(struct srp_target_port *target,
1073b3fe628dSBart Van Assche 			   struct srp_request *req, struct scsi_device *sdev,
1074b3fe628dSBart Van Assche 			   int result)
1075526b4caaSIshai Rabinovitz {
1076b3fe628dSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL);
107722032991SBart Van Assche 
107822032991SBart Van Assche 	if (scmnd) {
10799b796d06SBart Van Assche 		srp_free_req(target, req, scmnd, 0);
1080ed9b2264SBart Van Assche 		scmnd->result = result;
108122032991SBart Van Assche 		scmnd->scsi_done(scmnd);
108222032991SBart Van Assche 	}
1083526b4caaSIshai Rabinovitz }
1084526b4caaSIshai Rabinovitz 
1085ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport)
1086aef9ec39SRoland Dreier {
1087ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1088b3fe628dSBart Van Assche 	struct Scsi_Host *shost = target->scsi_host;
1089b3fe628dSBart Van Assche 	struct scsi_device *sdev;
1090ed9b2264SBart Van Assche 	int i;
1091aef9ec39SRoland Dreier 
1092b3fe628dSBart Van Assche 	/*
1093b3fe628dSBart Van Assche 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1094b3fe628dSBart Van Assche 	 * is not safe. Hence the warning statement below.
1095b3fe628dSBart Van Assche 	 */
1096b3fe628dSBart Van Assche 	shost_for_each_device(sdev, shost)
1097b3fe628dSBart Van Assche 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1098b3fe628dSBart Van Assche 
10994d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
1100ed9b2264SBart Van Assche 		struct srp_request *req = &target->req_ring[i];
1101b3fe628dSBart Van Assche 		srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16);
1102ed9b2264SBart Van Assche 	}
1103ed9b2264SBart Van Assche }
1104ed9b2264SBart Van Assche 
1105ed9b2264SBart Van Assche /*
1106ed9b2264SBart Van Assche  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1107ed9b2264SBart Van Assche  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1108ed9b2264SBart Van Assche  * srp_reset_device() or srp_reset_host() calls will occur while this function
1109ed9b2264SBart Van Assche  * is in progress. One way to realize that is not to call this function
1110ed9b2264SBart Van Assche  * directly but to call srp_reconnect_rport() instead since that last function
1111ed9b2264SBart Van Assche  * serializes calls of this function via rport->mutex and also blocks
1112ed9b2264SBart Van Assche  * srp_queuecommand() calls before invoking this function.
1113ed9b2264SBart Van Assche  */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);

	/* Fail all outstanding requests so their resources are reclaimed. */
	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, NULL, DID_RESET << 16);
	}

	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all callback functions for the old QP have
	 * finished before any send requests are posted on the new QP.
	 */
	ret += srp_create_target_ib(target);

	/* Rebuild the free TX IU list; no sends are in flight at this point. */
	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	/* ret accumulates negative error codes from both steps above. */
	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
1152aef9ec39SRoland Dreier 
11538f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
11548f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1155f5358a17SRoland Dreier {
11568f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
11578f26c9ffSDavid Dillow 
11588f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
11598f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
11608f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
11618f26c9ffSDavid Dillow 
11628f26c9ffSDavid Dillow 	state->total_len += dma_len;
11638f26c9ffSDavid Dillow 	state->desc++;
11648f26c9ffSDavid Dillow 	state->ndesc++;
11658f26c9ffSDavid Dillow }
11668f26c9ffSDavid Dillow 
/*
 * Register the pages accumulated in @state through the target's FMR pool
 * and emit a single memory descriptor for the registered region.
 *
 * Returns 0 on success or a negative errno from ib_fmr_pool_map_phys().
 */
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* Remember the mapping so srp_unmap_data() can release it later. */
	*state->next_fmr++ = fmr;
	state->nmdesc++;

	/* io_addr is 0, so the descriptor starts at offset 0 of the FMR. */
	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);

	return 0;
}
11858f26c9ffSDavid Dillow 
/*
 * Register the pages accumulated in @state via a fast registration work
 * request posted on the target's QP, and emit one memory descriptor for
 * the registered region.
 *
 * Returns 0 on success, -ENOMEM if the FR descriptor pool is exhausted,
 * or the error returned by ib_post_send().
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	desc = srp_fr_pool_get(target->fr_pool);
	if (!desc)
		return -ENOMEM;

	/* Bump the key portion of the rkey so stale remote accesses fail. */
	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	/* Build the IB_WR_FAST_REG_MR work request describing the region. */
	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	/* NOTE(review): lkey is passed here; presumably lkey == rkey for
	 * these MRs after ib_update_fast_reg_key() — confirm. */
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	/* Remember the descriptor so srp_unmap_data() can return it. */
	*state->next_fr++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(target->qp, &wr, &bad_wr);
}
12265cfb1782SBart Van Assche 
1227539dde6fSBart Van Assche static int srp_finish_mapping(struct srp_map_state *state,
1228539dde6fSBart Van Assche 			      struct srp_target_port *target)
1229539dde6fSBart Van Assche {
1230539dde6fSBart Van Assche 	int ret = 0;
1231539dde6fSBart Van Assche 
1232539dde6fSBart Van Assche 	if (state->npages == 0)
1233539dde6fSBart Van Assche 		return 0;
1234539dde6fSBart Van Assche 
1235b1b8854dSBart Van Assche 	if (state->npages == 1 && !register_always)
123652ede08fSBart Van Assche 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
1237539dde6fSBart Van Assche 			     target->rkey);
1238539dde6fSBart Van Assche 	else
12395cfb1782SBart Van Assche 		ret = target->srp_host->srp_dev->use_fast_reg ?
12405cfb1782SBart Van Assche 			srp_map_finish_fr(state, target) :
12415cfb1782SBart Van Assche 			srp_map_finish_fmr(state, target);
1242539dde6fSBart Van Assche 
1243539dde6fSBart Van Assche 	if (ret == 0) {
1244539dde6fSBart Van Assche 		state->npages = 0;
124552ede08fSBart Van Assche 		state->dma_len = 0;
1246539dde6fSBart Van Assche 	}
1247539dde6fSBart Van Assche 
1248539dde6fSBart Van Assche 	return ret;
1249539dde6fSBart Van Assche }
1250539dde6fSBart Van Assche 
12518f26c9ffSDavid Dillow static void srp_map_update_start(struct srp_map_state *state,
12528f26c9ffSDavid Dillow 				 struct scatterlist *sg, int sg_index,
12538f26c9ffSDavid Dillow 				 dma_addr_t dma_addr)
12548f26c9ffSDavid Dillow {
12558f26c9ffSDavid Dillow 	state->unmapped_sg = sg;
12568f26c9ffSDavid Dillow 	state->unmapped_index = sg_index;
12578f26c9ffSDavid Dillow 	state->unmapped_addr = dma_addr;
12588f26c9ffSDavid Dillow }
12598f26c9ffSDavid Dillow 
/*
 * Map a single scatterlist entry into the descriptor table in @state.
 *
 * When @use_mr is false the entry is described directly with the target's
 * global rkey. Otherwise the entry is split into mr_page_size chunks that
 * are accumulated in state->pages; full accumulators are flushed through
 * srp_finish_mapping(). Returns 0 on success or a registration error.
 */
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    bool use_mr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	/* Nothing to map for a zero-length entry. */
	if (!dma_len)
		return 0;

	if (!use_mr) {
		/*
		 * Once we're in direct map mode for a request, we don't
		 * go back to FMR or FR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/*
	 * Since not all RDMA HW drivers support non-zero page offsets for
	 * FMR, if we start at an offset into a page, don't merge into the
	 * current FMR mapping. Finish it out, and use the kernel's MR for
	 * this sg entry.
	 */
	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
	    dma_len > dev->mr_max_size) {
		ret = srp_finish_mapping(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/*
	 * If this is the first sg that will be mapped via FMR or via FR, save
	 * our position. We need to know the first unmapped entry, its index,
	 * and the first unmapped address within that entry to be able to
	 * restart mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	/* Chop the entry into mr_page_size pieces; dma_len > 0 here, so the
	 * loop runs at least once and 'len' is always assigned below. */
	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			/* Accumulator full, or chunk not page-aligned:
			 * flush what we have before continuing. */
			ret = srp_finish_mapping(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundries.
	 */
	ret = 0;
	if (len != dev->mr_page_size) {
		ret = srp_finish_mapping(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
1344f5358a17SRoland Dreier 
/*
 * Build the descriptor table for @req by mapping every entry of @scat.
 *
 * Memory registration (FR or FMR, depending on the device) is used while
 * it keeps succeeding; on a registration failure the code backtracks to
 * the first entry that has not been registered yet and falls back to
 * direct descriptors with the target's global rkey for the remainder.
 * Always returns 0.
 */
static int srp_map_sg(struct srp_map_state *state,
		      struct srp_target_port *target, struct srp_request *req,
		      struct scatterlist *scat, int count)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;
	int i;
	bool use_mr;

	state->desc	= req->indirect_desc;
	state->pages	= req->map_page;
	/* Pick FR or FMR bookkeeping; skip registration if no pool exists. */
	if (dev->use_fast_reg) {
		state->next_fr = req->fr_list;
		use_mr = !!target->fr_pool;
	} else {
		state->next_fmr = req->fmr_list;
		use_mr = !!target->fmr_pool;
	}

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(state, target, sg, i, use_mr)) {
			/*
			 * Memory registration failed, so backtrack to the
			 * first unmapped entry and continue on without using
			 * memory registration.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state->unmapped_sg;
			i = state->unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			/* Skip the part of this entry already described. */
			dma_len -= (state->unmapped_addr - dma_addr);
			dma_addr = state->unmapped_addr;
			use_mr = false;
			srp_map_desc(state, dma_addr, dma_len, target->rkey);
		}
	}

	/* Flush any pages still accumulated; on failure fall back too. */
	if (use_mr && srp_finish_mapping(state, target))
		goto backtrack;

	req->nmdesc = state->nmdesc;

	return 0;
}
139576bc1e1dSBart Van Assche 
/*
 * DMA-map the data buffer of @scmnd and fill in the data descriptor part
 * of the SRP_CMD information unit in @req.
 *
 * Returns the total length of the SRP_CMD IU (header plus descriptors) on
 * success, or a negative errno: -EINVAL for an unsupported data direction,
 * -EIO if DMA mapping fails or the descriptor table cannot fit.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	/* No data phase: the IU consists of the SRP_CMD header only. */
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && !register_always) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, target, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	/* NOTE(review): this error return leaves the sg list DMA-mapped and
	 * any memory registrations in place; presumably the caller undoes
	 * them via srp_unmap_data()/srp_free_req() — confirm. */
	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	/* Copy as many descriptors as fit in-IU; the rest stay in the
	 * externally referenced indirect table. */
	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	/* Data-out format lives in the high nibble of buf_fmt. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
1517aef9ec39SRoland Dreier 
151805a1d750SDavid Dillow /*
151976c75b25SBart Van Assche  * Return an IU and possible credit to the free pool
152076c75b25SBart Van Assche  */
152176c75b25SBart Van Assche static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
152276c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
152376c75b25SBart Van Assche {
152476c75b25SBart Van Assche 	unsigned long flags;
152576c75b25SBart Van Assche 
1526e9684678SBart Van Assche 	spin_lock_irqsave(&target->lock, flags);
152776c75b25SBart Van Assche 	list_add(&iu->list, &target->free_tx);
152876c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
152976c75b25SBart Van Assche 		++target->req_lim;
1530e9684678SBart Van Assche 	spin_unlock_irqrestore(&target->lock, flags);
153176c75b25SBart Van Assche }
153276c75b25SBart Van Assche 
/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	/* Task management requests may dip into the reserved credits. */
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	/* Reap finished sends first so their IUs are back on free_tx. */
	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			/* Out of credits; count it for the stats. */
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
157105a1d750SDavid Dillow 
157276c75b25SBart Van Assche static int srp_post_send(struct srp_target_port *target,
157305a1d750SDavid Dillow 			 struct srp_iu *iu, int len)
157405a1d750SDavid Dillow {
157505a1d750SDavid Dillow 	struct ib_sge list;
157605a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
157705a1d750SDavid Dillow 
157805a1d750SDavid Dillow 	list.addr   = iu->dma;
157905a1d750SDavid Dillow 	list.length = len;
15809af76271SDavid Dillow 	list.lkey   = target->lkey;
158105a1d750SDavid Dillow 
158205a1d750SDavid Dillow 	wr.next       = NULL;
1583dcb4cb85SBart Van Assche 	wr.wr_id      = (uintptr_t) iu;
158405a1d750SDavid Dillow 	wr.sg_list    = &list;
158505a1d750SDavid Dillow 	wr.num_sge    = 1;
158605a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
158705a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
158805a1d750SDavid Dillow 
158976c75b25SBart Van Assche 	return ib_post_send(target->qp, &wr, &bad_wr);
159005a1d750SDavid Dillow }
159105a1d750SDavid Dillow 
1592dcb4cb85SBart Van Assche static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
1593c996bb47SBart Van Assche {
1594c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1595dcb4cb85SBart Van Assche 	struct ib_sge list;
1596c996bb47SBart Van Assche 
1597c996bb47SBart Van Assche 	list.addr   = iu->dma;
1598c996bb47SBart Van Assche 	list.length = iu->size;
15999af76271SDavid Dillow 	list.lkey   = target->lkey;
1600c996bb47SBart Van Assche 
1601c996bb47SBart Van Assche 	wr.next     = NULL;
1602dcb4cb85SBart Van Assche 	wr.wr_id    = (uintptr_t) iu;
1603c996bb47SBart Van Assche 	wr.sg_list  = &list;
1604c996bb47SBart Van Assche 	wr.num_sge  = 1;
1605c996bb47SBart Van Assche 
1606dcb4cb85SBart Van Assche 	return ib_post_recv(target->qp, &wr, &bad_wr);
1607c996bb47SBart Van Assche }
1608c996bb47SBart Van Assche 
/*
 * Process an SRP_RSP information unit: either complete an outstanding
 * task management request (tag has SRP_TAG_TSK_MGMT set) or finish the
 * SCSI command whose request ring slot is indexed by the response tag.
 * In both cases the req_lim delta granted by the target is applied.
 */
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		/* -1 means "no status available" when resp_data is short. */
		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL, NULL);
		if (!scmnd) {
			/* Command already completed/aborted; still honor
			 * the credit delta before bailing out. */
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			/* Sense data follows the response data in rsp->data. */
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		/* Under-/overflow residual counts; overflow is negative. */
		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
1663aef9ec39SRoland Dreier 
/*
 * Send a response IU (@rsp of @len bytes) to a target-initiated request,
 * after applying the credit delta @req_delta granted by the request.
 *
 * Returns 0 on success, 1 if no transmit IU is available, or the error
 * from srp_post_send() (in which case the IU is returned to the pool).
 */
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	/* __srp_get_tx_iu() requires target->lock; apply the delta under
	 * the same lock since req_lim is protected by it. */
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	/* Copy the response into the IU with the buffer synced to the CPU. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		/* Unsent IUs must go back via srp_put_tx_iu(). */
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
1696bb12588aSDavid Dillow 
1697bb12588aSDavid Dillow static void srp_process_cred_req(struct srp_target_port *target,
1698bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1699bb12588aSDavid Dillow {
1700bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1701bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1702bb12588aSDavid Dillow 		.tag = req->tag,
1703bb12588aSDavid Dillow 	};
1704bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1705bb12588aSDavid Dillow 
1706bb12588aSDavid Dillow 	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1707bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1708bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1709bb12588aSDavid Dillow }
1710bb12588aSDavid Dillow 
1711bb12588aSDavid Dillow static void srp_process_aer_req(struct srp_target_port *target,
1712bb12588aSDavid Dillow 				struct srp_aer_req *req)
1713bb12588aSDavid Dillow {
1714bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1715bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1716bb12588aSDavid Dillow 		.tag = req->tag,
1717bb12588aSDavid Dillow 	};
1718bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1719bb12588aSDavid Dillow 
1720bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1721bb12588aSDavid Dillow 		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
1722bb12588aSDavid Dillow 
1723bb12588aSDavid Dillow 	if (srp_response_common(target, delta, &rsp, sizeof rsp))
1724bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1725bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1726bb12588aSDavid Dillow }
1727bb12588aSDavid Dillow 
/*
 * Receive completion handler: dispatch the received information unit by
 * its SRP opcode, then repost the IU on the receive queue.
 */
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	/* The receive IU pointer was stored in wr_id by srp_post_recv(). */
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	/* All SRP IUs start with a one-byte opcode. */
	opcode = *(u8 *) iu->buf;

	/* Compiled-out debug dump; flip the condition to enable. */
	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	/* Hand the IU back to the receive queue for the next message. */
	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
1780aef9ec39SRoland Dreier 
1781c1120f89SBart Van Assche /**
1782c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
1783af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
1784c1120f89SBart Van Assche  *
1785c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
1786c1120f89SBart Van Assche  * hence the target->rport test.
1787c1120f89SBart Van Assche  */
1788c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
1789c1120f89SBart Van Assche {
1790c1120f89SBart Van Assche 	struct srp_target_port *target;
1791c1120f89SBart Van Assche 
1792c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
1793c1120f89SBart Van Assche 	if (target->rport)
1794c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
1795c1120f89SBart Van Assche }
1796c1120f89SBart Van Assche 
/*
 * srp_handle_qp_err() - handle an error completion on the send or recv queue.
 * @wr_id:     work request id of the failed work request.
 * @wc_status: completion status reported by the HCA.
 * @send_err:  true for a send completion, false for a receive completion.
 * @target:    SRP target port the QP belongs to.
 *
 * Only the first error on a connected QP is reported and only then is
 * tl_err_work queued to start the transport layer failure timers; in all
 * cases the QP is marked as being in the error state.
 */
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		/* Bits in wr_id identify which kind of work request failed. */
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %d\n",
				     wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %d\n",
				     wc_status);
		} else {
			/* Plain send/recv: wr_id is the srp_iu pointer. */
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %d for iu %p\n",
				     send_err ? "send" : "receive",
				     wc_status, (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
1819948d1e88SBart Van Assche 
18209c03dc9fSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
1821aef9ec39SRoland Dreier {
1822aef9ec39SRoland Dreier 	struct srp_target_port *target = target_ptr;
1823aef9ec39SRoland Dreier 	struct ib_wc wc;
1824aef9ec39SRoland Dreier 
1825aef9ec39SRoland Dreier 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1826aef9ec39SRoland Dreier 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1827948d1e88SBart Van Assche 		if (likely(wc.status == IB_WC_SUCCESS)) {
1828948d1e88SBart Van Assche 			srp_handle_recv(target, &wc);
1829948d1e88SBart Van Assche 		} else {
18305cfb1782SBart Van Assche 			srp_handle_qp_err(wc.wr_id, wc.status, false, target);
1831aef9ec39SRoland Dreier 		}
18329c03dc9fSBart Van Assche 	}
18339c03dc9fSBart Van Assche }
18349c03dc9fSBart Van Assche 
18359c03dc9fSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
18369c03dc9fSBart Van Assche {
18379c03dc9fSBart Van Assche 	struct srp_target_port *target = target_ptr;
18389c03dc9fSBart Van Assche 	struct ib_wc wc;
1839dcb4cb85SBart Van Assche 	struct srp_iu *iu;
18409c03dc9fSBart Van Assche 
18419c03dc9fSBart Van Assche 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1842948d1e88SBart Van Assche 		if (likely(wc.status == IB_WC_SUCCESS)) {
1843737b94ebSRoland Dreier 			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1844dcb4cb85SBart Van Assche 			list_add(&iu->list, &target->free_tx);
1845948d1e88SBart Van Assche 		} else {
18465cfb1782SBart Van Assche 			srp_handle_qp_err(wc.wr_id, wc.status, true, target);
1847948d1e88SBart Van Assche 		}
1848aef9ec39SRoland Dreier 	}
1849aef9ec39SRoland Dreier }
1850aef9ec39SRoland Dreier 
/*
 * srp_queuecommand() - queue a SCSI command for transmission to the target.
 * @shost: SCSI host the command was received on.
 * @scmnd: SCSI command to send.
 *
 * Builds an SRP_CMD information unit, maps the command's data buffers and
 * posts the IU on the send queue.  Returns 0 on success (including the case
 * where the command is completed immediately via scsi_done()) or
 * SCSI_MLQUEUE_HOST_BUSY to ask the SCSI mid-layer to retry later.
 */
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	/* Fail fast if the rport is not in a state that accepts commands. */
	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	/* Take a tx IU and a request slot under the target lock. */
	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	/* Remember the request so the completion path can find it again. */
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

	/* Put the request slot back on the free list. */
	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	/*
	 * If a result was set above, complete the command now; otherwise ask
	 * the mid-layer to requeue it.
	 */
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
1961aef9ec39SRoland Dreier 
/*
 * srp_alloc_iu_bufs() - allocate the receive and transmit IU rings.
 *
 * Note: the resources allocated in this function are freed in
 * srp_free_target_ib().
 *
 * Returns 0 on success and -ENOMEM on allocation failure; on failure all
 * partially allocated resources are released again.
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	/* Zero-initialized pointer arrays for both rings. */
	target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
				  GFP_KERNEL);
	if (!target->rx_ring)
		goto err_no_ring;
	target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
				  GFP_KERNEL);
	if (!target->tx_ring)
		goto err_no_ring;

	/* Receive IUs must be able to hold the largest target IU. */
	for (i = 0; i < target->queue_size; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	/* Transmit IUs start out on the free_tx list. */
	for (i = 0; i < target->queue_size; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	/*
	 * NOTE(review): entries that were never allocated are still NULL
	 * because the rings are kzalloc'd; this presumes srp_free_iu()
	 * tolerates a NULL iu — confirm against its definition.
	 */
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		srp_free_iu(target->srp_host, target->tx_ring[i]);
	}


err_no_ring:
	kfree(target->tx_ring);
	target->tx_ring = NULL;
	kfree(target->rx_ring);
	target->rx_ring = NULL;

	return -ENOMEM;
}
2014aef9ec39SRoland Dreier 
/*
 * srp_compute_rq_tmo() - compute the reconnect delay in jiffies.
 * @qp_attr:   QP attributes as prepared for the RTR-to-RTS transition.
 * @attr_mask: mask telling which members of @qp_attr are valid.
 *
 * Returns the number of jiffies to use for target->rq_tmo_jiffies, derived
 * from the QP local ACK timeout and retry count.
 */
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	/* Holds nanoseconds until do_div() converts it to milliseconds. */
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
2041c9b03c1aSBart Van Assche 
/*
 * srp_cm_rep_handler() - process an SRP login response (CM REP).
 * @cm_id:  connection manager id of the new connection.
 * @lrsp:   SRP login response received from the target.
 * @target: SRP target port being logged in.
 *
 * Applies the negotiated parameters, allocates the IU rings if necessary,
 * transitions the QP through RTR and RTS, posts the initial receives and
 * sends the CM RTU.  The outcome is stored in target->status.
 */
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	/* Allocate the IU rings only once; they survive reconnects. */
	if (!target->rx_ring) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	/* Transition the QP to RTR (ready to receive). */
	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	/* Post the whole receive ring before going to RTS. */
	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	/* Transition the QP to RTS (ready to send). */
	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
2118961e0be8SDavid Dillow 
/*
 * srp_cm_rej_handler() - process a CM REJ (connection rejected) event.
 * @cm_id:  connection manager id of the rejected connection.
 * @event:  CM event that carries the rejection reason and data.
 * @target: SRP target port whose login was rejected.
 *
 * Translates the rejection reason into target->status: either a redirect
 * code (SRP_DLID_REDIRECT / SRP_PORT_REDIRECT / SRP_STALE_CONN) that the
 * caller can act on, or -ECONNRESET for fatal rejections.
 */
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		/* Adopt the redirected path parameters from the ARI data. */
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target embedded an SRP-level reject in the REJ data. */
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->path.sgid.raw,
					     target->orig_dgid, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
2200aef9ec39SRoland Dreier 
/*
 * srp_cm_handler() - InfiniBand connection manager event callback.
 * @cm_id: connection manager id; its context is the SRP target port.
 * @event: CM event to handle.
 *
 * Updates target->status according to the event and, for events that end a
 * connection attempt, completes target->done so the waiter can proceed.
 * Always returns 0.
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	/* Set to 1 for events that must wake up the connect/disconnect waiter. */
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		/* The target closed the connection; start error recovery. */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these events. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
2260aef9ec39SRoland Dreier 
226171444b97SJack Wang /**
226271444b97SJack Wang  * srp_change_queue_depth - setting device queue depth
226371444b97SJack Wang  * @sdev: scsi device struct
226471444b97SJack Wang  * @qdepth: requested queue depth
226571444b97SJack Wang  * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
226671444b97SJack Wang  * (see include/scsi/scsi_host.h for definition)
226771444b97SJack Wang  *
226871444b97SJack Wang  * Returns queue depth.
226971444b97SJack Wang  */
227071444b97SJack Wang static int
227171444b97SJack Wang srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
227271444b97SJack Wang {
227371444b97SJack Wang 	struct Scsi_Host *shost = sdev->host;
227471444b97SJack Wang 	int max_depth;
227571444b97SJack Wang 	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
227671444b97SJack Wang 		max_depth = shost->can_queue;
227771444b97SJack Wang 		if (!sdev->tagged_supported)
227871444b97SJack Wang 			max_depth = 1;
227971444b97SJack Wang 		if (qdepth > max_depth)
228071444b97SJack Wang 			qdepth = max_depth;
2281*c8b09f6fSChristoph Hellwig 		scsi_adjust_queue_depth(sdev, qdepth);
228271444b97SJack Wang 	} else if (reason == SCSI_QDEPTH_QFULL)
228371444b97SJack Wang 		scsi_track_queue_full(sdev, qdepth);
228471444b97SJack Wang 	else
228571444b97SJack Wang 		return -EOPNOTSUPP;
228671444b97SJack Wang 
228771444b97SJack Wang 	return sdev->queue_depth;
228871444b97SJack Wang }
228971444b97SJack Wang 
/*
 * srp_send_tsk_mgmt() - send an SRP task management request and wait for it.
 * @target:  SRP target port.
 * @req_tag: tag of the request the task management function acts on.
 * @lun:     logical unit number.
 * @func:    task management function code, e.g. SRP_TSK_ABORT_TASK.
 *
 * Returns 0 if the response arrived within SRP_ABORT_TIMEOUT_MS and -1 on
 * any failure (disconnected, no IU available, post failure or timeout).
 */
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_target_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	/* Build the task management IU in the DMA buffer. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	/* Wait for the response; tsk_mgmt_done is completed by the rsp path. */
	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
2345d945e1dfSRoland Dreier 
2346aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd)
2347aef9ec39SRoland Dreier {
2348d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2349f8b6e31eSDavid Dillow 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2350086f44f5SBart Van Assche 	int ret;
2351d945e1dfSRoland Dreier 
23527aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2353aef9ec39SRoland Dreier 
2354b3fe628dSBart Van Assche 	if (!req || !srp_claim_req(target, req, NULL, scmnd))
235599b6697aSBart Van Assche 		return SUCCESS;
2356086f44f5SBart Van Assche 	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
235780d5e8a2SBart Van Assche 			      SRP_TSK_ABORT_TASK) == 0)
2358086f44f5SBart Van Assche 		ret = SUCCESS;
2359ed9b2264SBart Van Assche 	else if (target->rport->state == SRP_RPORT_LOST)
236099e1c139SBart Van Assche 		ret = FAST_IO_FAIL;
2361086f44f5SBart Van Assche 	else
2362086f44f5SBart Van Assche 		ret = FAILED;
236322032991SBart Van Assche 	srp_free_req(target, req, scmnd, 0);
2364d945e1dfSRoland Dreier 	scmnd->result = DID_ABORT << 16;
2365d8536670SBart Van Assche 	scmnd->scsi_done(scmnd);
2366d945e1dfSRoland Dreier 
2367086f44f5SBart Van Assche 	return ret;
2368aef9ec39SRoland Dreier }
2369aef9ec39SRoland Dreier 
2370aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2371aef9ec39SRoland Dreier {
2372d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2373536ae14eSBart Van Assche 	int i;
2374d945e1dfSRoland Dreier 
23757aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2376aef9ec39SRoland Dreier 
2377f8b6e31eSDavid Dillow 	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
2378f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2379d945e1dfSRoland Dreier 		return FAILED;
2380f8b6e31eSDavid Dillow 	if (target->tsk_mgmt_status)
2381d945e1dfSRoland Dreier 		return FAILED;
2382d945e1dfSRoland Dreier 
23834d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
2384536ae14eSBart Van Assche 		struct srp_request *req = &target->req_ring[i];
2385b3fe628dSBart Van Assche 		srp_finish_req(target, req, scmnd->device, DID_RESET << 16);
2386536ae14eSBart Van Assche 	}
2387d945e1dfSRoland Dreier 
2388d945e1dfSRoland Dreier 	return SUCCESS;
2389aef9ec39SRoland Dreier }
2390aef9ec39SRoland Dreier 
2391aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2392aef9ec39SRoland Dreier {
2393aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2394aef9ec39SRoland Dreier 
23957aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2396aef9ec39SRoland Dreier 
2397ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2398aef9ec39SRoland Dreier }
2399aef9ec39SRoland Dreier 
2400c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2401c9b03c1aSBart Van Assche {
2402c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2403c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2404c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2405c9b03c1aSBart Van Assche 	unsigned long timeout;
2406c9b03c1aSBart Van Assche 
2407c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2408c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2409c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2410c9b03c1aSBart Van Assche 	}
2411c9b03c1aSBart Van Assche 
2412c9b03c1aSBart Van Assche 	return 0;
2413c9b03c1aSBart Van Assche }
2414c9b03c1aSBart Van Assche 
2415ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2416ee959b00STony Jones 			   char *buf)
24176ecb0c84SRoland Dreier {
2418ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24196ecb0c84SRoland Dreier 
24206ecb0c84SRoland Dreier 	return sprintf(buf, "0x%016llx\n",
24216ecb0c84SRoland Dreier 		       (unsigned long long) be64_to_cpu(target->id_ext));
24226ecb0c84SRoland Dreier }
24236ecb0c84SRoland Dreier 
2424ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2425ee959b00STony Jones 			     char *buf)
24266ecb0c84SRoland Dreier {
2427ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24286ecb0c84SRoland Dreier 
24296ecb0c84SRoland Dreier 	return sprintf(buf, "0x%016llx\n",
24306ecb0c84SRoland Dreier 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
24316ecb0c84SRoland Dreier }
24326ecb0c84SRoland Dreier 
2433ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2434ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
24356ecb0c84SRoland Dreier {
2436ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24376ecb0c84SRoland Dreier 
24386ecb0c84SRoland Dreier 	return sprintf(buf, "0x%016llx\n",
24396ecb0c84SRoland Dreier 		       (unsigned long long) be64_to_cpu(target->service_id));
24406ecb0c84SRoland Dreier }
24416ecb0c84SRoland Dreier 
2442ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2443ee959b00STony Jones 			 char *buf)
24446ecb0c84SRoland Dreier {
2445ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24466ecb0c84SRoland Dreier 
24476ecb0c84SRoland Dreier 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
24486ecb0c84SRoland Dreier }
24496ecb0c84SRoland Dreier 
2450848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2451848b3082SBart Van Assche 			 char *buf)
2452848b3082SBart Van Assche {
2453848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2454848b3082SBart Van Assche 
2455848b3082SBart Van Assche 	return sprintf(buf, "%pI6\n", target->path.sgid.raw);
2456848b3082SBart Van Assche }
2457848b3082SBart Van Assche 
2458ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2459ee959b00STony Jones 			 char *buf)
24606ecb0c84SRoland Dreier {
2461ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24626ecb0c84SRoland Dreier 
24635b095d98SHarvey Harrison 	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
24646ecb0c84SRoland Dreier }
24656ecb0c84SRoland Dreier 
2466ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2467ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
24683633b3d0SIshai Rabinovitz {
2469ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24703633b3d0SIshai Rabinovitz 
24715b095d98SHarvey Harrison 	return sprintf(buf, "%pI6\n", target->orig_dgid);
24723633b3d0SIshai Rabinovitz }
24733633b3d0SIshai Rabinovitz 
247489de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
247589de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
247689de7486SBart Van Assche {
247789de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
247889de7486SBart Van Assche 
247989de7486SBart Van Assche 	return sprintf(buf, "%d\n", target->req_lim);
248089de7486SBart Van Assche }
248189de7486SBart Van Assche 
2482ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2483ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
24846bfa24faSRoland Dreier {
2485ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
24866bfa24faSRoland Dreier 
24876bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
24886bfa24faSRoland Dreier }
24896bfa24faSRoland Dreier 
2490ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2491ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2492ded7f1a1SIshai Rabinovitz {
2493ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2494ded7f1a1SIshai Rabinovitz 
2495ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2496ded7f1a1SIshai Rabinovitz }
2497ded7f1a1SIshai Rabinovitz 
2498ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2499ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2500ded7f1a1SIshai Rabinovitz {
2501ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2502ded7f1a1SIshai Rabinovitz 
250305321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2504ded7f1a1SIshai Rabinovitz }
2505ded7f1a1SIshai Rabinovitz 
25064b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
25074b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
25084b5e5f41SBart Van Assche {
25094b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25104b5e5f41SBart Van Assche 
25114b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
25124b5e5f41SBart Van Assche }
25134b5e5f41SBart Van Assche 
25147bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
25157bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
25167bb312e4SVu Pham {
25177bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25187bb312e4SVu Pham 
25197bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
25207bb312e4SVu Pham }
25217bb312e4SVu Pham 
252249248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
252349248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
252449248644SDavid Dillow {
252549248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
252649248644SDavid Dillow 
252749248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
252849248644SDavid Dillow }
252949248644SDavid Dillow 
2530c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2531c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2532c07d424dSDavid Dillow {
2533c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2534c07d424dSDavid Dillow 
2535c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2536c07d424dSDavid Dillow }
2537c07d424dSDavid Dillow 
/* Read-only sysfs attributes exported for each SRP scsi_host. */
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
25536ecb0c84SRoland Dreier 
/*
 * NULL-terminated attribute list hooked up via srp_template.shost_attrs so
 * the SCSI midlayer creates these sysfs files for every SRP host.
 */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
25726ecb0c84SRoland Dreier 
/* SCSI host template describing the SRP initiator to the SCSI midlayer. */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.change_queue_type              = scsi_change_queue_type,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	/* The transport layer performs its own settling; skip the EH delay. */
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
2593aef9ec39SRoland Dreier 
/*
 * srp_add_target() - register a target port with the SCSI midlayer and the
 * SRP transport class, then scan its LUNs.
 *
 * Returns 0 on success or a negative errno; on failure scsi_add_host() is
 * undone, everything else is left to the caller.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		 (unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* The 16-byte rport port_id is id_ext concatenated with ioc_guid. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	/* NOTE(review): the target is marked live before the scan,
	 * presumably so commands issued during scanning are accepted -
	 * confirm against srp_queuecommand. */
	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
2628aef9ec39SRoland Dreier 
2629ee959b00STony Jones static void srp_release_dev(struct device *dev)
2630aef9ec39SRoland Dreier {
2631aef9ec39SRoland Dreier 	struct srp_host *host =
2632ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2633aef9ec39SRoland Dreier 
2634aef9ec39SRoland Dreier 	complete(&host->released);
2635aef9ec39SRoland Dreier }
2636aef9ec39SRoland Dreier 
/* Device class under which per-port SRP host devices are registered. */
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
2641aef9ec39SRoland Dreier 
264296fc248aSBart Van Assche /**
264396fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2644af24663bSBart Van Assche  * @host:   SRP host.
2645af24663bSBart Van Assche  * @target: SRP target port.
264696fc248aSBart Van Assche  */
264796fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
264896fc248aSBart Van Assche 			    struct srp_target_port *target)
264996fc248aSBart Van Assche {
265096fc248aSBart Van Assche 	struct srp_target_port *t;
265196fc248aSBart Van Assche 	bool ret = false;
265296fc248aSBart Van Assche 
265396fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
265496fc248aSBart Van Assche 		goto out;
265596fc248aSBart Van Assche 
265696fc248aSBart Van Assche 	ret = true;
265796fc248aSBart Van Assche 
265896fc248aSBart Van Assche 	spin_lock(&host->target_lock);
265996fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
266096fc248aSBart Van Assche 		if (t != target &&
266196fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
266296fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
266396fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
266496fc248aSBart Van Assche 			ret = false;
266596fc248aSBart Van Assche 			break;
266696fc248aSBart Van Assche 		}
266796fc248aSBart Van Assche 	}
266896fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
266996fc248aSBart Van Assche 
267096fc248aSBart Van Assche out:
267196fc248aSBart Van Assche 	return ret;
267296fc248aSBart Van Assche }
267396fc248aSBart Van Assche 
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,		/* match_token() parse failure */
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	/* Mask of the options that are mandatory in an add_target request. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
2705aef9ec39SRoland Dreier 
/* match_token() table mapping add_target option names to SRP_OPT_* flags. */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL 			}
};
2724aef9ec39SRoland Dreier 
/*
 * srp_parse_options() - parse an add_target option string into *target.
 * @buf:    comma-separated key=value option string written to sysfs.
 * @target: target port whose fields are filled in from the options.
 *
 * Returns 0 on success, -ENOMEM on allocation failure and -EINVAL for
 * malformed input or when a mandatory option (SRP_OPT_ALL) is missing.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];	/* one GID byte: two hex digits plus NUL */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	/* strsep() modifies its argument, so parse a private copy of buf. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* A GID is exactly 16 bytes = 32 hex characters. */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert each pair of hex digits into one raw byte. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			/* NOTE(review): a separate copy of the user-specified
			 * dgid is kept, presumably because path.dgid may be
			 * rewritten later - confirm against path record code. */
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			/* Reserve extra queue slots for responses and TMFs. */
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			/* cmd_per_lun follows queue_size unless set explicitly. */
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			/* Only the SRP rev 10 and rev 16a IO classes exist. */
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	/* Verify that all mandatory options were supplied; warn per missing
	 * option so the user sees every omission at once. */
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	/* Inconsistent but not fatal: warn and continue. */
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
2932aef9ec39SRoland Dreier 
/*
 * sysfs 'add_target' store method: parse the option string in @buf and
 * create, configure and connect a new SRP target port on this SRP host.
 *
 * Returns @count on success or a negative errno on failure.  Resources
 * acquired along the way are released in reverse order through the
 * goto-based error unwinding at the bottom of the function.
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	/* Largest CDB that fits in the SRP_CMD information unit. */
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	/* Defaults; several of these may be overridden by srp_parse_options(). */
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/* Serialize target creation on this host port. */
	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	/* Reserve send queue entries for task management requests. */
	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	/* Refuse to create a second connection to the same target port. */
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	/*
	 * Without an MR pool (neither FMR nor FR) and without external
	 * indirect descriptors, no more than cmd_sg_cnt S/G entries can
	 * be described per command.
	 */
	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	/* Worst-case SRP_CMD IU: command + indirect buffer + descriptors. */
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err_free_mem;

	/* Cache the local port's GID as the source GID of the path. */
	ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
	if (ret)
		goto err_free_mem;

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
		     be64_to_cpu(target->id_ext),
		     be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     be64_to_cpu(target->service_id),
		     target->path.sgid.raw, target->path.dgid.raw);

	/* Success: a sysfs store method reports the bytes it consumed. */
	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	return ret;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);
	goto out;
}
3061aef9ec39SRoland Dreier 
/* Write-only sysfs attribute: writing an option string creates a target. */
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3063aef9ec39SRoland Dreier 
3064ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3065ee959b00STony Jones 			  char *buf)
3066aef9ec39SRoland Dreier {
3067ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3068aef9ec39SRoland Dreier 
306905321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3070aef9ec39SRoland Dreier }
3071aef9ec39SRoland Dreier 
/* Read-only sysfs attribute: name of the underlying IB device. */
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3073aef9ec39SRoland Dreier 
3074ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3075ee959b00STony Jones 			 char *buf)
3076aef9ec39SRoland Dreier {
3077ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3078aef9ec39SRoland Dreier 
3079aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3080aef9ec39SRoland Dreier }
3081aef9ec39SRoland Dreier 
/* Read-only sysfs attribute: HCA port number of this SRP host. */
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3083aef9ec39SRoland Dreier 
/*
 * Allocate and register an srp_host for port @port of HCA @device.  This
 * creates the "srp-<dev>-<port>" class device along with its
 * 'add_target', 'ibdev' and 'port' sysfs attributes.
 *
 * Returns the new host on success, or NULL on any failure.
 */
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	/* Waited for in srp_remove_one() after unregistering the device. */
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	/*
	 * NOTE(review): the driver-core documentation says a failed
	 * device_register() must be followed by put_device(), not a direct
	 * kfree() of the containing structure -- confirm whether this path
	 * can leak state initialized by device_register().
	 */
	kfree(host);

	return NULL;
}
3122aef9ec39SRoland Dreier 
/*
 * IB client 'add' callback: invoked once per HCA.  Queries the device's
 * capabilities, sets up the per-device memory registration parameters,
 * PD and DMA MR, and creates an srp_host for every physical port.
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, s, e, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * FMR support is detected from the presence of the FMR verbs;
	 * fast registration (FR) support from the device capability flag.
	 */
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	/* Prefer FR over FMR only if the 'prefer_fr' module parameter is set. */
	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		/* FR imposes its own limit on the page list length. */
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				   srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	/* Switches expose a single port 0; CAs number ports from 1. */
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	/* srp_add_port() failure is not fatal; that port is just skipped. */
	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
3219aef9ec39SRoland Dreier 
/*
 * IB client 'remove' callback: tear down every srp_host and target port
 * associated with @device, then release the device-wide MR, PD and the
 * srp_device structure itself.
 */
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
3260aef9ec39SRoland Dreier 
32613236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = {
3262ed9b2264SBart Van Assche 	.has_rport_state	 = true,
3263ed9b2264SBart Van Assche 	.reset_timer_if_blocked	 = true,
3264a95cadb9SBart Van Assche 	.reconnect_delay	 = &srp_reconnect_delay,
3265ed9b2264SBart Van Assche 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3266ed9b2264SBart Van Assche 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3267ed9b2264SBart Van Assche 	.reconnect		 = srp_rport_reconnect,
3268dc1bdbd9SBart Van Assche 	.rport_delete		 = srp_rport_delete,
3269ed9b2264SBart Van Assche 	.terminate_rport_io	 = srp_terminate_io,
32703236822bSFUJITA Tomonori };
32713236822bSFUJITA Tomonori 
/*
 * Module load: validate the scatter/gather module parameters, then set up
 * the removal workqueue, SRP transport template, device class, SA client
 * and IB client.  On failure everything registered so far is unwound in
 * reverse order via the labels at the bottom.
 */
static int __init srp_init_module(void)
{
	int ret;

	/* The driver stores pointers in ib_wc.wr_id, so it must fit one. */
	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	/* 255 is presumably the SRP IU descriptor-count limit -- see srp.h. */
	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	/* indirect_sg_entries must be at least cmd_sg_entries. */
	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}
3340aef9ec39SRoland Dreier 
/* Module unload: tear down in the reverse order of srp_init_module(). */
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}
3349aef9ec39SRoland Dreier 
/* Module entry points. */
module_init(srp_init_module);
module_exit(srp_cleanup_module);
3352