xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision c6333f9f9f7646e311248a09e8ed96126a97aba8)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
4356b5390cSBart Van Assche #include <rdma/ib_cache.h>
44aef9ec39SRoland Dreier 
4560063497SArun Sharma #include <linux/atomic.h>
46aef9ec39SRoland Dreier 
47aef9ec39SRoland Dreier #include <scsi/scsi.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
5071444b97SJack Wang #include <scsi/scsi_tcq.h>
51aef9ec39SRoland Dreier #include <scsi/srp.h>
523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
53aef9ec39SRoland Dreier 
54aef9ec39SRoland Dreier #include "ib_srp.h"
55aef9ec39SRoland Dreier 
56aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
57aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
58713ef24eSBart Van Assche #define DRV_VERSION	"2.0"
59713ef24eSBart Van Assche #define DRV_RELDATE	"July 26, 2015"
60aef9ec39SRoland Dreier 
61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION);
6533ab3e5bSBart Van Assche MODULE_INFO(release_date, DRV_RELDATE);
66aef9ec39SRoland Dreier 
6749248644SDavid Dillow static unsigned int srp_sg_tablesize;
6849248644SDavid Dillow static unsigned int cmd_sg_entries;
69c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
70c07d424dSDavid Dillow static bool allow_ext_sg;
7103f6fb93SBart Van Assche static bool prefer_fr = true;
7203f6fb93SBart Van Assche static bool register_always = true;
73aef9ec39SRoland Dreier static int topspin_workarounds = 1;
74aef9ec39SRoland Dreier 
7549248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7649248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7749248644SDavid Dillow 
7849248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
7949248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
8049248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
8149248644SDavid Dillow 
82c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
83c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
84c07d424dSDavid Dillow 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85c07d424dSDavid Dillow 
86c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
87c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
88c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89c07d424dSDavid Dillow 
90aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
91aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
92aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93aef9ec39SRoland Dreier 
945cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
955cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
965cfb1782SBart Van Assche 		 "Whether to use fast registration if both FMR and fast registration are supported");
975cfb1782SBart Van Assche 
98b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
99b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
100b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
101b1b8854dSBart Van Assche 
1029c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops;
103ed9b2264SBart Van Assche 
104a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
105a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
107a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108a95cadb9SBart Van Assche 
109ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
110ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
112ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
113ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
114ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
115ed9b2264SBart Van Assche 		 " functionality is disabled.");
116ed9b2264SBart Van Assche 
117a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
118ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
120ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
121ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
122ed9b2264SBart Van Assche 		 " insulate transport layer errors. After this time has been"
123ed9b2264SBart Van Assche 		 " exceeded the SCSI host is removed. Should be"
124ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
126ed9b2264SBart Van Assche 		 " this functionality is disabled.");
127ed9b2264SBart Van Assche 
128d92c0da7SBart Van Assche static unsigned ch_count;
129d92c0da7SBart Van Assche module_param(ch_count, uint, 0444);
130d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count,
131d92c0da7SBart Van Assche 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132d92c0da7SBart Van Assche 
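/*
 * Illustrative example (not part of the original source): all of the
 * parameters above can be set when the module is loaded, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=64 register_always=N ch_count=2
 *
 * The values shown are arbitrary; see the parameter descriptions above for
 * the supported ranges.
 */
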
133aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
1347c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data);
1351dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1361dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1371dc7b1f1SChristoph Hellwig 		const char *opname);
138aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139aef9ec39SRoland Dreier 
1403236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
141bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1423236822bSFUJITA Tomonori 
143aef9ec39SRoland Dreier static struct ib_client srp_client = {
144aef9ec39SRoland Dreier 	.name   = "srp",
145aef9ec39SRoland Dreier 	.add    = srp_add_one,
146aef9ec39SRoland Dreier 	.remove = srp_remove_one
147aef9ec39SRoland Dreier };
148aef9ec39SRoland Dreier 
149c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
150c1a0b23bSMichael S. Tsirkin 
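/**
 * srp_tmo_get() - show one of the SRP timeout module parameters
 * @buffer: Output buffer.
 * @kp:     Kernel parameter (reconnect_delay, fast_io_fail_tmo or
 *          dev_loss_tmo).
 *
 * A negative timeout value is reported as "off".
 */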
151ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152ed9b2264SBart Van Assche {
153ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
154ed9b2264SBart Van Assche 
155ed9b2264SBart Van Assche 	if (tmo >= 0)
156ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
157ed9b2264SBart Van Assche 	else
158ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
159ed9b2264SBart Van Assche }
160ed9b2264SBart Van Assche 
161ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162ed9b2264SBart Van Assche {
163ed9b2264SBart Van Assche 	int tmo, res;
164ed9b2264SBart Van Assche 
1653fdf70acSSagi Grimberg 	res = srp_parse_tmo(&tmo, val);
166ed9b2264SBart Van Assche 	if (res)
167ed9b2264SBart Van Assche 		goto out;
1683fdf70acSSagi Grimberg 
169a95cadb9SBart Van Assche 	if (kp->arg == &srp_reconnect_delay)
170a95cadb9SBart Van Assche 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171a95cadb9SBart Van Assche 				    srp_dev_loss_tmo);
172a95cadb9SBart Van Assche 	else if (kp->arg == &srp_fast_io_fail_tmo)
173a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
174ed9b2264SBart Van Assche 	else
175a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176a95cadb9SBart Van Assche 				    tmo);
177ed9b2264SBart Van Assche 	if (res)
178ed9b2264SBart Van Assche 		goto out;
179ed9b2264SBart Van Assche 	*(int *)kp->arg = tmo;
180ed9b2264SBart Van Assche 
181ed9b2264SBart Van Assche out:
182ed9b2264SBart Van Assche 	return res;
183ed9b2264SBart Van Assche }
184ed9b2264SBart Van Assche 
1859c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops = {
186ed9b2264SBart Van Assche 	.get = srp_tmo_get,
187ed9b2264SBart Van Assche 	.set = srp_tmo_set,
188ed9b2264SBart Van Assche };
189ed9b2264SBart Van Assche 
190aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191aef9ec39SRoland Dreier {
192aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
193aef9ec39SRoland Dreier }
194aef9ec39SRoland Dreier 
195aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
196aef9ec39SRoland Dreier {
197aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
198aef9ec39SRoland Dreier }
199aef9ec39SRoland Dreier 
2005d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
2015d7cbfd6SRoland Dreier {
2025d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
2033d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
2045d7cbfd6SRoland Dreier 
2055d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2063d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2073d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2085d7cbfd6SRoland Dreier }
2095d7cbfd6SRoland Dreier 
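/**
 * srp_alloc_iu() - allocate an information unit and map its buffer for DMA
 * @host:      SRP host.
 * @size:      Size of the IU buffer in bytes.
 * @gfp_mask:  Allocation flags.
 * @direction: DMA data direction used for mapping the buffer.
 *
 * Returns a pointer to the new IU or NULL on failure.
 */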
210aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
212aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
213aef9ec39SRoland Dreier {
214aef9ec39SRoland Dreier 	struct srp_iu *iu;
215aef9ec39SRoland Dreier 
216aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
217aef9ec39SRoland Dreier 	if (!iu)
218aef9ec39SRoland Dreier 		goto out;
219aef9ec39SRoland Dreier 
220aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
221aef9ec39SRoland Dreier 	if (!iu->buf)
222aef9ec39SRoland Dreier 		goto out_free_iu;
223aef9ec39SRoland Dreier 
22405321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22505321937SGreg Kroah-Hartman 				    direction);
22605321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
227aef9ec39SRoland Dreier 		goto out_free_buf;
228aef9ec39SRoland Dreier 
229aef9ec39SRoland Dreier 	iu->size      = size;
230aef9ec39SRoland Dreier 	iu->direction = direction;
231aef9ec39SRoland Dreier 
232aef9ec39SRoland Dreier 	return iu;
233aef9ec39SRoland Dreier 
234aef9ec39SRoland Dreier out_free_buf:
235aef9ec39SRoland Dreier 	kfree(iu->buf);
236aef9ec39SRoland Dreier out_free_iu:
237aef9ec39SRoland Dreier 	kfree(iu);
238aef9ec39SRoland Dreier out:
239aef9ec39SRoland Dreier 	return NULL;
240aef9ec39SRoland Dreier }
241aef9ec39SRoland Dreier 
242aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243aef9ec39SRoland Dreier {
244aef9ec39SRoland Dreier 	if (!iu)
245aef9ec39SRoland Dreier 		return;
246aef9ec39SRoland Dreier 
24705321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
24805321937SGreg Kroah-Hartman 			    iu->direction);
249aef9ec39SRoland Dreier 	kfree(iu->buf);
250aef9ec39SRoland Dreier 	kfree(iu);
251aef9ec39SRoland Dreier }
252aef9ec39SRoland Dreier 
253aef9ec39SRoland Dreier static void srp_qp_event(struct ib_event *event, void *context)
254aef9ec39SRoland Dreier {
25557363d98SSagi Grimberg 	pr_debug("QP event %s (%d)\n",
25657363d98SSagi Grimberg 		 ib_event_msg(event->event), event->event);
257aef9ec39SRoland Dreier }
258aef9ec39SRoland Dreier 
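/**
 * srp_init_qp() - transition a newly created queue pair to the INIT state
 * @target: SRP target port.
 * @qp:     Queue pair to modify.
 *
 * Looks up the P_Key index for the target port and grants remote read and
 * write access on the QP.
 */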
259aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target,
260aef9ec39SRoland Dreier 		       struct ib_qp *qp)
261aef9ec39SRoland Dreier {
262aef9ec39SRoland Dreier 	struct ib_qp_attr *attr;
263aef9ec39SRoland Dreier 	int ret;
264aef9ec39SRoland Dreier 
265aef9ec39SRoland Dreier 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
266aef9ec39SRoland Dreier 	if (!attr)
267aef9ec39SRoland Dreier 		return -ENOMEM;
268aef9ec39SRoland Dreier 
26956b5390cSBart Van Assche 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270aef9ec39SRoland Dreier 				  target->srp_host->port,
271747fe000SBart Van Assche 				  be16_to_cpu(target->pkey),
272aef9ec39SRoland Dreier 				  &attr->pkey_index);
273aef9ec39SRoland Dreier 	if (ret)
274aef9ec39SRoland Dreier 		goto out;
275aef9ec39SRoland Dreier 
276aef9ec39SRoland Dreier 	attr->qp_state        = IB_QPS_INIT;
277aef9ec39SRoland Dreier 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278aef9ec39SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
279aef9ec39SRoland Dreier 	attr->port_num        = target->srp_host->port;
280aef9ec39SRoland Dreier 
281aef9ec39SRoland Dreier 	ret = ib_modify_qp(qp, attr,
282aef9ec39SRoland Dreier 			   IB_QP_STATE		|
283aef9ec39SRoland Dreier 			   IB_QP_PKEY_INDEX	|
284aef9ec39SRoland Dreier 			   IB_QP_ACCESS_FLAGS	|
285aef9ec39SRoland Dreier 			   IB_QP_PORT);
286aef9ec39SRoland Dreier 
287aef9ec39SRoland Dreier out:
288aef9ec39SRoland Dreier 	kfree(attr);
289aef9ec39SRoland Dreier 	return ret;
290aef9ec39SRoland Dreier }
291aef9ec39SRoland Dreier 
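/**
 * srp_new_cm_id() - create a new IB CM ID for an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Destroys any CM ID previously associated with @ch and reinitializes the
 * path record of @ch from the target port parameters.
 */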
292509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch)
2939fe4bcf4SDavid Dillow {
294509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2959fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
2969fe4bcf4SDavid Dillow 
29705321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298509c07bcSBart Van Assche 				    srp_cm_handler, ch);
2999fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
3009fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
3019fe4bcf4SDavid Dillow 
302509c07bcSBart Van Assche 	if (ch->cm_id)
303509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
304509c07bcSBart Van Assche 	ch->cm_id = new_cm_id;
305509c07bcSBart Van Assche 	ch->path.sgid = target->sgid;
306509c07bcSBart Van Assche 	ch->path.dgid = target->orig_dgid;
307509c07bcSBart Van Assche 	ch->path.pkey = target->pkey;
308509c07bcSBart Van Assche 	ch->path.service_id = target->service_id;
3099fe4bcf4SDavid Dillow 
3109fe4bcf4SDavid Dillow 	return 0;
3119fe4bcf4SDavid Dillow }
3129fe4bcf4SDavid Dillow 
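/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @target: SRP target port.
 *
 * The pool size equals the can_queue value of the SCSI host and each FMR can
 * map up to max_pages_per_mr pages of mr_page_size bytes.
 */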
313d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314d1b4289eSBart Van Assche {
315d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
316d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
317d1b4289eSBart Van Assche 
318d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
319d1b4289eSBart Van Assche 	fmr_param.pool_size	    = target->scsi_host->can_queue;
320d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
321d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
32252ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
32352ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
324d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
325d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
326d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
327d1b4289eSBart Van Assche 
328d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
329d1b4289eSBart Van Assche }
330d1b4289eSBart Van Assche 
3315cfb1782SBart Van Assche /**
3325cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3335cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3345cfb1782SBart Van Assche  */
3355cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3365cfb1782SBart Van Assche {
3375cfb1782SBart Van Assche 	int i;
3385cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3395cfb1782SBart Van Assche 
3405cfb1782SBart Van Assche 	if (!pool)
3415cfb1782SBart Van Assche 		return;
3425cfb1782SBart Van Assche 
3435cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3445cfb1782SBart Van Assche 		if (d->mr)
3455cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3465cfb1782SBart Van Assche 	}
3475cfb1782SBart Van Assche 	kfree(pool);
3485cfb1782SBart Van Assche }
3495cfb1782SBart Van Assche 
3505cfb1782SBart Van Assche /**
3515cfb1782SBart Van Assche  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
3525cfb1782SBart Van Assche  * @device:            IB device to allocate fast registration descriptors for.
3535cfb1782SBart Van Assche  * @pd:                Protection domain associated with the FR descriptors.
3545cfb1782SBart Van Assche  * @pool_size:         Number of descriptors to allocate.
3555cfb1782SBart Van Assche  * @max_page_list_len: Maximum fast registration work request page list length.
3565cfb1782SBart Van Assche  */
3575cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
3585cfb1782SBart Van Assche 					      struct ib_pd *pd, int pool_size,
3595cfb1782SBart Van Assche 					      int max_page_list_len)
3605cfb1782SBart Van Assche {
3615cfb1782SBart Van Assche 	struct srp_fr_pool *pool;
3625cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3635cfb1782SBart Van Assche 	struct ib_mr *mr;
3645cfb1782SBart Van Assche 	int i, ret = -EINVAL;
3655cfb1782SBart Van Assche 
3665cfb1782SBart Van Assche 	if (pool_size <= 0)
3675cfb1782SBart Van Assche 		goto err;
3685cfb1782SBart Van Assche 	ret = -ENOMEM;
3695cfb1782SBart Van Assche 	pool = kzalloc(sizeof(struct srp_fr_pool) +
3705cfb1782SBart Van Assche 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
3715cfb1782SBart Van Assche 	if (!pool)
3725cfb1782SBart Van Assche 		goto err;
3735cfb1782SBart Van Assche 	pool->size = pool_size;
3745cfb1782SBart Van Assche 	pool->max_page_list_len = max_page_list_len;
3755cfb1782SBart Van Assche 	spin_lock_init(&pool->lock);
3765cfb1782SBart Van Assche 	INIT_LIST_HEAD(&pool->free_list);
3775cfb1782SBart Van Assche 
3785cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
379563b67c5SSagi Grimberg 		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
380563b67c5SSagi Grimberg 				 max_page_list_len);
3815cfb1782SBart Van Assche 		if (IS_ERR(mr)) {
3825cfb1782SBart Van Assche 			ret = PTR_ERR(mr);
3835cfb1782SBart Van Assche 			goto destroy_pool;
3845cfb1782SBart Van Assche 		}
3855cfb1782SBart Van Assche 		d->mr = mr;
3865cfb1782SBart Van Assche 		list_add_tail(&d->entry, &pool->free_list);
3875cfb1782SBart Van Assche 	}
3885cfb1782SBart Van Assche 
3895cfb1782SBart Van Assche out:
3905cfb1782SBart Van Assche 	return pool;
3915cfb1782SBart Van Assche 
3925cfb1782SBart Van Assche destroy_pool:
3935cfb1782SBart Van Assche 	srp_destroy_fr_pool(pool);
3945cfb1782SBart Van Assche 
3955cfb1782SBart Van Assche err:
3965cfb1782SBart Van Assche 	pool = ERR_PTR(ret);
3975cfb1782SBart Van Assche 	goto out;
3985cfb1782SBart Van Assche }
3995cfb1782SBart Van Assche 
4005cfb1782SBart Van Assche /**
4015cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
4025cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4035cfb1782SBart Van Assche  */
4045cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4055cfb1782SBart Van Assche {
4065cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4075cfb1782SBart Van Assche 	unsigned long flags;
4085cfb1782SBart Van Assche 
4095cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4105cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4115cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4125cfb1782SBart Van Assche 		list_del(&d->entry);
4135cfb1782SBart Van Assche 	}
4145cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4155cfb1782SBart Van Assche 
4165cfb1782SBart Van Assche 	return d;
4175cfb1782SBart Van Assche }
4185cfb1782SBart Van Assche 
4195cfb1782SBart Van Assche /**
4205cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4215cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4225cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4235cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4245cfb1782SBart Van Assche  *
4255cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4265cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4275cfb1782SBart Van Assche  */
4285cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4295cfb1782SBart Van Assche 			    int n)
4305cfb1782SBart Van Assche {
4315cfb1782SBart Van Assche 	unsigned long flags;
4325cfb1782SBart Van Assche 	int i;
4335cfb1782SBart Van Assche 
4345cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4355cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4365cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4375cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4385cfb1782SBart Van Assche }
4395cfb1782SBart Van Assche 
4405cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
4415cfb1782SBart Van Assche {
4425cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
4435cfb1782SBart Van Assche 
4445cfb1782SBart Van Assche 	return srp_create_fr_pool(dev->dev, dev->pd,
4455cfb1782SBart Van Assche 				  target->scsi_host->can_queue,
4465cfb1782SBart Van Assche 				  dev->max_pages_per_mr);
4475cfb1782SBart Van Assche }
4485cfb1782SBart Van Assche 
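/*
 * Completion handler for the receive WR posted by srp_destroy_qp() to drain
 * the queue pair before it is destroyed.
 */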
4491dc7b1f1SChristoph Hellwig static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
4501dc7b1f1SChristoph Hellwig {
4511dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
4521dc7b1f1SChristoph Hellwig 
4531dc7b1f1SChristoph Hellwig 	complete(&ch->done);
4541dc7b1f1SChristoph Hellwig }
4551dc7b1f1SChristoph Hellwig 
4561dc7b1f1SChristoph Hellwig static struct ib_cqe srp_drain_cqe = {
4571dc7b1f1SChristoph Hellwig 	.done		= srp_drain_done,
4581dc7b1f1SChristoph Hellwig };
4591dc7b1f1SChristoph Hellwig 
4607dad6b2eSBart Van Assche /**
4617dad6b2eSBart Van Assche  * srp_destroy_qp() - destroy an RDMA queue pair
4627dad6b2eSBart Van Assche  * @ch: SRP RDMA channel.
4637dad6b2eSBart Van Assche  *
4647dad6b2eSBart Van Assche  * Change a queue pair into the error state and wait until all receive
4657dad6b2eSBart Van Assche  * completions have been processed before destroying it. This prevents the
4667dad6b2eSBart Van Assche  * receive completion handler from accessing the queue pair while it is
4677dad6b2eSBart Van Assche  * being destroyed.
4687dad6b2eSBart Van Assche  */
4697dad6b2eSBart Van Assche static void srp_destroy_qp(struct srp_rdma_ch *ch)
4707dad6b2eSBart Van Assche {
4717dad6b2eSBart Van Assche 	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
47214d3a3b2SChristoph Hellwig 	static struct ib_recv_wr wr = { 0 };
4737dad6b2eSBart Van Assche 	struct ib_recv_wr *bad_wr;
4747dad6b2eSBart Van Assche 	int ret;
4757dad6b2eSBart Van Assche 
4761dc7b1f1SChristoph Hellwig 	wr.wr_cqe = &srp_drain_cqe;
4777dad6b2eSBart Van Assche 	/* Destroying a QP and reusing ch->done is only safe if not connected */
478c014c8cdSBart Van Assche 	WARN_ON_ONCE(ch->connected);
4797dad6b2eSBart Van Assche 
4807dad6b2eSBart Van Assche 	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
4817dad6b2eSBart Van Assche 	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
4827dad6b2eSBart Van Assche 	if (ret)
4837dad6b2eSBart Van Assche 		goto out;
4847dad6b2eSBart Van Assche 
4857dad6b2eSBart Van Assche 	init_completion(&ch->done);
4867dad6b2eSBart Van Assche 	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
4877dad6b2eSBart Van Assche 	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
4887dad6b2eSBart Van Assche 	if (ret == 0)
4897dad6b2eSBart Van Assche 		wait_for_completion(&ch->done);
4907dad6b2eSBart Van Assche 
4917dad6b2eSBart Van Assche out:
4927dad6b2eSBart Van Assche 	ib_destroy_qp(ch->qp);
4937dad6b2eSBart Van Assche }
4947dad6b2eSBart Van Assche 
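/**
 * srp_create_ch_ib() - create the IB resources of an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Allocates the receive and send completion queues, the queue pair and,
 * depending on the device capabilities, an FR or FMR pool. Previously
 * allocated resources of @ch, if any, are destroyed and replaced.
 */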
495509c07bcSBart Van Assche static int srp_create_ch_ib(struct srp_rdma_ch *ch)
496aef9ec39SRoland Dreier {
497509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
49862154b2eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
499aef9ec39SRoland Dreier 	struct ib_qp_init_attr *init_attr;
50073aa89edSIshai Rabinovitz 	struct ib_cq *recv_cq, *send_cq;
50173aa89edSIshai Rabinovitz 	struct ib_qp *qp;
502d1b4289eSBart Van Assche 	struct ib_fmr_pool *fmr_pool = NULL;
5035cfb1782SBart Van Assche 	struct srp_fr_pool *fr_pool = NULL;
50409c0c0beSSagi Grimberg 	const int m = dev->use_fast_reg ? 3 : 1;
505aef9ec39SRoland Dreier 	int ret;
506aef9ec39SRoland Dreier 
507aef9ec39SRoland Dreier 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
508aef9ec39SRoland Dreier 	if (!init_attr)
509aef9ec39SRoland Dreier 		return -ENOMEM;
510aef9ec39SRoland Dreier 
5111dc7b1f1SChristoph Hellwig 	/* queue_size + 1 for the drain WR posted by srp_destroy_qp() */
5121dc7b1f1SChristoph Hellwig 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
5131dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_SOFTIRQ);
51473aa89edSIshai Rabinovitz 	if (IS_ERR(recv_cq)) {
51573aa89edSIshai Rabinovitz 		ret = PTR_ERR(recv_cq);
516da9d2f07SRoland Dreier 		goto err;
517aef9ec39SRoland Dreier 	}
518aef9ec39SRoland Dreier 
5191dc7b1f1SChristoph Hellwig 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
5201dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_DIRECT);
52173aa89edSIshai Rabinovitz 	if (IS_ERR(send_cq)) {
52273aa89edSIshai Rabinovitz 		ret = PTR_ERR(send_cq);
523da9d2f07SRoland Dreier 		goto err_recv_cq;
5249c03dc9fSBart Van Assche 	}
5259c03dc9fSBart Van Assche 
526aef9ec39SRoland Dreier 	init_attr->event_handler       = srp_qp_event;
5275cfb1782SBart Van Assche 	init_attr->cap.max_send_wr     = m * target->queue_size;
5287dad6b2eSBart Van Assche 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
529aef9ec39SRoland Dreier 	init_attr->cap.max_recv_sge    = 1;
530aef9ec39SRoland Dreier 	init_attr->cap.max_send_sge    = 1;
5315cfb1782SBart Van Assche 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
532aef9ec39SRoland Dreier 	init_attr->qp_type             = IB_QPT_RC;
53373aa89edSIshai Rabinovitz 	init_attr->send_cq             = send_cq;
53473aa89edSIshai Rabinovitz 	init_attr->recv_cq             = recv_cq;
535aef9ec39SRoland Dreier 
53662154b2eSBart Van Assche 	qp = ib_create_qp(dev->pd, init_attr);
53773aa89edSIshai Rabinovitz 	if (IS_ERR(qp)) {
53873aa89edSIshai Rabinovitz 		ret = PTR_ERR(qp);
539da9d2f07SRoland Dreier 		goto err_send_cq;
540aef9ec39SRoland Dreier 	}
541aef9ec39SRoland Dreier 
54273aa89edSIshai Rabinovitz 	ret = srp_init_qp(target, qp);
543da9d2f07SRoland Dreier 	if (ret)
544da9d2f07SRoland Dreier 		goto err_qp;
545aef9ec39SRoland Dreier 
546002f1567SBart Van Assche 	if (dev->use_fast_reg) {
5475cfb1782SBart Van Assche 		fr_pool = srp_alloc_fr_pool(target);
5485cfb1782SBart Van Assche 		if (IS_ERR(fr_pool)) {
5495cfb1782SBart Van Assche 			ret = PTR_ERR(fr_pool);
5505cfb1782SBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
5515cfb1782SBart Van Assche 				     "FR pool allocation failed (%d)\n", ret);
5525cfb1782SBart Van Assche 			goto err_qp;
5535cfb1782SBart Van Assche 		}
554002f1567SBart Van Assche 	} else if (dev->use_fmr) {
555d1b4289eSBart Van Assche 		fmr_pool = srp_alloc_fmr_pool(target);
556d1b4289eSBart Van Assche 		if (IS_ERR(fmr_pool)) {
557d1b4289eSBart Van Assche 			ret = PTR_ERR(fmr_pool);
558d1b4289eSBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
559d1b4289eSBart Van Assche 				     "FMR pool allocation failed (%d)\n", ret);
560d1b4289eSBart Van Assche 			goto err_qp;
561d1b4289eSBart Van Assche 		}
562d1b4289eSBart Van Assche 	}
563d1b4289eSBart Van Assche 
564509c07bcSBart Van Assche 	if (ch->qp)
5657dad6b2eSBart Van Assche 		srp_destroy_qp(ch);
566509c07bcSBart Van Assche 	if (ch->recv_cq)
5671dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->recv_cq);
568509c07bcSBart Van Assche 	if (ch->send_cq)
5691dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->send_cq);
57073aa89edSIshai Rabinovitz 
571509c07bcSBart Van Assche 	ch->qp = qp;
572509c07bcSBart Van Assche 	ch->recv_cq = recv_cq;
573509c07bcSBart Van Assche 	ch->send_cq = send_cq;
57473aa89edSIshai Rabinovitz 
5757fbc67dfSSagi Grimberg 	if (dev->use_fast_reg) {
5767fbc67dfSSagi Grimberg 		if (ch->fr_pool)
5777fbc67dfSSagi Grimberg 			srp_destroy_fr_pool(ch->fr_pool);
5787fbc67dfSSagi Grimberg 		ch->fr_pool = fr_pool;
5797fbc67dfSSagi Grimberg 	} else if (dev->use_fmr) {
5807fbc67dfSSagi Grimberg 		if (ch->fmr_pool)
5817fbc67dfSSagi Grimberg 			ib_destroy_fmr_pool(ch->fmr_pool);
5827fbc67dfSSagi Grimberg 		ch->fmr_pool = fmr_pool;
5837fbc67dfSSagi Grimberg 	}
5847fbc67dfSSagi Grimberg 
585da9d2f07SRoland Dreier 	kfree(init_attr);
586da9d2f07SRoland Dreier 	return 0;
587da9d2f07SRoland Dreier 
588da9d2f07SRoland Dreier err_qp:
5891dc7b1f1SChristoph Hellwig 	srp_destroy_qp(ch);
590da9d2f07SRoland Dreier 
591da9d2f07SRoland Dreier err_send_cq:
5921dc7b1f1SChristoph Hellwig 	ib_free_cq(send_cq);
593da9d2f07SRoland Dreier 
594da9d2f07SRoland Dreier err_recv_cq:
5951dc7b1f1SChristoph Hellwig 	ib_free_cq(recv_cq);
596da9d2f07SRoland Dreier 
597da9d2f07SRoland Dreier err:
598aef9ec39SRoland Dreier 	kfree(init_attr);
599aef9ec39SRoland Dreier 	return ret;
600aef9ec39SRoland Dreier }
601aef9ec39SRoland Dreier 
6024d73f95fSBart Van Assche /*
6034d73f95fSBart Van Assche  * Note: this function may be called without srp_alloc_iu_bufs() having been
604509c07bcSBart Van Assche  * invoked. Hence the ch->[rt]x_ring checks.
6054d73f95fSBart Van Assche  */
606509c07bcSBart Van Assche static void srp_free_ch_ib(struct srp_target_port *target,
607509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch)
608aef9ec39SRoland Dreier {
6095cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
610aef9ec39SRoland Dreier 	int i;
611aef9ec39SRoland Dreier 
612d92c0da7SBart Van Assche 	if (!ch->target)
613d92c0da7SBart Van Assche 		return;
614d92c0da7SBart Van Assche 
615509c07bcSBart Van Assche 	if (ch->cm_id) {
616509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
617509c07bcSBart Van Assche 		ch->cm_id = NULL;
618394c595eSBart Van Assche 	}
619394c595eSBart Van Assche 
620d92c0da7SBart Van Assche 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
621d92c0da7SBart Van Assche 	if (!ch->qp)
622d92c0da7SBart Van Assche 		return;
623d92c0da7SBart Van Assche 
6245cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
625509c07bcSBart Van Assche 		if (ch->fr_pool)
626509c07bcSBart Van Assche 			srp_destroy_fr_pool(ch->fr_pool);
627002f1567SBart Van Assche 	} else if (dev->use_fmr) {
628509c07bcSBart Van Assche 		if (ch->fmr_pool)
629509c07bcSBart Van Assche 			ib_destroy_fmr_pool(ch->fmr_pool);
6305cfb1782SBart Van Assche 	}
6311dc7b1f1SChristoph Hellwig 
6327dad6b2eSBart Van Assche 	srp_destroy_qp(ch);
6331dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->send_cq);
6341dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->recv_cq);
635aef9ec39SRoland Dreier 
636d92c0da7SBart Van Assche 	/*
637d92c0da7SBart Van Assche 	 * Prevent the SCSI error handler from using this channel after it
638d92c0da7SBart Van Assche 	 * has been freed, since the error handler may keep trying to perform
639d92c0da7SBart Van Assche 	 * recovery actions after scsi_remove_host() has returned.
641d92c0da7SBart Van Assche 	 */
642d92c0da7SBart Van Assche 	ch->target = NULL;
643d92c0da7SBart Van Assche 
644509c07bcSBart Van Assche 	ch->qp = NULL;
645509c07bcSBart Van Assche 	ch->send_cq = ch->recv_cq = NULL;
64673aa89edSIshai Rabinovitz 
647509c07bcSBart Van Assche 	if (ch->rx_ring) {
6484d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
649509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
650509c07bcSBart Van Assche 		kfree(ch->rx_ring);
651509c07bcSBart Van Assche 		ch->rx_ring = NULL;
6524d73f95fSBart Van Assche 	}
653509c07bcSBart Van Assche 	if (ch->tx_ring) {
6544d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
655509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
656509c07bcSBart Van Assche 		kfree(ch->tx_ring);
657509c07bcSBart Van Assche 		ch->tx_ring = NULL;
6584d73f95fSBart Van Assche 	}
659aef9ec39SRoland Dreier }
660aef9ec39SRoland Dreier 
661aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
662aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
663509c07bcSBart Van Assche 				    void *ch_ptr)
664aef9ec39SRoland Dreier {
665509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
666509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
667aef9ec39SRoland Dreier 
668509c07bcSBart Van Assche 	ch->status = status;
669aef9ec39SRoland Dreier 	if (status)
6707aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
6717aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
672aef9ec39SRoland Dreier 	else
673509c07bcSBart Van Assche 		ch->path = *pathrec;
674509c07bcSBart Van Assche 	complete(&ch->done);
675aef9ec39SRoland Dreier }
676aef9ec39SRoland Dreier 
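/**
 * srp_lookup_path() - issue an SA path record query for a channel
 * @ch: SRP RDMA channel.
 *
 * Sleeps until the path record query has completed or has been interrupted.
 */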
677509c07bcSBart Van Assche static int srp_lookup_path(struct srp_rdma_ch *ch)
678aef9ec39SRoland Dreier {
679509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
680a702adceSBart Van Assche 	int ret;
681a702adceSBart Van Assche 
682509c07bcSBart Van Assche 	ch->path.numb_path = 1;
683aef9ec39SRoland Dreier 
684509c07bcSBart Van Assche 	init_completion(&ch->done);
685aef9ec39SRoland Dreier 
686509c07bcSBart Van Assche 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
68705321937SGreg Kroah-Hartman 					       target->srp_host->srp_dev->dev,
688aef9ec39SRoland Dreier 					       target->srp_host->port,
689509c07bcSBart Van Assche 					       &ch->path,
690247e020eSSean Hefty 					       IB_SA_PATH_REC_SERVICE_ID |
691aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_DGID	 |
692aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_SGID	 |
693aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_NUMB_PATH	 |
694aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_PKEY,
695aef9ec39SRoland Dreier 					       SRP_PATH_REC_TIMEOUT_MS,
696aef9ec39SRoland Dreier 					       GFP_KERNEL,
697aef9ec39SRoland Dreier 					       srp_path_rec_completion,
698509c07bcSBart Van Assche 					       ch, &ch->path_query);
699509c07bcSBart Van Assche 	if (ch->path_query_id < 0)
700509c07bcSBart Van Assche 		return ch->path_query_id;
701aef9ec39SRoland Dreier 
702509c07bcSBart Van Assche 	ret = wait_for_completion_interruptible(&ch->done);
703a702adceSBart Van Assche 	if (ret < 0)
704a702adceSBart Van Assche 		return ret;
705aef9ec39SRoland Dreier 
706509c07bcSBart Van Assche 	if (ch->status < 0)
7077aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
7087aa54bd7SDavid Dillow 			     PFX "Path record query failed\n");
709aef9ec39SRoland Dreier 
710509c07bcSBart Van Assche 	return ch->status;
711aef9ec39SRoland Dreier }
712aef9ec39SRoland Dreier 
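/**
 * srp_send_req() - build and send an SRP_LOGIN_REQ via the IB CM
 * @ch:      SRP RDMA channel.
 * @multich: Whether to set SRP_MULTICHAN_MULTI instead of
 *           SRP_MULTICHAN_SINGLE in the login request.
 */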
713d92c0da7SBart Van Assche static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
714aef9ec39SRoland Dreier {
715509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
716aef9ec39SRoland Dreier 	struct {
717aef9ec39SRoland Dreier 		struct ib_cm_req_param param;
718aef9ec39SRoland Dreier 		struct srp_login_req   priv;
719aef9ec39SRoland Dreier 	} *req = NULL;
720aef9ec39SRoland Dreier 	int status;
721aef9ec39SRoland Dreier 
722aef9ec39SRoland Dreier 	req = kzalloc(sizeof *req, GFP_KERNEL);
723aef9ec39SRoland Dreier 	if (!req)
724aef9ec39SRoland Dreier 		return -ENOMEM;
725aef9ec39SRoland Dreier 
726509c07bcSBart Van Assche 	req->param.primary_path		      = &ch->path;
727aef9ec39SRoland Dreier 	req->param.alternate_path 	      = NULL;
728aef9ec39SRoland Dreier 	req->param.service_id 		      = target->service_id;
729509c07bcSBart Van Assche 	req->param.qp_num		      = ch->qp->qp_num;
730509c07bcSBart Van Assche 	req->param.qp_type		      = ch->qp->qp_type;
731aef9ec39SRoland Dreier 	req->param.private_data 	      = &req->priv;
732aef9ec39SRoland Dreier 	req->param.private_data_len 	      = sizeof req->priv;
733aef9ec39SRoland Dreier 	req->param.flow_control 	      = 1;
734aef9ec39SRoland Dreier 
735aef9ec39SRoland Dreier 	get_random_bytes(&req->param.starting_psn, 4);
736aef9ec39SRoland Dreier 	req->param.starting_psn 	     &= 0xffffff;
737aef9ec39SRoland Dreier 
738aef9ec39SRoland Dreier 	/*
739aef9ec39SRoland Dreier 	 * Pick some arbitrary defaults here; we could make these
740aef9ec39SRoland Dreier 	 * module parameters if anyone cared about setting them.
741aef9ec39SRoland Dreier 	 */
742aef9ec39SRoland Dreier 	req->param.responder_resources	      = 4;
743aef9ec39SRoland Dreier 	req->param.remote_cm_response_timeout = 20;
744aef9ec39SRoland Dreier 	req->param.local_cm_response_timeout  = 20;
7457bb312e4SVu Pham 	req->param.retry_count                = target->tl_retry_count;
746aef9ec39SRoland Dreier 	req->param.rnr_retry_count 	      = 7;
747aef9ec39SRoland Dreier 	req->param.max_cm_retries 	      = 15;
748aef9ec39SRoland Dreier 
749aef9ec39SRoland Dreier 	req->priv.opcode     	= SRP_LOGIN_REQ;
750aef9ec39SRoland Dreier 	req->priv.tag        	= 0;
75149248644SDavid Dillow 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
752aef9ec39SRoland Dreier 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
753aef9ec39SRoland Dreier 					      SRP_BUF_FORMAT_INDIRECT);
754d92c0da7SBart Van Assche 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
755d92c0da7SBart Van Assche 				   SRP_MULTICHAN_SINGLE);
7560c0450dbSRamachandra K 	/*
7570c0450dbSRamachandra K 	 * In the published SRP specification (draft rev. 16a), the
7580c0450dbSRamachandra K 	 * port identifier format is 8 bytes of ID extension followed
7590c0450dbSRamachandra K 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
7600c0450dbSRamachandra K 	 * opposite order, so that the GUID comes first.
7610c0450dbSRamachandra K 	 *
7620c0450dbSRamachandra K 	 * Targets conforming to these obsolete drafts can be
7630c0450dbSRamachandra K 	 * recognized by the I/O Class they report.
7640c0450dbSRamachandra K 	 */
7650c0450dbSRamachandra K 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
7660c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
767747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7680c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id + 8,
76901cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
7700c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
7710c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
7720c0450dbSRamachandra K 	} else {
7730c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
77401cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
77501cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
776747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7770c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
7780c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
7790c0450dbSRamachandra K 	}
7800c0450dbSRamachandra K 
781aef9ec39SRoland Dreier 	/*
782aef9ec39SRoland Dreier 	 * Topspin/Cisco SRP targets will reject our login unless we
78301cb9bcbSIshai Rabinovitz 	 * zero out the first 8 bytes of our initiator port ID and set
78401cb9bcbSIshai Rabinovitz 	 * the second 8 bytes to the local node GUID.
785aef9ec39SRoland Dreier 	 */
7865d7cbfd6SRoland Dreier 	if (srp_target_is_topspin(target)) {
7877aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
7887aa54bd7SDavid Dillow 			     PFX "Topspin/Cisco initiator port ID workaround "
789aef9ec39SRoland Dreier 			     "activated for target GUID %016llx\n",
79045c37cadSBart Van Assche 			     be64_to_cpu(target->ioc_guid));
791aef9ec39SRoland Dreier 		memset(req->priv.initiator_port_id, 0, 8);
79201cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
79305321937SGreg Kroah-Hartman 		       &target->srp_host->srp_dev->dev->node_guid, 8);
794aef9ec39SRoland Dreier 	}
795aef9ec39SRoland Dreier 
796509c07bcSBart Van Assche 	status = ib_send_cm_req(ch->cm_id, &req->param);
797aef9ec39SRoland Dreier 
798aef9ec39SRoland Dreier 	kfree(req);
799aef9ec39SRoland Dreier 
800aef9ec39SRoland Dreier 	return status;
801aef9ec39SRoland Dreier }
802aef9ec39SRoland Dreier 
803ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
804ef6c49d8SBart Van Assche {
805ef6c49d8SBart Van Assche 	bool changed = false;
806ef6c49d8SBart Van Assche 
807ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
808ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
809ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
810ef6c49d8SBart Van Assche 		changed = true;
811ef6c49d8SBart Van Assche 	}
812ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
813ef6c49d8SBart Van Assche 
814ef6c49d8SBart Van Assche 	if (changed)
815bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
816ef6c49d8SBart Van Assche 
817ef6c49d8SBart Van Assche 	return changed;
818ef6c49d8SBart Van Assche }
819ef6c49d8SBart Van Assche 
820aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
821aef9ec39SRoland Dreier {
822d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
823d92c0da7SBart Van Assche 	int i;
824509c07bcSBart Van Assche 
825aef9ec39SRoland Dreier 	/* XXX should send SRP_I_LOGOUT request */
826aef9ec39SRoland Dreier 
827d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
828d92c0da7SBart Van Assche 		ch = &target->ch[i];
829c014c8cdSBart Van Assche 		ch->connected = false;
830d92c0da7SBart Van Assche 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
8317aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
8327aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
833aef9ec39SRoland Dreier 		}
834294c875aSBart Van Assche 	}
835294c875aSBart Van Assche }
836aef9ec39SRoland Dreier 
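/**
 * srp_free_req_data() - unmap and free the request ring of a channel
 * @target: SRP target port.
 * @ch:     SRP RDMA channel.
 */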
837509c07bcSBart Van Assche static void srp_free_req_data(struct srp_target_port *target,
838509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
8398f26c9ffSDavid Dillow {
8405cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
8415cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
8428f26c9ffSDavid Dillow 	struct srp_request *req;
8438f26c9ffSDavid Dillow 	int i;
8448f26c9ffSDavid Dillow 
84547513cf4SBart Van Assche 	if (!ch->req_ring)
8464d73f95fSBart Van Assche 		return;
8474d73f95fSBart Van Assche 
8484d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
849509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8509a21be53SSagi Grimberg 		if (dev->use_fast_reg) {
8515cfb1782SBart Van Assche 			kfree(req->fr_list);
8529a21be53SSagi Grimberg 		} else {
8538f26c9ffSDavid Dillow 			kfree(req->fmr_list);
8548f26c9ffSDavid Dillow 			kfree(req->map_page);
8559a21be53SSagi Grimberg 		}
856c07d424dSDavid Dillow 		if (req->indirect_dma_addr) {
857c07d424dSDavid Dillow 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
858c07d424dSDavid Dillow 					    target->indirect_size,
859c07d424dSDavid Dillow 					    DMA_TO_DEVICE);
860c07d424dSDavid Dillow 		}
861c07d424dSDavid Dillow 		kfree(req->indirect_desc);
8628f26c9ffSDavid Dillow 	}
8634d73f95fSBart Van Assche 
864509c07bcSBart Van Assche 	kfree(ch->req_ring);
865509c07bcSBart Van Assche 	ch->req_ring = NULL;
8668f26c9ffSDavid Dillow }
8678f26c9ffSDavid Dillow 
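/**
 * srp_alloc_req_data() - allocate and DMA-map the request ring of a channel
 * @ch: SRP RDMA channel.
 *
 * Returns 0 upon success or -ENOMEM if an allocation or mapping fails.
 */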
868509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch)
869b81d00bdSBart Van Assche {
870509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
871b81d00bdSBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
872b81d00bdSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
873b81d00bdSBart Van Assche 	struct srp_request *req;
8745cfb1782SBart Van Assche 	void *mr_list;
875b81d00bdSBart Van Assche 	dma_addr_t dma_addr;
876b81d00bdSBart Van Assche 	int i, ret = -ENOMEM;
877b81d00bdSBart Van Assche 
878509c07bcSBart Van Assche 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
879509c07bcSBart Van Assche 			       GFP_KERNEL);
880509c07bcSBart Van Assche 	if (!ch->req_ring)
8814d73f95fSBart Van Assche 		goto out;
8824d73f95fSBart Van Assche 
8834d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
884509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8855cfb1782SBart Van Assche 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
886b81d00bdSBart Van Assche 				  GFP_KERNEL);
8875cfb1782SBart Van Assche 		if (!mr_list)
8885cfb1782SBart Van Assche 			goto out;
8899a21be53SSagi Grimberg 		if (srp_dev->use_fast_reg) {
8905cfb1782SBart Van Assche 			req->fr_list = mr_list;
8919a21be53SSagi Grimberg 		} else {
8925cfb1782SBart Van Assche 			req->fmr_list = mr_list;
89352ede08fSBart Van Assche 			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
894d1b4289eSBart Van Assche 						sizeof(void *), GFP_KERNEL);
8955cfb1782SBart Van Assche 			if (!req->map_page)
8965cfb1782SBart Van Assche 				goto out;
8979a21be53SSagi Grimberg 		}
898b81d00bdSBart Van Assche 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
8995cfb1782SBart Van Assche 		if (!req->indirect_desc)
900b81d00bdSBart Van Assche 			goto out;
901b81d00bdSBart Van Assche 
902b81d00bdSBart Van Assche 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
903b81d00bdSBart Van Assche 					     target->indirect_size,
904b81d00bdSBart Van Assche 					     DMA_TO_DEVICE);
905b81d00bdSBart Van Assche 		if (ib_dma_mapping_error(ibdev, dma_addr))
906b81d00bdSBart Van Assche 			goto out;
907b81d00bdSBart Van Assche 
908b81d00bdSBart Van Assche 		req->indirect_dma_addr = dma_addr;
909b81d00bdSBart Van Assche 	}
910b81d00bdSBart Van Assche 	ret = 0;
911b81d00bdSBart Van Assche 
912b81d00bdSBart Van Assche out:
913b81d00bdSBart Van Assche 	return ret;
914b81d00bdSBart Van Assche }
915b81d00bdSBart Van Assche 
916683b159aSBart Van Assche /**
917683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
918683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
919683b159aSBart Van Assche  *
920683b159aSBart Van Assche  * Note: Any attributes defined in the host template and that did not exist
921683b159aSBart Van Assche  * Note: Any attributes defined in the host template that did not exist
922683b159aSBart Van Assche  * before this function was invoked will be ignored.
923683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
924683b159aSBart Van Assche {
925683b159aSBart Van Assche 	struct device_attribute **attr;
926683b159aSBart Van Assche 
927683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
928683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
929683b159aSBart Van Assche }
930683b159aSBart Van Assche 
931ee12d6a8SBart Van Assche static void srp_remove_target(struct srp_target_port *target)
932ee12d6a8SBart Van Assche {
933d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
934d92c0da7SBart Van Assche 	int i;
935509c07bcSBart Van Assche 
936ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
937ef6c49d8SBart Van Assche 
938ee12d6a8SBart Van Assche 	srp_del_scsi_host_attr(target->scsi_host);
9399dd69a60SBart Van Assche 	srp_rport_get(target->rport);
940ee12d6a8SBart Van Assche 	srp_remove_host(target->scsi_host);
941ee12d6a8SBart Van Assche 	scsi_remove_host(target->scsi_host);
94293079162SBart Van Assche 	srp_stop_rport_timers(target->rport);
943ef6c49d8SBart Van Assche 	srp_disconnect_target(target);
944d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
945d92c0da7SBart Van Assche 		ch = &target->ch[i];
946509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
947d92c0da7SBart Van Assche 	}
948c1120f89SBart Van Assche 	cancel_work_sync(&target->tl_err_work);
9499dd69a60SBart Van Assche 	srp_rport_put(target->rport);
950d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
951d92c0da7SBart Van Assche 		ch = &target->ch[i];
952509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
953d92c0da7SBart Van Assche 	}
954d92c0da7SBart Van Assche 	kfree(target->ch);
955d92c0da7SBart Van Assche 	target->ch = NULL;
95665d7dd2fSVu Pham 
95765d7dd2fSVu Pham 	spin_lock(&target->srp_host->target_lock);
95865d7dd2fSVu Pham 	list_del(&target->list);
95965d7dd2fSVu Pham 	spin_unlock(&target->srp_host->target_lock);
96065d7dd2fSVu Pham 
961ee12d6a8SBart Van Assche 	scsi_host_put(target->scsi_host);
962ee12d6a8SBart Van Assche }
963ee12d6a8SBart Van Assche 
964c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
965aef9ec39SRoland Dreier {
966c4028958SDavid Howells 	struct srp_target_port *target =
967ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
968aef9ec39SRoland Dreier 
969ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
970aef9ec39SRoland Dreier 
97196fc248aSBart Van Assche 	srp_remove_target(target);
972aef9ec39SRoland Dreier }
973aef9ec39SRoland Dreier 
974dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport)
975dc1bdbd9SBart Van Assche {
976dc1bdbd9SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
977dc1bdbd9SBart Van Assche 
978dc1bdbd9SBart Van Assche 	srp_queue_remove_work(target);
979dc1bdbd9SBart Van Assche }
980dc1bdbd9SBart Van Assche 
981c014c8cdSBart Van Assche /**
982c014c8cdSBart Van Assche  * srp_connected_ch() - number of connected channels
983c014c8cdSBart Van Assche  * @target: SRP target port.
984c014c8cdSBart Van Assche  */
985c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target)
986c014c8cdSBart Van Assche {
987c014c8cdSBart Van Assche 	int i, c = 0;
988c014c8cdSBart Van Assche 
989c014c8cdSBart Van Assche 	for (i = 0; i < target->ch_count; i++)
990c014c8cdSBart Van Assche 		c += target->ch[i].connected;
991c014c8cdSBart Van Assche 
992c014c8cdSBart Van Assche 	return c;
993c014c8cdSBart Van Assche }
994c014c8cdSBart Van Assche 
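/**
 * srp_connect_ch() - establish an SRP login on an RDMA channel
 * @ch:      SRP RDMA channel.
 * @multich: Whether this is an additional channel of a multichannel target.
 *
 * Repeats the path lookup and login request as long as the CM handler
 * reports a port or DLID redirect.
 */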
995d92c0da7SBart Van Assche static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
996aef9ec39SRoland Dreier {
997509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
998aef9ec39SRoland Dreier 	int ret;
999aef9ec39SRoland Dreier 
1000c014c8cdSBart Van Assche 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1001294c875aSBart Van Assche 
1002509c07bcSBart Van Assche 	ret = srp_lookup_path(ch);
1003aef9ec39SRoland Dreier 	if (ret)
10044d59ad29SBart Van Assche 		goto out;
1005aef9ec39SRoland Dreier 
1006aef9ec39SRoland Dreier 	while (1) {
1007509c07bcSBart Van Assche 		init_completion(&ch->done);
1008d92c0da7SBart Van Assche 		ret = srp_send_req(ch, multich);
1009aef9ec39SRoland Dreier 		if (ret)
10104d59ad29SBart Van Assche 			goto out;
1011509c07bcSBart Van Assche 		ret = wait_for_completion_interruptible(&ch->done);
1012a702adceSBart Van Assche 		if (ret < 0)
10134d59ad29SBart Van Assche 			goto out;
1014aef9ec39SRoland Dreier 
1015aef9ec39SRoland Dreier 		/*
1016aef9ec39SRoland Dreier 		 * The CM event handling code will set status to
1017aef9ec39SRoland Dreier 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1018aef9ec39SRoland Dreier 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1019aef9ec39SRoland Dreier 		 * redirect REJ back.
1020aef9ec39SRoland Dreier 		 */
10214d59ad29SBart Van Assche 		ret = ch->status;
10224d59ad29SBart Van Assche 		switch (ret) {
1023aef9ec39SRoland Dreier 		case 0:
1024c014c8cdSBart Van Assche 			ch->connected = true;
10254d59ad29SBart Van Assche 			goto out;
1026aef9ec39SRoland Dreier 
1027aef9ec39SRoland Dreier 		case SRP_PORT_REDIRECT:
1028509c07bcSBart Van Assche 			ret = srp_lookup_path(ch);
1029aef9ec39SRoland Dreier 			if (ret)
10304d59ad29SBart Van Assche 				goto out;
1031aef9ec39SRoland Dreier 			break;
1032aef9ec39SRoland Dreier 
1033aef9ec39SRoland Dreier 		case SRP_DLID_REDIRECT:
1034aef9ec39SRoland Dreier 			break;
1035aef9ec39SRoland Dreier 
10369fe4bcf4SDavid Dillow 		case SRP_STALE_CONN:
10379fe4bcf4SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host, PFX
10389fe4bcf4SDavid Dillow 				     "giving up on stale connection\n");
10394d59ad29SBart Van Assche 			ret = -ECONNRESET;
10404d59ad29SBart Van Assche 			goto out;
10419fe4bcf4SDavid Dillow 
1042aef9ec39SRoland Dreier 		default:
10434d59ad29SBart Van Assche 			goto out;
1044aef9ec39SRoland Dreier 		}
1045aef9ec39SRoland Dreier 	}
10464d59ad29SBart Van Assche 
10474d59ad29SBart Van Assche out:
10484d59ad29SBart Van Assche 	return ret <= 0 ? ret : -ENODEV;
1049aef9ec39SRoland Dreier }
1050aef9ec39SRoland Dreier 
10511dc7b1f1SChristoph Hellwig static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
10521dc7b1f1SChristoph Hellwig {
10531dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "INV RKEY");
10541dc7b1f1SChristoph Hellwig }
10551dc7b1f1SChristoph Hellwig 
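/**
 * srp_inv_rkey() - post a local invalidation WR for a fast registration rkey
 * @req:  SRP request that owns the registration.
 * @ch:   SRP RDMA channel on whose QP the WR is posted.
 * @rkey: rkey to invalidate.
 */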
10561dc7b1f1SChristoph Hellwig static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
10571dc7b1f1SChristoph Hellwig 		u32 rkey)
10585cfb1782SBart Van Assche {
10595cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
10605cfb1782SBart Van Assche 	struct ib_send_wr wr = {
10615cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
10625cfb1782SBart Van Assche 		.next		    = NULL,
10635cfb1782SBart Van Assche 		.num_sge	    = 0,
10645cfb1782SBart Van Assche 		.send_flags	    = 0,
10655cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
10665cfb1782SBart Van Assche 	};
10675cfb1782SBart Van Assche 
10681dc7b1f1SChristoph Hellwig 	wr.wr_cqe = &req->reg_cqe;
10691dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_inv_rkey_err_done;
1070509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
10715cfb1782SBart Van Assche }
10725cfb1782SBart Van Assche 
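/*
 * Undo the memory registrations performed for @req (invalidate fast
 * registration rkeys or release FMR mappings) and DMA-unmap the
 * scatterlist of @scmnd.
 */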
1073d945e1dfSRoland Dreier static void srp_unmap_data(struct scsi_cmnd *scmnd,
1074509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch,
1075d945e1dfSRoland Dreier 			   struct srp_request *req)
1076d945e1dfSRoland Dreier {
1077509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
10785cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
10795cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
10805cfb1782SBart Van Assche 	int i, res;
10818f26c9ffSDavid Dillow 
1082bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) ||
1083d945e1dfSRoland Dreier 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1084d945e1dfSRoland Dreier 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1085d945e1dfSRoland Dreier 		return;
1086d945e1dfSRoland Dreier 
10875cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
10885cfb1782SBart Van Assche 		struct srp_fr_desc **pfr;
10895cfb1782SBart Van Assche 
10905cfb1782SBart Van Assche 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
10911dc7b1f1SChristoph Hellwig 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
10925cfb1782SBart Van Assche 			if (res < 0) {
10935cfb1782SBart Van Assche 				shost_printk(KERN_ERR, target->scsi_host, PFX
10945cfb1782SBart Van Assche 				  "Queueing INV WR for rkey %#x failed (%d)\n",
10955cfb1782SBart Van Assche 				  (*pfr)->mr->rkey, res);
10965cfb1782SBart Van Assche 				queue_work(system_long_wq,
10975cfb1782SBart Van Assche 					   &target->tl_err_work);
10985cfb1782SBart Van Assche 			}
10995cfb1782SBart Van Assche 		}
11005cfb1782SBart Van Assche 		if (req->nmdesc)
1101509c07bcSBart Van Assche 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
11025cfb1782SBart Van Assche 					req->nmdesc);
1103002f1567SBart Van Assche 	} else if (dev->use_fmr) {
11045cfb1782SBart Van Assche 		struct ib_pool_fmr **pfmr;
11055cfb1782SBart Van Assche 
11065cfb1782SBart Van Assche 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
11075cfb1782SBart Van Assche 			ib_fmr_pool_unmap(*pfmr);
11085cfb1782SBart Van Assche 	}
1109f5358a17SRoland Dreier 
11108f26c9ffSDavid Dillow 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
11118f26c9ffSDavid Dillow 			scmnd->sc_data_direction);
1112d945e1dfSRoland Dreier }
1113d945e1dfSRoland Dreier 
111422032991SBart Van Assche /**
111522032991SBart Van Assche  * srp_claim_req() - Take ownership of the scmnd associated with a request.
1116509c07bcSBart Van Assche  * @ch: SRP RDMA channel.
111722032991SBart Van Assche  * @req: SRP request.
1118b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership of @req->scmnd if it was queued to
1118b3fe628dSBart Van Assche  *        this SCSI device.
111922032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
112022032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
112122032991SBart Van Assche  *
112222032991SBart Van Assche  * Return value:
112322032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
112422032991SBart Van Assche  */
1125509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
112622032991SBart Van Assche 				       struct srp_request *req,
1127b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
112822032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1129526b4caaSIshai Rabinovitz {
113094a9174cSBart Van Assche 	unsigned long flags;
113194a9174cSBart Van Assche 
1132509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1133b3fe628dSBart Van Assche 	if (req->scmnd &&
1134b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1135b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
113622032991SBart Van Assche 		scmnd = req->scmnd;
113722032991SBart Van Assche 		req->scmnd = NULL;
113822032991SBart Van Assche 	} else {
113922032991SBart Van Assche 		scmnd = NULL;
114022032991SBart Van Assche 	}
1141509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
114222032991SBart Van Assche 
114322032991SBart Van Assche 	return scmnd;
114422032991SBart Van Assche }
114522032991SBart Van Assche 
114622032991SBart Van Assche /**
114722032991SBart Van Assche  * srp_free_req() - Unmap data and add request to the free request list.
1148509c07bcSBart Van Assche  * @ch:     SRP RDMA channel.
1149af24663bSBart Van Assche  * @req:    Request to be freed.
1150af24663bSBart Van Assche  * @scmnd:  SCSI command associated with @req.
1151af24663bSBart Van Assche  * @req_lim_delta: Amount to be added to @ch->req_lim.
115222032991SBart Van Assche  */
1153509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1154509c07bcSBart Van Assche 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
115522032991SBart Van Assche {
115622032991SBart Van Assche 	unsigned long flags;
115722032991SBart Van Assche 
1158509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
115922032991SBart Van Assche 
1160509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1161509c07bcSBart Van Assche 	ch->req_lim += req_lim_delta;
1162509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
1163526b4caaSIshai Rabinovitz }
1164526b4caaSIshai Rabinovitz 
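/*
 * Claim @req, unmap its data and complete the associated SCSI command with
 * @result. Does nothing if ownership of the command cannot be claimed.
 */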
1165509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1166509c07bcSBart Van Assche 			   struct scsi_device *sdev, int result)
1167526b4caaSIshai Rabinovitz {
1168509c07bcSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
116922032991SBart Van Assche 
117022032991SBart Van Assche 	if (scmnd) {
1171509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd, 0);
1172ed9b2264SBart Van Assche 		scmnd->result = result;
117322032991SBart Van Assche 		scmnd->scsi_done(scmnd);
117422032991SBart Van Assche 	}
1175526b4caaSIshai Rabinovitz }
1176526b4caaSIshai Rabinovitz 
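/*
 * Finish all outstanding requests on all channels of @rport with
 * DID_TRANSPORT_FAILFAST.
 */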
1177ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport)
1178aef9ec39SRoland Dreier {
1179ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1180d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1181b3fe628dSBart Van Assche 	struct Scsi_Host *shost = target->scsi_host;
1182b3fe628dSBart Van Assche 	struct scsi_device *sdev;
1183d92c0da7SBart Van Assche 	int i, j;
1184aef9ec39SRoland Dreier 
1185b3fe628dSBart Van Assche 	/*
1186b3fe628dSBart Van Assche 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1187b3fe628dSBart Van Assche 	 * is not safe. Hence the warning statement below.
1188b3fe628dSBart Van Assche 	 */
1189b3fe628dSBart Van Assche 	shost_for_each_device(sdev, shost)
1190b3fe628dSBart Van Assche 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1191b3fe628dSBart Van Assche 
1192d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1193d92c0da7SBart Van Assche 		ch = &target->ch[i];
1194509c07bcSBart Van Assche 
1195d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1196d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1197d92c0da7SBart Van Assche 
1198d92c0da7SBart Van Assche 			srp_finish_req(ch, req, NULL,
1199d92c0da7SBart Van Assche 				       DID_TRANSPORT_FAILFAST << 16);
1200d92c0da7SBart Van Assche 		}
1201ed9b2264SBart Van Assche 	}
1202ed9b2264SBart Van Assche }
1203ed9b2264SBart Van Assche 
1204ed9b2264SBart Van Assche /*
1205ed9b2264SBart Van Assche  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1206ed9b2264SBart Van Assche  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1207ed9b2264SBart Van Assche  * srp_reset_device() or srp_reset_host() calls will occur while this function
1208ed9b2264SBart Van Assche  * is in progress. One way to ensure this is not to call this function
1209ed9b2264SBart Van Assche  * directly but to call srp_reconnect_rport() instead, since that function
1210ed9b2264SBart Van Assche  * serializes calls of this function via rport->mutex and also blocks
1211ed9b2264SBart Van Assche  * srp_queuecommand() calls before invoking this function.
1212ed9b2264SBart Van Assche  */
1213ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport)
1214ed9b2264SBart Van Assche {
1215ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1216d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1217d92c0da7SBart Van Assche 	int i, j, ret = 0;
1218d92c0da7SBart Van Assche 	bool multich = false;
121909be70a2SBart Van Assche 
1220aef9ec39SRoland Dreier 	srp_disconnect_target(target);
122134aa654eSBart Van Assche 
122234aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
122334aa654eSBart Van Assche 		return -ENODEV;
122434aa654eSBart Van Assche 
1225aef9ec39SRoland Dreier 	/*
1226c7c4e7ffSBart Van Assche 	 * Now get a new local CM ID so that we avoid confusing the target in
1227c7c4e7ffSBart Van Assche 	 * case things are really fouled up. Doing so also ensures that all CM
1228c7c4e7ffSBart Van Assche 	 * callbacks will have finished before a new QP is allocated.
1229aef9ec39SRoland Dreier 	 */
1230d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1231d92c0da7SBart Van Assche 		ch = &target->ch[i];
1232d92c0da7SBart Van Assche 		ret += srp_new_cm_id(ch);
1233d92c0da7SBart Van Assche 	}
1234d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1235d92c0da7SBart Van Assche 		ch = &target->ch[i];
1236d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1237d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1238509c07bcSBart Van Assche 
1239509c07bcSBart Van Assche 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1240536ae14eSBart Van Assche 		}
1241d92c0da7SBart Van Assche 	}
1242d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1243d92c0da7SBart Van Assche 		ch = &target->ch[i];
12445cfb1782SBart Van Assche 		/*
12455cfb1782SBart Van Assche 		 * Whether or not creating a new CM ID succeeded, create a new
1246d92c0da7SBart Van Assche 		 * QP. This guarantees that all completion callback function
1247d92c0da7SBart Van Assche 		 * invocations have finished before request resetting starts.
12485cfb1782SBart Van Assche 		 */
1249509c07bcSBart Van Assche 		ret += srp_create_ch_ib(ch);
12505cfb1782SBart Van Assche 
1251509c07bcSBart Van Assche 		INIT_LIST_HEAD(&ch->free_tx);
1252d92c0da7SBart Van Assche 		for (j = 0; j < target->queue_size; ++j)
1253d92c0da7SBart Van Assche 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1254d92c0da7SBart Van Assche 	}
12558de9fe3aSBart Van Assche 
12568de9fe3aSBart Van Assche 	target->qp_in_error = false;
12578de9fe3aSBart Van Assche 
1258d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1259d92c0da7SBart Van Assche 		ch = &target->ch[i];
1260bbac5ccfSBart Van Assche 		if (ret)
1261d92c0da7SBart Van Assche 			break;
1262d92c0da7SBart Van Assche 		ret = srp_connect_ch(ch, multich);
1263d92c0da7SBart Van Assche 		multich = true;
1264d92c0da7SBart Van Assche 	}
126509be70a2SBart Van Assche 
1266ed9b2264SBart Van Assche 	if (ret == 0)
1267ed9b2264SBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
1268ed9b2264SBart Van Assche 			     PFX "reconnect succeeded\n");
1269aef9ec39SRoland Dreier 
1270aef9ec39SRoland Dreier 	return ret;
1271aef9ec39SRoland Dreier }
1272aef9ec39SRoland Dreier 
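/*
 * Append a direct data buffer descriptor for @dma_len bytes at @dma_addr,
 * registered under @rkey, to the descriptor list being built in @state.
 */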
12738f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
12748f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1275f5358a17SRoland Dreier {
12768f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
12778f26c9ffSDavid Dillow 
12783ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
12793ae95da8SBart Van Assche 
12808f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
12818f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
12828f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
12838f26c9ffSDavid Dillow 
12848f26c9ffSDavid Dillow 	state->total_len += dma_len;
12858f26c9ffSDavid Dillow 	state->desc++;
12868f26c9ffSDavid Dillow 	state->ndesc++;
12878f26c9ffSDavid Dillow }
12888f26c9ffSDavid Dillow 
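/*
 * Map the pages accumulated in @state through the channel's FMR pool and
 * append the resulting memory descriptor. A single page is described
 * directly via the global memory region when one is available.
 */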
12898f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state,
1290509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
12918f26c9ffSDavid Dillow {
1292186fbc66SBart Van Assche 	struct srp_target_port *target = ch->target;
1293186fbc66SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
12948f26c9ffSDavid Dillow 	struct ib_pool_fmr *fmr;
1295f5358a17SRoland Dreier 	u64 io_addr = 0;
12968f26c9ffSDavid Dillow 
1297f731ed62SBart Van Assche 	if (state->fmr.next >= state->fmr.end)
1298f731ed62SBart Van Assche 		return -ENOMEM;
1299f731ed62SBart Van Assche 
130026630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fmr);
130126630e8aSSagi Grimberg 
130226630e8aSSagi Grimberg 	if (state->npages == 0)
130326630e8aSSagi Grimberg 		return 0;
130426630e8aSSagi Grimberg 
130526630e8aSSagi Grimberg 	if (state->npages == 1 && target->global_mr) {
130626630e8aSSagi Grimberg 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
130726630e8aSSagi Grimberg 			     target->global_mr->rkey);
130826630e8aSSagi Grimberg 		goto reset_state;
130926630e8aSSagi Grimberg 	}
131026630e8aSSagi Grimberg 
1311509c07bcSBart Van Assche 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
13128f26c9ffSDavid Dillow 				   state->npages, io_addr);
13138f26c9ffSDavid Dillow 	if (IS_ERR(fmr))
13148f26c9ffSDavid Dillow 		return PTR_ERR(fmr);
13158f26c9ffSDavid Dillow 
1316f731ed62SBart Van Assche 	*state->fmr.next++ = fmr;
131752ede08fSBart Van Assche 	state->nmdesc++;
13188f26c9ffSDavid Dillow 
1319186fbc66SBart Van Assche 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1320186fbc66SBart Van Assche 		     state->dma_len, fmr->fmr->rkey);
1321539dde6fSBart Van Assche 
132226630e8aSSagi Grimberg reset_state:
132326630e8aSSagi Grimberg 	state->npages = 0;
132426630e8aSSagi Grimberg 	state->dma_len = 0;
132526630e8aSSagi Grimberg 
13268f26c9ffSDavid Dillow 	return 0;
13278f26c9ffSDavid Dillow }
13288f26c9ffSDavid Dillow 
13291dc7b1f1SChristoph Hellwig static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
13301dc7b1f1SChristoph Hellwig {
13311dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "FAST REG");
13321dc7b1f1SChristoph Hellwig }
13331dc7b1f1SChristoph Hellwig 
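/*
 * Register up to @sg_nents scatterlist entries with a fast registration
 * memory region and append the resulting descriptor. Returns the number of
 * scatterlist entries that were consumed or a negative error code.
 */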
13345cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state,
13351dc7b1f1SChristoph Hellwig 			     struct srp_request *req,
133657b0be9cSBart Van Assche 			     struct srp_rdma_ch *ch, int sg_nents)
13375cfb1782SBart Van Assche {
1338509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
13395cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
13405cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
1341f7f7aab1SSagi Grimberg 	struct ib_reg_wr wr;
13425cfb1782SBart Van Assche 	struct srp_fr_desc *desc;
13435cfb1782SBart Van Assche 	u32 rkey;
1344f7f7aab1SSagi Grimberg 	int n, err;
13455cfb1782SBart Van Assche 
1346f731ed62SBart Van Assche 	if (state->fr.next >= state->fr.end)
1347f731ed62SBart Van Assche 		return -ENOMEM;
1348f731ed62SBart Van Assche 
134926630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fast_reg);
135026630e8aSSagi Grimberg 
135157b0be9cSBart Van Assche 	if (sg_nents == 0)
135226630e8aSSagi Grimberg 		return 0;
135326630e8aSSagi Grimberg 
135457b0be9cSBart Van Assche 	if (sg_nents == 1 && target->global_mr) {
1355f7f7aab1SSagi Grimberg 		srp_map_desc(state, sg_dma_address(state->sg),
1356f7f7aab1SSagi Grimberg 			     sg_dma_len(state->sg),
135726630e8aSSagi Grimberg 			     target->global_mr->rkey);
1358f7f7aab1SSagi Grimberg 		return 1;
135926630e8aSSagi Grimberg 	}
136026630e8aSSagi Grimberg 
1361509c07bcSBart Van Assche 	desc = srp_fr_pool_get(ch->fr_pool);
13625cfb1782SBart Van Assche 	if (!desc)
13635cfb1782SBart Van Assche 		return -ENOMEM;
13645cfb1782SBart Van Assche 
13655cfb1782SBart Van Assche 	rkey = ib_inc_rkey(desc->mr->rkey);
13665cfb1782SBart Van Assche 	ib_update_fast_reg_key(desc->mr, rkey);
13675cfb1782SBart Van Assche 
136857b0be9cSBart Van Assche 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
1369f7f7aab1SSagi Grimberg 	if (unlikely(n < 0))
1370f7f7aab1SSagi Grimberg 		return n;
13715cfb1782SBart Van Assche 
13721dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_reg_mr_err_done;
13731dc7b1f1SChristoph Hellwig 
1374f7f7aab1SSagi Grimberg 	wr.wr.next = NULL;
1375f7f7aab1SSagi Grimberg 	wr.wr.opcode = IB_WR_REG_MR;
13761dc7b1f1SChristoph Hellwig 	wr.wr.wr_cqe = &req->reg_cqe;
1377f7f7aab1SSagi Grimberg 	wr.wr.num_sge = 0;
1378f7f7aab1SSagi Grimberg 	wr.wr.send_flags = 0;
1379f7f7aab1SSagi Grimberg 	wr.mr = desc->mr;
1380f7f7aab1SSagi Grimberg 	wr.key = desc->mr->rkey;
1381f7f7aab1SSagi Grimberg 	wr.access = (IB_ACCESS_LOCAL_WRITE |
13825cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_READ |
13835cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_WRITE);
13845cfb1782SBart Van Assche 
1385f731ed62SBart Van Assche 	*state->fr.next++ = desc;
13865cfb1782SBart Van Assche 	state->nmdesc++;
13875cfb1782SBart Van Assche 
1388f7f7aab1SSagi Grimberg 	srp_map_desc(state, desc->mr->iova,
1389f7f7aab1SSagi Grimberg 		     desc->mr->length, desc->mr->rkey);
13905cfb1782SBart Van Assche 
139126630e8aSSagi Grimberg 	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1392f7f7aab1SSagi Grimberg 	if (unlikely(err))
139326630e8aSSagi Grimberg 		return err;
139426630e8aSSagi Grimberg 
1395f7f7aab1SSagi Grimberg 	return n;
13965cfb1782SBart Van Assche }
13975cfb1782SBart Van Assche 
13988f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state,
1399509c07bcSBart Van Assche 			    struct srp_rdma_ch *ch,
14003ae95da8SBart Van Assche 			    struct scatterlist *sg, int sg_index)
14018f26c9ffSDavid Dillow {
1402509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
140305321937SGreg Kroah-Hartman 	struct srp_device *dev = target->srp_host->srp_dev;
140485507bccSRalph Campbell 	struct ib_device *ibdev = dev->dev;
14058f26c9ffSDavid Dillow 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1406bb350d1dSFUJITA Tomonori 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
14073ae95da8SBart Van Assche 	unsigned int len = 0;
14088f26c9ffSDavid Dillow 	int ret;
140985507bccSRalph Campbell 
14103ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
1411f5358a17SRoland Dreier 
14128f26c9ffSDavid Dillow 	while (dma_len) {
14135cfb1782SBart Van Assche 		unsigned offset = dma_addr & ~dev->mr_page_mask;
14145cfb1782SBart Van Assche 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1415f7f7aab1SSagi Grimberg 			ret = srp_map_finish_fmr(state, ch);
14168f26c9ffSDavid Dillow 			if (ret)
14178f26c9ffSDavid Dillow 				return ret;
141885507bccSRalph Campbell 		}
1419f5358a17SRoland Dreier 
14205cfb1782SBart Van Assche 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
14218f26c9ffSDavid Dillow 
14228f26c9ffSDavid Dillow 		if (!state->npages)
14238f26c9ffSDavid Dillow 			state->base_dma_addr = dma_addr;
14245cfb1782SBart Van Assche 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
142552ede08fSBart Van Assche 		state->dma_len += len;
14268f26c9ffSDavid Dillow 		dma_addr += len;
14278f26c9ffSDavid Dillow 		dma_len -= len;
1428f5358a17SRoland Dreier 	}
1429f5358a17SRoland Dreier 
14305cfb1782SBart Van Assche 	/*
14315cfb1782SBart Van Assche 	 * If the last entry of the MR wasn't a full page, then we need to
14328f26c9ffSDavid Dillow 	 * close it out and start a new one -- we can only merge at page
14338f26c9ffSDavid Dillow 	 * boundaries.
14348f26c9ffSDavid Dillow 	 */
1435f5358a17SRoland Dreier 	ret = 0;
14360e0d3a48SBart Van Assche 	if (len != dev->mr_page_size)
1437f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(state, ch);
1438f5358a17SRoland Dreier 	return ret;
1439f5358a17SRoland Dreier }
1440f5358a17SRoland Dreier 
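/*
 * The srp_map_sg_*() helpers below build the memory descriptor list for a
 * mapped scatterlist using FMR, fast registration or the global memory
 * region, respectively.
 */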
144126630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
144226630e8aSSagi Grimberg 			  struct srp_request *req, struct scatterlist *scat,
144326630e8aSSagi Grimberg 			  int count)
144426630e8aSSagi Grimberg {
144526630e8aSSagi Grimberg 	struct scatterlist *sg;
144626630e8aSSagi Grimberg 	int i, ret;
144726630e8aSSagi Grimberg 
144826630e8aSSagi Grimberg 	state->desc = req->indirect_desc;
144926630e8aSSagi Grimberg 	state->pages = req->map_page;
145026630e8aSSagi Grimberg 	state->fmr.next = req->fmr_list;
145126630e8aSSagi Grimberg 	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
145226630e8aSSagi Grimberg 
145326630e8aSSagi Grimberg 	for_each_sg(scat, sg, count, i) {
145426630e8aSSagi Grimberg 		ret = srp_map_sg_entry(state, ch, sg, i);
145526630e8aSSagi Grimberg 		if (ret)
145626630e8aSSagi Grimberg 			return ret;
145726630e8aSSagi Grimberg 	}
145826630e8aSSagi Grimberg 
1459f7f7aab1SSagi Grimberg 	ret = srp_map_finish_fmr(state, ch);
146026630e8aSSagi Grimberg 	if (ret)
146126630e8aSSagi Grimberg 		return ret;
146226630e8aSSagi Grimberg 
146326630e8aSSagi Grimberg 	req->nmdesc = state->nmdesc;
146426630e8aSSagi Grimberg 
146526630e8aSSagi Grimberg 	return 0;
146626630e8aSSagi Grimberg }
146726630e8aSSagi Grimberg 
146826630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
146926630e8aSSagi Grimberg 			 struct srp_request *req, struct scatterlist *scat,
147026630e8aSSagi Grimberg 			 int count)
147126630e8aSSagi Grimberg {
147226630e8aSSagi Grimberg 	state->desc = req->indirect_desc;
1473f7f7aab1SSagi Grimberg 	state->fr.next = req->fr_list;
1474f7f7aab1SSagi Grimberg 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1475f7f7aab1SSagi Grimberg 	state->sg = scat;
147626630e8aSSagi Grimberg 
147757b0be9cSBart Van Assche 	while (count) {
1478f7f7aab1SSagi Grimberg 		int i, n;
1479f7f7aab1SSagi Grimberg 
1480*c6333f9fSDoug Ledford 		n = srp_map_finish_fr(state, req, ch, count);
1481f7f7aab1SSagi Grimberg 		if (unlikely(n < 0))
1482f7f7aab1SSagi Grimberg 			return n;
1483f7f7aab1SSagi Grimberg 
148457b0be9cSBart Van Assche 		count -= n;
1485f7f7aab1SSagi Grimberg 		for (i = 0; i < n; i++)
1486f7f7aab1SSagi Grimberg 			state->sg = sg_next(state->sg);
148726630e8aSSagi Grimberg 	}
148826630e8aSSagi Grimberg 
148926630e8aSSagi Grimberg 	req->nmdesc = state->nmdesc;
149026630e8aSSagi Grimberg 
149126630e8aSSagi Grimberg 	return 0;
149226630e8aSSagi Grimberg }
149326630e8aSSagi Grimberg 
149426630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1495509c07bcSBart Van Assche 			  struct srp_request *req, struct scatterlist *scat,
1496509c07bcSBart Van Assche 			  int count)
149776bc1e1dSBart Van Assche {
1498509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
149976bc1e1dSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
150076bc1e1dSBart Van Assche 	struct scatterlist *sg;
150126630e8aSSagi Grimberg 	int i;
150276bc1e1dSBart Van Assche 
150376bc1e1dSBart Van Assche 	state->desc = req->indirect_desc;
15043ae95da8SBart Van Assche 	for_each_sg(scat, sg, count, i) {
15053ae95da8SBart Van Assche 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
150603f6fb93SBart Van Assche 			     ib_sg_dma_len(dev->dev, sg),
150703f6fb93SBart Van Assche 			     target->global_mr->rkey);
15083ae95da8SBart Van Assche 	}
150976bc1e1dSBart Van Assche 
151052ede08fSBart Van Assche 	req->nmdesc = state->nmdesc;
15115cfb1782SBart Van Assche 
151226630e8aSSagi Grimberg 	return 0;
151376bc1e1dSBart Van Assche }
151476bc1e1dSBart Van Assche 
1515330179f2SBart Van Assche /*
1516330179f2SBart Van Assche  * Register the indirect data buffer descriptor with the HCA.
1517330179f2SBart Van Assche  *
1518330179f2SBart Van Assche  * Note: since the indirect data buffer descriptor has been allocated with
1519330179f2SBart Van Assche  * kmalloc(), it is guaranteed that this buffer is a physically contiguous
1520330179f2SBart Van Assche  * memory buffer.
1521330179f2SBart Van Assche  */
1522330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1523330179f2SBart Van Assche 		       void **next_mr, void **end_mr, u32 idb_len,
1524330179f2SBart Van Assche 		       __be32 *idb_rkey)
1525330179f2SBart Van Assche {
1526330179f2SBart Van Assche 	struct srp_target_port *target = ch->target;
1527330179f2SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
1528330179f2SBart Van Assche 	struct srp_map_state state;
1529330179f2SBart Van Assche 	struct srp_direct_buf idb_desc;
1530330179f2SBart Van Assche 	u64 idb_pages[1];
1531f7f7aab1SSagi Grimberg 	struct scatterlist idb_sg[1];
1532330179f2SBart Van Assche 	int ret;
1533330179f2SBart Van Assche 
1534330179f2SBart Van Assche 	memset(&state, 0, sizeof(state));
1535330179f2SBart Van Assche 	memset(&idb_desc, 0, sizeof(idb_desc));
1536330179f2SBart Van Assche 	state.gen.next = next_mr;
1537330179f2SBart Van Assche 	state.gen.end = end_mr;
1538330179f2SBart Van Assche 	state.desc = &idb_desc;
1539f7f7aab1SSagi Grimberg 	state.base_dma_addr = req->indirect_dma_addr;
1540f7f7aab1SSagi Grimberg 	state.dma_len = idb_len;
1541f7f7aab1SSagi Grimberg 
1542f7f7aab1SSagi Grimberg 	if (dev->use_fast_reg) {
1543f7f7aab1SSagi Grimberg 		state.sg = idb_sg;
1544f7f7aab1SSagi Grimberg 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1545f7f7aab1SSagi Grimberg 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1546fc925518SChristoph Hellwig #ifdef CONFIG_NEED_SG_DMA_LENGTH
1547fc925518SChristoph Hellwig 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1548fc925518SChristoph Hellwig #endif
1549*c6333f9fSDoug Ledford 		ret = srp_map_finish_fr(&state, req, ch, 1);
1550f7f7aab1SSagi Grimberg 		if (ret < 0)
1551f7f7aab1SSagi Grimberg 			return ret;
1552f7f7aab1SSagi Grimberg 	} else if (dev->use_fmr) {
1553330179f2SBart Van Assche 		state.pages = idb_pages;
1554330179f2SBart Van Assche 		state.pages[0] = (req->indirect_dma_addr &
1555330179f2SBart Van Assche 				  dev->mr_page_mask);
1556330179f2SBart Van Assche 		state.npages = 1;
1557f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(&state, ch);
1558330179f2SBart Van Assche 		if (ret < 0)
1559f7f7aab1SSagi Grimberg 			return ret;
1560f7f7aab1SSagi Grimberg 	} else {
1561f7f7aab1SSagi Grimberg 		return -EINVAL;
1562f7f7aab1SSagi Grimberg 	}
1563330179f2SBart Van Assche 
1564330179f2SBart Van Assche 	*idb_rkey = idb_desc.key;
1565330179f2SBart Van Assche 
1566f7f7aab1SSagi Grimberg 	return 0;
1567330179f2SBart Van Assche }
1568330179f2SBart Van Assche 
1569509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1570aef9ec39SRoland Dreier 			struct srp_request *req)
1571aef9ec39SRoland Dreier {
1572509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
157376bc1e1dSBart Van Assche 	struct scatterlist *scat;
1574aef9ec39SRoland Dreier 	struct srp_cmd *cmd = req->cmd->buf;
1575330179f2SBart Van Assche 	int len, nents, count, ret;
157685507bccSRalph Campbell 	struct srp_device *dev;
157785507bccSRalph Campbell 	struct ib_device *ibdev;
15788f26c9ffSDavid Dillow 	struct srp_map_state state;
15798f26c9ffSDavid Dillow 	struct srp_indirect_buf *indirect_hdr;
1580330179f2SBart Van Assche 	u32 idb_len, table_len;
1581330179f2SBart Van Assche 	__be32 idb_rkey;
15828f26c9ffSDavid Dillow 	u8 fmt;
1583aef9ec39SRoland Dreier 
1584bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1585aef9ec39SRoland Dreier 		return sizeof (struct srp_cmd);
1586aef9ec39SRoland Dreier 
1587aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1588aef9ec39SRoland Dreier 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
15897aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
15907aa54bd7SDavid Dillow 			     PFX "Unhandled data direction %d\n",
1591aef9ec39SRoland Dreier 			     scmnd->sc_data_direction);
1592aef9ec39SRoland Dreier 		return -EINVAL;
1593aef9ec39SRoland Dreier 	}
1594aef9ec39SRoland Dreier 
1595bb350d1dSFUJITA Tomonori 	nents = scsi_sg_count(scmnd);
1596bb350d1dSFUJITA Tomonori 	scat  = scsi_sglist(scmnd);
1597aef9ec39SRoland Dreier 
159805321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev;
159985507bccSRalph Campbell 	ibdev = dev->dev;
160085507bccSRalph Campbell 
160185507bccSRalph Campbell 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
16028f26c9ffSDavid Dillow 	if (unlikely(count == 0))
16038f26c9ffSDavid Dillow 		return -EIO;
1604aef9ec39SRoland Dreier 
1605aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_DIRECT;
1606f5358a17SRoland Dreier 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1607f5358a17SRoland Dreier 
160803f6fb93SBart Van Assche 	if (count == 1 && target->global_mr) {
1609f5358a17SRoland Dreier 		/*
1610f5358a17SRoland Dreier 		 * The midlayer only generated a single gather/scatter
1611f5358a17SRoland Dreier 		 * entry, or DMA mapping coalesced everything to a
1612f5358a17SRoland Dreier 		 * single entry.  So a direct descriptor along with
1613f5358a17SRoland Dreier 		 * the DMA MR suffices.
1614f5358a17SRoland Dreier 		 */
1615f5358a17SRoland Dreier 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1616aef9ec39SRoland Dreier 
161785507bccSRalph Campbell 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
161803f6fb93SBart Van Assche 		buf->key = cpu_to_be32(target->global_mr->rkey);
161985507bccSRalph Campbell 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
16208f26c9ffSDavid Dillow 
162152ede08fSBart Van Assche 		req->nmdesc = 0;
16228f26c9ffSDavid Dillow 		goto map_complete;
16238f26c9ffSDavid Dillow 	}
16248f26c9ffSDavid Dillow 
16255cfb1782SBart Van Assche 	/*
16265cfb1782SBart Van Assche 	 * We have more than one scatter/gather entry, so build our indirect
16275cfb1782SBart Van Assche 	 * descriptor table, trying to merge as many entries as we can.
1628f5358a17SRoland Dreier 	 */
16298f26c9ffSDavid Dillow 	indirect_hdr = (void *) cmd->add_data;
16308f26c9ffSDavid Dillow 
1631c07d424dSDavid Dillow 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1632c07d424dSDavid Dillow 				   target->indirect_size, DMA_TO_DEVICE);
1633c07d424dSDavid Dillow 
16348f26c9ffSDavid Dillow 	memset(&state, 0, sizeof(state));
163526630e8aSSagi Grimberg 	if (dev->use_fast_reg)
163626630e8aSSagi Grimberg 		srp_map_sg_fr(&state, ch, req, scat, count);
163726630e8aSSagi Grimberg 	else if (dev->use_fmr)
163826630e8aSSagi Grimberg 		srp_map_sg_fmr(&state, ch, req, scat, count);
163926630e8aSSagi Grimberg 	else
164026630e8aSSagi Grimberg 		srp_map_sg_dma(&state, ch, req, scat, count);
16418f26c9ffSDavid Dillow 
1642c07d424dSDavid Dillow 	/* We've mapped the request, now pull as much of the indirect
1643c07d424dSDavid Dillow 	 * descriptor table as we can into the command buffer. If this
1644c07d424dSDavid Dillow 	 * target is not using an external indirect table, we are
1645c07d424dSDavid Dillow 	 * guaranteed to fit into the command, as the SCSI layer won't
1646c07d424dSDavid Dillow 	 * give us more S/G entries than we allow.
16478f26c9ffSDavid Dillow 	 */
16488f26c9ffSDavid Dillow 	if (state.ndesc == 1) {
16495cfb1782SBart Van Assche 		/*
16505cfb1782SBart Van Assche 		 * Memory registration collapsed the sg-list into one entry,
16518f26c9ffSDavid Dillow 		 * so use a direct descriptor.
16528f26c9ffSDavid Dillow 		 */
16538f26c9ffSDavid Dillow 		struct srp_direct_buf *buf = (void *) cmd->add_data;
16548f26c9ffSDavid Dillow 
1655c07d424dSDavid Dillow 		*buf = req->indirect_desc[0];
16568f26c9ffSDavid Dillow 		goto map_complete;
16578f26c9ffSDavid Dillow 	}
16588f26c9ffSDavid Dillow 
1659c07d424dSDavid Dillow 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1660c07d424dSDavid Dillow 						!target->allow_ext_sg)) {
1661c07d424dSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
1662c07d424dSDavid Dillow 			     "Could not fit S/G list into SRP_CMD\n");
1663c07d424dSDavid Dillow 		return -EIO;
1664c07d424dSDavid Dillow 	}
1665c07d424dSDavid Dillow 
1666c07d424dSDavid Dillow 	count = min(state.ndesc, target->cmd_sg_cnt);
16678f26c9ffSDavid Dillow 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1668330179f2SBart Van Assche 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1669aef9ec39SRoland Dreier 
1670aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_INDIRECT;
16718f26c9ffSDavid Dillow 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1672c07d424dSDavid Dillow 	len += count * sizeof (struct srp_direct_buf);
1673f5358a17SRoland Dreier 
1674c07d424dSDavid Dillow 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1675c07d424dSDavid Dillow 	       count * sizeof (struct srp_direct_buf));
167685507bccSRalph Campbell 
167703f6fb93SBart Van Assche 	if (!target->global_mr) {
1678330179f2SBart Van Assche 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1679330179f2SBart Van Assche 				  idb_len, &idb_rkey);
1680330179f2SBart Van Assche 		if (ret < 0)
1681330179f2SBart Van Assche 			return ret;
1682330179f2SBart Van Assche 		req->nmdesc++;
1683330179f2SBart Van Assche 	} else {
1684a745f4f4SBart Van Assche 		idb_rkey = cpu_to_be32(target->global_mr->rkey);
1685330179f2SBart Van Assche 	}
1686330179f2SBart Van Assche 
1687c07d424dSDavid Dillow 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1688330179f2SBart Van Assche 	indirect_hdr->table_desc.key = idb_rkey;
16898f26c9ffSDavid Dillow 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
16908f26c9ffSDavid Dillow 	indirect_hdr->len = cpu_to_be32(state.total_len);
1691aef9ec39SRoland Dreier 
1692aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1693c07d424dSDavid Dillow 		cmd->data_out_desc_cnt = count;
1694aef9ec39SRoland Dreier 	else
1695c07d424dSDavid Dillow 		cmd->data_in_desc_cnt = count;
1696c07d424dSDavid Dillow 
1697c07d424dSDavid Dillow 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1698c07d424dSDavid Dillow 				      DMA_TO_DEVICE);
1699aef9ec39SRoland Dreier 
17008f26c9ffSDavid Dillow map_complete:
1701aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1702aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt << 4;
1703aef9ec39SRoland Dreier 	else
1704aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt;
1705aef9ec39SRoland Dreier 
1706aef9ec39SRoland Dreier 	return len;
1707aef9ec39SRoland Dreier }
1708aef9ec39SRoland Dreier 
170905a1d750SDavid Dillow /*
171076c75b25SBart Van Assche  * Return an IU and possibly a credit to the free pool
171176c75b25SBart Van Assche  */
1712509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
171376c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
171476c75b25SBart Van Assche {
171576c75b25SBart Van Assche 	unsigned long flags;
171676c75b25SBart Van Assche 
1717509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1718509c07bcSBart Van Assche 	list_add(&iu->list, &ch->free_tx);
171976c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
1720509c07bcSBart Van Assche 		++ch->req_lim;
1721509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
172276c75b25SBart Van Assche }
172376c75b25SBart Van Assche 
172476c75b25SBart Van Assche /*
1725509c07bcSBart Van Assche  * Must be called with ch->lock held to protect req_lim and free_tx.
1726e9684678SBart Van Assche  * If IU is not sent, it must be returned using srp_put_tx_iu().
172705a1d750SDavid Dillow  *
172805a1d750SDavid Dillow  * Note:
172905a1d750SDavid Dillow  * An upper limit for the number of allocated information units for each
173005a1d750SDavid Dillow  * request type is:
173105a1d750SDavid Dillow  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
173205a1d750SDavid Dillow  *   more than Scsi_Host.can_queue requests.
173305a1d750SDavid Dillow  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
173405a1d750SDavid Dillow  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
173505a1d750SDavid Dillow  *   one unanswered SRP request to an initiator.
173605a1d750SDavid Dillow  */
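/*
 * Typical call pattern in this driver (see srp_response_common() and
 * srp_queuecommand()):
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	...
 *	if (srp_post_send(ch, iu, len))
 *		srp_put_tx_iu(ch, iu, SRP_IU_CMD);
 */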
1737509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
173805a1d750SDavid Dillow 				      enum srp_iu_type iu_type)
173905a1d750SDavid Dillow {
1740509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
174105a1d750SDavid Dillow 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
174205a1d750SDavid Dillow 	struct srp_iu *iu;
174305a1d750SDavid Dillow 
17441dc7b1f1SChristoph Hellwig 	ib_process_cq_direct(ch->send_cq, -1);
174505a1d750SDavid Dillow 
1746509c07bcSBart Van Assche 	if (list_empty(&ch->free_tx))
174705a1d750SDavid Dillow 		return NULL;
174805a1d750SDavid Dillow 
174905a1d750SDavid Dillow 	/* Initiator responses to target requests do not consume credits */
175076c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP) {
1751509c07bcSBart Van Assche 		if (ch->req_lim <= rsv) {
175205a1d750SDavid Dillow 			++target->zero_req_lim;
175305a1d750SDavid Dillow 			return NULL;
175405a1d750SDavid Dillow 		}
175505a1d750SDavid Dillow 
1756509c07bcSBart Van Assche 		--ch->req_lim;
175776c75b25SBart Van Assche 	}
175876c75b25SBart Van Assche 
1759509c07bcSBart Van Assche 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
176076c75b25SBart Van Assche 	list_del(&iu->list);
176105a1d750SDavid Dillow 	return iu;
176205a1d750SDavid Dillow }
176305a1d750SDavid Dillow 
17641dc7b1f1SChristoph Hellwig static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
17651dc7b1f1SChristoph Hellwig {
17661dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
17671dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
17681dc7b1f1SChristoph Hellwig 
17691dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
17701dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "SEND");
17711dc7b1f1SChristoph Hellwig 		return;
17721dc7b1f1SChristoph Hellwig 	}
17731dc7b1f1SChristoph Hellwig 
17741dc7b1f1SChristoph Hellwig 	list_add(&iu->list, &ch->free_tx);
17751dc7b1f1SChristoph Hellwig }
17761dc7b1f1SChristoph Hellwig 
1777509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
177805a1d750SDavid Dillow {
1779509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
178005a1d750SDavid Dillow 	struct ib_sge list;
178105a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
178205a1d750SDavid Dillow 
178305a1d750SDavid Dillow 	list.addr   = iu->dma;
178405a1d750SDavid Dillow 	list.length = len;
17859af76271SDavid Dillow 	list.lkey   = target->lkey;
178605a1d750SDavid Dillow 
17871dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_send_done;
17881dc7b1f1SChristoph Hellwig 
178905a1d750SDavid Dillow 	wr.next       = NULL;
17901dc7b1f1SChristoph Hellwig 	wr.wr_cqe     = &iu->cqe;
179105a1d750SDavid Dillow 	wr.sg_list    = &list;
179205a1d750SDavid Dillow 	wr.num_sge    = 1;
179305a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
179405a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
179505a1d750SDavid Dillow 
1796509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
179705a1d750SDavid Dillow }
179805a1d750SDavid Dillow 
1799509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1800c996bb47SBart Van Assche {
1801509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1802c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1803dcb4cb85SBart Van Assche 	struct ib_sge list;
1804c996bb47SBart Van Assche 
1805c996bb47SBart Van Assche 	list.addr   = iu->dma;
1806c996bb47SBart Van Assche 	list.length = iu->size;
18079af76271SDavid Dillow 	list.lkey   = target->lkey;
1808c996bb47SBart Van Assche 
18091dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_recv_done;
18101dc7b1f1SChristoph Hellwig 
1811c996bb47SBart Van Assche 	wr.next     = NULL;
18121dc7b1f1SChristoph Hellwig 	wr.wr_cqe   = &iu->cqe;
1813c996bb47SBart Van Assche 	wr.sg_list  = &list;
1814c996bb47SBart Van Assche 	wr.num_sge  = 1;
1815c996bb47SBart Van Assche 
1816509c07bcSBart Van Assche 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1817c996bb47SBart Van Assche }
1818c996bb47SBart Van Assche 
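/*
 * Process an SRP_RSP information unit: either record the status of a task
 * management request or complete the SCSI command identified by the
 * response tag.
 */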
1819509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1820aef9ec39SRoland Dreier {
1821509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1822aef9ec39SRoland Dreier 	struct srp_request *req;
1823aef9ec39SRoland Dreier 	struct scsi_cmnd *scmnd;
1824aef9ec39SRoland Dreier 	unsigned long flags;
1825aef9ec39SRoland Dreier 
1826aef9ec39SRoland Dreier 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1827509c07bcSBart Van Assche 		spin_lock_irqsave(&ch->lock, flags);
1828509c07bcSBart Van Assche 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1829509c07bcSBart Van Assche 		spin_unlock_irqrestore(&ch->lock, flags);
183094a9174cSBart Van Assche 
1831509c07bcSBart Van Assche 		ch->tsk_mgmt_status = -1;
1832f8b6e31eSDavid Dillow 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1833509c07bcSBart Van Assche 			ch->tsk_mgmt_status = rsp->data[3];
1834509c07bcSBart Van Assche 		complete(&ch->tsk_mgmt_done);
1835aef9ec39SRoland Dreier 	} else {
183677f2c1a4SBart Van Assche 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
183777f2c1a4SBart Van Assche 		if (scmnd) {
183877f2c1a4SBart Van Assche 			req = (void *)scmnd->host_scribble;
183977f2c1a4SBart Van Assche 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
184077f2c1a4SBart Van Assche 		}
184122032991SBart Van Assche 		if (!scmnd) {
18427aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
1843d92c0da7SBart Van Assche 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1844d92c0da7SBart Van Assche 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
184522032991SBart Van Assche 
1846509c07bcSBart Van Assche 			spin_lock_irqsave(&ch->lock, flags);
1847509c07bcSBart Van Assche 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1848509c07bcSBart Van Assche 			spin_unlock_irqrestore(&ch->lock, flags);
184922032991SBart Van Assche 
185022032991SBart Van Assche 			return;
185122032991SBart Van Assche 		}
1852aef9ec39SRoland Dreier 		scmnd->result = rsp->status;
1853aef9ec39SRoland Dreier 
1854aef9ec39SRoland Dreier 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1855aef9ec39SRoland Dreier 			memcpy(scmnd->sense_buffer, rsp->data +
1856aef9ec39SRoland Dreier 			       be32_to_cpu(rsp->resp_data_len),
1857aef9ec39SRoland Dreier 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1858aef9ec39SRoland Dreier 				     SCSI_SENSE_BUFFERSIZE));
1859aef9ec39SRoland Dreier 		}
1860aef9ec39SRoland Dreier 
1861e714531aSBart Van Assche 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1862bb350d1dSFUJITA Tomonori 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1863e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1864e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1865e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1866e714531aSBart Van Assche 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1867e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1868e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1869aef9ec39SRoland Dreier 
1870509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd,
187122032991SBart Van Assche 			     be32_to_cpu(rsp->req_lim_delta));
187222032991SBart Van Assche 
1873f8b6e31eSDavid Dillow 		scmnd->host_scribble = NULL;
1874aef9ec39SRoland Dreier 		scmnd->scsi_done(scmnd);
1875aef9ec39SRoland Dreier 	}
1876aef9ec39SRoland Dreier }
1877aef9ec39SRoland Dreier 
1878509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1879bb12588aSDavid Dillow 			       void *rsp, int len)
1880bb12588aSDavid Dillow {
1881509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
188276c75b25SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1883bb12588aSDavid Dillow 	unsigned long flags;
1884bb12588aSDavid Dillow 	struct srp_iu *iu;
188576c75b25SBart Van Assche 	int err;
1886bb12588aSDavid Dillow 
1887509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1888509c07bcSBart Van Assche 	ch->req_lim += req_delta;
1889509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1890509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
189176c75b25SBart Van Assche 
1892bb12588aSDavid Dillow 	if (!iu) {
1893bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1894bb12588aSDavid Dillow 			     "no IU available to send response\n");
189576c75b25SBart Van Assche 		return 1;
1896bb12588aSDavid Dillow 	}
1897bb12588aSDavid Dillow 
1898bb12588aSDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1899bb12588aSDavid Dillow 	memcpy(iu->buf, rsp, len);
1900bb12588aSDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1901bb12588aSDavid Dillow 
1902509c07bcSBart Van Assche 	err = srp_post_send(ch, iu, len);
190376c75b25SBart Van Assche 	if (err) {
1904bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1905bb12588aSDavid Dillow 			     "unable to post response: %d\n", err);
1906509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
190776c75b25SBart Van Assche 	}
1908bb12588aSDavid Dillow 
1909bb12588aSDavid Dillow 	return err;
1910bb12588aSDavid Dillow }
1911bb12588aSDavid Dillow 
1912509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch,
1913bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1914bb12588aSDavid Dillow {
1915bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1916bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1917bb12588aSDavid Dillow 		.tag = req->tag,
1918bb12588aSDavid Dillow 	};
1919bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1920bb12588aSDavid Dillow 
1921509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1922509c07bcSBart Van Assche 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1923bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1924bb12588aSDavid Dillow }
1925bb12588aSDavid Dillow 
1926509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch,
1927bb12588aSDavid Dillow 				struct srp_aer_req *req)
1928bb12588aSDavid Dillow {
1929509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1930bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1931bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1932bb12588aSDavid Dillow 		.tag = req->tag,
1933bb12588aSDavid Dillow 	};
1934bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1935bb12588aSDavid Dillow 
1936bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1937985aa495SBart Van Assche 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1938bb12588aSDavid Dillow 
1939509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1940bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1941bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1942bb12588aSDavid Dillow }
1943bb12588aSDavid Dillow 
19441dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1945aef9ec39SRoland Dreier {
19461dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
19471dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
1948509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1949dcb4cb85SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1950c996bb47SBart Van Assche 	int res;
1951aef9ec39SRoland Dreier 	u8 opcode;
1952aef9ec39SRoland Dreier 
19531dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
19541dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "RECV");
19551dc7b1f1SChristoph Hellwig 		return;
19561dc7b1f1SChristoph Hellwig 	}
19571dc7b1f1SChristoph Hellwig 
1958509c07bcSBart Van Assche 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
195985507bccSRalph Campbell 				   DMA_FROM_DEVICE);
1960aef9ec39SRoland Dreier 
1961aef9ec39SRoland Dreier 	opcode = *(u8 *) iu->buf;
1962aef9ec39SRoland Dreier 
1963aef9ec39SRoland Dreier 	if (0) {
19647aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
19657aa54bd7SDavid Dillow 			     PFX "recv completion, opcode 0x%02x\n", opcode);
19667a700811SBart Van Assche 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
19677a700811SBart Van Assche 			       iu->buf, wc->byte_len, true);
1968aef9ec39SRoland Dreier 	}
1969aef9ec39SRoland Dreier 
1970aef9ec39SRoland Dreier 	switch (opcode) {
1971aef9ec39SRoland Dreier 	case SRP_RSP:
1972509c07bcSBart Van Assche 		srp_process_rsp(ch, iu->buf);
1973aef9ec39SRoland Dreier 		break;
1974aef9ec39SRoland Dreier 
1975bb12588aSDavid Dillow 	case SRP_CRED_REQ:
1976509c07bcSBart Van Assche 		srp_process_cred_req(ch, iu->buf);
1977bb12588aSDavid Dillow 		break;
1978bb12588aSDavid Dillow 
1979bb12588aSDavid Dillow 	case SRP_AER_REQ:
1980509c07bcSBart Van Assche 		srp_process_aer_req(ch, iu->buf);
1981bb12588aSDavid Dillow 		break;
1982bb12588aSDavid Dillow 
1983aef9ec39SRoland Dreier 	case SRP_T_LOGOUT:
1984aef9ec39SRoland Dreier 		/* XXX Handle target logout */
19857aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19867aa54bd7SDavid Dillow 			     PFX "Got target logout request\n");
1987aef9ec39SRoland Dreier 		break;
1988aef9ec39SRoland Dreier 
1989aef9ec39SRoland Dreier 	default:
19907aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19917aa54bd7SDavid Dillow 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1992aef9ec39SRoland Dreier 		break;
1993aef9ec39SRoland Dreier 	}
1994aef9ec39SRoland Dreier 
1995509c07bcSBart Van Assche 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
199685507bccSRalph Campbell 				      DMA_FROM_DEVICE);
1997c996bb47SBart Van Assche 
1998509c07bcSBart Van Assche 	res = srp_post_recv(ch, iu);
1999c996bb47SBart Van Assche 	if (res != 0)
2000c996bb47SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
2001c996bb47SBart Van Assche 			     PFX "Recv failed with error code %d\n", res);
2002aef9ec39SRoland Dreier }
2003aef9ec39SRoland Dreier 
2004c1120f89SBart Van Assche /**
2005c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
2006af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
2007c1120f89SBart Van Assche  *
2008c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
2009c1120f89SBart Van Assche  * hence the target->rport test.
2010c1120f89SBart Van Assche  */
2011c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
2012c1120f89SBart Van Assche {
2013c1120f89SBart Van Assche 	struct srp_target_port *target;
2014c1120f89SBart Van Assche 
2015c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
2016c1120f89SBart Van Assche 	if (target->rport)
2017c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
2018c1120f89SBart Van Assche }
2019c1120f89SBart Van Assche 
20201dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
20211dc7b1f1SChristoph Hellwig 		const char *opname)
2022948d1e88SBart Van Assche {
20231dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
20247dad6b2eSBart Van Assche 	struct srp_target_port *target = ch->target;
20257dad6b2eSBart Van Assche 
2026c014c8cdSBart Van Assche 	if (ch->connected && !target->qp_in_error) {
20275cfb1782SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
20281dc7b1f1SChristoph Hellwig 			     PFX "failed %s status %s (%d) for CQE %p\n",
20291dc7b1f1SChristoph Hellwig 			     opname, ib_wc_status_msg(wc->status), wc->status,
20301dc7b1f1SChristoph Hellwig 			     wc->wr_cqe);
2031c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
20324f0af697SBart Van Assche 	}
2033948d1e88SBart Van Assche 	target->qp_in_error = true;
2034948d1e88SBart Van Assche }
2035948d1e88SBart Van Assche 
203676c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2037aef9ec39SRoland Dreier {
203876c75b25SBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2039a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
2040509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2041aef9ec39SRoland Dreier 	struct srp_request *req;
2042aef9ec39SRoland Dreier 	struct srp_iu *iu;
2043aef9ec39SRoland Dreier 	struct srp_cmd *cmd;
204485507bccSRalph Campbell 	struct ib_device *dev;
204576c75b25SBart Van Assche 	unsigned long flags;
204677f2c1a4SBart Van Assche 	u32 tag;
204777f2c1a4SBart Van Assche 	u16 idx;
2048d1b4289eSBart Van Assche 	int len, ret;
2049a95cadb9SBart Van Assche 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2050a95cadb9SBart Van Assche 
2051a95cadb9SBart Van Assche 	/*
2052a95cadb9SBart Van Assche 	 * The SCSI EH thread is the only context from which srp_queuecommand()
2053a95cadb9SBart Van Assche 	 * can get invoked for blocked devices (SDEV_BLOCK /
2054a95cadb9SBart Van Assche 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2055a95cadb9SBart Van Assche 	 * locking the rport mutex if invoked from inside the SCSI EH.
2056a95cadb9SBart Van Assche 	 */
2057a95cadb9SBart Van Assche 	if (in_scsi_eh)
2058a95cadb9SBart Van Assche 		mutex_lock(&rport->mutex);
2059aef9ec39SRoland Dreier 
2060d1b4289eSBart Van Assche 	scmnd->result = srp_chkready(target->rport);
2061d1b4289eSBart Van Assche 	if (unlikely(scmnd->result))
2062d1b4289eSBart Van Assche 		goto err;
20632ce19e72SBart Van Assche 
206477f2c1a4SBart Van Assche 	WARN_ON_ONCE(scmnd->request->tag < 0);
206577f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2066d92c0da7SBart Van Assche 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
206777f2c1a4SBart Van Assche 	idx = blk_mq_unique_tag_to_tag(tag);
206877f2c1a4SBart Van Assche 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
206977f2c1a4SBart Van Assche 		  dev_name(&shost->shost_gendev), tag, idx,
207077f2c1a4SBart Van Assche 		  target->req_ring_size);
2071509c07bcSBart Van Assche 
2072509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
2073509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2074509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
2075aef9ec39SRoland Dreier 
207677f2c1a4SBart Van Assche 	if (!iu)
207777f2c1a4SBart Van Assche 		goto err;
207877f2c1a4SBart Van Assche 
207977f2c1a4SBart Van Assche 	req = &ch->req_ring[idx];
208005321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev->dev;
208149248644SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
208285507bccSRalph Campbell 				   DMA_TO_DEVICE);
2083aef9ec39SRoland Dreier 
2084f8b6e31eSDavid Dillow 	scmnd->host_scribble = (void *) req;
2085aef9ec39SRoland Dreier 
2086aef9ec39SRoland Dreier 	cmd = iu->buf;
2087aef9ec39SRoland Dreier 	memset(cmd, 0, sizeof *cmd);
2088aef9ec39SRoland Dreier 
2089aef9ec39SRoland Dreier 	cmd->opcode = SRP_CMD;
2090985aa495SBart Van Assche 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
209177f2c1a4SBart Van Assche 	cmd->tag    = tag;
2092aef9ec39SRoland Dreier 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2093aef9ec39SRoland Dreier 
2094aef9ec39SRoland Dreier 	req->scmnd    = scmnd;
2095aef9ec39SRoland Dreier 	req->cmd      = iu;
2096aef9ec39SRoland Dreier 
2097509c07bcSBart Van Assche 	len = srp_map_data(scmnd, ch, req);
2098aef9ec39SRoland Dreier 	if (len < 0) {
20997aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
2100d1b4289eSBart Van Assche 			     PFX "Failed to map data (%d)\n", len);
2101d1b4289eSBart Van Assche 		/*
2102d1b4289eSBart Van Assche 		 * If we ran out of memory descriptors (-ENOMEM) because an
2103d1b4289eSBart Van Assche 		 * application is queuing many requests with more than
210452ede08fSBart Van Assche 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2105d1b4289eSBart Van Assche 		 * to reduce queue depth temporarily.
2106d1b4289eSBart Van Assche 		 */
2107d1b4289eSBart Van Assche 		scmnd->result = len == -ENOMEM ?
2108d1b4289eSBart Van Assche 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
210976c75b25SBart Van Assche 		goto err_iu;
2110aef9ec39SRoland Dreier 	}
2111aef9ec39SRoland Dreier 
211249248644SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
211385507bccSRalph Campbell 				      DMA_TO_DEVICE);
2114aef9ec39SRoland Dreier 
2115509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, len)) {
21167aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2117aef9ec39SRoland Dreier 		goto err_unmap;
2118aef9ec39SRoland Dreier 	}
2119aef9ec39SRoland Dreier 
2120d1b4289eSBart Van Assche 	ret = 0;
2121d1b4289eSBart Van Assche 
2122a95cadb9SBart Van Assche unlock_rport:
2123a95cadb9SBart Van Assche 	if (in_scsi_eh)
2124a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2125a95cadb9SBart Van Assche 
2126d1b4289eSBart Van Assche 	return ret;
2127aef9ec39SRoland Dreier 
2128aef9ec39SRoland Dreier err_unmap:
2129509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
2130aef9ec39SRoland Dreier 
213176c75b25SBart Van Assche err_iu:
2132509c07bcSBart Van Assche 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
213376c75b25SBart Van Assche 
2134024ca901SBart Van Assche 	/*
2135024ca901SBart Van Assche 	 * Prevent the loops that iterate over the request ring from
2136024ca901SBart Van Assche 	 * encountering a dangling SCSI command pointer.
2137024ca901SBart Van Assche 	 */
2138024ca901SBart Van Assche 	req->scmnd = NULL;
2139024ca901SBart Van Assche 
2140d1b4289eSBart Van Assche err:
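	/*
	 * If a SCSI result has already been set, complete the command here and
	 * report success to the mid-layer; otherwise return
	 * SCSI_MLQUEUE_HOST_BUSY so that the mid-layer requeues the command.
	 */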
2141d1b4289eSBart Van Assche 	if (scmnd->result) {
2142d1b4289eSBart Van Assche 		scmnd->scsi_done(scmnd);
2143d1b4289eSBart Van Assche 		ret = 0;
2144d1b4289eSBart Van Assche 	} else {
2145d1b4289eSBart Van Assche 		ret = SCSI_MLQUEUE_HOST_BUSY;
2146d1b4289eSBart Van Assche 	}
2147a95cadb9SBart Van Assche 
2148d1b4289eSBart Van Assche 	goto unlock_rport;
2149aef9ec39SRoland Dreier }
2150aef9ec39SRoland Dreier 
21514d73f95fSBart Van Assche /*
21524d73f95fSBart Van Assche  * Note: the resources allocated in this function are freed in
2153509c07bcSBart Van Assche  * srp_free_ch_ib().
21544d73f95fSBart Van Assche  */
2155509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2156aef9ec39SRoland Dreier {
2157509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2158aef9ec39SRoland Dreier 	int i;
2159aef9ec39SRoland Dreier 
2160509c07bcSBart Van Assche 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
21614d73f95fSBart Van Assche 			      GFP_KERNEL);
2162509c07bcSBart Van Assche 	if (!ch->rx_ring)
21634d73f95fSBart Van Assche 		goto err_no_ring;
2164509c07bcSBart Van Assche 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
21654d73f95fSBart Van Assche 			      GFP_KERNEL);
2166509c07bcSBart Van Assche 	if (!ch->tx_ring)
21674d73f95fSBart Van Assche 		goto err_no_ring;
21684d73f95fSBart Van Assche 
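	/*
	 * Receive IUs are sized for the largest information unit the target
	 * may send (max_ti_iu_len, negotiated at login time) and transmit IUs
	 * for the largest information unit this initiator may send
	 * (max_iu_len).
	 */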
21694d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2170509c07bcSBart Van Assche 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2171509c07bcSBart Van Assche 					      ch->max_ti_iu_len,
2172aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_FROM_DEVICE);
2173509c07bcSBart Van Assche 		if (!ch->rx_ring[i])
2174aef9ec39SRoland Dreier 			goto err;
2175aef9ec39SRoland Dreier 	}
2176aef9ec39SRoland Dreier 
21774d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2178509c07bcSBart Van Assche 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
217949248644SDavid Dillow 					      target->max_iu_len,
2180aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_TO_DEVICE);
2181509c07bcSBart Van Assche 		if (!ch->tx_ring[i])
2182aef9ec39SRoland Dreier 			goto err;
2183dcb4cb85SBart Van Assche 
2184509c07bcSBart Van Assche 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2185aef9ec39SRoland Dreier 	}
2186aef9ec39SRoland Dreier 
2187aef9ec39SRoland Dreier 	return 0;
2188aef9ec39SRoland Dreier 
2189aef9ec39SRoland Dreier err:
21904d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2191509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2192509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2193aef9ec39SRoland Dreier 	}
2194aef9ec39SRoland Dreier 
21954d73f95fSBart Van Assche 
21964d73f95fSBart Van Assche err_no_ring:
2197509c07bcSBart Van Assche 	kfree(ch->tx_ring);
2198509c07bcSBart Van Assche 	ch->tx_ring = NULL;
2199509c07bcSBart Van Assche 	kfree(ch->rx_ring);
2200509c07bcSBart Van Assche 	ch->rx_ring = NULL;
2201aef9ec39SRoland Dreier 
2202aef9ec39SRoland Dreier 	return -ENOMEM;
2203aef9ec39SRoland Dreier }
2204aef9ec39SRoland Dreier 
2205c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2206c9b03c1aSBart Van Assche {
2207c9b03c1aSBart Van Assche 	uint64_t T_tr_ns, max_compl_time_ms;
2208c9b03c1aSBart Van Assche 	uint32_t rq_tmo_jiffies;
2209c9b03c1aSBart Van Assche 
2210c9b03c1aSBart Van Assche 	/*
2211c9b03c1aSBart Van Assche 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2212c9b03c1aSBart Van Assche 	 * table 91), both the QP timeout and the retry count have to be set
2213c9b03c1aSBart Van Assche 	 * for RC QPs during the RTR to RTS transition.
2214c9b03c1aSBart Van Assche 	 */
2215c9b03c1aSBart Van Assche 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2216c9b03c1aSBart Van Assche 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2217c9b03c1aSBart Van Assche 
2218c9b03c1aSBart Van Assche 	/*
2219c9b03c1aSBart Van Assche 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2220c9b03c1aSBart Van Assche 	 * it can take before an error completion is generated. See also
2221c9b03c1aSBart Van Assche 	 * C9-140..142 in the IBTA spec for more information about how to
2222c9b03c1aSBart Van Assche 	 * convert the QP Local ACK Timeout value to nanoseconds.
2223c9b03c1aSBart Van Assche 	 */
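	/*
	 * Worked example with illustrative values: for a local ACK timeout of
	 * 19 and a retry count of 7, T_tr = 4096 * 2^19 ns ~= 2.15 s, the
	 * longest time until an error completion is 7 * 4 * 2.15 s ~= 60 s,
	 * and rq_tmo_jiffies then corresponds to roughly 61 seconds.
	 */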
2224c9b03c1aSBart Van Assche 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2225c9b03c1aSBart Van Assche 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2226c9b03c1aSBart Van Assche 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2227c9b03c1aSBart Van Assche 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2228c9b03c1aSBart Van Assche 
2229c9b03c1aSBart Van Assche 	return rq_tmo_jiffies;
2230c9b03c1aSBart Van Assche }
2231c9b03c1aSBart Van Assche 
2232961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2233e6300cbdSBart Van Assche 			       const struct srp_login_rsp *lrsp,
2234509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2235961e0be8SDavid Dillow {
2236509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2237961e0be8SDavid Dillow 	struct ib_qp_attr *qp_attr = NULL;
2238961e0be8SDavid Dillow 	int attr_mask = 0;
2239961e0be8SDavid Dillow 	int ret;
2240961e0be8SDavid Dillow 	int i;
2241961e0be8SDavid Dillow 
2242961e0be8SDavid Dillow 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2243509c07bcSBart Van Assche 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2244509c07bcSBart Van Assche 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2245961e0be8SDavid Dillow 
2246961e0be8SDavid Dillow 		/*
2247961e0be8SDavid Dillow 		 * Reserve credits for task management so we don't
2248961e0be8SDavid Dillow 		 * bounce requests back to the SCSI mid-layer.
2249961e0be8SDavid Dillow 		 */
2250961e0be8SDavid Dillow 		target->scsi_host->can_queue
2251509c07bcSBart Van Assche 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2252961e0be8SDavid Dillow 			      target->scsi_host->can_queue);
22534d73f95fSBart Van Assche 		target->scsi_host->cmd_per_lun
22544d73f95fSBart Van Assche 			= min_t(int, target->scsi_host->can_queue,
22554d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun);
2256961e0be8SDavid Dillow 	} else {
2257961e0be8SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
2258961e0be8SDavid Dillow 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2259961e0be8SDavid Dillow 		ret = -ECONNRESET;
2260961e0be8SDavid Dillow 		goto error;
2261961e0be8SDavid Dillow 	}
2262961e0be8SDavid Dillow 
2263509c07bcSBart Van Assche 	if (!ch->rx_ring) {
2264509c07bcSBart Van Assche 		ret = srp_alloc_iu_bufs(ch);
2265961e0be8SDavid Dillow 		if (ret)
2266961e0be8SDavid Dillow 			goto error;
2267961e0be8SDavid Dillow 	}
2268961e0be8SDavid Dillow 
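	/*
	 * Bring up the connection: transition the QP to RTR, post the initial
	 * receive buffers, transition the QP to RTS and finally acknowledge
	 * the connection by sending an RTU message.
	 */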
2269961e0be8SDavid Dillow 	ret = -ENOMEM;
2270961e0be8SDavid Dillow 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2271961e0be8SDavid Dillow 	if (!qp_attr)
2272961e0be8SDavid Dillow 		goto error;
2273961e0be8SDavid Dillow 
2274961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTR;
2275961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2276961e0be8SDavid Dillow 	if (ret)
2277961e0be8SDavid Dillow 		goto error_free;
2278961e0be8SDavid Dillow 
2279509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2280961e0be8SDavid Dillow 	if (ret)
2281961e0be8SDavid Dillow 		goto error_free;
2282961e0be8SDavid Dillow 
22834d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; i++) {
2284509c07bcSBart Van Assche 		struct srp_iu *iu = ch->rx_ring[i];
2285509c07bcSBart Van Assche 
2286509c07bcSBart Van Assche 		ret = srp_post_recv(ch, iu);
2287961e0be8SDavid Dillow 		if (ret)
2288961e0be8SDavid Dillow 			goto error_free;
2289961e0be8SDavid Dillow 	}
2290961e0be8SDavid Dillow 
2291961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTS;
2292961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2293961e0be8SDavid Dillow 	if (ret)
2294961e0be8SDavid Dillow 		goto error_free;
2295961e0be8SDavid Dillow 
2296c9b03c1aSBart Van Assche 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2297c9b03c1aSBart Van Assche 
2298509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2299961e0be8SDavid Dillow 	if (ret)
2300961e0be8SDavid Dillow 		goto error_free;
2301961e0be8SDavid Dillow 
2302961e0be8SDavid Dillow 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2303961e0be8SDavid Dillow 
2304961e0be8SDavid Dillow error_free:
2305961e0be8SDavid Dillow 	kfree(qp_attr);
2306961e0be8SDavid Dillow 
2307961e0be8SDavid Dillow error:
2308509c07bcSBart Van Assche 	ch->status = ret;
2309961e0be8SDavid Dillow }
2310961e0be8SDavid Dillow 
2311aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2312aef9ec39SRoland Dreier 			       struct ib_cm_event *event,
2313509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2314aef9ec39SRoland Dreier {
2315509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
23167aa54bd7SDavid Dillow 	struct Scsi_Host *shost = target->scsi_host;
2317aef9ec39SRoland Dreier 	struct ib_class_port_info *cpi;
2318aef9ec39SRoland Dreier 	int opcode;
2319aef9ec39SRoland Dreier 
2320aef9ec39SRoland Dreier 	switch (event->param.rej_rcvd.reason) {
2321aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_CM_REDIRECT:
2322aef9ec39SRoland Dreier 		cpi = event->param.rej_rcvd.ari;
2323509c07bcSBart Van Assche 		ch->path.dlid = cpi->redirect_lid;
2324509c07bcSBart Van Assche 		ch->path.pkey = cpi->redirect_pkey;
2325aef9ec39SRoland Dreier 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2326509c07bcSBart Van Assche 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2327aef9ec39SRoland Dreier 
2328509c07bcSBart Van Assche 		ch->status = ch->path.dlid ?
2329aef9ec39SRoland Dreier 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2330aef9ec39SRoland Dreier 		break;
2331aef9ec39SRoland Dreier 
2332aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_REDIRECT:
23335d7cbfd6SRoland Dreier 		if (srp_target_is_topspin(target)) {
2334aef9ec39SRoland Dreier 			/*
2335aef9ec39SRoland Dreier 			 * Topspin/Cisco SRP gateways incorrectly send
2336aef9ec39SRoland Dreier 			 * reject reason code 25 when they mean 24
2337aef9ec39SRoland Dreier 			 * (port redirect).
2338aef9ec39SRoland Dreier 			 */
2339509c07bcSBart Van Assche 			memcpy(ch->path.dgid.raw,
2340aef9ec39SRoland Dreier 			       event->param.rej_rcvd.ari, 16);
2341aef9ec39SRoland Dreier 
23427aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, shost,
23437aa54bd7SDavid Dillow 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2344509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2345509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2346aef9ec39SRoland Dreier 
2347509c07bcSBart Van Assche 			ch->status = SRP_PORT_REDIRECT;
2348aef9ec39SRoland Dreier 		} else {
23497aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23507aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2351509c07bcSBart Van Assche 			ch->status = -ECONNRESET;
2352aef9ec39SRoland Dreier 		}
2353aef9ec39SRoland Dreier 		break;
2354aef9ec39SRoland Dreier 
2355aef9ec39SRoland Dreier 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
23567aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost,
23577aa54bd7SDavid Dillow 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2358509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2359aef9ec39SRoland Dreier 		break;
2360aef9ec39SRoland Dreier 
2361aef9ec39SRoland Dreier 	case IB_CM_REJ_CONSUMER_DEFINED:
2362aef9ec39SRoland Dreier 		opcode = *(u8 *) event->private_data;
2363aef9ec39SRoland Dreier 		if (opcode == SRP_LOGIN_REJ) {
2364aef9ec39SRoland Dreier 			struct srp_login_rej *rej = event->private_data;
2365aef9ec39SRoland Dreier 			u32 reason = be32_to_cpu(rej->reason);
2366aef9ec39SRoland Dreier 
2367aef9ec39SRoland Dreier 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
23687aa54bd7SDavid Dillow 				shost_printk(KERN_WARNING, shost,
23697aa54bd7SDavid Dillow 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2370aef9ec39SRoland Dreier 			else
2371e7ffde01SBart Van Assche 				shost_printk(KERN_WARNING, shost, PFX
2372e7ffde01SBart Van Assche 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2373747fe000SBart Van Assche 					     target->sgid.raw,
2374747fe000SBart Van Assche 					     target->orig_dgid.raw, reason);
2375aef9ec39SRoland Dreier 		} else
23767aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23777aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2378aef9ec39SRoland Dreier 				     " opcode 0x%02x\n", opcode);
2379509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2380aef9ec39SRoland Dreier 		break;
2381aef9ec39SRoland Dreier 
23829fe4bcf4SDavid Dillow 	case IB_CM_REJ_STALE_CONN:
23839fe4bcf4SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2384509c07bcSBart Van Assche 		ch->status = SRP_STALE_CONN;
23859fe4bcf4SDavid Dillow 		break;
23869fe4bcf4SDavid Dillow 
2387aef9ec39SRoland Dreier 	default:
23887aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2389aef9ec39SRoland Dreier 			     event->param.rej_rcvd.reason);
2390509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2391aef9ec39SRoland Dreier 	}
2392aef9ec39SRoland Dreier }
2393aef9ec39SRoland Dreier 
2394aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2395aef9ec39SRoland Dreier {
2396509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = cm_id->context;
2397509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2398aef9ec39SRoland Dreier 	int comp = 0;
2399aef9ec39SRoland Dreier 
2400aef9ec39SRoland Dreier 	switch (event->event) {
2401aef9ec39SRoland Dreier 	case IB_CM_REQ_ERROR:
24027aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
24037aa54bd7SDavid Dillow 			     PFX "Sending CM REQ failed\n");
2404aef9ec39SRoland Dreier 		comp = 1;
2405509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2406aef9ec39SRoland Dreier 		break;
2407aef9ec39SRoland Dreier 
2408aef9ec39SRoland Dreier 	case IB_CM_REP_RECEIVED:
2409aef9ec39SRoland Dreier 		comp = 1;
2410509c07bcSBart Van Assche 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2411aef9ec39SRoland Dreier 		break;
2412aef9ec39SRoland Dreier 
2413aef9ec39SRoland Dreier 	case IB_CM_REJ_RECEIVED:
24147aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2415aef9ec39SRoland Dreier 		comp = 1;
2416aef9ec39SRoland Dreier 
2417509c07bcSBart Van Assche 		srp_cm_rej_handler(cm_id, event, ch);
2418aef9ec39SRoland Dreier 		break;
2419aef9ec39SRoland Dreier 
2420b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_RECEIVED:
24217aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24227aa54bd7SDavid Dillow 			     PFX "DREQ received - connection closed\n");
2423c014c8cdSBart Van Assche 		ch->connected = false;
2424b7ac4ab4SIshai Rabinovitz 		if (ib_send_cm_drep(cm_id, NULL, 0))
24257aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
24267aa54bd7SDavid Dillow 				     PFX "Sending CM DREP failed\n");
2427c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
2428aef9ec39SRoland Dreier 		break;
2429aef9ec39SRoland Dreier 
2430aef9ec39SRoland Dreier 	case IB_CM_TIMEWAIT_EXIT:
24317aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
24327aa54bd7SDavid Dillow 			     PFX "connection closed\n");
2433ac72d766SBart Van Assche 		comp = 1;
2434aef9ec39SRoland Dreier 
2435509c07bcSBart Van Assche 		ch->status = 0;
2436aef9ec39SRoland Dreier 		break;
2437aef9ec39SRoland Dreier 
2438b7ac4ab4SIshai Rabinovitz 	case IB_CM_MRA_RECEIVED:
2439b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_ERROR:
2440b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREP_RECEIVED:
2441b7ac4ab4SIshai Rabinovitz 		break;
2442b7ac4ab4SIshai Rabinovitz 
2443aef9ec39SRoland Dreier 	default:
24447aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24457aa54bd7SDavid Dillow 			     PFX "Unhandled CM event %d\n", event->event);
2446aef9ec39SRoland Dreier 		break;
2447aef9ec39SRoland Dreier 	}
2448aef9ec39SRoland Dreier 
2449aef9ec39SRoland Dreier 	if (comp)
2450509c07bcSBart Van Assche 		complete(&ch->done);
2451aef9ec39SRoland Dreier 
2452aef9ec39SRoland Dreier 	return 0;
2453aef9ec39SRoland Dreier }
2454aef9ec39SRoland Dreier 
245571444b97SJack Wang /**
245671444b97SJack Wang  * srp_change_queue_depth - setting device queue depth
245771444b97SJack Wang  * @sdev: scsi device struct
245871444b97SJack Wang  * @qdepth: requested queue depth
245971444b97SJack Wang  *
246071444b97SJack Wang  * Returns queue depth.
246171444b97SJack Wang  */
246271444b97SJack Wang static int
2463db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
246471444b97SJack Wang {
246571444b97SJack Wang 	if (!sdev->tagged_supported)
24661e6f2416SChristoph Hellwig 		qdepth = 1;
2467db5ed4dfSChristoph Hellwig 	return scsi_change_queue_depth(sdev, qdepth);
246871444b97SJack Wang }
246971444b97SJack Wang 
2470985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2471985aa495SBart Van Assche 			     u8 func)
2472aef9ec39SRoland Dreier {
2473509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2474a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
247519081f31SDavid Dillow 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2476aef9ec39SRoland Dreier 	struct srp_iu *iu;
2477aef9ec39SRoland Dreier 	struct srp_tsk_mgmt *tsk_mgmt;
2478aef9ec39SRoland Dreier 
2479c014c8cdSBart Van Assche 	if (!ch->connected || target->qp_in_error)
24803780d1f0SBart Van Assche 		return -1;
24813780d1f0SBart Van Assche 
2482509c07bcSBart Van Assche 	init_completion(&ch->tsk_mgmt_done);
2483aef9ec39SRoland Dreier 
2484a95cadb9SBart Van Assche 	/*
2485509c07bcSBart Van Assche 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2486a95cadb9SBart Van Assche 	 * invoked while a task management function is being sent.
2487a95cadb9SBart Van Assche 	 */
2488a95cadb9SBart Van Assche 	mutex_lock(&rport->mutex);
2489509c07bcSBart Van Assche 	spin_lock_irq(&ch->lock);
2490509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2491509c07bcSBart Van Assche 	spin_unlock_irq(&ch->lock);
249276c75b25SBart Van Assche 
2493a95cadb9SBart Van Assche 	if (!iu) {
2494a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2495a95cadb9SBart Van Assche 
249676c75b25SBart Van Assche 		return -1;
2497a95cadb9SBart Van Assche 	}
2498aef9ec39SRoland Dreier 
249919081f31SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
250019081f31SDavid Dillow 				   DMA_TO_DEVICE);
2501aef9ec39SRoland Dreier 	tsk_mgmt = iu->buf;
2502aef9ec39SRoland Dreier 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2503aef9ec39SRoland Dreier 
2504aef9ec39SRoland Dreier 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2505985aa495SBart Van Assche 	int_to_scsilun(lun, &tsk_mgmt->lun);
2506f8b6e31eSDavid Dillow 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2507aef9ec39SRoland Dreier 	tsk_mgmt->tsk_mgmt_func = func;
2508f8b6e31eSDavid Dillow 	tsk_mgmt->task_tag	= req_tag;
2509aef9ec39SRoland Dreier 
251019081f31SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
251119081f31SDavid Dillow 				      DMA_TO_DEVICE);
2512509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2513509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2514a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2515a95cadb9SBart Van Assche 
251676c75b25SBart Van Assche 		return -1;
251776c75b25SBart Van Assche 	}
2518a95cadb9SBart Van Assche 	mutex_unlock(&rport->mutex);
2519d945e1dfSRoland Dreier 
2520509c07bcSBart Van Assche 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2521aef9ec39SRoland Dreier 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2522d945e1dfSRoland Dreier 		return -1;
2523aef9ec39SRoland Dreier 
2524d945e1dfSRoland Dreier 	return 0;
2525d945e1dfSRoland Dreier }
2526d945e1dfSRoland Dreier 
2527aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd)
2528aef9ec39SRoland Dreier {
2529d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2530f8b6e31eSDavid Dillow 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
253177f2c1a4SBart Van Assche 	u32 tag;
2532d92c0da7SBart Van Assche 	u16 ch_idx;
2533509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2534086f44f5SBart Van Assche 	int ret;
2535d945e1dfSRoland Dreier 
25367aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2537aef9ec39SRoland Dreier 
2538d92c0da7SBart Van Assche 	if (!req)
253999b6697aSBart Van Assche 		return SUCCESS;
254077f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2541d92c0da7SBart Van Assche 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2542d92c0da7SBart Van Assche 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2543d92c0da7SBart Van Assche 		return SUCCESS;
2544d92c0da7SBart Van Assche 	ch = &target->ch[ch_idx];
2545d92c0da7SBart Van Assche 	if (!srp_claim_req(ch, req, NULL, scmnd))
2546d92c0da7SBart Van Assche 		return SUCCESS;
2547d92c0da7SBart Van Assche 	shost_printk(KERN_ERR, target->scsi_host,
2548d92c0da7SBart Van Assche 		     "Sending SRP abort for tag %#x\n", tag);
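	/*
	 * Map the outcome to a SCSI error-handler return value: SUCCESS if the
	 * abort was sent, FAST_IO_FAIL if it could not be sent because the
	 * transport layer has already given up on the rport, and FAILED
	 * otherwise.
	 */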
254977f2c1a4SBart Van Assche 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
255080d5e8a2SBart Van Assche 			      SRP_TSK_ABORT_TASK) == 0)
2551086f44f5SBart Van Assche 		ret = SUCCESS;
2552ed9b2264SBart Van Assche 	else if (target->rport->state == SRP_RPORT_LOST)
255399e1c139SBart Van Assche 		ret = FAST_IO_FAIL;
2554086f44f5SBart Van Assche 	else
2555086f44f5SBart Van Assche 		ret = FAILED;
2556509c07bcSBart Van Assche 	srp_free_req(ch, req, scmnd, 0);
2557d945e1dfSRoland Dreier 	scmnd->result = DID_ABORT << 16;
2558d8536670SBart Van Assche 	scmnd->scsi_done(scmnd);
2559d945e1dfSRoland Dreier 
2560086f44f5SBart Van Assche 	return ret;
2561aef9ec39SRoland Dreier }
2562aef9ec39SRoland Dreier 
2563aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2564aef9ec39SRoland Dreier {
2565d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2566d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2567536ae14eSBart Van Assche 	int i, j;
2568d945e1dfSRoland Dreier 
25697aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2570aef9ec39SRoland Dreier 
2571d92c0da7SBart Van Assche 	ch = &target->ch[0];
2572509c07bcSBart Van Assche 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2573f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2574d945e1dfSRoland Dreier 		return FAILED;
2575509c07bcSBart Van Assche 	if (ch->tsk_mgmt_status)
2576d945e1dfSRoland Dreier 		return FAILED;
2577d945e1dfSRoland Dreier 
2578d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2579d92c0da7SBart Van Assche 		ch = &target->ch[i];
25804d73f95fSBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
2581509c07bcSBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
2582509c07bcSBart Van Assche 
2583509c07bcSBart Van Assche 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2584536ae14eSBart Van Assche 		}
2585d92c0da7SBart Van Assche 	}
2586d945e1dfSRoland Dreier 
2587d945e1dfSRoland Dreier 	return SUCCESS;
2588aef9ec39SRoland Dreier }
2589aef9ec39SRoland Dreier 
2590aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2591aef9ec39SRoland Dreier {
2592aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2593aef9ec39SRoland Dreier 
25947aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2595aef9ec39SRoland Dreier 
2596ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2597aef9ec39SRoland Dreier }
2598aef9ec39SRoland Dreier 
2599c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2600c9b03c1aSBart Van Assche {
2601c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2602c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2603c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2604c9b03c1aSBart Van Assche 	unsigned long timeout;
2605c9b03c1aSBart Van Assche 
2606c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2607c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2608c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2609c9b03c1aSBart Van Assche 	}
2610c9b03c1aSBart Van Assche 
2611c9b03c1aSBart Van Assche 	return 0;
2612c9b03c1aSBart Van Assche }
2613c9b03c1aSBart Van Assche 
2614ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2615ee959b00STony Jones 			   char *buf)
26166ecb0c84SRoland Dreier {
2617ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26186ecb0c84SRoland Dreier 
261945c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
26206ecb0c84SRoland Dreier }
26216ecb0c84SRoland Dreier 
2622ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2623ee959b00STony Jones 			     char *buf)
26246ecb0c84SRoland Dreier {
2625ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26266ecb0c84SRoland Dreier 
262745c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
26286ecb0c84SRoland Dreier }
26296ecb0c84SRoland Dreier 
2630ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2631ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
26326ecb0c84SRoland Dreier {
2633ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26346ecb0c84SRoland Dreier 
263545c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
26366ecb0c84SRoland Dreier }
26376ecb0c84SRoland Dreier 
2638ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2639ee959b00STony Jones 			 char *buf)
26406ecb0c84SRoland Dreier {
2641ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26426ecb0c84SRoland Dreier 
2643747fe000SBart Van Assche 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
26446ecb0c84SRoland Dreier }
26456ecb0c84SRoland Dreier 
2646848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2647848b3082SBart Van Assche 			 char *buf)
2648848b3082SBart Van Assche {
2649848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2650848b3082SBart Van Assche 
2651747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2652848b3082SBart Van Assche }
2653848b3082SBart Van Assche 
2654ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2655ee959b00STony Jones 			 char *buf)
26566ecb0c84SRoland Dreier {
2657ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2658d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch = &target->ch[0];
26596ecb0c84SRoland Dreier 
2660509c07bcSBart Van Assche 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
26616ecb0c84SRoland Dreier }
26626ecb0c84SRoland Dreier 
2663ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2664ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
26653633b3d0SIshai Rabinovitz {
2666ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26673633b3d0SIshai Rabinovitz 
2668747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
26693633b3d0SIshai Rabinovitz }
26703633b3d0SIshai Rabinovitz 
267189de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
267289de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
267389de7486SBart Van Assche {
267489de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2675d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2676d92c0da7SBart Van Assche 	int i, req_lim = INT_MAX;
267789de7486SBart Van Assche 
2678d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2679d92c0da7SBart Van Assche 		ch = &target->ch[i];
2680d92c0da7SBart Van Assche 		req_lim = min(req_lim, ch->req_lim);
2681d92c0da7SBart Van Assche 	}
2682d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", req_lim);
268389de7486SBart Van Assche }
268489de7486SBart Van Assche 
2685ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2686ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
26876bfa24faSRoland Dreier {
2688ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26896bfa24faSRoland Dreier 
26906bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
26916bfa24faSRoland Dreier }
26926bfa24faSRoland Dreier 
2693ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2694ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2695ded7f1a1SIshai Rabinovitz {
2696ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697ded7f1a1SIshai Rabinovitz 
2698ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2699ded7f1a1SIshai Rabinovitz }
2700ded7f1a1SIshai Rabinovitz 
2701ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2702ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2703ded7f1a1SIshai Rabinovitz {
2704ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2705ded7f1a1SIshai Rabinovitz 
270605321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2707ded7f1a1SIshai Rabinovitz }
2708ded7f1a1SIshai Rabinovitz 
2709d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2710d92c0da7SBart Van Assche 			     char *buf)
2711d92c0da7SBart Van Assche {
2712d92c0da7SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2713d92c0da7SBart Van Assche 
2714d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", target->ch_count);
2715d92c0da7SBart Van Assche }
2716d92c0da7SBart Van Assche 
27174b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
27184b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
27194b5e5f41SBart Van Assche {
27204b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27214b5e5f41SBart Van Assche 
27224b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
27234b5e5f41SBart Van Assche }
27244b5e5f41SBart Van Assche 
27257bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
27267bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
27277bb312e4SVu Pham {
27287bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27297bb312e4SVu Pham 
27307bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
27317bb312e4SVu Pham }
27327bb312e4SVu Pham 
273349248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
273449248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
273549248644SDavid Dillow {
273649248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
273749248644SDavid Dillow 
273849248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
273949248644SDavid Dillow }
274049248644SDavid Dillow 
2741c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2742c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2743c07d424dSDavid Dillow {
2744c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2745c07d424dSDavid Dillow 
2746c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2747c07d424dSDavid Dillow }
2748c07d424dSDavid Dillow 
2749ee959b00STony Jones static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2750ee959b00STony Jones static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2751ee959b00STony Jones static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2752ee959b00STony Jones static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2753848b3082SBart Van Assche static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2754ee959b00STony Jones static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2755ee959b00STony Jones static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
275689de7486SBart Van Assche static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2757ee959b00STony Jones static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2758ee959b00STony Jones static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2759ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2760d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
27614b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
27627bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
276349248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2764c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
27656ecb0c84SRoland Dreier 
2766ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = {
2767ee959b00STony Jones 	&dev_attr_id_ext,
2768ee959b00STony Jones 	&dev_attr_ioc_guid,
2769ee959b00STony Jones 	&dev_attr_service_id,
2770ee959b00STony Jones 	&dev_attr_pkey,
2771848b3082SBart Van Assche 	&dev_attr_sgid,
2772ee959b00STony Jones 	&dev_attr_dgid,
2773ee959b00STony Jones 	&dev_attr_orig_dgid,
277489de7486SBart Van Assche 	&dev_attr_req_lim,
2775ee959b00STony Jones 	&dev_attr_zero_req_lim,
2776ee959b00STony Jones 	&dev_attr_local_ib_port,
2777ee959b00STony Jones 	&dev_attr_local_ib_device,
2778d92c0da7SBart Van Assche 	&dev_attr_ch_count,
27794b5e5f41SBart Van Assche 	&dev_attr_comp_vector,
27807bb312e4SVu Pham 	&dev_attr_tl_retry_count,
278149248644SDavid Dillow 	&dev_attr_cmd_sg_entries,
2782c07d424dSDavid Dillow 	&dev_attr_allow_ext_sg,
27836ecb0c84SRoland Dreier 	NULL
27846ecb0c84SRoland Dreier };
27856ecb0c84SRoland Dreier 
2786aef9ec39SRoland Dreier static struct scsi_host_template srp_template = {
2787aef9ec39SRoland Dreier 	.module				= THIS_MODULE,
2788b7f008fdSRoland Dreier 	.name				= "InfiniBand SRP initiator",
2789b7f008fdSRoland Dreier 	.proc_name			= DRV_NAME,
2790c9b03c1aSBart Van Assche 	.slave_configure		= srp_slave_configure,
2791aef9ec39SRoland Dreier 	.info				= srp_target_info,
2792aef9ec39SRoland Dreier 	.queuecommand			= srp_queuecommand,
279371444b97SJack Wang 	.change_queue_depth             = srp_change_queue_depth,
2794aef9ec39SRoland Dreier 	.eh_abort_handler		= srp_abort,
2795aef9ec39SRoland Dreier 	.eh_device_reset_handler	= srp_reset_device,
2796aef9ec39SRoland Dreier 	.eh_host_reset_handler		= srp_reset_host,
27972742c1daSBart Van Assche 	.skip_settle_delay		= true,
279849248644SDavid Dillow 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
27994d73f95fSBart Van Assche 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2800aef9ec39SRoland Dreier 	.this_id			= -1,
28014d73f95fSBart Van Assche 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
28026ecb0c84SRoland Dreier 	.use_clustering			= ENABLE_CLUSTERING,
280377f2c1a4SBart Van Assche 	.shost_attrs			= srp_host_attrs,
2804c40ecc12SChristoph Hellwig 	.track_queue_depth		= 1,
2805aef9ec39SRoland Dreier };
2806aef9ec39SRoland Dreier 
280734aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host)
280834aa654eSBart Van Assche {
280934aa654eSBart Van Assche 	struct scsi_device *sdev;
281034aa654eSBart Van Assche 	int c = 0;
281134aa654eSBart Van Assche 
281234aa654eSBart Van Assche 	shost_for_each_device(sdev, host)
281334aa654eSBart Van Assche 		c++;
281434aa654eSBart Van Assche 
281534aa654eSBart Van Assche 	return c;
281634aa654eSBart Van Assche }
281734aa654eSBart Van Assche 
2818bc44bd1dSBart Van Assche /*
2819bc44bd1dSBart Van Assche  * Return values:
2820bc44bd1dSBart Van Assche  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2821bc44bd1dSBart Van Assche  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2822bc44bd1dSBart Van Assche  *    removal has been scheduled.
2823bc44bd1dSBart Van Assche  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2824bc44bd1dSBart Van Assche  */
2825aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2826aef9ec39SRoland Dreier {
28273236822bSFUJITA Tomonori 	struct srp_rport_identifiers ids;
28283236822bSFUJITA Tomonori 	struct srp_rport *rport;
28293236822bSFUJITA Tomonori 
283034aa654eSBart Van Assche 	target->state = SRP_TARGET_SCANNING;
2831aef9ec39SRoland Dreier 	sprintf(target->target_name, "SRP.T10:%016llX",
283245c37cadSBart Van Assche 		be64_to_cpu(target->id_ext));
2833aef9ec39SRoland Dreier 
283405321937SGreg Kroah-Hartman 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2835aef9ec39SRoland Dreier 		return -ENODEV;
2836aef9ec39SRoland Dreier 
28373236822bSFUJITA Tomonori 	memcpy(ids.port_id, &target->id_ext, 8);
28383236822bSFUJITA Tomonori 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2839aebd5e47SFUJITA Tomonori 	ids.roles = SRP_RPORT_ROLE_TARGET;
28403236822bSFUJITA Tomonori 	rport = srp_rport_add(target->scsi_host, &ids);
28413236822bSFUJITA Tomonori 	if (IS_ERR(rport)) {
28423236822bSFUJITA Tomonori 		scsi_remove_host(target->scsi_host);
28433236822bSFUJITA Tomonori 		return PTR_ERR(rport);
28443236822bSFUJITA Tomonori 	}
28453236822bSFUJITA Tomonori 
2846dc1bdbd9SBart Van Assche 	rport->lld_data = target;
28479dd69a60SBart Van Assche 	target->rport = rport;
2848dc1bdbd9SBart Van Assche 
2849b3589fd4SMatthew Wilcox 	spin_lock(&host->target_lock);
2850aef9ec39SRoland Dreier 	list_add_tail(&target->list, &host->target_list);
2851b3589fd4SMatthew Wilcox 	spin_unlock(&host->target_lock);
2852aef9ec39SRoland Dreier 
2853aef9ec39SRoland Dreier 	scsi_scan_target(&target->scsi_host->shost_gendev,
28541962a4a1SMatthew Wilcox 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2855aef9ec39SRoland Dreier 
2856c014c8cdSBart Van Assche 	if (srp_connected_ch(target) < target->ch_count ||
2857c014c8cdSBart Van Assche 	    target->qp_in_error) {
285834aa654eSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
285934aa654eSBart Van Assche 			     PFX "SCSI scan failed - removing SCSI host\n");
286034aa654eSBart Van Assche 		srp_queue_remove_work(target);
286134aa654eSBart Van Assche 		goto out;
286234aa654eSBart Van Assche 	}
286334aa654eSBart Van Assche 
286434aa654eSBart Van Assche 	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
286534aa654eSBart Van Assche 		 dev_name(&target->scsi_host->shost_gendev),
286634aa654eSBart Van Assche 		 srp_sdev_count(target->scsi_host));
286734aa654eSBart Van Assche 
286834aa654eSBart Van Assche 	spin_lock_irq(&target->lock);
286934aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
287034aa654eSBart Van Assche 		target->state = SRP_TARGET_LIVE;
287134aa654eSBart Van Assche 	spin_unlock_irq(&target->lock);
287234aa654eSBart Van Assche 
287334aa654eSBart Van Assche out:
2874aef9ec39SRoland Dreier 	return 0;
2875aef9ec39SRoland Dreier }
2876aef9ec39SRoland Dreier 
2877ee959b00STony Jones static void srp_release_dev(struct device *dev)
2878aef9ec39SRoland Dreier {
2879aef9ec39SRoland Dreier 	struct srp_host *host =
2880ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2881aef9ec39SRoland Dreier 
2882aef9ec39SRoland Dreier 	complete(&host->released);
2883aef9ec39SRoland Dreier }
2884aef9ec39SRoland Dreier 
2885aef9ec39SRoland Dreier static struct class srp_class = {
2886aef9ec39SRoland Dreier 	.name    = "infiniband_srp",
2887ee959b00STony Jones 	.dev_release = srp_release_dev
2888aef9ec39SRoland Dreier };
2889aef9ec39SRoland Dreier 
289096fc248aSBart Van Assche /**
289196fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2892af24663bSBart Van Assche  * @host:   SRP host.
2893af24663bSBart Van Assche  * @target: SRP target port.
289496fc248aSBart Van Assche  */
289596fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
289696fc248aSBart Van Assche 			    struct srp_target_port *target)
289796fc248aSBart Van Assche {
289896fc248aSBart Van Assche 	struct srp_target_port *t;
289996fc248aSBart Van Assche 	bool ret = false;
290096fc248aSBart Van Assche 
290196fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
290296fc248aSBart Van Assche 		goto out;
290396fc248aSBart Van Assche 
290496fc248aSBart Van Assche 	ret = true;
290596fc248aSBart Van Assche 
290696fc248aSBart Van Assche 	spin_lock(&host->target_lock);
290796fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
290896fc248aSBart Van Assche 		if (t != target &&
290996fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
291096fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
291196fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
291296fc248aSBart Van Assche 			ret = false;
291396fc248aSBart Van Assche 			break;
291496fc248aSBart Van Assche 		}
291596fc248aSBart Van Assche 	}
291696fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
291796fc248aSBart Van Assche 
291896fc248aSBart Van Assche out:
291996fc248aSBart Van Assche 	return ret;
292096fc248aSBart Van Assche }
292196fc248aSBart Van Assche 
2922aef9ec39SRoland Dreier /*
2923aef9ec39SRoland Dreier  * Target ports are added by writing
2924aef9ec39SRoland Dreier  *
2925aef9ec39SRoland Dreier  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2926aef9ec39SRoland Dreier  *     pkey=<P_Key>,service_id=<service ID>
2927aef9ec39SRoland Dreier  *
2928aef9ec39SRoland Dreier  * to the add_target sysfs attribute.
2929aef9ec39SRoland Dreier  */
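/*
 * Example (the identifier values below are hypothetical and the sysfs path
 * depends on the local HCA name and port number):
 *
 *     echo id_ext=200100a0b8112233,ioc_guid=0002c90300a1b2c3,\
 *          dgid=fe800000000000000002c90300a1b2c3,pkey=ffff,\
 *          service_id=0002c90300a1b2c3 \
 *          > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */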
2930aef9ec39SRoland Dreier enum {
2931aef9ec39SRoland Dreier 	SRP_OPT_ERR		= 0,
2932aef9ec39SRoland Dreier 	SRP_OPT_ID_EXT		= 1 << 0,
2933aef9ec39SRoland Dreier 	SRP_OPT_IOC_GUID	= 1 << 1,
2934aef9ec39SRoland Dreier 	SRP_OPT_DGID		= 1 << 2,
2935aef9ec39SRoland Dreier 	SRP_OPT_PKEY		= 1 << 3,
2936aef9ec39SRoland Dreier 	SRP_OPT_SERVICE_ID	= 1 << 4,
2937aef9ec39SRoland Dreier 	SRP_OPT_MAX_SECT	= 1 << 5,
293852fb2b50SVu Pham 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
29390c0450dbSRamachandra K 	SRP_OPT_IO_CLASS	= 1 << 7,
294001cb9bcbSIshai Rabinovitz 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
294149248644SDavid Dillow 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2942c07d424dSDavid Dillow 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2943c07d424dSDavid Dillow 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
29444b5e5f41SBart Van Assche 	SRP_OPT_COMP_VECTOR	= 1 << 12,
29457bb312e4SVu Pham 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
29464d73f95fSBart Van Assche 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2947aef9ec39SRoland Dreier 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2948aef9ec39SRoland Dreier 				   SRP_OPT_IOC_GUID	|
2949aef9ec39SRoland Dreier 				   SRP_OPT_DGID		|
2950aef9ec39SRoland Dreier 				   SRP_OPT_PKEY		|
2951aef9ec39SRoland Dreier 				   SRP_OPT_SERVICE_ID),
2952aef9ec39SRoland Dreier };
2953aef9ec39SRoland Dreier 
2954a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = {
2955aef9ec39SRoland Dreier 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2956aef9ec39SRoland Dreier 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2957aef9ec39SRoland Dreier 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2958aef9ec39SRoland Dreier 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2959aef9ec39SRoland Dreier 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2960aef9ec39SRoland Dreier 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
296152fb2b50SVu Pham 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
29620c0450dbSRamachandra K 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
296301cb9bcbSIshai Rabinovitz 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
296449248644SDavid Dillow 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2965c07d424dSDavid Dillow 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2966c07d424dSDavid Dillow 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
29674b5e5f41SBart Van Assche 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
29687bb312e4SVu Pham 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
29694d73f95fSBart Van Assche 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2970aef9ec39SRoland Dreier 	{ SRP_OPT_ERR,			NULL 			}
2971aef9ec39SRoland Dreier };
2972aef9ec39SRoland Dreier 
2973aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target)
2974aef9ec39SRoland Dreier {
2975aef9ec39SRoland Dreier 	char *options, *sep_opt;
2976aef9ec39SRoland Dreier 	char *p;
2977aef9ec39SRoland Dreier 	char dgid[3];
2978aef9ec39SRoland Dreier 	substring_t args[MAX_OPT_ARGS];
2979aef9ec39SRoland Dreier 	int opt_mask = 0;
2980aef9ec39SRoland Dreier 	int token;
2981aef9ec39SRoland Dreier 	int ret = -EINVAL;
2982aef9ec39SRoland Dreier 	int i;
2983aef9ec39SRoland Dreier 
2984aef9ec39SRoland Dreier 	options = kstrdup(buf, GFP_KERNEL);
2985aef9ec39SRoland Dreier 	if (!options)
2986aef9ec39SRoland Dreier 		return -ENOMEM;
2987aef9ec39SRoland Dreier 
2988aef9ec39SRoland Dreier 	sep_opt = options;
29897dcf9c19SSagi Grimberg 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2990aef9ec39SRoland Dreier 		if (!*p)
2991aef9ec39SRoland Dreier 			continue;
2992aef9ec39SRoland Dreier 
2993aef9ec39SRoland Dreier 		token = match_token(p, srp_opt_tokens, args);
2994aef9ec39SRoland Dreier 		opt_mask |= token;
2995aef9ec39SRoland Dreier 
2996aef9ec39SRoland Dreier 		switch (token) {
2997aef9ec39SRoland Dreier 		case SRP_OPT_ID_EXT:
2998aef9ec39SRoland Dreier 			p = match_strdup(args);
2999a20f3a6dSIshai Rabinovitz 			if (!p) {
3000a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3001a20f3a6dSIshai Rabinovitz 				goto out;
3002a20f3a6dSIshai Rabinovitz 			}
3003aef9ec39SRoland Dreier 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3004aef9ec39SRoland Dreier 			kfree(p);
3005aef9ec39SRoland Dreier 			break;
3006aef9ec39SRoland Dreier 
3007aef9ec39SRoland Dreier 		case SRP_OPT_IOC_GUID:
3008aef9ec39SRoland Dreier 			p = match_strdup(args);
3009a20f3a6dSIshai Rabinovitz 			if (!p) {
3010a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3011a20f3a6dSIshai Rabinovitz 				goto out;
3012a20f3a6dSIshai Rabinovitz 			}
3013aef9ec39SRoland Dreier 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3014aef9ec39SRoland Dreier 			kfree(p);
3015aef9ec39SRoland Dreier 			break;
3016aef9ec39SRoland Dreier 
3017aef9ec39SRoland Dreier 		case SRP_OPT_DGID:
3018aef9ec39SRoland Dreier 			p = match_strdup(args);
3019a20f3a6dSIshai Rabinovitz 			if (!p) {
3020a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3021a20f3a6dSIshai Rabinovitz 				goto out;
3022a20f3a6dSIshai Rabinovitz 			}
3023aef9ec39SRoland Dreier 			if (strlen(p) != 32) {
3024e0bda7d8SBart Van Assche 				pr_warn("bad dest GID parameter '%s'\n", p);
3025ce1823f0SRoland Dreier 				kfree(p);
3026aef9ec39SRoland Dreier 				goto out;
3027aef9ec39SRoland Dreier 			}
3028aef9ec39SRoland Dreier 
3029aef9ec39SRoland Dreier 			for (i = 0; i < 16; ++i) {
3030747fe000SBart Van Assche 				strlcpy(dgid, p + i * 2, sizeof(dgid));
3031747fe000SBart Van Assche 				if (sscanf(dgid, "%hhx",
3032747fe000SBart Van Assche 					   &target->orig_dgid.raw[i]) < 1) {
3033747fe000SBart Van Assche 					ret = -EINVAL;
3034747fe000SBart Van Assche 					kfree(p);
3035747fe000SBart Van Assche 					goto out;
3036747fe000SBart Van Assche 				}
3037aef9ec39SRoland Dreier 			}
3038bf17c1c7SRoland Dreier 			kfree(p);
3039aef9ec39SRoland Dreier 			break;
3040aef9ec39SRoland Dreier 
3041aef9ec39SRoland Dreier 		case SRP_OPT_PKEY:
3042aef9ec39SRoland Dreier 			if (match_hex(args, &token)) {
3043e0bda7d8SBart Van Assche 				pr_warn("bad P_Key parameter '%s'\n", p);
3044aef9ec39SRoland Dreier 				goto out;
3045aef9ec39SRoland Dreier 			}
3046747fe000SBart Van Assche 			target->pkey = cpu_to_be16(token);
3047aef9ec39SRoland Dreier 			break;
3048aef9ec39SRoland Dreier 
3049aef9ec39SRoland Dreier 		case SRP_OPT_SERVICE_ID:
3050aef9ec39SRoland Dreier 			p = match_strdup(args);
3051a20f3a6dSIshai Rabinovitz 			if (!p) {
3052a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3053a20f3a6dSIshai Rabinovitz 				goto out;
3054a20f3a6dSIshai Rabinovitz 			}
3055aef9ec39SRoland Dreier 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3056aef9ec39SRoland Dreier 			kfree(p);
3057aef9ec39SRoland Dreier 			break;
3058aef9ec39SRoland Dreier 
3059aef9ec39SRoland Dreier 		case SRP_OPT_MAX_SECT:
3060aef9ec39SRoland Dreier 			if (match_int(args, &token)) {
3061e0bda7d8SBart Van Assche 				pr_warn("bad max sect parameter '%s'\n", p);
3062aef9ec39SRoland Dreier 				goto out;
3063aef9ec39SRoland Dreier 			}
3064aef9ec39SRoland Dreier 			target->scsi_host->max_sectors = token;
3065aef9ec39SRoland Dreier 			break;
3066aef9ec39SRoland Dreier 
30674d73f95fSBart Van Assche 		case SRP_OPT_QUEUE_SIZE:
30684d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
30694d73f95fSBart Van Assche 				pr_warn("bad queue_size parameter '%s'\n", p);
30704d73f95fSBart Van Assche 				goto out;
30714d73f95fSBart Van Assche 			}
30724d73f95fSBart Van Assche 			target->scsi_host->can_queue = token;
30734d73f95fSBart Van Assche 			target->queue_size = token + SRP_RSP_SQ_SIZE +
30744d73f95fSBart Van Assche 					     SRP_TSK_MGMT_SQ_SIZE;
30754d73f95fSBart Van Assche 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
30764d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun = token;
30774d73f95fSBart Van Assche 			break;
30784d73f95fSBart Van Assche 
307952fb2b50SVu Pham 		case SRP_OPT_MAX_CMD_PER_LUN:
30804d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
3081e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3082e0bda7d8SBart Van Assche 					p);
308352fb2b50SVu Pham 				goto out;
308452fb2b50SVu Pham 			}
30854d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun = token;
308652fb2b50SVu Pham 			break;
308752fb2b50SVu Pham 
30880c0450dbSRamachandra K 		case SRP_OPT_IO_CLASS:
30890c0450dbSRamachandra K 			if (match_hex(args, &token)) {
3090e0bda7d8SBart Van Assche 				pr_warn("bad IO class parameter '%s'\n", p);
30910c0450dbSRamachandra K 				goto out;
30920c0450dbSRamachandra K 			}
30930c0450dbSRamachandra K 			if (token != SRP_REV10_IB_IO_CLASS &&
30940c0450dbSRamachandra K 			    token != SRP_REV16A_IB_IO_CLASS) {
3095e0bda7d8SBart Van Assche 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3096e0bda7d8SBart Van Assche 					token, SRP_REV10_IB_IO_CLASS,
3097e0bda7d8SBart Van Assche 					SRP_REV16A_IB_IO_CLASS);
30980c0450dbSRamachandra K 				goto out;
30990c0450dbSRamachandra K 			}
31000c0450dbSRamachandra K 			target->io_class = token;
31010c0450dbSRamachandra K 			break;
31020c0450dbSRamachandra K 
310301cb9bcbSIshai Rabinovitz 		case SRP_OPT_INITIATOR_EXT:
310401cb9bcbSIshai Rabinovitz 			p = match_strdup(args);
3105a20f3a6dSIshai Rabinovitz 			if (!p) {
3106a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3107a20f3a6dSIshai Rabinovitz 				goto out;
3108a20f3a6dSIshai Rabinovitz 			}
310901cb9bcbSIshai Rabinovitz 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
311001cb9bcbSIshai Rabinovitz 			kfree(p);
311101cb9bcbSIshai Rabinovitz 			break;
311201cb9bcbSIshai Rabinovitz 
311349248644SDavid Dillow 		case SRP_OPT_CMD_SG_ENTRIES:
311449248644SDavid Dillow 			if (match_int(args, &token) || token < 1 || token > 255) {
3115e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3116e0bda7d8SBart Van Assche 					p);
311749248644SDavid Dillow 				goto out;
311849248644SDavid Dillow 			}
311949248644SDavid Dillow 			target->cmd_sg_cnt = token;
312049248644SDavid Dillow 			break;
312149248644SDavid Dillow 
3122c07d424dSDavid Dillow 		case SRP_OPT_ALLOW_EXT_SG:
3123c07d424dSDavid Dillow 			if (match_int(args, &token)) {
3124e0bda7d8SBart Van Assche 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3125c07d424dSDavid Dillow 				goto out;
3126c07d424dSDavid Dillow 			}
3127c07d424dSDavid Dillow 			target->allow_ext_sg = !!token;
3128c07d424dSDavid Dillow 			break;
3129c07d424dSDavid Dillow 
3130c07d424dSDavid Dillow 		case SRP_OPT_SG_TABLESIZE:
3131c07d424dSDavid Dillow 			if (match_int(args, &token) || token < 1 ||
3132c07d424dSDavid Dillow 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3133e0bda7d8SBart Van Assche 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3134e0bda7d8SBart Van Assche 					p);
3135c07d424dSDavid Dillow 				goto out;
3136c07d424dSDavid Dillow 			}
3137c07d424dSDavid Dillow 			target->sg_tablesize = token;
3138c07d424dSDavid Dillow 			break;
3139c07d424dSDavid Dillow 
31404b5e5f41SBart Van Assche 		case SRP_OPT_COMP_VECTOR:
31414b5e5f41SBart Van Assche 			if (match_int(args, &token) || token < 0) {
31424b5e5f41SBart Van Assche 				pr_warn("bad comp_vector parameter '%s'\n", p);
31434b5e5f41SBart Van Assche 				goto out;
31444b5e5f41SBart Van Assche 			}
31454b5e5f41SBart Van Assche 			target->comp_vector = token;
31464b5e5f41SBart Van Assche 			break;
31474b5e5f41SBart Van Assche 
31487bb312e4SVu Pham 		case SRP_OPT_TL_RETRY_COUNT:
31497bb312e4SVu Pham 			if (match_int(args, &token) || token < 2 || token > 7) {
31507bb312e4SVu Pham 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
31517bb312e4SVu Pham 					p);
31527bb312e4SVu Pham 				goto out;
31537bb312e4SVu Pham 			}
31547bb312e4SVu Pham 			target->tl_retry_count = token;
31557bb312e4SVu Pham 			break;
31567bb312e4SVu Pham 
3157aef9ec39SRoland Dreier 		default:
3158e0bda7d8SBart Van Assche 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3159e0bda7d8SBart Van Assche 				p);
3160aef9ec39SRoland Dreier 			goto out;
3161aef9ec39SRoland Dreier 		}
3162aef9ec39SRoland Dreier 	}
3163aef9ec39SRoland Dreier 
3164aef9ec39SRoland Dreier 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3165aef9ec39SRoland Dreier 		ret = 0;
3166aef9ec39SRoland Dreier 	else
3167aef9ec39SRoland Dreier 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3168aef9ec39SRoland Dreier 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3169aef9ec39SRoland Dreier 			    !(srp_opt_tokens[i].token & opt_mask))
3170e0bda7d8SBart Van Assche 				pr_warn("target creation request is missing parameter '%s'\n",
3171aef9ec39SRoland Dreier 					srp_opt_tokens[i].pattern);
3172aef9ec39SRoland Dreier 
31734d73f95fSBart Van Assche 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
31744d73f95fSBart Van Assche 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
31754d73f95fSBart Van Assche 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
31764d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun,
31774d73f95fSBart Van Assche 			target->scsi_host->can_queue);
31784d73f95fSBart Van Assche 
3179aef9ec39SRoland Dreier out:
3180aef9ec39SRoland Dreier 	kfree(options);
3181aef9ec39SRoland Dreier 	return ret;
3182aef9ec39SRoland Dreier }
3183aef9ec39SRoland Dreier 
3184ee959b00STony Jones static ssize_t srp_create_target(struct device *dev,
3185ee959b00STony Jones 				 struct device_attribute *attr,
3186aef9ec39SRoland Dreier 				 const char *buf, size_t count)
3187aef9ec39SRoland Dreier {
3188aef9ec39SRoland Dreier 	struct srp_host *host =
3189ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
3190aef9ec39SRoland Dreier 	struct Scsi_Host *target_host;
3191aef9ec39SRoland Dreier 	struct srp_target_port *target;
3192509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
3193d1b4289eSBart Van Assche 	struct srp_device *srp_dev = host->srp_dev;
3194d1b4289eSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
3195d92c0da7SBart Van Assche 	int ret, node_idx, node, cpu, i;
3196d92c0da7SBart Van Assche 	bool multich = false;
3197aef9ec39SRoland Dreier 
3198aef9ec39SRoland Dreier 	target_host = scsi_host_alloc(&srp_template,
3199aef9ec39SRoland Dreier 				      sizeof (struct srp_target_port));
3200aef9ec39SRoland Dreier 	if (!target_host)
3201aef9ec39SRoland Dreier 		return -ENOMEM;
3202aef9ec39SRoland Dreier 
32033236822bSFUJITA Tomonori 	target_host->transportt  = ib_srp_transport_template;
3204fd1b6c4aSBart Van Assche 	target_host->max_channel = 0;
3205fd1b6c4aSBart Van Assche 	target_host->max_id      = 1;
3206985aa495SBart Van Assche 	target_host->max_lun     = -1LL;
32073c8edf0eSArne Redlich 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
32085f068992SRoland Dreier 
3209aef9ec39SRoland Dreier 	target = host_to_target(target_host);
3210aef9ec39SRoland Dreier 
32110c0450dbSRamachandra K 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3212aef9ec39SRoland Dreier 	target->scsi_host	= target_host;
3213aef9ec39SRoland Dreier 	target->srp_host	= host;
3214e6bf5f48SJason Gunthorpe 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
321503f6fb93SBart Van Assche 	target->global_mr	= host->srp_dev->global_mr;
321649248644SDavid Dillow 	target->cmd_sg_cnt	= cmd_sg_entries;
3217c07d424dSDavid Dillow 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3218c07d424dSDavid Dillow 	target->allow_ext_sg	= allow_ext_sg;
32197bb312e4SVu Pham 	target->tl_retry_count	= 7;
32204d73f95fSBart Van Assche 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3221aef9ec39SRoland Dreier 
322234aa654eSBart Van Assche 	/*
322334aa654eSBart Van Assche 	 * Prevent the SCSI host from being removed by srp_remove_target()
322434aa654eSBart Van Assche 	 * before this function returns.
322534aa654eSBart Van Assche 	 */
322634aa654eSBart Van Assche 	scsi_host_get(target->scsi_host);
322734aa654eSBart Van Assche 
32282d7091bcSBart Van Assche 	mutex_lock(&host->add_target_mutex);
32292d7091bcSBart Van Assche 
3230aef9ec39SRoland Dreier 	ret = srp_parse_options(buf, target);
3231aef9ec39SRoland Dreier 	if (ret)
3232fb49c8bbSBart Van Assche 		goto out;
3233aef9ec39SRoland Dreier 
32344d73f95fSBart Van Assche 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
32354d73f95fSBart Van Assche 
323696fc248aSBart Van Assche 	if (!srp_conn_unique(target->srp_host, target)) {
323796fc248aSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
323896fc248aSBart Van Assche 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
323996fc248aSBart Van Assche 			     be64_to_cpu(target->id_ext),
324096fc248aSBart Van Assche 			     be64_to_cpu(target->ioc_guid),
324196fc248aSBart Van Assche 			     be64_to_cpu(target->initiator_ext));
324296fc248aSBart Van Assche 		ret = -EEXIST;
3243fb49c8bbSBart Van Assche 		goto out;
324496fc248aSBart Van Assche 	}
324596fc248aSBart Van Assche 
32465cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3247c07d424dSDavid Dillow 	    target->cmd_sg_cnt < target->sg_tablesize) {
32485cfb1782SBart Van Assche 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3249c07d424dSDavid Dillow 		target->sg_tablesize = target->cmd_sg_cnt;
3250c07d424dSDavid Dillow 	}
3251c07d424dSDavid Dillow 
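	/*
	 * Size the indirect descriptor table and the maximum information unit
	 * (IU) length: an SRP_CMD request plus an indirect buffer descriptor
	 * plus one direct descriptor per scatter/gather entry.  Illustrative
	 * example (assuming the default of 12 cmd_sg_entries and a 16-byte
	 * struct srp_direct_buf): the descriptor list alone needs 192 bytes.
	 */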
3252c07d424dSDavid Dillow 	target_host->sg_tablesize = target->sg_tablesize;
3253c07d424dSDavid Dillow 	target->indirect_size = target->sg_tablesize *
3254c07d424dSDavid Dillow 				sizeof (struct srp_direct_buf);
325549248644SDavid Dillow 	target->max_iu_len = sizeof (struct srp_cmd) +
325649248644SDavid Dillow 			     sizeof (struct srp_indirect_buf) +
325749248644SDavid Dillow 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
325849248644SDavid Dillow 
3259c1120f89SBart Van Assche 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3260ef6c49d8SBart Van Assche 	INIT_WORK(&target->remove_work, srp_remove_work);
32618f26c9ffSDavid Dillow 	spin_lock_init(&target->lock);
326255ee3ab2SMatan Barak 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
32632088ca66SSagi Grimberg 	if (ret)
3264fb49c8bbSBart Van Assche 		goto out;
3265d92c0da7SBart Van Assche 
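	/*
	 * Pick the number of RDMA channels: at least one per online NUMA
	 * node, and at most min(4 * nodes, completion vectors) capped by the
	 * number of online CPUs, unless the ch_count module parameter
	 * overrides the inner bound.  Illustrative example: 2 nodes, 16 CPUs
	 * and 8 completion vectors give max(2, min(min(8, 8), 16)) = 8.
	 */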
3266d92c0da7SBart Van Assche 	ret = -ENOMEM;
3267d92c0da7SBart Van Assche 	target->ch_count = max_t(unsigned, num_online_nodes(),
3268d92c0da7SBart Van Assche 				 min(ch_count ? :
3269d92c0da7SBart Van Assche 				     min(4 * num_online_nodes(),
3270d92c0da7SBart Van Assche 					 ibdev->num_comp_vectors),
3271d92c0da7SBart Van Assche 				     num_online_cpus()));
3272d92c0da7SBart Van Assche 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3273d92c0da7SBart Van Assche 			     GFP_KERNEL);
3274d92c0da7SBart Van Assche 	if (!target->ch)
3275fb49c8bbSBart Van Assche 		goto out;
3276d92c0da7SBart Van Assche 
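	/*
	 * Spread the channels evenly over the online NUMA nodes and, within
	 * each node, over its online CPUs.  Each node also gets a contiguous
	 * slice of the device's completion vectors, rotated by the optional
	 * comp_vector parameter, so that interrupt load is spread as well.
	 */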
3277d92c0da7SBart Van Assche 	node_idx = 0;
3278d92c0da7SBart Van Assche 	for_each_online_node(node) {
3279d92c0da7SBart Van Assche 		const int ch_start = (node_idx * target->ch_count /
3280d92c0da7SBart Van Assche 				      num_online_nodes());
3281d92c0da7SBart Van Assche 		const int ch_end = ((node_idx + 1) * target->ch_count /
3282d92c0da7SBart Van Assche 				    num_online_nodes());
3283d92c0da7SBart Van Assche 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3284d92c0da7SBart Van Assche 				      num_online_nodes() + target->comp_vector)
3285d92c0da7SBart Van Assche 				     % ibdev->num_comp_vectors;
3286d92c0da7SBart Van Assche 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3287d92c0da7SBart Van Assche 				    num_online_nodes() + target->comp_vector)
3288d92c0da7SBart Van Assche 				   % ibdev->num_comp_vectors;
3289d92c0da7SBart Van Assche 		int cpu_idx = 0;
3290d92c0da7SBart Van Assche 
3291d92c0da7SBart Van Assche 		for_each_online_cpu(cpu) {
3292d92c0da7SBart Van Assche 			if (cpu_to_node(cpu) != node)
3293d92c0da7SBart Van Assche 				continue;
3294d92c0da7SBart Van Assche 			if (ch_start + cpu_idx >= ch_end)
3295d92c0da7SBart Van Assche 				continue;
3296d92c0da7SBart Van Assche 			ch = &target->ch[ch_start + cpu_idx];
3297d92c0da7SBart Van Assche 			ch->target = target;
3298d92c0da7SBart Van Assche 			ch->comp_vector = cv_start == cv_end ? cv_start :
3299d92c0da7SBart Van Assche 				cv_start + cpu_idx % (cv_end - cv_start);
3300d92c0da7SBart Van Assche 			spin_lock_init(&ch->lock);
3301d92c0da7SBart Van Assche 			INIT_LIST_HEAD(&ch->free_tx);
3302d92c0da7SBart Van Assche 			ret = srp_new_cm_id(ch);
3303d92c0da7SBart Van Assche 			if (ret)
3304d92c0da7SBart Van Assche 				goto err_disconnect;
3305aef9ec39SRoland Dreier 
3306509c07bcSBart Van Assche 			ret = srp_create_ch_ib(ch);
3307aef9ec39SRoland Dreier 			if (ret)
3308d92c0da7SBart Van Assche 				goto err_disconnect;
3309aef9ec39SRoland Dreier 
3310d92c0da7SBart Van Assche 			ret = srp_alloc_req_data(ch);
33119fe4bcf4SDavid Dillow 			if (ret)
3312d92c0da7SBart Van Assche 				goto err_disconnect;
3313aef9ec39SRoland Dreier 
3314d92c0da7SBart Van Assche 			ret = srp_connect_ch(ch, multich);
3315aef9ec39SRoland Dreier 			if (ret) {
33167aa54bd7SDavid Dillow 				shost_printk(KERN_ERR, target->scsi_host,
3317d92c0da7SBart Van Assche 					     PFX "Connection %d/%d failed\n",
3318d92c0da7SBart Van Assche 					     ch_start + cpu_idx,
3319d92c0da7SBart Van Assche 					     target->ch_count);
3320d92c0da7SBart Van Assche 				if (node_idx == 0 && cpu_idx == 0) {
3321d92c0da7SBart Van Assche 					goto err_disconnect;
3322d92c0da7SBart Van Assche 				} else {
3323d92c0da7SBart Van Assche 					srp_free_ch_ib(target, ch);
3324d92c0da7SBart Van Assche 					srp_free_req_data(target, ch);
3325d92c0da7SBart Van Assche 					target->ch_count = ch - target->ch;
3326c257ea6fSBart Van Assche 					goto connected;
3327aef9ec39SRoland Dreier 				}
3328d92c0da7SBart Van Assche 			}
3329d92c0da7SBart Van Assche 
3330d92c0da7SBart Van Assche 			multich = true;
3331d92c0da7SBart Van Assche 			cpu_idx++;
3332d92c0da7SBart Van Assche 		}
3333d92c0da7SBart Van Assche 		node_idx++;
3334d92c0da7SBart Van Assche 	}
3335d92c0da7SBart Van Assche 
3336c257ea6fSBart Van Assche connected:
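	/*
	 * Expose one blk-mq hardware queue per RDMA channel that was actually
	 * established so that the block layer can spread I/O across them.
	 */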
3337d92c0da7SBart Van Assche 	target->scsi_host->nr_hw_queues = target->ch_count;
3338aef9ec39SRoland Dreier 
3339aef9ec39SRoland Dreier 	ret = srp_add_target(host, target);
3340aef9ec39SRoland Dreier 	if (ret)
3341aef9ec39SRoland Dreier 		goto err_disconnect;
3342aef9ec39SRoland Dreier 
334334aa654eSBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
3344e7ffde01SBart Van Assche 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3345e7ffde01SBart Van Assche 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3346e7ffde01SBart Van Assche 			     be64_to_cpu(target->id_ext),
3347e7ffde01SBart Van Assche 			     be64_to_cpu(target->ioc_guid),
3348747fe000SBart Van Assche 			     be16_to_cpu(target->pkey),
3349e7ffde01SBart Van Assche 			     be64_to_cpu(target->service_id),
3350747fe000SBart Van Assche 			     target->sgid.raw, target->orig_dgid.raw);
335134aa654eSBart Van Assche 	}
3352e7ffde01SBart Van Assche 
33532d7091bcSBart Van Assche 	ret = count;
33542d7091bcSBart Van Assche 
33552d7091bcSBart Van Assche out:
33562d7091bcSBart Van Assche 	mutex_unlock(&host->add_target_mutex);
335734aa654eSBart Van Assche 
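	/*
	 * The first put balances the scsi_host_get() taken above; on failure
	 * (ret < 0) a second put also drops the reference obtained from
	 * scsi_host_alloc() so that the SCSI host is freed.
	 */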
335834aa654eSBart Van Assche 	scsi_host_put(target->scsi_host);
3359bc44bd1dSBart Van Assche 	if (ret < 0)
3360bc44bd1dSBart Van Assche 		scsi_host_put(target->scsi_host);
336134aa654eSBart Van Assche 
33622d7091bcSBart Van Assche 	return ret;
3363aef9ec39SRoland Dreier 
3364aef9ec39SRoland Dreier err_disconnect:
3365aef9ec39SRoland Dreier 	srp_disconnect_target(target);
3366aef9ec39SRoland Dreier 
3367d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
3368d92c0da7SBart Van Assche 		ch = &target->ch[i];
3369509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
3370509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
3371d92c0da7SBart Van Assche 	}
3372d92c0da7SBart Van Assche 
3373d92c0da7SBart Van Assche 	kfree(target->ch);
33742d7091bcSBart Van Assche 	goto out;
3375aef9ec39SRoland Dreier }
3376aef9ec39SRoland Dreier 
3377ee959b00STony Jones static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3378aef9ec39SRoland Dreier 
3379ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3380ee959b00STony Jones 			  char *buf)
3381aef9ec39SRoland Dreier {
3382ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3383aef9ec39SRoland Dreier 
338405321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3385aef9ec39SRoland Dreier }
3386aef9ec39SRoland Dreier 
3387ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3388aef9ec39SRoland Dreier 
3389ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3390ee959b00STony Jones 			 char *buf)
3391aef9ec39SRoland Dreier {
3392ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3393aef9ec39SRoland Dreier 
3394aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3395aef9ec39SRoland Dreier }
3396aef9ec39SRoland Dreier 
3397ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3398aef9ec39SRoland Dreier 
3399f5358a17SRoland Dreier static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3400aef9ec39SRoland Dreier {
3401aef9ec39SRoland Dreier 	struct srp_host *host;
3402aef9ec39SRoland Dreier 
3403aef9ec39SRoland Dreier 	host = kzalloc(sizeof *host, GFP_KERNEL);
3404aef9ec39SRoland Dreier 	if (!host)
3405aef9ec39SRoland Dreier 		return NULL;
3406aef9ec39SRoland Dreier 
3407aef9ec39SRoland Dreier 	INIT_LIST_HEAD(&host->target_list);
3408b3589fd4SMatthew Wilcox 	spin_lock_init(&host->target_lock);
3409aef9ec39SRoland Dreier 	init_completion(&host->released);
34102d7091bcSBart Van Assche 	mutex_init(&host->add_target_mutex);
341105321937SGreg Kroah-Hartman 	host->srp_dev = device;
3412aef9ec39SRoland Dreier 	host->port = port;
3413aef9ec39SRoland Dreier 
3414ee959b00STony Jones 	host->dev.class = &srp_class;
3415ee959b00STony Jones 	host->dev.parent = device->dev->dma_device;
3416d927e38cSKay Sievers 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3417aef9ec39SRoland Dreier 
3418ee959b00STony Jones 	if (device_register(&host->dev))
3419f5358a17SRoland Dreier 		goto free_host;
3420ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_add_target))
3421aef9ec39SRoland Dreier 		goto err_class;
3422ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3423aef9ec39SRoland Dreier 		goto err_class;
3424ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_port))
3425aef9ec39SRoland Dreier 		goto err_class;
3426aef9ec39SRoland Dreier 
3427aef9ec39SRoland Dreier 	return host;
3428aef9ec39SRoland Dreier 
3429aef9ec39SRoland Dreier err_class:
3430ee959b00STony Jones 	device_unregister(&host->dev);
3431aef9ec39SRoland Dreier 
3432f5358a17SRoland Dreier free_host:
3433aef9ec39SRoland Dreier 	kfree(host);
3434aef9ec39SRoland Dreier 
3435aef9ec39SRoland Dreier 	return NULL;
3436aef9ec39SRoland Dreier }
3437aef9ec39SRoland Dreier 
3438aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device)
3439aef9ec39SRoland Dreier {
3440f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3441f5358a17SRoland Dreier 	struct ib_device_attr *dev_attr;
3442aef9ec39SRoland Dreier 	struct srp_host *host;
34434139032bSHal Rosenstock 	int mr_page_shift, p;
344452ede08fSBart Van Assche 	u64 max_pages_per_mr;
3445aef9ec39SRoland Dreier 
3446f5358a17SRoland Dreier 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3447f5358a17SRoland Dreier 	if (!dev_attr)
3448cf311cd4SSean Hefty 		return;
3449aef9ec39SRoland Dreier 
3450f5358a17SRoland Dreier 	if (ib_query_device(device, dev_attr)) {
3451e0bda7d8SBart Van Assche 		pr_warn("Query device failed for %s\n", device->name);
3452f5358a17SRoland Dreier 		goto free_attr;
3453f5358a17SRoland Dreier 	}
3454f5358a17SRoland Dreier 
3455f5358a17SRoland Dreier 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3456f5358a17SRoland Dreier 	if (!srp_dev)
3457f5358a17SRoland Dreier 		goto free_attr;
3458f5358a17SRoland Dreier 
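	/*
	 * Detect which memory registration schemes the HCA offers: FMR
	 * requires the full set of FMR verbs, while fast registration (FR) is
	 * advertised via IB_DEVICE_MEM_MGT_EXTENSIONS.  FR is chosen over FMR
	 * when both are available and the prefer_fr module parameter is set.
	 */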
3459d1b4289eSBart Van Assche 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3460d1b4289eSBart Van Assche 			    device->map_phys_fmr && device->unmap_fmr);
34615cfb1782SBart Van Assche 	srp_dev->has_fr = (dev_attr->device_cap_flags &
34625cfb1782SBart Van Assche 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
34635cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
34645cfb1782SBart Van Assche 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
34655cfb1782SBart Van Assche 
34665cfb1782SBart Van Assche 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
34675cfb1782SBart Van Assche 				 (!srp_dev->has_fmr || prefer_fr));
3468002f1567SBart Van Assche 	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3469d1b4289eSBart Van Assche 
3470f5358a17SRoland Dreier 	/*
3471f5358a17SRoland Dreier 	 * Use the smallest page size supported by the HCA, down to a
34728f26c9ffSDavid Dillow 	 * minimum of 4096 bytes. We're unlikely to build large sglists
34738f26c9ffSDavid Dillow 	 * out of smaller entries.
3474f5358a17SRoland Dreier 	 */
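	/*
	 * Illustrative example: a page_size_cap whose lowest set bit is the
	 * 4 KiB page gives ffs() - 1 = 12, so mr_page_size becomes 4096 and
	 * mr_page_mask clears the low 12 address bits.
	 */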
347552ede08fSBart Van Assche 	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
347652ede08fSBart Van Assche 	srp_dev->mr_page_size	= 1 << mr_page_shift;
347752ede08fSBart Van Assche 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
347852ede08fSBart Van Assche 	max_pages_per_mr	= dev_attr->max_mr_size;
347952ede08fSBart Van Assche 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
348052ede08fSBart Van Assche 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
348152ede08fSBart Van Assche 					  max_pages_per_mr);
34825cfb1782SBart Van Assche 	if (srp_dev->use_fast_reg) {
34835cfb1782SBart Van Assche 		srp_dev->max_pages_per_mr =
34845cfb1782SBart Van Assche 			min_t(u32, srp_dev->max_pages_per_mr,
34855cfb1782SBart Van Assche 			      dev_attr->max_fast_reg_page_list_len);
34865cfb1782SBart Van Assche 	}
348752ede08fSBart Van Assche 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
348852ede08fSBart Van Assche 				   srp_dev->max_pages_per_mr;
34895cfb1782SBart Van Assche 	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
349052ede08fSBart Van Assche 		 device->name, mr_page_shift, dev_attr->max_mr_size,
34915cfb1782SBart Van Assche 		 dev_attr->max_fast_reg_page_list_len,
349252ede08fSBart Van Assche 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3493f5358a17SRoland Dreier 
3494f5358a17SRoland Dreier 	INIT_LIST_HEAD(&srp_dev->dev_list);
3495f5358a17SRoland Dreier 
3496f5358a17SRoland Dreier 	srp_dev->dev = device;
3497f5358a17SRoland Dreier 	srp_dev->pd  = ib_alloc_pd(device);
3498f5358a17SRoland Dreier 	if (IS_ERR(srp_dev->pd))
3499f5358a17SRoland Dreier 		goto free_dev;
3500f5358a17SRoland Dreier 
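	/*
	 * When register_always is not set, or when the HCA supports neither
	 * FMR nor FR, create a single global DMA MR with local and remote
	 * access; otherwise leave global_mr NULL and rely on per-command
	 * memory registration.
	 */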
350103f6fb93SBart Van Assche 	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
350203f6fb93SBart Van Assche 		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3503f5358a17SRoland Dreier 						   IB_ACCESS_LOCAL_WRITE |
3504f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_READ |
3505f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_WRITE);
350603f6fb93SBart Van Assche 		if (IS_ERR(srp_dev->global_mr))
3507f5358a17SRoland Dreier 			goto err_pd;
350803f6fb93SBart Van Assche 	} else {
350903f6fb93SBart Van Assche 		srp_dev->global_mr = NULL;
351003f6fb93SBart Van Assche 	}
3511f5358a17SRoland Dreier 
35124139032bSHal Rosenstock 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3513f5358a17SRoland Dreier 		host = srp_add_port(srp_dev, p);
3514aef9ec39SRoland Dreier 		if (host)
3515f5358a17SRoland Dreier 			list_add_tail(&host->list, &srp_dev->dev_list);
3516aef9ec39SRoland Dreier 	}
3517aef9ec39SRoland Dreier 
3518f5358a17SRoland Dreier 	ib_set_client_data(device, &srp_client, srp_dev);
3519f5358a17SRoland Dreier 
3520f5358a17SRoland Dreier 	goto free_attr;
3521f5358a17SRoland Dreier 
3522f5358a17SRoland Dreier err_pd:
3523f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3524f5358a17SRoland Dreier 
3525f5358a17SRoland Dreier free_dev:
3526f5358a17SRoland Dreier 	kfree(srp_dev);
3527f5358a17SRoland Dreier 
3528f5358a17SRoland Dreier free_attr:
3529f5358a17SRoland Dreier 	kfree(dev_attr);
3530aef9ec39SRoland Dreier }
3531aef9ec39SRoland Dreier 
35327c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data)
3533aef9ec39SRoland Dreier {
3534f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3535aef9ec39SRoland Dreier 	struct srp_host *host, *tmp_host;
3536ef6c49d8SBart Van Assche 	struct srp_target_port *target;
3537aef9ec39SRoland Dreier 
35387c1eb45aSHaggai Eran 	srp_dev = client_data;
35391fe0cb84SDotan Barak 	if (!srp_dev)
35401fe0cb84SDotan Barak 		return;
3541aef9ec39SRoland Dreier 
3542f5358a17SRoland Dreier 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3543ee959b00STony Jones 		device_unregister(&host->dev);
3544aef9ec39SRoland Dreier 		/*
3545aef9ec39SRoland Dreier 		 * Wait for the sysfs entry to go away, so that no new
3546aef9ec39SRoland Dreier 		 * target ports can be created.
3547aef9ec39SRoland Dreier 		 */
3548aef9ec39SRoland Dreier 		wait_for_completion(&host->released);
3549aef9ec39SRoland Dreier 
3550aef9ec39SRoland Dreier 		/*
3551ef6c49d8SBart Van Assche 		 * Remove all target ports.
3552aef9ec39SRoland Dreier 		 */
3553b3589fd4SMatthew Wilcox 		spin_lock(&host->target_lock);
3554ef6c49d8SBart Van Assche 		list_for_each_entry(target, &host->target_list, list)
3555ef6c49d8SBart Van Assche 			srp_queue_remove_work(target);
3556b3589fd4SMatthew Wilcox 		spin_unlock(&host->target_lock);
3557aef9ec39SRoland Dreier 
3558aef9ec39SRoland Dreier 		/*
3559bcc05910SBart Van Assche 		 * Wait for tl_err and target port removal tasks.
3560aef9ec39SRoland Dreier 		 */
3561ef6c49d8SBart Van Assche 		flush_workqueue(system_long_wq);
3562bcc05910SBart Van Assche 		flush_workqueue(srp_remove_wq);
3563aef9ec39SRoland Dreier 
3564aef9ec39SRoland Dreier 		kfree(host);
3565aef9ec39SRoland Dreier 	}
3566aef9ec39SRoland Dreier 
356703f6fb93SBart Van Assche 	if (srp_dev->global_mr)
356803f6fb93SBart Van Assche 		ib_dereg_mr(srp_dev->global_mr);
3569f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3570f5358a17SRoland Dreier 
3571f5358a17SRoland Dreier 	kfree(srp_dev);
3572aef9ec39SRoland Dreier }
3573aef9ec39SRoland Dreier 
35743236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = {
3575ed9b2264SBart Van Assche 	.has_rport_state	 = true,
3576ed9b2264SBart Van Assche 	.reset_timer_if_blocked	 = true,
3577a95cadb9SBart Van Assche 	.reconnect_delay	 = &srp_reconnect_delay,
3578ed9b2264SBart Van Assche 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3579ed9b2264SBart Van Assche 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3580ed9b2264SBart Van Assche 	.reconnect		 = srp_rport_reconnect,
3581dc1bdbd9SBart Van Assche 	.rport_delete		 = srp_rport_delete,
3582ed9b2264SBart Van Assche 	.terminate_rport_io	 = srp_terminate_io,
35833236822bSFUJITA Tomonori };
35843236822bSFUJITA Tomonori 
3585aef9ec39SRoland Dreier static int __init srp_init_module(void)
3586aef9ec39SRoland Dreier {
3587aef9ec39SRoland Dreier 	int ret;
3588aef9ec39SRoland Dreier 
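	/*
	 * Sanitize the module parameters: srp_sg_tablesize is kept only for
	 * backwards compatibility, cmd_sg_entries is clamped to at most 255
	 * (the SRP_CMD limit), and indirect_sg_entries is raised to at least
	 * cmd_sg_entries.
	 */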
358949248644SDavid Dillow 	if (srp_sg_tablesize) {
3590e0bda7d8SBart Van Assche 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
359149248644SDavid Dillow 		if (!cmd_sg_entries)
359249248644SDavid Dillow 			cmd_sg_entries = srp_sg_tablesize;
359349248644SDavid Dillow 	}
359449248644SDavid Dillow 
359549248644SDavid Dillow 	if (!cmd_sg_entries)
359649248644SDavid Dillow 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
359749248644SDavid Dillow 
359849248644SDavid Dillow 	if (cmd_sg_entries > 255) {
3599e0bda7d8SBart Van Assche 		pr_warn("Clamping cmd_sg_entries to 255\n");
360049248644SDavid Dillow 		cmd_sg_entries = 255;
36011e89a194SDavid Dillow 	}
36021e89a194SDavid Dillow 
3603c07d424dSDavid Dillow 	if (!indirect_sg_entries)
3604c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3605c07d424dSDavid Dillow 	else if (indirect_sg_entries < cmd_sg_entries) {
3606e0bda7d8SBart Van Assche 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3607e0bda7d8SBart Van Assche 			cmd_sg_entries);
3608c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3609c07d424dSDavid Dillow 	}
3610c07d424dSDavid Dillow 
3611bcc05910SBart Van Assche 	srp_remove_wq = create_workqueue("srp_remove");
3612da05be29SWei Yongjun 	if (!srp_remove_wq) {
3613da05be29SWei Yongjun 		ret = -ENOMEM;
3614bcc05910SBart Van Assche 		goto out;
3615bcc05910SBart Van Assche 	}
3616bcc05910SBart Van Assche 
3617bcc05910SBart Van Assche 	ret = -ENOMEM;
36183236822bSFUJITA Tomonori 	ib_srp_transport_template =
36193236822bSFUJITA Tomonori 		srp_attach_transport(&ib_srp_transport_functions);
36203236822bSFUJITA Tomonori 	if (!ib_srp_transport_template)
3621bcc05910SBart Van Assche 		goto destroy_wq;
36223236822bSFUJITA Tomonori 
3623aef9ec39SRoland Dreier 	ret = class_register(&srp_class);
3624aef9ec39SRoland Dreier 	if (ret) {
3625e0bda7d8SBart Van Assche 		pr_err("couldn't register class infiniband_srp\n");
3626bcc05910SBart Van Assche 		goto release_tr;
3627aef9ec39SRoland Dreier 	}
3628aef9ec39SRoland Dreier 
3629c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&srp_sa_client);
3630c1a0b23bSMichael S. Tsirkin 
3631aef9ec39SRoland Dreier 	ret = ib_register_client(&srp_client);
3632aef9ec39SRoland Dreier 	if (ret) {
3633e0bda7d8SBart Van Assche 		pr_err("couldn't register IB client\n");
3634bcc05910SBart Van Assche 		goto unreg_sa;
3635aef9ec39SRoland Dreier 	}
3636aef9ec39SRoland Dreier 
3637bcc05910SBart Van Assche out:
3638bcc05910SBart Van Assche 	return ret;
3639bcc05910SBart Van Assche 
3640bcc05910SBart Van Assche unreg_sa:
3641bcc05910SBart Van Assche 	ib_sa_unregister_client(&srp_sa_client);
3642bcc05910SBart Van Assche 	class_unregister(&srp_class);
3643bcc05910SBart Van Assche 
3644bcc05910SBart Van Assche release_tr:
3645bcc05910SBart Van Assche 	srp_release_transport(ib_srp_transport_template);
3646bcc05910SBart Van Assche 
3647bcc05910SBart Van Assche destroy_wq:
3648bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3649bcc05910SBart Van Assche 	goto out;
3650aef9ec39SRoland Dreier }
3651aef9ec39SRoland Dreier 
3652aef9ec39SRoland Dreier static void __exit srp_cleanup_module(void)
3653aef9ec39SRoland Dreier {
3654aef9ec39SRoland Dreier 	ib_unregister_client(&srp_client);
3655c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&srp_sa_client);
3656aef9ec39SRoland Dreier 	class_unregister(&srp_class);
36573236822bSFUJITA Tomonori 	srp_release_transport(ib_srp_transport_template);
3658bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3659aef9ec39SRoland Dreier }
3660aef9ec39SRoland Dreier 
3661aef9ec39SRoland Dreier module_init(srp_init_module);
3662aef9ec39SRoland Dreier module_exit(srp_cleanup_module);
3663