xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision f83b2561a6d4ff12959660ad597580097b744941)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
4356b5390cSBart Van Assche #include <rdma/ib_cache.h>
44aef9ec39SRoland Dreier 
4560063497SArun Sharma #include <linux/atomic.h>
46aef9ec39SRoland Dreier 
47aef9ec39SRoland Dreier #include <scsi/scsi.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
5071444b97SJack Wang #include <scsi/scsi_tcq.h>
51aef9ec39SRoland Dreier #include <scsi/srp.h>
523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
53aef9ec39SRoland Dreier 
54aef9ec39SRoland Dreier #include "ib_srp.h"
55aef9ec39SRoland Dreier 
56aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
57aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
58713ef24eSBart Van Assche #define DRV_VERSION	"2.0"
59713ef24eSBart Van Assche #define DRV_RELDATE	"July 26, 2015"
60aef9ec39SRoland Dreier 
61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION);
6533ab3e5bSBart Van Assche MODULE_INFO(release_date, DRV_RELDATE);
66aef9ec39SRoland Dreier 
6749248644SDavid Dillow static unsigned int srp_sg_tablesize;
6849248644SDavid Dillow static unsigned int cmd_sg_entries;
69c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
70c07d424dSDavid Dillow static bool allow_ext_sg;
7103f6fb93SBart Van Assche static bool prefer_fr = true;
7203f6fb93SBart Van Assche static bool register_always = true;
73aef9ec39SRoland Dreier static int topspin_workarounds = 1;
74aef9ec39SRoland Dreier 
7549248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7649248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7749248644SDavid Dillow 
7849248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
7949248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
8049248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
8149248644SDavid Dillow 
82c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
83c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
84c07d424dSDavid Dillow 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85c07d424dSDavid Dillow 
86c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
87c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
88c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89c07d424dSDavid Dillow 
90aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
91aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
92aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93aef9ec39SRoland Dreier 
945cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
955cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
965cfb1782SBart Van Assche "Whether to use fast registration if both FMR and fast registration are supported");
975cfb1782SBart Van Assche 
98b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
99b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
100b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
101b1b8854dSBart Van Assche 
1029c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops;
103ed9b2264SBart Van Assche 
104a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
105a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
107a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108a95cadb9SBart Van Assche 
109ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
110ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
112ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
113ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
114ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
115ed9b2264SBart Van Assche 		 " functionality is disabled.");
116ed9b2264SBart Van Assche 
117a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
118ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
120ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
121ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
122ed9b2264SBart Van Assche 		 " insulate the SCSI host from transport layer errors. After this"
123ed9b2264SBart Van Assche 		 " time has been exceeded the SCSI host is removed. Should be"
124ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
126ed9b2264SBart Van Assche 		 " this functionality is disabled.");
127ed9b2264SBart Van Assche 
128d92c0da7SBart Van Assche static unsigned ch_count;
129d92c0da7SBart Van Assche module_param(ch_count, uint, 0444);
130d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count,
131d92c0da7SBart Van Assche 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132d92c0da7SBart Van Assche 
133aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
1347c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data);
1351dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1361dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1371dc7b1f1SChristoph Hellwig 		const char *opname);
138aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139aef9ec39SRoland Dreier 
1403236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
141bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1423236822bSFUJITA Tomonori 
143aef9ec39SRoland Dreier static struct ib_client srp_client = {
144aef9ec39SRoland Dreier 	.name   = "srp",
145aef9ec39SRoland Dreier 	.add    = srp_add_one,
146aef9ec39SRoland Dreier 	.remove = srp_remove_one
147aef9ec39SRoland Dreier };
148aef9ec39SRoland Dreier 
149c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
150c1a0b23bSMichael S. Tsirkin 
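/*
 * Note: a negative timeout value means "off". srp_tmo_set() only accepts a
 * new value if the resulting combination of reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo is accepted by srp_tmo_valid().
 */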
151ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152ed9b2264SBart Van Assche {
153ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
154ed9b2264SBart Van Assche 
155ed9b2264SBart Van Assche 	if (tmo >= 0)
156ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
157ed9b2264SBart Van Assche 	else
158ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
159ed9b2264SBart Van Assche }
160ed9b2264SBart Van Assche 
161ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162ed9b2264SBart Van Assche {
163ed9b2264SBart Van Assche 	int tmo, res;
164ed9b2264SBart Van Assche 
1653fdf70acSSagi Grimberg 	res = srp_parse_tmo(&tmo, val);
166ed9b2264SBart Van Assche 	if (res)
167ed9b2264SBart Van Assche 		goto out;
1683fdf70acSSagi Grimberg 
169a95cadb9SBart Van Assche 	if (kp->arg == &srp_reconnect_delay)
170a95cadb9SBart Van Assche 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171a95cadb9SBart Van Assche 				    srp_dev_loss_tmo);
172a95cadb9SBart Van Assche 	else if (kp->arg == &srp_fast_io_fail_tmo)
173a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
174ed9b2264SBart Van Assche 	else
175a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176a95cadb9SBart Van Assche 				    tmo);
177ed9b2264SBart Van Assche 	if (res)
178ed9b2264SBart Van Assche 		goto out;
179ed9b2264SBart Van Assche 	*(int *)kp->arg = tmo;
180ed9b2264SBart Van Assche 
181ed9b2264SBart Van Assche out:
182ed9b2264SBart Van Assche 	return res;
183ed9b2264SBart Van Assche }
184ed9b2264SBart Van Assche 
1859c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops = {
186ed9b2264SBart Van Assche 	.get = srp_tmo_get,
187ed9b2264SBart Van Assche 	.set = srp_tmo_set,
188ed9b2264SBart Van Assche };
189ed9b2264SBart Van Assche 
190aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191aef9ec39SRoland Dreier {
192aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
193aef9ec39SRoland Dreier }
194aef9ec39SRoland Dreier 
195aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
196aef9ec39SRoland Dreier {
197aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
198aef9ec39SRoland Dreier }
199aef9ec39SRoland Dreier 
2005d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
2015d7cbfd6SRoland Dreier {
2025d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
2033d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
2045d7cbfd6SRoland Dreier 
2055d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2063d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2073d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2085d7cbfd6SRoland Dreier }
2095d7cbfd6SRoland Dreier 
210aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
212aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
213aef9ec39SRoland Dreier {
214aef9ec39SRoland Dreier 	struct srp_iu *iu;
215aef9ec39SRoland Dreier 
216aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
217aef9ec39SRoland Dreier 	if (!iu)
218aef9ec39SRoland Dreier 		goto out;
219aef9ec39SRoland Dreier 
220aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
221aef9ec39SRoland Dreier 	if (!iu->buf)
222aef9ec39SRoland Dreier 		goto out_free_iu;
223aef9ec39SRoland Dreier 
22405321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22505321937SGreg Kroah-Hartman 				    direction);
22605321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
227aef9ec39SRoland Dreier 		goto out_free_buf;
228aef9ec39SRoland Dreier 
229aef9ec39SRoland Dreier 	iu->size      = size;
230aef9ec39SRoland Dreier 	iu->direction = direction;
231aef9ec39SRoland Dreier 
232aef9ec39SRoland Dreier 	return iu;
233aef9ec39SRoland Dreier 
234aef9ec39SRoland Dreier out_free_buf:
235aef9ec39SRoland Dreier 	kfree(iu->buf);
236aef9ec39SRoland Dreier out_free_iu:
237aef9ec39SRoland Dreier 	kfree(iu);
238aef9ec39SRoland Dreier out:
239aef9ec39SRoland Dreier 	return NULL;
240aef9ec39SRoland Dreier }
241aef9ec39SRoland Dreier 
242aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243aef9ec39SRoland Dreier {
244aef9ec39SRoland Dreier 	if (!iu)
245aef9ec39SRoland Dreier 		return;
246aef9ec39SRoland Dreier 
24705321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
24805321937SGreg Kroah-Hartman 			    iu->direction);
249aef9ec39SRoland Dreier 	kfree(iu->buf);
250aef9ec39SRoland Dreier 	kfree(iu);
251aef9ec39SRoland Dreier }
252aef9ec39SRoland Dreier 
253aef9ec39SRoland Dreier static void srp_qp_event(struct ib_event *event, void *context)
254aef9ec39SRoland Dreier {
25557363d98SSagi Grimberg 	pr_debug("QP event %s (%d)\n",
25657363d98SSagi Grimberg 		 ib_event_msg(event->event), event->event);
257aef9ec39SRoland Dreier }
258aef9ec39SRoland Dreier 
259aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target,
260aef9ec39SRoland Dreier 		       struct ib_qp *qp)
261aef9ec39SRoland Dreier {
262aef9ec39SRoland Dreier 	struct ib_qp_attr *attr;
263aef9ec39SRoland Dreier 	int ret;
264aef9ec39SRoland Dreier 
265aef9ec39SRoland Dreier 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
266aef9ec39SRoland Dreier 	if (!attr)
267aef9ec39SRoland Dreier 		return -ENOMEM;
268aef9ec39SRoland Dreier 
26956b5390cSBart Van Assche 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270aef9ec39SRoland Dreier 				  target->srp_host->port,
271747fe000SBart Van Assche 				  be16_to_cpu(target->pkey),
272aef9ec39SRoland Dreier 				  &attr->pkey_index);
273aef9ec39SRoland Dreier 	if (ret)
274aef9ec39SRoland Dreier 		goto out;
275aef9ec39SRoland Dreier 
276aef9ec39SRoland Dreier 	attr->qp_state        = IB_QPS_INIT;
277aef9ec39SRoland Dreier 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278aef9ec39SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
279aef9ec39SRoland Dreier 	attr->port_num        = target->srp_host->port;
280aef9ec39SRoland Dreier 
281aef9ec39SRoland Dreier 	ret = ib_modify_qp(qp, attr,
282aef9ec39SRoland Dreier 			   IB_QP_STATE		|
283aef9ec39SRoland Dreier 			   IB_QP_PKEY_INDEX	|
284aef9ec39SRoland Dreier 			   IB_QP_ACCESS_FLAGS	|
285aef9ec39SRoland Dreier 			   IB_QP_PORT);
286aef9ec39SRoland Dreier 
287aef9ec39SRoland Dreier out:
288aef9ec39SRoland Dreier 	kfree(attr);
289aef9ec39SRoland Dreier 	return ret;
290aef9ec39SRoland Dreier }
291aef9ec39SRoland Dreier 
292509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch)
2939fe4bcf4SDavid Dillow {
294509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2959fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
2969fe4bcf4SDavid Dillow 
29705321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298509c07bcSBart Van Assche 				    srp_cm_handler, ch);
2999fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
3009fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
3019fe4bcf4SDavid Dillow 
302509c07bcSBart Van Assche 	if (ch->cm_id)
303509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
304509c07bcSBart Van Assche 	ch->cm_id = new_cm_id;
305509c07bcSBart Van Assche 	ch->path.sgid = target->sgid;
306509c07bcSBart Van Assche 	ch->path.dgid = target->orig_dgid;
307509c07bcSBart Van Assche 	ch->path.pkey = target->pkey;
308509c07bcSBart Van Assche 	ch->path.service_id = target->service_id;
3099fe4bcf4SDavid Dillow 
3109fe4bcf4SDavid Dillow 	return 0;
3119fe4bcf4SDavid Dillow }
3129fe4bcf4SDavid Dillow 
313d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314d1b4289eSBart Van Assche {
315d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
316d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
317d1b4289eSBart Van Assche 
318d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
319fa9863f8SBart Van Assche 	fmr_param.pool_size	    = target->mr_pool_size;
320d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
321d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
32252ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
32352ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
324d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
325d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
326d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
327d1b4289eSBart Van Assche 
328d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
329d1b4289eSBart Van Assche }
330d1b4289eSBart Van Assche 
3315cfb1782SBart Van Assche /**
3325cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3335cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3345cfb1782SBart Van Assche  */
3355cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3365cfb1782SBart Van Assche {
3375cfb1782SBart Van Assche 	int i;
3385cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3395cfb1782SBart Van Assche 
3405cfb1782SBart Van Assche 	if (!pool)
3415cfb1782SBart Van Assche 		return;
3425cfb1782SBart Van Assche 
3435cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3445cfb1782SBart Van Assche 		if (d->mr)
3455cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3465cfb1782SBart Van Assche 	}
3475cfb1782SBart Van Assche 	kfree(pool);
3485cfb1782SBart Van Assche }
3495cfb1782SBart Van Assche 
3505cfb1782SBart Van Assche /**
3515cfb1782SBart Van Assche  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
3525cfb1782SBart Van Assche  * @device:            IB device to allocate fast registration descriptors for.
3535cfb1782SBart Van Assche  * @pd:                Protection domain associated with the FR descriptors.
3545cfb1782SBart Van Assche  * @pool_size:         Number of descriptors to allocate.
3555cfb1782SBart Van Assche  * @max_page_list_len: Maximum fast registration work request page list length.
3565cfb1782SBart Van Assche  */
3575cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
3585cfb1782SBart Van Assche 					      struct ib_pd *pd, int pool_size,
3595cfb1782SBart Van Assche 					      int max_page_list_len)
3605cfb1782SBart Van Assche {
3615cfb1782SBart Van Assche 	struct srp_fr_pool *pool;
3625cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3635cfb1782SBart Van Assche 	struct ib_mr *mr;
3645cfb1782SBart Van Assche 	int i, ret = -EINVAL;
3655cfb1782SBart Van Assche 
3665cfb1782SBart Van Assche 	if (pool_size <= 0)
3675cfb1782SBart Van Assche 		goto err;
3685cfb1782SBart Van Assche 	ret = -ENOMEM;
3695cfb1782SBart Van Assche 	pool = kzalloc(sizeof(struct srp_fr_pool) +
3705cfb1782SBart Van Assche 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
3715cfb1782SBart Van Assche 	if (!pool)
3725cfb1782SBart Van Assche 		goto err;
3735cfb1782SBart Van Assche 	pool->size = pool_size;
3745cfb1782SBart Van Assche 	pool->max_page_list_len = max_page_list_len;
3755cfb1782SBart Van Assche 	spin_lock_init(&pool->lock);
3765cfb1782SBart Van Assche 	INIT_LIST_HEAD(&pool->free_list);
3775cfb1782SBart Van Assche 
3785cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
379563b67c5SSagi Grimberg 		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
380563b67c5SSagi Grimberg 				 max_page_list_len);
3815cfb1782SBart Van Assche 		if (IS_ERR(mr)) {
3825cfb1782SBart Van Assche 			ret = PTR_ERR(mr);
3835cfb1782SBart Van Assche 			goto destroy_pool;
3845cfb1782SBart Van Assche 		}
3855cfb1782SBart Van Assche 		d->mr = mr;
3865cfb1782SBart Van Assche 		list_add_tail(&d->entry, &pool->free_list);
3875cfb1782SBart Van Assche 	}
3885cfb1782SBart Van Assche 
3895cfb1782SBart Van Assche out:
3905cfb1782SBart Van Assche 	return pool;
3915cfb1782SBart Van Assche 
3925cfb1782SBart Van Assche destroy_pool:
3935cfb1782SBart Van Assche 	srp_destroy_fr_pool(pool);
3945cfb1782SBart Van Assche 
3955cfb1782SBart Van Assche err:
3965cfb1782SBart Van Assche 	pool = ERR_PTR(ret);
3975cfb1782SBart Van Assche 	goto out;
3985cfb1782SBart Van Assche }
3995cfb1782SBart Van Assche 
4005cfb1782SBart Van Assche /**
4015cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
4025cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4035cfb1782SBart Van Assche  */
4045cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4055cfb1782SBart Van Assche {
4065cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4075cfb1782SBart Van Assche 	unsigned long flags;
4085cfb1782SBart Van Assche 
4095cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4105cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4115cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4125cfb1782SBart Van Assche 		list_del(&d->entry);
4135cfb1782SBart Van Assche 	}
4145cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4155cfb1782SBart Van Assche 
4165cfb1782SBart Van Assche 	return d;
4175cfb1782SBart Van Assche }
4185cfb1782SBart Van Assche 
4195cfb1782SBart Van Assche /**
4205cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4215cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4225cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4235cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4245cfb1782SBart Van Assche  *
4255cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4265cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4275cfb1782SBart Van Assche  */
4285cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4295cfb1782SBart Van Assche 			    int n)
4305cfb1782SBart Van Assche {
4315cfb1782SBart Van Assche 	unsigned long flags;
4325cfb1782SBart Van Assche 	int i;
4335cfb1782SBart Van Assche 
4345cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4355cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4365cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4375cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4385cfb1782SBart Van Assche }
4395cfb1782SBart Van Assche 
4405cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
4415cfb1782SBart Van Assche {
4425cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
4435cfb1782SBart Van Assche 
444fa9863f8SBart Van Assche 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
4455cfb1782SBart Van Assche 				  dev->max_pages_per_mr);
4465cfb1782SBart Van Assche }
4475cfb1782SBart Van Assche 
4487dad6b2eSBart Van Assche /**
4497dad6b2eSBart Van Assche  * srp_destroy_qp() - destroy an RDMA queue pair
450*f83b2561SBart Van Assche  * @qp: RDMA queue pair.
4517dad6b2eSBart Van Assche  *
452561392d4SSteve Wise  * Drain the qp before destroying it.  This prevents the receive
453561392d4SSteve Wise  * completion handler from accessing the queue pair while it is
4547dad6b2eSBart Van Assche  * being destroyed.
4557dad6b2eSBart Van Assche  */
456*f83b2561SBart Van Assche static void srp_destroy_qp(struct ib_qp *qp)
4577dad6b2eSBart Van Assche {
458*f83b2561SBart Van Assche 	ib_drain_rq(qp);
459*f83b2561SBart Van Assche 	ib_destroy_qp(qp);
4607dad6b2eSBart Van Assche }
4617dad6b2eSBart Van Assche 
462509c07bcSBart Van Assche static int srp_create_ch_ib(struct srp_rdma_ch *ch)
463aef9ec39SRoland Dreier {
464509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
46562154b2eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
466aef9ec39SRoland Dreier 	struct ib_qp_init_attr *init_attr;
46773aa89edSIshai Rabinovitz 	struct ib_cq *recv_cq, *send_cq;
46873aa89edSIshai Rabinovitz 	struct ib_qp *qp;
469d1b4289eSBart Van Assche 	struct ib_fmr_pool *fmr_pool = NULL;
4705cfb1782SBart Van Assche 	struct srp_fr_pool *fr_pool = NULL;
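	/*
	 * When fast registration is used, up to three send work requests may
	 * be posted per SRP command: memory registration, the command itself
	 * and rkey invalidation. Size the send queue and send CQ accordingly.
	 */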
47109c0c0beSSagi Grimberg 	const int m = dev->use_fast_reg ? 3 : 1;
472aef9ec39SRoland Dreier 	int ret;
473aef9ec39SRoland Dreier 
474aef9ec39SRoland Dreier 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
475aef9ec39SRoland Dreier 	if (!init_attr)
476aef9ec39SRoland Dreier 		return -ENOMEM;
477aef9ec39SRoland Dreier 
478561392d4SSteve Wise 	/* queue_size + 1 for ib_drain_rq() */
4791dc7b1f1SChristoph Hellwig 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
4801dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_SOFTIRQ);
48173aa89edSIshai Rabinovitz 	if (IS_ERR(recv_cq)) {
48273aa89edSIshai Rabinovitz 		ret = PTR_ERR(recv_cq);
483da9d2f07SRoland Dreier 		goto err;
484aef9ec39SRoland Dreier 	}
485aef9ec39SRoland Dreier 
4861dc7b1f1SChristoph Hellwig 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
4871dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_DIRECT);
48873aa89edSIshai Rabinovitz 	if (IS_ERR(send_cq)) {
48973aa89edSIshai Rabinovitz 		ret = PTR_ERR(send_cq);
490da9d2f07SRoland Dreier 		goto err_recv_cq;
4919c03dc9fSBart Van Assche 	}
4929c03dc9fSBart Van Assche 
493aef9ec39SRoland Dreier 	init_attr->event_handler       = srp_qp_event;
4945cfb1782SBart Van Assche 	init_attr->cap.max_send_wr     = m * target->queue_size;
4957dad6b2eSBart Van Assche 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
496aef9ec39SRoland Dreier 	init_attr->cap.max_recv_sge    = 1;
497aef9ec39SRoland Dreier 	init_attr->cap.max_send_sge    = 1;
4985cfb1782SBart Van Assche 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
499aef9ec39SRoland Dreier 	init_attr->qp_type             = IB_QPT_RC;
50073aa89edSIshai Rabinovitz 	init_attr->send_cq             = send_cq;
50173aa89edSIshai Rabinovitz 	init_attr->recv_cq             = recv_cq;
502aef9ec39SRoland Dreier 
50362154b2eSBart Van Assche 	qp = ib_create_qp(dev->pd, init_attr);
50473aa89edSIshai Rabinovitz 	if (IS_ERR(qp)) {
50573aa89edSIshai Rabinovitz 		ret = PTR_ERR(qp);
506da9d2f07SRoland Dreier 		goto err_send_cq;
507aef9ec39SRoland Dreier 	}
508aef9ec39SRoland Dreier 
50973aa89edSIshai Rabinovitz 	ret = srp_init_qp(target, qp);
510da9d2f07SRoland Dreier 	if (ret)
511da9d2f07SRoland Dreier 		goto err_qp;
512aef9ec39SRoland Dreier 
513002f1567SBart Van Assche 	if (dev->use_fast_reg) {
5145cfb1782SBart Van Assche 		fr_pool = srp_alloc_fr_pool(target);
5155cfb1782SBart Van Assche 		if (IS_ERR(fr_pool)) {
5165cfb1782SBart Van Assche 			ret = PTR_ERR(fr_pool);
5175cfb1782SBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
5185cfb1782SBart Van Assche 				     "FR pool allocation failed (%d)\n", ret);
5195cfb1782SBart Van Assche 			goto err_qp;
5205cfb1782SBart Van Assche 		}
521002f1567SBart Van Assche 	} else if (dev->use_fmr) {
522d1b4289eSBart Van Assche 		fmr_pool = srp_alloc_fmr_pool(target);
523d1b4289eSBart Van Assche 		if (IS_ERR(fmr_pool)) {
524d1b4289eSBart Van Assche 			ret = PTR_ERR(fmr_pool);
525d1b4289eSBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
526d1b4289eSBart Van Assche 				     "FMR pool allocation failed (%d)\n", ret);
527d1b4289eSBart Van Assche 			goto err_qp;
528d1b4289eSBart Van Assche 		}
529d1b4289eSBart Van Assche 	}
530d1b4289eSBart Van Assche 
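	/*
	 * Replace the previous QP and CQs, if any. They are destroyed only
	 * now that the new QP, CQs and registration pool have been allocated
	 * successfully, so that a failure above leaves the channel unchanged.
	 */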
531509c07bcSBart Van Assche 	if (ch->qp)
532*f83b2561SBart Van Assche 		srp_destroy_qp(ch->qp);
533509c07bcSBart Van Assche 	if (ch->recv_cq)
5341dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->recv_cq);
535509c07bcSBart Van Assche 	if (ch->send_cq)
5361dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->send_cq);
53773aa89edSIshai Rabinovitz 
538509c07bcSBart Van Assche 	ch->qp = qp;
539509c07bcSBart Van Assche 	ch->recv_cq = recv_cq;
540509c07bcSBart Van Assche 	ch->send_cq = send_cq;
54173aa89edSIshai Rabinovitz 
5427fbc67dfSSagi Grimberg 	if (dev->use_fast_reg) {
5437fbc67dfSSagi Grimberg 		if (ch->fr_pool)
5447fbc67dfSSagi Grimberg 			srp_destroy_fr_pool(ch->fr_pool);
5457fbc67dfSSagi Grimberg 		ch->fr_pool = fr_pool;
5467fbc67dfSSagi Grimberg 	} else if (dev->use_fmr) {
5477fbc67dfSSagi Grimberg 		if (ch->fmr_pool)
5487fbc67dfSSagi Grimberg 			ib_destroy_fmr_pool(ch->fmr_pool);
5497fbc67dfSSagi Grimberg 		ch->fmr_pool = fmr_pool;
5507fbc67dfSSagi Grimberg 	}
5517fbc67dfSSagi Grimberg 
552da9d2f07SRoland Dreier 	kfree(init_attr);
553da9d2f07SRoland Dreier 	return 0;
554da9d2f07SRoland Dreier 
555da9d2f07SRoland Dreier err_qp:
556*f83b2561SBart Van Assche 	srp_destroy_qp(qp);
557da9d2f07SRoland Dreier 
558da9d2f07SRoland Dreier err_send_cq:
5591dc7b1f1SChristoph Hellwig 	ib_free_cq(send_cq);
560da9d2f07SRoland Dreier 
561da9d2f07SRoland Dreier err_recv_cq:
5621dc7b1f1SChristoph Hellwig 	ib_free_cq(recv_cq);
563da9d2f07SRoland Dreier 
564da9d2f07SRoland Dreier err:
565aef9ec39SRoland Dreier 	kfree(init_attr);
566aef9ec39SRoland Dreier 	return ret;
567aef9ec39SRoland Dreier }
568aef9ec39SRoland Dreier 
5694d73f95fSBart Van Assche /*
5704d73f95fSBart Van Assche  * Note: this function may be called without srp_alloc_iu_bufs() having been
571509c07bcSBart Van Assche  * invoked. Hence the ch->[rt]x_ring checks.
5724d73f95fSBart Van Assche  */
573509c07bcSBart Van Assche static void srp_free_ch_ib(struct srp_target_port *target,
574509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch)
575aef9ec39SRoland Dreier {
5765cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
577aef9ec39SRoland Dreier 	int i;
578aef9ec39SRoland Dreier 
579d92c0da7SBart Van Assche 	if (!ch->target)
580d92c0da7SBart Van Assche 		return;
581d92c0da7SBart Van Assche 
582509c07bcSBart Van Assche 	if (ch->cm_id) {
583509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
584509c07bcSBart Van Assche 		ch->cm_id = NULL;
585394c595eSBart Van Assche 	}
586394c595eSBart Van Assche 
587d92c0da7SBart Van Assche 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
588d92c0da7SBart Van Assche 	if (!ch->qp)
589d92c0da7SBart Van Assche 		return;
590d92c0da7SBart Van Assche 
5915cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
592509c07bcSBart Van Assche 		if (ch->fr_pool)
593509c07bcSBart Van Assche 			srp_destroy_fr_pool(ch->fr_pool);
594002f1567SBart Van Assche 	} else if (dev->use_fmr) {
595509c07bcSBart Van Assche 		if (ch->fmr_pool)
596509c07bcSBart Van Assche 			ib_destroy_fmr_pool(ch->fmr_pool);
5975cfb1782SBart Van Assche 	}
5981dc7b1f1SChristoph Hellwig 
599*f83b2561SBart Van Assche 	srp_destroy_qp(ch->qp);
6001dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->send_cq);
6011dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->recv_cq);
602aef9ec39SRoland Dreier 
603d92c0da7SBart Van Assche 	/*
604d92c0da7SBart Van Assche 	 * Prevent the SCSI error handler from using this channel after it
605d92c0da7SBart Van Assche 	 * has been freed: the SCSI error handler may continue trying to
606d92c0da7SBart Van Assche 	 * perform recovery actions after scsi_remove_host() has
607d92c0da7SBart Van Assche 	 * returned.
608d92c0da7SBart Van Assche 	 */
609d92c0da7SBart Van Assche 	ch->target = NULL;
610d92c0da7SBart Van Assche 
611509c07bcSBart Van Assche 	ch->qp = NULL;
612509c07bcSBart Van Assche 	ch->send_cq = ch->recv_cq = NULL;
61373aa89edSIshai Rabinovitz 
614509c07bcSBart Van Assche 	if (ch->rx_ring) {
6154d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
616509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
617509c07bcSBart Van Assche 		kfree(ch->rx_ring);
618509c07bcSBart Van Assche 		ch->rx_ring = NULL;
6194d73f95fSBart Van Assche 	}
620509c07bcSBart Van Assche 	if (ch->tx_ring) {
6214d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
622509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
623509c07bcSBart Van Assche 		kfree(ch->tx_ring);
624509c07bcSBart Van Assche 		ch->tx_ring = NULL;
6254d73f95fSBart Van Assche 	}
626aef9ec39SRoland Dreier }
627aef9ec39SRoland Dreier 
628aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
629aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
630509c07bcSBart Van Assche 				    void *ch_ptr)
631aef9ec39SRoland Dreier {
632509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
633509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
634aef9ec39SRoland Dreier 
635509c07bcSBart Van Assche 	ch->status = status;
636aef9ec39SRoland Dreier 	if (status)
6377aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
6387aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
639aef9ec39SRoland Dreier 	else
640509c07bcSBart Van Assche 		ch->path = *pathrec;
641509c07bcSBart Van Assche 	complete(&ch->done);
642aef9ec39SRoland Dreier }
643aef9ec39SRoland Dreier 
644509c07bcSBart Van Assche static int srp_lookup_path(struct srp_rdma_ch *ch)
645aef9ec39SRoland Dreier {
646509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
647a702adceSBart Van Assche 	int ret;
648a702adceSBart Van Assche 
649509c07bcSBart Van Assche 	ch->path.numb_path = 1;
650aef9ec39SRoland Dreier 
651509c07bcSBart Van Assche 	init_completion(&ch->done);
652aef9ec39SRoland Dreier 
653509c07bcSBart Van Assche 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
65405321937SGreg Kroah-Hartman 					       target->srp_host->srp_dev->dev,
655aef9ec39SRoland Dreier 					       target->srp_host->port,
656509c07bcSBart Van Assche 					       &ch->path,
657247e020eSSean Hefty 					       IB_SA_PATH_REC_SERVICE_ID |
658aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_DGID	 |
659aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_SGID	 |
660aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_NUMB_PATH	 |
661aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_PKEY,
662aef9ec39SRoland Dreier 					       SRP_PATH_REC_TIMEOUT_MS,
663aef9ec39SRoland Dreier 					       GFP_KERNEL,
664aef9ec39SRoland Dreier 					       srp_path_rec_completion,
665509c07bcSBart Van Assche 					       ch, &ch->path_query);
666509c07bcSBart Van Assche 	if (ch->path_query_id < 0)
667509c07bcSBart Van Assche 		return ch->path_query_id;
668aef9ec39SRoland Dreier 
669509c07bcSBart Van Assche 	ret = wait_for_completion_interruptible(&ch->done);
670a702adceSBart Van Assche 	if (ret < 0)
671a702adceSBart Van Assche 		return ret;
672aef9ec39SRoland Dreier 
673509c07bcSBart Van Assche 	if (ch->status < 0)
6747aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
6757aa54bd7SDavid Dillow 			     PFX "Path record query failed\n");
676aef9ec39SRoland Dreier 
677509c07bcSBart Van Assche 	return ch->status;
678aef9ec39SRoland Dreier }
679aef9ec39SRoland Dreier 
680d92c0da7SBart Van Assche static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
681aef9ec39SRoland Dreier {
682509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
683aef9ec39SRoland Dreier 	struct {
684aef9ec39SRoland Dreier 		struct ib_cm_req_param param;
685aef9ec39SRoland Dreier 		struct srp_login_req   priv;
686aef9ec39SRoland Dreier 	} *req = NULL;
687aef9ec39SRoland Dreier 	int status;
688aef9ec39SRoland Dreier 
689aef9ec39SRoland Dreier 	req = kzalloc(sizeof *req, GFP_KERNEL);
690aef9ec39SRoland Dreier 	if (!req)
691aef9ec39SRoland Dreier 		return -ENOMEM;
692aef9ec39SRoland Dreier 
693509c07bcSBart Van Assche 	req->param.primary_path		      = &ch->path;
694aef9ec39SRoland Dreier 	req->param.alternate_path 	      = NULL;
695aef9ec39SRoland Dreier 	req->param.service_id 		      = target->service_id;
696509c07bcSBart Van Assche 	req->param.qp_num		      = ch->qp->qp_num;
697509c07bcSBart Van Assche 	req->param.qp_type		      = ch->qp->qp_type;
698aef9ec39SRoland Dreier 	req->param.private_data 	      = &req->priv;
699aef9ec39SRoland Dreier 	req->param.private_data_len 	      = sizeof req->priv;
700aef9ec39SRoland Dreier 	req->param.flow_control 	      = 1;
701aef9ec39SRoland Dreier 
702aef9ec39SRoland Dreier 	get_random_bytes(&req->param.starting_psn, 4);
703aef9ec39SRoland Dreier 	req->param.starting_psn 	     &= 0xffffff;
704aef9ec39SRoland Dreier 
705aef9ec39SRoland Dreier 	/*
706aef9ec39SRoland Dreier 	 * Pick some arbitrary defaults here; we could make these
707aef9ec39SRoland Dreier 	 * module parameters if anyone cared about setting them.
708aef9ec39SRoland Dreier 	 */
709aef9ec39SRoland Dreier 	req->param.responder_resources	      = 4;
710aef9ec39SRoland Dreier 	req->param.remote_cm_response_timeout = 20;
711aef9ec39SRoland Dreier 	req->param.local_cm_response_timeout  = 20;
7127bb312e4SVu Pham 	req->param.retry_count                = target->tl_retry_count;
713aef9ec39SRoland Dreier 	req->param.rnr_retry_count 	      = 7;
714aef9ec39SRoland Dreier 	req->param.max_cm_retries 	      = 15;
715aef9ec39SRoland Dreier 
716aef9ec39SRoland Dreier 	req->priv.opcode     	= SRP_LOGIN_REQ;
717aef9ec39SRoland Dreier 	req->priv.tag        	= 0;
71849248644SDavid Dillow 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
719aef9ec39SRoland Dreier 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
720aef9ec39SRoland Dreier 					      SRP_BUF_FORMAT_INDIRECT);
721d92c0da7SBart Van Assche 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
722d92c0da7SBart Van Assche 				   SRP_MULTICHAN_SINGLE);
7230c0450dbSRamachandra K 	/*
7240c0450dbSRamachandra K 	 * In the published SRP specification (draft rev. 16a), the
7250c0450dbSRamachandra K 	 * port identifier format is 8 bytes of ID extension followed
7260c0450dbSRamachandra K 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
7270c0450dbSRamachandra K 	 * opposite order, so that the GUID comes first.
7280c0450dbSRamachandra K 	 *
7290c0450dbSRamachandra K 	 * Targets conforming to these obsolete drafts can be
7300c0450dbSRamachandra K 	 * recognized by the I/O Class they report.
7310c0450dbSRamachandra K 	 */
7320c0450dbSRamachandra K 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
7330c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
734747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7350c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id + 8,
73601cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
7370c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
7380c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
7390c0450dbSRamachandra K 	} else {
7400c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
74101cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
74201cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
743747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7440c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
7450c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
7460c0450dbSRamachandra K 	}
7470c0450dbSRamachandra K 
748aef9ec39SRoland Dreier 	/*
749aef9ec39SRoland Dreier 	 * Topspin/Cisco SRP targets will reject our login unless we
75001cb9bcbSIshai Rabinovitz 	 * zero out the first 8 bytes of our initiator port ID and set
75101cb9bcbSIshai Rabinovitz 	 * the second 8 bytes to the local node GUID.
752aef9ec39SRoland Dreier 	 */
7535d7cbfd6SRoland Dreier 	if (srp_target_is_topspin(target)) {
7547aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
7557aa54bd7SDavid Dillow 			     PFX "Topspin/Cisco initiator port ID workaround "
756aef9ec39SRoland Dreier 			     "activated for target GUID %016llx\n",
75745c37cadSBart Van Assche 			     be64_to_cpu(target->ioc_guid));
758aef9ec39SRoland Dreier 		memset(req->priv.initiator_port_id, 0, 8);
75901cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
76005321937SGreg Kroah-Hartman 		       &target->srp_host->srp_dev->dev->node_guid, 8);
761aef9ec39SRoland Dreier 	}
762aef9ec39SRoland Dreier 
763509c07bcSBart Van Assche 	status = ib_send_cm_req(ch->cm_id, &req->param);
764aef9ec39SRoland Dreier 
765aef9ec39SRoland Dreier 	kfree(req);
766aef9ec39SRoland Dreier 
767aef9ec39SRoland Dreier 	return status;
768aef9ec39SRoland Dreier }
769aef9ec39SRoland Dreier 
770ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
771ef6c49d8SBart Van Assche {
772ef6c49d8SBart Van Assche 	bool changed = false;
773ef6c49d8SBart Van Assche 
774ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
775ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
776ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
777ef6c49d8SBart Van Assche 		changed = true;
778ef6c49d8SBart Van Assche 	}
779ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
780ef6c49d8SBart Van Assche 
781ef6c49d8SBart Van Assche 	if (changed)
782bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
783ef6c49d8SBart Van Assche 
784ef6c49d8SBart Van Assche 	return changed;
785ef6c49d8SBart Van Assche }
786ef6c49d8SBart Van Assche 
787aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
788aef9ec39SRoland Dreier {
789d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
790d92c0da7SBart Van Assche 	int i;
791509c07bcSBart Van Assche 
792aef9ec39SRoland Dreier 	/* XXX should send SRP_I_LOGOUT request */
793aef9ec39SRoland Dreier 
794d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
795d92c0da7SBart Van Assche 		ch = &target->ch[i];
796c014c8cdSBart Van Assche 		ch->connected = false;
797d92c0da7SBart Van Assche 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
7987aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
7997aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
800aef9ec39SRoland Dreier 		}
801294c875aSBart Van Assche 	}
802294c875aSBart Van Assche }
803aef9ec39SRoland Dreier 
804509c07bcSBart Van Assche static void srp_free_req_data(struct srp_target_port *target,
805509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
8068f26c9ffSDavid Dillow {
8075cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
8085cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
8098f26c9ffSDavid Dillow 	struct srp_request *req;
8108f26c9ffSDavid Dillow 	int i;
8118f26c9ffSDavid Dillow 
81247513cf4SBart Van Assche 	if (!ch->req_ring)
8134d73f95fSBart Van Assche 		return;
8144d73f95fSBart Van Assche 
8154d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
816509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8179a21be53SSagi Grimberg 		if (dev->use_fast_reg) {
8185cfb1782SBart Van Assche 			kfree(req->fr_list);
8199a21be53SSagi Grimberg 		} else {
8208f26c9ffSDavid Dillow 			kfree(req->fmr_list);
8218f26c9ffSDavid Dillow 			kfree(req->map_page);
8229a21be53SSagi Grimberg 		}
823c07d424dSDavid Dillow 		if (req->indirect_dma_addr) {
824c07d424dSDavid Dillow 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
825c07d424dSDavid Dillow 					    target->indirect_size,
826c07d424dSDavid Dillow 					    DMA_TO_DEVICE);
827c07d424dSDavid Dillow 		}
828c07d424dSDavid Dillow 		kfree(req->indirect_desc);
8298f26c9ffSDavid Dillow 	}
8304d73f95fSBart Van Assche 
831509c07bcSBart Van Assche 	kfree(ch->req_ring);
832509c07bcSBart Van Assche 	ch->req_ring = NULL;
8338f26c9ffSDavid Dillow }
8348f26c9ffSDavid Dillow 
835509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch)
836b81d00bdSBart Van Assche {
837509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
838b81d00bdSBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
839b81d00bdSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
840b81d00bdSBart Van Assche 	struct srp_request *req;
8415cfb1782SBart Van Assche 	void *mr_list;
842b81d00bdSBart Van Assche 	dma_addr_t dma_addr;
843b81d00bdSBart Van Assche 	int i, ret = -ENOMEM;
844b81d00bdSBart Van Assche 
845509c07bcSBart Van Assche 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
846509c07bcSBart Van Assche 			       GFP_KERNEL);
847509c07bcSBart Van Assche 	if (!ch->req_ring)
8484d73f95fSBart Van Assche 		goto out;
8494d73f95fSBart Van Assche 
8504d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
851509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8525cfb1782SBart Van Assche 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
853b81d00bdSBart Van Assche 				  GFP_KERNEL);
8545cfb1782SBart Van Assche 		if (!mr_list)
8555cfb1782SBart Van Assche 			goto out;
8569a21be53SSagi Grimberg 		if (srp_dev->use_fast_reg) {
8575cfb1782SBart Van Assche 			req->fr_list = mr_list;
8589a21be53SSagi Grimberg 		} else {
8595cfb1782SBart Van Assche 			req->fmr_list = mr_list;
86052ede08fSBart Van Assche 			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
861d1b4289eSBart Van Assche 						sizeof(void *), GFP_KERNEL);
8625cfb1782SBart Van Assche 			if (!req->map_page)
8635cfb1782SBart Van Assche 				goto out;
8649a21be53SSagi Grimberg 		}
865b81d00bdSBart Van Assche 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
8665cfb1782SBart Van Assche 		if (!req->indirect_desc)
867b81d00bdSBart Van Assche 			goto out;
868b81d00bdSBart Van Assche 
869b81d00bdSBart Van Assche 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
870b81d00bdSBart Van Assche 					     target->indirect_size,
871b81d00bdSBart Van Assche 					     DMA_TO_DEVICE);
872b81d00bdSBart Van Assche 		if (ib_dma_mapping_error(ibdev, dma_addr))
873b81d00bdSBart Van Assche 			goto out;
874b81d00bdSBart Van Assche 
875b81d00bdSBart Van Assche 		req->indirect_dma_addr = dma_addr;
876b81d00bdSBart Van Assche 	}
877b81d00bdSBart Van Assche 	ret = 0;
878b81d00bdSBart Van Assche 
879b81d00bdSBart Van Assche out:
880b81d00bdSBart Van Assche 	return ret;
881b81d00bdSBart Van Assche }
882b81d00bdSBart Van Assche 
883683b159aSBart Van Assche /**
884683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
885683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
886683b159aSBart Van Assche  *
887683b159aSBart Van Assche  * Note: Any attributes defined in the host template that did not exist
888683b159aSBart Van Assche  * before invocation of this function will be ignored.
889683b159aSBart Van Assche  */
890683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
891683b159aSBart Van Assche {
892683b159aSBart Van Assche 	struct device_attribute **attr;
893683b159aSBart Van Assche 
894683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
895683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
896683b159aSBart Van Assche }
897683b159aSBart Van Assche 
898ee12d6a8SBart Van Assche static void srp_remove_target(struct srp_target_port *target)
899ee12d6a8SBart Van Assche {
900d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
901d92c0da7SBart Van Assche 	int i;
902509c07bcSBart Van Assche 
903ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
904ef6c49d8SBart Van Assche 
905ee12d6a8SBart Van Assche 	srp_del_scsi_host_attr(target->scsi_host);
9069dd69a60SBart Van Assche 	srp_rport_get(target->rport);
907ee12d6a8SBart Van Assche 	srp_remove_host(target->scsi_host);
908ee12d6a8SBart Van Assche 	scsi_remove_host(target->scsi_host);
90993079162SBart Van Assche 	srp_stop_rport_timers(target->rport);
910ef6c49d8SBart Van Assche 	srp_disconnect_target(target);
911d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
912d92c0da7SBart Van Assche 		ch = &target->ch[i];
913509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
914d92c0da7SBart Van Assche 	}
915c1120f89SBart Van Assche 	cancel_work_sync(&target->tl_err_work);
9169dd69a60SBart Van Assche 	srp_rport_put(target->rport);
917d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
918d92c0da7SBart Van Assche 		ch = &target->ch[i];
919509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
920d92c0da7SBart Van Assche 	}
921d92c0da7SBart Van Assche 	kfree(target->ch);
922d92c0da7SBart Van Assche 	target->ch = NULL;
92365d7dd2fSVu Pham 
92465d7dd2fSVu Pham 	spin_lock(&target->srp_host->target_lock);
92565d7dd2fSVu Pham 	list_del(&target->list);
92665d7dd2fSVu Pham 	spin_unlock(&target->srp_host->target_lock);
92765d7dd2fSVu Pham 
928ee12d6a8SBart Van Assche 	scsi_host_put(target->scsi_host);
929ee12d6a8SBart Van Assche }
930ee12d6a8SBart Van Assche 
931c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
932aef9ec39SRoland Dreier {
933c4028958SDavid Howells 	struct srp_target_port *target =
934ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
935aef9ec39SRoland Dreier 
936ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
937aef9ec39SRoland Dreier 
93896fc248aSBart Van Assche 	srp_remove_target(target);
939aef9ec39SRoland Dreier }
940aef9ec39SRoland Dreier 
941dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport)
942dc1bdbd9SBart Van Assche {
943dc1bdbd9SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
944dc1bdbd9SBart Van Assche 
945dc1bdbd9SBart Van Assche 	srp_queue_remove_work(target);
946dc1bdbd9SBart Van Assche }
947dc1bdbd9SBart Van Assche 
948c014c8cdSBart Van Assche /**
949c014c8cdSBart Van Assche  * srp_connected_ch() - number of connected channels
950c014c8cdSBart Van Assche  * @target: SRP target port.
951c014c8cdSBart Van Assche  */
952c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target)
953c014c8cdSBart Van Assche {
954c014c8cdSBart Van Assche 	int i, c = 0;
955c014c8cdSBart Van Assche 
956c014c8cdSBart Van Assche 	for (i = 0; i < target->ch_count; i++)
957c014c8cdSBart Van Assche 		c += target->ch[i].connected;
958c014c8cdSBart Van Assche 
959c014c8cdSBart Van Assche 	return c;
960c014c8cdSBart Van Assche }
961c014c8cdSBart Van Assche 
962d92c0da7SBart Van Assche static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
963aef9ec39SRoland Dreier {
964509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
965aef9ec39SRoland Dreier 	int ret;
966aef9ec39SRoland Dreier 
967c014c8cdSBart Van Assche 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
968294c875aSBart Van Assche 
969509c07bcSBart Van Assche 	ret = srp_lookup_path(ch);
970aef9ec39SRoland Dreier 	if (ret)
9714d59ad29SBart Van Assche 		goto out;
972aef9ec39SRoland Dreier 
973aef9ec39SRoland Dreier 	while (1) {
974509c07bcSBart Van Assche 		init_completion(&ch->done);
975d92c0da7SBart Van Assche 		ret = srp_send_req(ch, multich);
976aef9ec39SRoland Dreier 		if (ret)
9774d59ad29SBart Van Assche 			goto out;
978509c07bcSBart Van Assche 		ret = wait_for_completion_interruptible(&ch->done);
979a702adceSBart Van Assche 		if (ret < 0)
9804d59ad29SBart Van Assche 			goto out;
981aef9ec39SRoland Dreier 
982aef9ec39SRoland Dreier 		/*
983aef9ec39SRoland Dreier 		 * The CM event handling code will set status to
984aef9ec39SRoland Dreier 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
985aef9ec39SRoland Dreier 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
986aef9ec39SRoland Dreier 		 * redirect REJ back.
987aef9ec39SRoland Dreier 		 */
9884d59ad29SBart Van Assche 		ret = ch->status;
9894d59ad29SBart Van Assche 		switch (ret) {
990aef9ec39SRoland Dreier 		case 0:
991c014c8cdSBart Van Assche 			ch->connected = true;
9924d59ad29SBart Van Assche 			goto out;
993aef9ec39SRoland Dreier 
994aef9ec39SRoland Dreier 		case SRP_PORT_REDIRECT:
995509c07bcSBart Van Assche 			ret = srp_lookup_path(ch);
996aef9ec39SRoland Dreier 			if (ret)
9974d59ad29SBart Van Assche 				goto out;
998aef9ec39SRoland Dreier 			break;
999aef9ec39SRoland Dreier 
1000aef9ec39SRoland Dreier 		case SRP_DLID_REDIRECT:
1001aef9ec39SRoland Dreier 			break;
1002aef9ec39SRoland Dreier 
10039fe4bcf4SDavid Dillow 		case SRP_STALE_CONN:
10049fe4bcf4SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host, PFX
10059fe4bcf4SDavid Dillow 				     "giving up on stale connection\n");
10064d59ad29SBart Van Assche 			ret = -ECONNRESET;
10074d59ad29SBart Van Assche 			goto out;
10089fe4bcf4SDavid Dillow 
1009aef9ec39SRoland Dreier 		default:
10104d59ad29SBart Van Assche 			goto out;
1011aef9ec39SRoland Dreier 		}
1012aef9ec39SRoland Dreier 	}
10134d59ad29SBart Van Assche 
10144d59ad29SBart Van Assche out:
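	/*
	 * Callers expect 0 or a negative errno; map any positive ch->status
	 * value that reaches this point to -ENODEV.
	 */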
10154d59ad29SBart Van Assche 	return ret <= 0 ? ret : -ENODEV;
1016aef9ec39SRoland Dreier }
1017aef9ec39SRoland Dreier 
10181dc7b1f1SChristoph Hellwig static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
10191dc7b1f1SChristoph Hellwig {
10201dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "INV RKEY");
10211dc7b1f1SChristoph Hellwig }
10221dc7b1f1SChristoph Hellwig 
10231dc7b1f1SChristoph Hellwig static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
10241dc7b1f1SChristoph Hellwig 		u32 rkey)
10255cfb1782SBart Van Assche {
10265cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
10275cfb1782SBart Van Assche 	struct ib_send_wr wr = {
10285cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
10295cfb1782SBart Van Assche 		.next		    = NULL,
10305cfb1782SBart Van Assche 		.num_sge	    = 0,
10315cfb1782SBart Van Assche 		.send_flags	    = 0,
10325cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
10335cfb1782SBart Van Assche 	};
10345cfb1782SBart Van Assche 
10351dc7b1f1SChristoph Hellwig 	wr.wr_cqe = &req->reg_cqe;
10361dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_inv_rkey_err_done;
1037509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
10385cfb1782SBart Van Assche }
10395cfb1782SBart Van Assche 
1040d945e1dfSRoland Dreier static void srp_unmap_data(struct scsi_cmnd *scmnd,
1041509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch,
1042d945e1dfSRoland Dreier 			   struct srp_request *req)
1043d945e1dfSRoland Dreier {
1044509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
10455cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
10465cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
10475cfb1782SBart Van Assche 	int i, res;
10488f26c9ffSDavid Dillow 
1049bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) ||
1050d945e1dfSRoland Dreier 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1051d945e1dfSRoland Dreier 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1052d945e1dfSRoland Dreier 		return;
1053d945e1dfSRoland Dreier 
10545cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
10555cfb1782SBart Van Assche 		struct srp_fr_desc **pfr;
10565cfb1782SBart Van Assche 
10575cfb1782SBart Van Assche 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
10581dc7b1f1SChristoph Hellwig 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
10595cfb1782SBart Van Assche 			if (res < 0) {
10605cfb1782SBart Van Assche 				shost_printk(KERN_ERR, target->scsi_host, PFX
10615cfb1782SBart Van Assche 				  "Queueing INV WR for rkey %#x failed (%d)\n",
10625cfb1782SBart Van Assche 				  (*pfr)->mr->rkey, res);
10635cfb1782SBart Van Assche 				queue_work(system_long_wq,
10645cfb1782SBart Van Assche 					   &target->tl_err_work);
10655cfb1782SBart Van Assche 			}
10665cfb1782SBart Van Assche 		}
10675cfb1782SBart Van Assche 		if (req->nmdesc)
1068509c07bcSBart Van Assche 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
10695cfb1782SBart Van Assche 					req->nmdesc);
1070002f1567SBart Van Assche 	} else if (dev->use_fmr) {
10715cfb1782SBart Van Assche 		struct ib_pool_fmr **pfmr;
10725cfb1782SBart Van Assche 
10735cfb1782SBart Van Assche 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
10745cfb1782SBart Van Assche 			ib_fmr_pool_unmap(*pfmr);
10755cfb1782SBart Van Assche 	}
1076f5358a17SRoland Dreier 
10778f26c9ffSDavid Dillow 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
10788f26c9ffSDavid Dillow 			scmnd->sc_data_direction);
1079d945e1dfSRoland Dreier }
1080d945e1dfSRoland Dreier 
108122032991SBart Van Assche /**
108222032991SBart Van Assche  * srp_claim_req - Take ownership of the scmnd associated with a request.
1083509c07bcSBart Van Assche  * @ch: SRP RDMA channel.
108422032991SBart Van Assche  * @req: SRP request.
1085b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership for this SCSI device.
108622032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
108722032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
108822032991SBart Van Assche  *
108922032991SBart Van Assche  * Return value:
109022032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
109122032991SBart Van Assche  */
1092509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
109322032991SBart Van Assche 				       struct srp_request *req,
1094b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
109522032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1096526b4caaSIshai Rabinovitz {
109794a9174cSBart Van Assche 	unsigned long flags;
109894a9174cSBart Van Assche 
1099509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1100b3fe628dSBart Van Assche 	if (req->scmnd &&
1101b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1102b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
110322032991SBart Van Assche 		scmnd = req->scmnd;
110422032991SBart Van Assche 		req->scmnd = NULL;
110522032991SBart Van Assche 	} else {
110622032991SBart Van Assche 		scmnd = NULL;
110722032991SBart Van Assche 	}
1108509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
110922032991SBart Van Assche 
111022032991SBart Van Assche 	return scmnd;
111122032991SBart Van Assche }
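srp_claim_req() above is essentially a compare-and-take-ownership helper: under ch->lock it returns and clears req->scmnd only when the optional device/command filters match, so exactly one of the normal completion path and the error-handling path ends up owning the command. A minimal user-space sketch of the same pattern follows (illustrative only; a pthread mutex stands in for the channel lock and the types are simplified).

#include <pthread.h>
#include <stddef.h>

struct cmd { int id; };

struct request {
	struct cmd *scmnd;
};

struct channel {
	pthread_mutex_t lock;
};

/*
 * Return the command we took ownership of, or NULL if another context
 * already owns it or the filter does not match.
 */
static struct cmd *claim_req(struct channel *ch, struct request *req,
			     struct cmd *expected)
{
	struct cmd *scmnd;

	pthread_mutex_lock(&ch->lock);
	if (req->scmnd && (!expected || req->scmnd == expected)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;	/* ownership transferred to the caller */
	} else {
		scmnd = NULL;
	}
	pthread_mutex_unlock(&ch->lock);

	return scmnd;
}

int main(void)
{
	struct channel ch = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cmd c = { .id = 1 };
	struct request req = { .scmnd = &c };

	/* The first claim succeeds and clears req.scmnd; the second one fails. */
	return !(claim_req(&ch, &req, &c) == &c &&
		 claim_req(&ch, &req, NULL) == NULL);
}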
111222032991SBart Van Assche 
111322032991SBart Van Assche /**
11146ec2ba02SBart Van Assche  * srp_free_req() - Unmap data and adjust ch->req_lim.
1115509c07bcSBart Van Assche  * @ch:     SRP RDMA channel.
1116af24663bSBart Van Assche  * @req:    Request to be freed.
1117af24663bSBart Van Assche  * @scmnd:  SCSI command associated with @req.
1118af24663bSBart Van Assche  * @req_lim_delta: Amount to be added to @ch->req_lim.
111922032991SBart Van Assche  */
1120509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1121509c07bcSBart Van Assche 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
112222032991SBart Van Assche {
112322032991SBart Van Assche 	unsigned long flags;
112422032991SBart Van Assche 
1125509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
112622032991SBart Van Assche 
1127509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1128509c07bcSBart Van Assche 	ch->req_lim += req_lim_delta;
1129509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
1130526b4caaSIshai Rabinovitz }
1131526b4caaSIshai Rabinovitz 
1132509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1133509c07bcSBart Van Assche 			   struct scsi_device *sdev, int result)
1134526b4caaSIshai Rabinovitz {
1135509c07bcSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
113622032991SBart Van Assche 
113722032991SBart Van Assche 	if (scmnd) {
1138509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd, 0);
1139ed9b2264SBart Van Assche 		scmnd->result = result;
114022032991SBart Van Assche 		scmnd->scsi_done(scmnd);
114122032991SBart Van Assche 	}
1142526b4caaSIshai Rabinovitz }
1143526b4caaSIshai Rabinovitz 
1144ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport)
1145aef9ec39SRoland Dreier {
1146ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1147d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1148b3fe628dSBart Van Assche 	struct Scsi_Host *shost = target->scsi_host;
1149b3fe628dSBart Van Assche 	struct scsi_device *sdev;
1150d92c0da7SBart Van Assche 	int i, j;
1151aef9ec39SRoland Dreier 
1152b3fe628dSBart Van Assche 	/*
1153b3fe628dSBart Van Assche 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1154b3fe628dSBart Van Assche 	 * is not safe. Hence the warning statement below.
1155b3fe628dSBart Van Assche 	 */
1156b3fe628dSBart Van Assche 	shost_for_each_device(sdev, shost)
1157b3fe628dSBart Van Assche 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1158b3fe628dSBart Van Assche 
1159d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1160d92c0da7SBart Van Assche 		ch = &target->ch[i];
1161509c07bcSBart Van Assche 
1162d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1163d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1164d92c0da7SBart Van Assche 
1165d92c0da7SBart Van Assche 			srp_finish_req(ch, req, NULL,
1166d92c0da7SBart Van Assche 				       DID_TRANSPORT_FAILFAST << 16);
1167d92c0da7SBart Van Assche 		}
1168ed9b2264SBart Van Assche 	}
1169ed9b2264SBart Van Assche }
1170ed9b2264SBart Van Assche 
1171ed9b2264SBart Van Assche /*
1172ed9b2264SBart Van Assche  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1173ed9b2264SBart Van Assche  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1174ed9b2264SBart Van Assche  * srp_reset_device() or srp_reset_host() calls will occur while this function
1175ed9b2264SBart Van Assche  * is in progress. One way to ensure this is not to call this function
1176ed9b2264SBart Van Assche  * directly but to call srp_reconnect_rport() instead, since the latter
1177ed9b2264SBart Van Assche  * serializes calls to this function via rport->mutex and also blocks
1178ed9b2264SBart Van Assche  * srp_queuecommand() calls before invoking this function.
1179ed9b2264SBart Van Assche  */
1180ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport)
1181ed9b2264SBart Van Assche {
1182ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1183d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1184d92c0da7SBart Van Assche 	int i, j, ret = 0;
1185d92c0da7SBart Van Assche 	bool multich = false;
118609be70a2SBart Van Assche 
1187aef9ec39SRoland Dreier 	srp_disconnect_target(target);
118834aa654eSBart Van Assche 
118934aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
119034aa654eSBart Van Assche 		return -ENODEV;
119134aa654eSBart Van Assche 
1192aef9ec39SRoland Dreier 	/*
1193c7c4e7ffSBart Van Assche 	 * Now get a new local CM ID so that we avoid confusing the target in
1194c7c4e7ffSBart Van Assche 	 * case things are really fouled up. Doing so also ensures that all CM
1195c7c4e7ffSBart Van Assche 	 * callbacks will have finished before a new QP is allocated.
1196aef9ec39SRoland Dreier 	 */
1197d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1198d92c0da7SBart Van Assche 		ch = &target->ch[i];
1199d92c0da7SBart Van Assche 		ret += srp_new_cm_id(ch);
1200d92c0da7SBart Van Assche 	}
1201d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1202d92c0da7SBart Van Assche 		ch = &target->ch[i];
1203d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1204d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1205509c07bcSBart Van Assche 
1206509c07bcSBart Van Assche 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1207536ae14eSBart Van Assche 		}
1208d92c0da7SBart Van Assche 	}
1209d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1210d92c0da7SBart Van Assche 		ch = &target->ch[i];
12115cfb1782SBart Van Assche 		/*
12125cfb1782SBart Van Assche 		 * Whether or not creating a new CM ID succeeded, create a new
1213d92c0da7SBart Van Assche 		 * QP. This guarantees that all completion callback function
1214d92c0da7SBart Van Assche 		 * invocations have finished before request resetting starts.
12155cfb1782SBart Van Assche 		 */
1216509c07bcSBart Van Assche 		ret += srp_create_ch_ib(ch);
12175cfb1782SBart Van Assche 
1218509c07bcSBart Van Assche 		INIT_LIST_HEAD(&ch->free_tx);
1219d92c0da7SBart Van Assche 		for (j = 0; j < target->queue_size; ++j)
1220d92c0da7SBart Van Assche 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1221d92c0da7SBart Van Assche 	}
12228de9fe3aSBart Van Assche 
12238de9fe3aSBart Van Assche 	target->qp_in_error = false;
12248de9fe3aSBart Van Assche 
1225d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1226d92c0da7SBart Van Assche 		ch = &target->ch[i];
1227bbac5ccfSBart Van Assche 		if (ret)
1228d92c0da7SBart Van Assche 			break;
1229d92c0da7SBart Van Assche 		ret = srp_connect_ch(ch, multich);
1230d92c0da7SBart Van Assche 		multich = true;
1231d92c0da7SBart Van Assche 	}
123209be70a2SBart Van Assche 
1233ed9b2264SBart Van Assche 	if (ret == 0)
1234ed9b2264SBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
1235ed9b2264SBart Van Assche 			     PFX "reconnect succeeded\n");
1236aef9ec39SRoland Dreier 
1237aef9ec39SRoland Dreier 	return ret;
1238aef9ec39SRoland Dreier }
1239aef9ec39SRoland Dreier 
12408f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
12418f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1242f5358a17SRoland Dreier {
12438f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
12448f26c9ffSDavid Dillow 
12453ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
12463ae95da8SBart Van Assche 
12478f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
12488f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
12498f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
12508f26c9ffSDavid Dillow 
12518f26c9ffSDavid Dillow 	state->total_len += dma_len;
12528f26c9ffSDavid Dillow 	state->desc++;
12538f26c9ffSDavid Dillow 	state->ndesc++;
12548f26c9ffSDavid Dillow }
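srp_map_desc() above appends one entry to the list of SRP direct data buffer descriptors: the bus address, rkey and length are stored big-endian, as required by the SRP wire format. Below is a small stand-alone sketch of that conversion; the struct layout is a simplified stand-in for struct srp_direct_buf, and htobe64()/htobe32() are the glibc user-space equivalents of cpu_to_be64()/cpu_to_be32().

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* Simplified stand-in for struct srp_direct_buf: one (va, key, len) triple. */
struct direct_buf {
	uint64_t va;	/* big-endian bus address of the buffer */
	uint32_t key;	/* big-endian remote key (rkey) */
	uint32_t len;	/* big-endian length in bytes */
};

static void fill_desc(struct direct_buf *d, uint64_t dma_addr,
		      uint32_t rkey, uint32_t dma_len)
{
	d->va  = htobe64(dma_addr);
	d->key = htobe32(rkey);
	d->len = htobe32(dma_len);
}

int main(void)
{
	struct direct_buf d;

	fill_desc(&d, 0x1000, 0xdeadbeef, 4096);
	printf("length on the wire, converted back: %u\n", be32toh(d.len));
	return 0;
}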
12558f26c9ffSDavid Dillow 
12568f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state,
1257509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
12588f26c9ffSDavid Dillow {
1259186fbc66SBart Van Assche 	struct srp_target_port *target = ch->target;
1260186fbc66SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
12618f26c9ffSDavid Dillow 	struct ib_pool_fmr *fmr;
1262f5358a17SRoland Dreier 	u64 io_addr = 0;
12638f26c9ffSDavid Dillow 
1264f731ed62SBart Van Assche 	if (state->fmr.next >= state->fmr.end)
1265f731ed62SBart Van Assche 		return -ENOMEM;
1266f731ed62SBart Van Assche 
126726630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fmr);
126826630e8aSSagi Grimberg 
126926630e8aSSagi Grimberg 	if (state->npages == 0)
127026630e8aSSagi Grimberg 		return 0;
127126630e8aSSagi Grimberg 
127226630e8aSSagi Grimberg 	if (state->npages == 1 && target->global_mr) {
127326630e8aSSagi Grimberg 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
127426630e8aSSagi Grimberg 			     target->global_mr->rkey);
127526630e8aSSagi Grimberg 		goto reset_state;
127626630e8aSSagi Grimberg 	}
127726630e8aSSagi Grimberg 
1278509c07bcSBart Van Assche 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
12798f26c9ffSDavid Dillow 				   state->npages, io_addr);
12808f26c9ffSDavid Dillow 	if (IS_ERR(fmr))
12818f26c9ffSDavid Dillow 		return PTR_ERR(fmr);
12828f26c9ffSDavid Dillow 
1283f731ed62SBart Van Assche 	*state->fmr.next++ = fmr;
128452ede08fSBart Van Assche 	state->nmdesc++;
12858f26c9ffSDavid Dillow 
1286186fbc66SBart Van Assche 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287186fbc66SBart Van Assche 		     state->dma_len, fmr->fmr->rkey);
1288539dde6fSBart Van Assche 
128926630e8aSSagi Grimberg reset_state:
129026630e8aSSagi Grimberg 	state->npages = 0;
129126630e8aSSagi Grimberg 	state->dma_len = 0;
129226630e8aSSagi Grimberg 
12938f26c9ffSDavid Dillow 	return 0;
12948f26c9ffSDavid Dillow }
12958f26c9ffSDavid Dillow 
12961dc7b1f1SChristoph Hellwig static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
12971dc7b1f1SChristoph Hellwig {
12981dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "FAST REG");
12991dc7b1f1SChristoph Hellwig }
13001dc7b1f1SChristoph Hellwig 
13015cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state,
13021dc7b1f1SChristoph Hellwig 			     struct srp_request *req,
130357b0be9cSBart Van Assche 			     struct srp_rdma_ch *ch, int sg_nents)
13045cfb1782SBart Van Assche {
1305509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
13065cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
13075cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
1308f7f7aab1SSagi Grimberg 	struct ib_reg_wr wr;
13095cfb1782SBart Van Assche 	struct srp_fr_desc *desc;
13105cfb1782SBart Van Assche 	u32 rkey;
1311f7f7aab1SSagi Grimberg 	int n, err;
13125cfb1782SBart Van Assche 
1313f731ed62SBart Van Assche 	if (state->fr.next >= state->fr.end)
1314f731ed62SBart Van Assche 		return -ENOMEM;
1315f731ed62SBart Van Assche 
131626630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fast_reg);
131726630e8aSSagi Grimberg 
131857b0be9cSBart Van Assche 	if (sg_nents == 1 && target->global_mr) {
1319f7f7aab1SSagi Grimberg 		srp_map_desc(state, sg_dma_address(state->sg),
1320f7f7aab1SSagi Grimberg 			     sg_dma_len(state->sg),
132126630e8aSSagi Grimberg 			     target->global_mr->rkey);
1322f7f7aab1SSagi Grimberg 		return 1;
132326630e8aSSagi Grimberg 	}
132426630e8aSSagi Grimberg 
1325509c07bcSBart Van Assche 	desc = srp_fr_pool_get(ch->fr_pool);
13265cfb1782SBart Van Assche 	if (!desc)
13275cfb1782SBart Van Assche 		return -ENOMEM;
13285cfb1782SBart Van Assche 
13295cfb1782SBart Van Assche 	rkey = ib_inc_rkey(desc->mr->rkey);
13305cfb1782SBart Van Assche 	ib_update_fast_reg_key(desc->mr, rkey);
13315cfb1782SBart Van Assche 
1332ff2ba993SChristoph Hellwig 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, 0, dev->mr_page_size);
13339d8e7d0dSBart Van Assche 	if (unlikely(n < 0)) {
13349d8e7d0dSBart Van Assche 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
13359d8e7d0dSBart Van Assche 		pr_debug("%s: ib_map_mr_sg(%d) returned %d.\n",
13369d8e7d0dSBart Van Assche 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
13379d8e7d0dSBart Van Assche 			 n);
1338f7f7aab1SSagi Grimberg 		return n;
13399d8e7d0dSBart Van Assche 	}
13405cfb1782SBart Van Assche 
13411dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_reg_mr_err_done;
13421dc7b1f1SChristoph Hellwig 
1343f7f7aab1SSagi Grimberg 	wr.wr.next = NULL;
1344f7f7aab1SSagi Grimberg 	wr.wr.opcode = IB_WR_REG_MR;
13451dc7b1f1SChristoph Hellwig 	wr.wr.wr_cqe = &req->reg_cqe;
1346f7f7aab1SSagi Grimberg 	wr.wr.num_sge = 0;
1347f7f7aab1SSagi Grimberg 	wr.wr.send_flags = 0;
1348f7f7aab1SSagi Grimberg 	wr.mr = desc->mr;
1349f7f7aab1SSagi Grimberg 	wr.key = desc->mr->rkey;
1350f7f7aab1SSagi Grimberg 	wr.access = (IB_ACCESS_LOCAL_WRITE |
13515cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_READ |
13525cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_WRITE);
13535cfb1782SBart Van Assche 
1354f731ed62SBart Van Assche 	*state->fr.next++ = desc;
13555cfb1782SBart Van Assche 	state->nmdesc++;
13565cfb1782SBart Van Assche 
1357f7f7aab1SSagi Grimberg 	srp_map_desc(state, desc->mr->iova,
1358f7f7aab1SSagi Grimberg 		     desc->mr->length, desc->mr->rkey);
13595cfb1782SBart Van Assche 
136026630e8aSSagi Grimberg 	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1361f7f7aab1SSagi Grimberg 	if (unlikely(err))
136226630e8aSSagi Grimberg 		return err;
136326630e8aSSagi Grimberg 
1364f7f7aab1SSagi Grimberg 	return n;
13655cfb1782SBart Van Assche }
13665cfb1782SBart Van Assche 
13678f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state,
1368509c07bcSBart Van Assche 			    struct srp_rdma_ch *ch,
13693ae95da8SBart Van Assche 			    struct scatterlist *sg, int sg_index)
13708f26c9ffSDavid Dillow {
1371509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
137205321937SGreg Kroah-Hartman 	struct srp_device *dev = target->srp_host->srp_dev;
137385507bccSRalph Campbell 	struct ib_device *ibdev = dev->dev;
13748f26c9ffSDavid Dillow 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1375bb350d1dSFUJITA Tomonori 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
13763ae95da8SBart Van Assche 	unsigned int len = 0;
13778f26c9ffSDavid Dillow 	int ret;
137885507bccSRalph Campbell 
13793ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
1380f5358a17SRoland Dreier 
13818f26c9ffSDavid Dillow 	while (dma_len) {
13825cfb1782SBart Van Assche 		unsigned offset = dma_addr & ~dev->mr_page_mask;
13835cfb1782SBart Van Assche 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1384f7f7aab1SSagi Grimberg 			ret = srp_map_finish_fmr(state, ch);
13858f26c9ffSDavid Dillow 			if (ret)
13868f26c9ffSDavid Dillow 				return ret;
138785507bccSRalph Campbell 		}
1388f5358a17SRoland Dreier 
13895cfb1782SBart Van Assche 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
13908f26c9ffSDavid Dillow 
13918f26c9ffSDavid Dillow 		if (!state->npages)
13928f26c9ffSDavid Dillow 			state->base_dma_addr = dma_addr;
13935cfb1782SBart Van Assche 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
139452ede08fSBart Van Assche 		state->dma_len += len;
13958f26c9ffSDavid Dillow 		dma_addr += len;
13968f26c9ffSDavid Dillow 		dma_len -= len;
1397f5358a17SRoland Dreier 	}
1398f5358a17SRoland Dreier 
13995cfb1782SBart Van Assche 	/*
14005cfb1782SBart Van Assche 	 * If the last entry of the MR wasn't a full page, then we need to
14018f26c9ffSDavid Dillow 	 * close it out and start a new one -- we can only merge at page
14021d3d98c4SBart Van Assche 	 * boundaries.
14038f26c9ffSDavid Dillow 	 */
1404f5358a17SRoland Dreier 	ret = 0;
14050e0d3a48SBart Van Assche 	if (len != dev->mr_page_size)
1406f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(state, ch);
1407f5358a17SRoland Dreier 	return ret;
1408f5358a17SRoland Dreier }
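The loop in srp_map_sg_entry() above splits one DMA-mapped segment into memory-region pages: whenever the accumulated page list is full, or the current chunk does not start on a page boundary, the pages gathered so far are flushed into a single FMR mapping. The sketch below reproduces only the offset/length arithmetic of that split, with an assumed 4 KiB MR page size (the real value comes from dev->mr_page_size).

#include <stdint.h>
#include <stdio.h>

#define MR_PAGE_SIZE	4096u				/* assumed; really dev->mr_page_size */
#define MR_PAGE_MASK	(~((uint64_t)MR_PAGE_SIZE - 1))

/*
 * Walk one DMA segment and print the page-aligned chunks that would be
 * accumulated before being registered, mirroring the splitting loop above.
 */
static void split_segment(uint64_t dma_addr, unsigned int dma_len)
{
	while (dma_len) {
		unsigned int offset = dma_addr & ~MR_PAGE_MASK;
		unsigned int len = dma_len < MR_PAGE_SIZE - offset ?
				   dma_len : MR_PAGE_SIZE - offset;

		printf("page %#llx: %u bytes at offset %u\n",
		       (unsigned long long)(dma_addr & MR_PAGE_MASK),
		       len, offset);

		dma_addr += len;
		dma_len -= len;
	}
}

int main(void)
{
	/* A segment that neither starts nor ends on a page boundary. */
	split_segment(0x10100, 10000);
	return 0;
}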
1409f5358a17SRoland Dreier 
141026630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
141126630e8aSSagi Grimberg 			  struct srp_request *req, struct scatterlist *scat,
141226630e8aSSagi Grimberg 			  int count)
141326630e8aSSagi Grimberg {
141426630e8aSSagi Grimberg 	struct scatterlist *sg;
141526630e8aSSagi Grimberg 	int i, ret;
141626630e8aSSagi Grimberg 
141726630e8aSSagi Grimberg 	state->pages = req->map_page;
141826630e8aSSagi Grimberg 	state->fmr.next = req->fmr_list;
141926630e8aSSagi Grimberg 	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
142026630e8aSSagi Grimberg 
142126630e8aSSagi Grimberg 	for_each_sg(scat, sg, count, i) {
142226630e8aSSagi Grimberg 		ret = srp_map_sg_entry(state, ch, sg, i);
142326630e8aSSagi Grimberg 		if (ret)
142426630e8aSSagi Grimberg 			return ret;
142526630e8aSSagi Grimberg 	}
142626630e8aSSagi Grimberg 
1427f7f7aab1SSagi Grimberg 	ret = srp_map_finish_fmr(state, ch);
142826630e8aSSagi Grimberg 	if (ret)
142926630e8aSSagi Grimberg 		return ret;
143026630e8aSSagi Grimberg 
143126630e8aSSagi Grimberg 	return 0;
143226630e8aSSagi Grimberg }
143326630e8aSSagi Grimberg 
143426630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
143526630e8aSSagi Grimberg 			 struct srp_request *req, struct scatterlist *scat,
143626630e8aSSagi Grimberg 			 int count)
143726630e8aSSagi Grimberg {
143826630e8aSSagi Grimberg 	state->desc = req->indirect_desc;
1439f7f7aab1SSagi Grimberg 	state->fr.next = req->fr_list;
1440f7f7aab1SSagi Grimberg 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1441f7f7aab1SSagi Grimberg 	state->sg = scat;
144226630e8aSSagi Grimberg 
14433b59b7a6SBart Van Assche 	if (count == 0)
14443b59b7a6SBart Van Assche 		return 0;
14453b59b7a6SBart Van Assche 
144657b0be9cSBart Van Assche 	while (count) {
1447f7f7aab1SSagi Grimberg 		int i, n;
1448f7f7aab1SSagi Grimberg 
1449c6333f9fSDoug Ledford 		n = srp_map_finish_fr(state, req, ch, count);
1450f7f7aab1SSagi Grimberg 		if (unlikely(n < 0))
1451f7f7aab1SSagi Grimberg 			return n;
1452f7f7aab1SSagi Grimberg 
145357b0be9cSBart Van Assche 		count -= n;
1454f7f7aab1SSagi Grimberg 		for (i = 0; i < n; i++)
1455f7f7aab1SSagi Grimberg 			state->sg = sg_next(state->sg);
145626630e8aSSagi Grimberg 	}
145726630e8aSSagi Grimberg 
145826630e8aSSagi Grimberg 	return 0;
145926630e8aSSagi Grimberg }
146026630e8aSSagi Grimberg 
146126630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1462509c07bcSBart Van Assche 			  struct srp_request *req, struct scatterlist *scat,
1463509c07bcSBart Van Assche 			  int count)
146476bc1e1dSBart Van Assche {
1465509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
146676bc1e1dSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
146776bc1e1dSBart Van Assche 	struct scatterlist *sg;
146826630e8aSSagi Grimberg 	int i;
146976bc1e1dSBart Van Assche 
147076bc1e1dSBart Van Assche 	state->desc = req->indirect_desc;
14713ae95da8SBart Van Assche 	for_each_sg(scat, sg, count, i) {
14723ae95da8SBart Van Assche 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
147303f6fb93SBart Van Assche 			     ib_sg_dma_len(dev->dev, sg),
147403f6fb93SBart Van Assche 			     target->global_mr->rkey);
14753ae95da8SBart Van Assche 	}
147676bc1e1dSBart Van Assche 
147726630e8aSSagi Grimberg 	return 0;
147876bc1e1dSBart Van Assche }
147976bc1e1dSBart Van Assche 
1480330179f2SBart Van Assche /*
1481330179f2SBart Van Assche  * Register the indirect data buffer descriptor with the HCA.
1482330179f2SBart Van Assche  *
1483330179f2SBart Van Assche  * Note: since the indirect data buffer descriptor has been allocated with
1484330179f2SBart Van Assche  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1485330179f2SBart Van Assche  * memory buffer.
1486330179f2SBart Van Assche  */
1487330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1488330179f2SBart Van Assche 		       void **next_mr, void **end_mr, u32 idb_len,
1489330179f2SBart Van Assche 		       __be32 *idb_rkey)
1490330179f2SBart Van Assche {
1491330179f2SBart Van Assche 	struct srp_target_port *target = ch->target;
1492330179f2SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
1493330179f2SBart Van Assche 	struct srp_map_state state;
1494330179f2SBart Van Assche 	struct srp_direct_buf idb_desc;
1495330179f2SBart Van Assche 	u64 idb_pages[1];
1496f7f7aab1SSagi Grimberg 	struct scatterlist idb_sg[1];
1497330179f2SBart Van Assche 	int ret;
1498330179f2SBart Van Assche 
1499330179f2SBart Van Assche 	memset(&state, 0, sizeof(state));
1500330179f2SBart Van Assche 	memset(&idb_desc, 0, sizeof(idb_desc));
1501330179f2SBart Van Assche 	state.gen.next = next_mr;
1502330179f2SBart Van Assche 	state.gen.end = end_mr;
1503330179f2SBart Van Assche 	state.desc = &idb_desc;
1504f7f7aab1SSagi Grimberg 	state.base_dma_addr = req->indirect_dma_addr;
1505f7f7aab1SSagi Grimberg 	state.dma_len = idb_len;
1506f7f7aab1SSagi Grimberg 
1507f7f7aab1SSagi Grimberg 	if (dev->use_fast_reg) {
1508f7f7aab1SSagi Grimberg 		state.sg = idb_sg;
1509f7f7aab1SSagi Grimberg 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1510f7f7aab1SSagi Grimberg 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1511fc925518SChristoph Hellwig #ifdef CONFIG_NEED_SG_DMA_LENGTH
1512fc925518SChristoph Hellwig 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1513fc925518SChristoph Hellwig #endif
1514c6333f9fSDoug Ledford 		ret = srp_map_finish_fr(&state, req, ch, 1);
1515f7f7aab1SSagi Grimberg 		if (ret < 0)
1516f7f7aab1SSagi Grimberg 			return ret;
1517f7f7aab1SSagi Grimberg 	} else if (dev->use_fmr) {
1518330179f2SBart Van Assche 		state.pages = idb_pages;
1519330179f2SBart Van Assche 		state.pages[0] = (req->indirect_dma_addr &
1520330179f2SBart Van Assche 				  dev->mr_page_mask);
1521330179f2SBart Van Assche 		state.npages = 1;
1522f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(&state, ch);
1523330179f2SBart Van Assche 		if (ret < 0)
1524f7f7aab1SSagi Grimberg 			return ret;
1525f7f7aab1SSagi Grimberg 	} else {
1526f7f7aab1SSagi Grimberg 		return -EINVAL;
1527f7f7aab1SSagi Grimberg 	}
1528330179f2SBart Van Assche 
1529330179f2SBart Van Assche 	*idb_rkey = idb_desc.key;
1530330179f2SBart Van Assche 
1531f7f7aab1SSagi Grimberg 	return 0;
1532330179f2SBart Van Assche }
1533330179f2SBart Van Assche 
153477269cdfSBart Van Assche /**
153577269cdfSBart Van Assche  * srp_map_data() - map SCSI data buffer onto an SRP request
153677269cdfSBart Van Assche  * @scmnd: SCSI command to map
153777269cdfSBart Van Assche  * @ch: SRP RDMA channel
153877269cdfSBart Van Assche  * @req: SRP request
153977269cdfSBart Van Assche  *
154077269cdfSBart Van Assche  * Returns the length in bytes of the SRP_CMD IU or a negative value if
154177269cdfSBart Van Assche  * mapping failed.
154277269cdfSBart Van Assche  */
1543509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1544aef9ec39SRoland Dreier 			struct srp_request *req)
1545aef9ec39SRoland Dreier {
1546509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
154776bc1e1dSBart Van Assche 	struct scatterlist *scat;
1548aef9ec39SRoland Dreier 	struct srp_cmd *cmd = req->cmd->buf;
1549330179f2SBart Van Assche 	int len, nents, count, ret;
155085507bccSRalph Campbell 	struct srp_device *dev;
155185507bccSRalph Campbell 	struct ib_device *ibdev;
15528f26c9ffSDavid Dillow 	struct srp_map_state state;
15538f26c9ffSDavid Dillow 	struct srp_indirect_buf *indirect_hdr;
1554330179f2SBart Van Assche 	u32 idb_len, table_len;
1555330179f2SBart Van Assche 	__be32 idb_rkey;
15568f26c9ffSDavid Dillow 	u8 fmt;
1557aef9ec39SRoland Dreier 
1558bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1559aef9ec39SRoland Dreier 		return sizeof (struct srp_cmd);
1560aef9ec39SRoland Dreier 
1561aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1562aef9ec39SRoland Dreier 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
15637aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
15647aa54bd7SDavid Dillow 			     PFX "Unhandled data direction %d\n",
1565aef9ec39SRoland Dreier 			     scmnd->sc_data_direction);
1566aef9ec39SRoland Dreier 		return -EINVAL;
1567aef9ec39SRoland Dreier 	}
1568aef9ec39SRoland Dreier 
1569bb350d1dSFUJITA Tomonori 	nents = scsi_sg_count(scmnd);
1570bb350d1dSFUJITA Tomonori 	scat  = scsi_sglist(scmnd);
1571aef9ec39SRoland Dreier 
157205321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev;
157385507bccSRalph Campbell 	ibdev = dev->dev;
157485507bccSRalph Campbell 
157585507bccSRalph Campbell 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
15768f26c9ffSDavid Dillow 	if (unlikely(count == 0))
15778f26c9ffSDavid Dillow 		return -EIO;
1578aef9ec39SRoland Dreier 
1579aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_DIRECT;
1580f5358a17SRoland Dreier 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1581f5358a17SRoland Dreier 
158203f6fb93SBart Van Assche 	if (count == 1 && target->global_mr) {
1583f5358a17SRoland Dreier 		/*
1584f5358a17SRoland Dreier 		 * The midlayer only generated a single gather/scatter
1585f5358a17SRoland Dreier 		 * entry, or DMA mapping coalesced everything to a
1586f5358a17SRoland Dreier 		 * single entry.  So a direct descriptor along with
1587f5358a17SRoland Dreier 		 * the DMA MR suffices.
1588f5358a17SRoland Dreier 		 */
1589f5358a17SRoland Dreier 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1590aef9ec39SRoland Dreier 
159185507bccSRalph Campbell 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
159203f6fb93SBart Van Assche 		buf->key = cpu_to_be32(target->global_mr->rkey);
159385507bccSRalph Campbell 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
15948f26c9ffSDavid Dillow 
159552ede08fSBart Van Assche 		req->nmdesc = 0;
15968f26c9ffSDavid Dillow 		goto map_complete;
15978f26c9ffSDavid Dillow 	}
15988f26c9ffSDavid Dillow 
15995cfb1782SBart Van Assche 	/*
16005cfb1782SBart Van Assche 	 * We have more than one scatter/gather entry, so build our indirect
16015cfb1782SBart Van Assche 	 * descriptor table, trying to merge as many entries as we can.
1602f5358a17SRoland Dreier 	 */
16038f26c9ffSDavid Dillow 	indirect_hdr = (void *) cmd->add_data;
16048f26c9ffSDavid Dillow 
1605c07d424dSDavid Dillow 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1606c07d424dSDavid Dillow 				   target->indirect_size, DMA_TO_DEVICE);
1607c07d424dSDavid Dillow 
16088f26c9ffSDavid Dillow 	memset(&state, 0, sizeof(state));
160926630e8aSSagi Grimberg 	if (dev->use_fast_reg)
1610e012f363SBart Van Assche 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
161126630e8aSSagi Grimberg 	else if (dev->use_fmr)
1612e012f363SBart Van Assche 		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
161326630e8aSSagi Grimberg 	else
1614e012f363SBart Van Assche 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1615e012f363SBart Van Assche 	req->nmdesc = state.nmdesc;
1616e012f363SBart Van Assche 	if (ret < 0)
1617e012f363SBart Van Assche 		goto unmap;
16188f26c9ffSDavid Dillow 
1619c07d424dSDavid Dillow 	/* We've mapped the request, now pull as much of the indirect
1620c07d424dSDavid Dillow 	 * descriptor table as we can into the command buffer. If this
1621c07d424dSDavid Dillow 	 * target is not using an external indirect table, we are
1622c07d424dSDavid Dillow 	 * guaranteed to fit into the command, as the SCSI layer won't
1623c07d424dSDavid Dillow 	 * give us more S/G entries than we allow.
16248f26c9ffSDavid Dillow 	 */
16258f26c9ffSDavid Dillow 	if (state.ndesc == 1) {
16265cfb1782SBart Van Assche 		/*
16275cfb1782SBart Van Assche 		 * Memory registration collapsed the sg-list into one entry,
16288f26c9ffSDavid Dillow 		 * so use a direct descriptor.
16298f26c9ffSDavid Dillow 		 */
16308f26c9ffSDavid Dillow 		struct srp_direct_buf *buf = (void *) cmd->add_data;
16318f26c9ffSDavid Dillow 
1632c07d424dSDavid Dillow 		*buf = req->indirect_desc[0];
16338f26c9ffSDavid Dillow 		goto map_complete;
16348f26c9ffSDavid Dillow 	}
16358f26c9ffSDavid Dillow 
1636c07d424dSDavid Dillow 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1637c07d424dSDavid Dillow 						!target->allow_ext_sg)) {
1638c07d424dSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
1639c07d424dSDavid Dillow 			     "Could not fit S/G list into SRP_CMD\n");
1640e012f363SBart Van Assche 		ret = -EIO;
1641e012f363SBart Van Assche 		goto unmap;
1642c07d424dSDavid Dillow 	}
1643c07d424dSDavid Dillow 
1644c07d424dSDavid Dillow 	count = min(state.ndesc, target->cmd_sg_cnt);
16458f26c9ffSDavid Dillow 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1646330179f2SBart Van Assche 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1647aef9ec39SRoland Dreier 
1648aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_INDIRECT;
16498f26c9ffSDavid Dillow 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1650c07d424dSDavid Dillow 	len += count * sizeof (struct srp_direct_buf);
1651f5358a17SRoland Dreier 
1652c07d424dSDavid Dillow 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1653c07d424dSDavid Dillow 	       count * sizeof (struct srp_direct_buf));
165485507bccSRalph Campbell 
165503f6fb93SBart Van Assche 	if (!target->global_mr) {
1656330179f2SBart Van Assche 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1657330179f2SBart Van Assche 				  idb_len, &idb_rkey);
1658330179f2SBart Van Assche 		if (ret < 0)
1659e012f363SBart Van Assche 			goto unmap;
1660330179f2SBart Van Assche 		req->nmdesc++;
1661330179f2SBart Van Assche 	} else {
1662a745f4f4SBart Van Assche 		idb_rkey = cpu_to_be32(target->global_mr->rkey);
1663330179f2SBart Van Assche 	}
1664330179f2SBart Van Assche 
1665c07d424dSDavid Dillow 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1666330179f2SBart Van Assche 	indirect_hdr->table_desc.key = idb_rkey;
16678f26c9ffSDavid Dillow 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
16688f26c9ffSDavid Dillow 	indirect_hdr->len = cpu_to_be32(state.total_len);
1669aef9ec39SRoland Dreier 
1670aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1671c07d424dSDavid Dillow 		cmd->data_out_desc_cnt = count;
1672aef9ec39SRoland Dreier 	else
1673c07d424dSDavid Dillow 		cmd->data_in_desc_cnt = count;
1674c07d424dSDavid Dillow 
1675c07d424dSDavid Dillow 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1676c07d424dSDavid Dillow 				      DMA_TO_DEVICE);
1677aef9ec39SRoland Dreier 
16788f26c9ffSDavid Dillow map_complete:
1679aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1680aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt << 4;
1681aef9ec39SRoland Dreier 	else
1682aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt;
1683aef9ec39SRoland Dreier 
1684aef9ec39SRoland Dreier 	return len;
1685e012f363SBart Van Assche 
1686e012f363SBart Van Assche unmap:
1687e012f363SBart Van Assche 	srp_unmap_data(scmnd, ch, req);
1688ffc548bbSBart Van Assche 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1689ffc548bbSBart Van Assche 		ret = -E2BIG;
1690e012f363SBart Van Assche 	return ret;
1691aef9ec39SRoland Dreier }
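srp_map_data() above chooses between the direct and the indirect data buffer format and returns the resulting SRP_CMD IU length: one srp_direct_buf for the direct case, or the srp_indirect_buf header plus an inline copy of up to cmd_sg_cnt descriptors for the indirect case. The following is a rough stand-alone sketch of that length computation; the struct sizes, in particular the 48-byte fixed command header, are assumptions made for illustration and not the authoritative SRP definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified, illustrative stand-ins for the SRP wire structures. */
struct direct_buf   { uint64_t va; uint32_t key; uint32_t len; };
struct indirect_buf { struct direct_buf table_desc; uint32_t len; };
struct cmd_hdr      { uint8_t opcode; uint8_t rest[47]; };	/* assumed 48-byte fixed part */

/* Length of an SRP_CMD IU that carries 'count' data descriptors inline. */
static size_t cmd_iu_len(unsigned int count)
{
	if (count <= 1)		/* direct data buffer format */
		return sizeof(struct cmd_hdr) + sizeof(struct direct_buf);

	/* indirect format: header plus an inline copy of 'count' entries */
	return sizeof(struct cmd_hdr) + sizeof(struct indirect_buf) +
	       count * sizeof(struct direct_buf);
}

int main(void)
{
	printf("1 descriptor  -> %zu bytes\n", cmd_iu_len(1));
	printf("8 descriptors -> %zu bytes\n", cmd_iu_len(8));
	return 0;
}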
1692aef9ec39SRoland Dreier 
169305a1d750SDavid Dillow /*
169476c75b25SBart Van Assche  * Return an IU and possibly a credit to the free pool
169576c75b25SBart Van Assche  */
1696509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
169776c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
169876c75b25SBart Van Assche {
169976c75b25SBart Van Assche 	unsigned long flags;
170076c75b25SBart Van Assche 
1701509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1702509c07bcSBart Van Assche 	list_add(&iu->list, &ch->free_tx);
170376c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
1704509c07bcSBart Van Assche 		++ch->req_lim;
1705509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
170676c75b25SBart Van Assche }
170776c75b25SBart Van Assche 
170876c75b25SBart Van Assche /*
1709509c07bcSBart Van Assche  * Must be called with ch->lock held to protect req_lim and free_tx.
1710e9684678SBart Van Assche  * If IU is not sent, it must be returned using srp_put_tx_iu().
171105a1d750SDavid Dillow  *
171205a1d750SDavid Dillow  * Note:
171305a1d750SDavid Dillow  * An upper limit for the number of allocated information units for each
171405a1d750SDavid Dillow  * request type is:
171505a1d750SDavid Dillow  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
171605a1d750SDavid Dillow  *   more than Scsi_Host.can_queue requests.
171705a1d750SDavid Dillow  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
171805a1d750SDavid Dillow  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
171905a1d750SDavid Dillow  *   one unanswered SRP request to an initiator.
172005a1d750SDavid Dillow  */
1721509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
172205a1d750SDavid Dillow 				      enum srp_iu_type iu_type)
172305a1d750SDavid Dillow {
1724509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
172505a1d750SDavid Dillow 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
172605a1d750SDavid Dillow 	struct srp_iu *iu;
172705a1d750SDavid Dillow 
17281dc7b1f1SChristoph Hellwig 	ib_process_cq_direct(ch->send_cq, -1);
172905a1d750SDavid Dillow 
1730509c07bcSBart Van Assche 	if (list_empty(&ch->free_tx))
173105a1d750SDavid Dillow 		return NULL;
173205a1d750SDavid Dillow 
173305a1d750SDavid Dillow 	/* Initiator responses to target requests do not consume credits */
173476c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP) {
1735509c07bcSBart Van Assche 		if (ch->req_lim <= rsv) {
173605a1d750SDavid Dillow 			++target->zero_req_lim;
173705a1d750SDavid Dillow 			return NULL;
173805a1d750SDavid Dillow 		}
173905a1d750SDavid Dillow 
1740509c07bcSBart Van Assche 		--ch->req_lim;
174176c75b25SBart Van Assche 	}
174276c75b25SBart Van Assche 
1743509c07bcSBart Van Assche 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
174476c75b25SBart Van Assche 	list_del(&iu->list);
174505a1d750SDavid Dillow 	return iu;
174605a1d750SDavid Dillow }
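The note above describes the credit accounting in __srp_get_tx_iu(): SRP_TSK_MGMT_SQ_SIZE credits are kept in reserve so that a task-management IU can still be sent when regular commands have exhausted req_lim, and initiator responses never consume a credit at all. A small user-space sketch of just that decision follows; the reserve value of 1 is an assumption standing in for SRP_TSK_MGMT_SQ_SIZE.

#include <stdbool.h>
#include <stdio.h>

/* Assumed reserve of one credit, standing in for SRP_TSK_MGMT_SQ_SIZE. */
#define TSK_MGMT_RESERVE	1

enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

/*
 * Decide whether an IU of the given type may consume a send credit now,
 * mirroring the reservation rule described in the note above.
 */
static bool may_send(int *req_lim, enum iu_type type)
{
	int rsv = (type == IU_TSK_MGMT) ? 0 : TSK_MGMT_RESERVE;

	if (type == IU_RSP)	/* initiator responses never consume credits */
		return true;
	if (*req_lim <= rsv)	/* keep the reserve for task management IUs */
		return false;
	--*req_lim;
	return true;
}

int main(void)
{
	int req_lim = 2;

	printf("command:  %d\n", may_send(&req_lim, IU_CMD));		/* 1 */
	printf("command:  %d\n", may_send(&req_lim, IU_CMD));		/* 0: reserve kept */
	printf("tsk mgmt: %d\n", may_send(&req_lim, IU_TSK_MGMT));	/* 1 */
	return 0;
}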
174705a1d750SDavid Dillow 
17481dc7b1f1SChristoph Hellwig static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
17491dc7b1f1SChristoph Hellwig {
17501dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
17511dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
17521dc7b1f1SChristoph Hellwig 
17531dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
17541dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "SEND");
17551dc7b1f1SChristoph Hellwig 		return;
17561dc7b1f1SChristoph Hellwig 	}
17571dc7b1f1SChristoph Hellwig 
17581dc7b1f1SChristoph Hellwig 	list_add(&iu->list, &ch->free_tx);
17591dc7b1f1SChristoph Hellwig }
17601dc7b1f1SChristoph Hellwig 
1761509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
176205a1d750SDavid Dillow {
1763509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
176405a1d750SDavid Dillow 	struct ib_sge list;
176505a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
176605a1d750SDavid Dillow 
176705a1d750SDavid Dillow 	list.addr   = iu->dma;
176805a1d750SDavid Dillow 	list.length = len;
17699af76271SDavid Dillow 	list.lkey   = target->lkey;
177005a1d750SDavid Dillow 
17711dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_send_done;
17721dc7b1f1SChristoph Hellwig 
177305a1d750SDavid Dillow 	wr.next       = NULL;
17741dc7b1f1SChristoph Hellwig 	wr.wr_cqe     = &iu->cqe;
177505a1d750SDavid Dillow 	wr.sg_list    = &list;
177605a1d750SDavid Dillow 	wr.num_sge    = 1;
177705a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
177805a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
177905a1d750SDavid Dillow 
1780509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
178105a1d750SDavid Dillow }
178205a1d750SDavid Dillow 
1783509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1784c996bb47SBart Van Assche {
1785509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1786c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1787dcb4cb85SBart Van Assche 	struct ib_sge list;
1788c996bb47SBart Van Assche 
1789c996bb47SBart Van Assche 	list.addr   = iu->dma;
1790c996bb47SBart Van Assche 	list.length = iu->size;
17919af76271SDavid Dillow 	list.lkey   = target->lkey;
1792c996bb47SBart Van Assche 
17931dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_recv_done;
17941dc7b1f1SChristoph Hellwig 
1795c996bb47SBart Van Assche 	wr.next     = NULL;
17961dc7b1f1SChristoph Hellwig 	wr.wr_cqe   = &iu->cqe;
1797c996bb47SBart Van Assche 	wr.sg_list  = &list;
1798c996bb47SBart Van Assche 	wr.num_sge  = 1;
1799c996bb47SBart Van Assche 
1800509c07bcSBart Van Assche 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1801c996bb47SBart Van Assche }
1802c996bb47SBart Van Assche 
1803509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1804aef9ec39SRoland Dreier {
1805509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1806aef9ec39SRoland Dreier 	struct srp_request *req;
1807aef9ec39SRoland Dreier 	struct scsi_cmnd *scmnd;
1808aef9ec39SRoland Dreier 	unsigned long flags;
1809aef9ec39SRoland Dreier 
1810aef9ec39SRoland Dreier 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1811509c07bcSBart Van Assche 		spin_lock_irqsave(&ch->lock, flags);
1812509c07bcSBart Van Assche 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1813509c07bcSBart Van Assche 		spin_unlock_irqrestore(&ch->lock, flags);
181494a9174cSBart Van Assche 
1815509c07bcSBart Van Assche 		ch->tsk_mgmt_status = -1;
1816f8b6e31eSDavid Dillow 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1817509c07bcSBart Van Assche 			ch->tsk_mgmt_status = rsp->data[3];
1818509c07bcSBart Van Assche 		complete(&ch->tsk_mgmt_done);
1819aef9ec39SRoland Dreier 	} else {
182077f2c1a4SBart Van Assche 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
182177f2c1a4SBart Van Assche 		if (scmnd) {
182277f2c1a4SBart Van Assche 			req = (void *)scmnd->host_scribble;
182377f2c1a4SBart Van Assche 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
182477f2c1a4SBart Van Assche 		}
182522032991SBart Van Assche 		if (!scmnd) {
18267aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
1827d92c0da7SBart Van Assche 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1828d92c0da7SBart Van Assche 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
182922032991SBart Van Assche 
1830509c07bcSBart Van Assche 			spin_lock_irqsave(&ch->lock, flags);
1831509c07bcSBart Van Assche 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1832509c07bcSBart Van Assche 			spin_unlock_irqrestore(&ch->lock, flags);
183322032991SBart Van Assche 
183422032991SBart Van Assche 			return;
183522032991SBart Van Assche 		}
1836aef9ec39SRoland Dreier 		scmnd->result = rsp->status;
1837aef9ec39SRoland Dreier 
1838aef9ec39SRoland Dreier 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1839aef9ec39SRoland Dreier 			memcpy(scmnd->sense_buffer, rsp->data +
1840aef9ec39SRoland Dreier 			       be32_to_cpu(rsp->resp_data_len),
1841aef9ec39SRoland Dreier 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1842aef9ec39SRoland Dreier 				     SCSI_SENSE_BUFFERSIZE));
1843aef9ec39SRoland Dreier 		}
1844aef9ec39SRoland Dreier 
1845e714531aSBart Van Assche 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1846bb350d1dSFUJITA Tomonori 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1847e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1848e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1849e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1850e714531aSBart Van Assche 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1851e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1852e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1853aef9ec39SRoland Dreier 
1854509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd,
185522032991SBart Van Assche 			     be32_to_cpu(rsp->req_lim_delta));
185622032991SBart Van Assche 
1857f8b6e31eSDavid Dillow 		scmnd->host_scribble = NULL;
1858aef9ec39SRoland Dreier 		scmnd->scsi_done(scmnd);
1859aef9ec39SRoland Dreier 	}
1860aef9ec39SRoland Dreier }
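In srp_process_rsp() above, the DIUNDER/DIOVER (and DOUNDER/DOOVER) flags select the sign of the residual reported to the SCSI midlayer: an underflow becomes a positive residual, an overflow a negative one. The sketch below shows that translation for the data-in case; the flag bit values here are made-up stand-ins for the real SRP_RSP_FLAG_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Made-up bit values standing in for the real SRP_RSP_FLAG_* definitions. */
#define FLAG_DIUNDER	0x1
#define FLAG_DIOVER	0x2

/*
 * Translate the SRP data-in residual flags into a SCSI residual count:
 * underflow yields a positive residual, overflow a negative one.
 */
static int data_in_resid(uint8_t flags, uint32_t res_cnt)
{
	if (flags & FLAG_DIUNDER)
		return (int)res_cnt;
	if (flags & FLAG_DIOVER)
		return -(int)res_cnt;
	return 0;
}

int main(void)
{
	printf("underflow: %d\n", data_in_resid(FLAG_DIUNDER, 512));
	printf("overflow:  %d\n", data_in_resid(FLAG_DIOVER, 512));
	return 0;
}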
1861aef9ec39SRoland Dreier 
1862509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1863bb12588aSDavid Dillow 			       void *rsp, int len)
1864bb12588aSDavid Dillow {
1865509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
186676c75b25SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1867bb12588aSDavid Dillow 	unsigned long flags;
1868bb12588aSDavid Dillow 	struct srp_iu *iu;
186976c75b25SBart Van Assche 	int err;
1870bb12588aSDavid Dillow 
1871509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1872509c07bcSBart Van Assche 	ch->req_lim += req_delta;
1873509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1874509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
187576c75b25SBart Van Assche 
1876bb12588aSDavid Dillow 	if (!iu) {
1877bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1878bb12588aSDavid Dillow 			     "no IU available to send response\n");
187976c75b25SBart Van Assche 		return 1;
1880bb12588aSDavid Dillow 	}
1881bb12588aSDavid Dillow 
1882bb12588aSDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1883bb12588aSDavid Dillow 	memcpy(iu->buf, rsp, len);
1884bb12588aSDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1885bb12588aSDavid Dillow 
1886509c07bcSBart Van Assche 	err = srp_post_send(ch, iu, len);
188776c75b25SBart Van Assche 	if (err) {
1888bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1889bb12588aSDavid Dillow 			     "unable to post response: %d\n", err);
1890509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
189176c75b25SBart Van Assche 	}
1892bb12588aSDavid Dillow 
1893bb12588aSDavid Dillow 	return err;
1894bb12588aSDavid Dillow }
1895bb12588aSDavid Dillow 
1896509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch,
1897bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1898bb12588aSDavid Dillow {
1899bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1900bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1901bb12588aSDavid Dillow 		.tag = req->tag,
1902bb12588aSDavid Dillow 	};
1903bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1904bb12588aSDavid Dillow 
1905509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1906509c07bcSBart Van Assche 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1907bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1908bb12588aSDavid Dillow }
1909bb12588aSDavid Dillow 
1910509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch,
1911bb12588aSDavid Dillow 				struct srp_aer_req *req)
1912bb12588aSDavid Dillow {
1913509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1914bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1915bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1916bb12588aSDavid Dillow 		.tag = req->tag,
1917bb12588aSDavid Dillow 	};
1918bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1919bb12588aSDavid Dillow 
1920bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1921985aa495SBart Van Assche 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1922bb12588aSDavid Dillow 
1923509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1924bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1925bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1926bb12588aSDavid Dillow }
1927bb12588aSDavid Dillow 
19281dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1929aef9ec39SRoland Dreier {
19301dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
19311dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
1932509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1933dcb4cb85SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1934c996bb47SBart Van Assche 	int res;
1935aef9ec39SRoland Dreier 	u8 opcode;
1936aef9ec39SRoland Dreier 
19371dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
19381dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "RECV");
19391dc7b1f1SChristoph Hellwig 		return;
19401dc7b1f1SChristoph Hellwig 	}
19411dc7b1f1SChristoph Hellwig 
1942509c07bcSBart Van Assche 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
194385507bccSRalph Campbell 				   DMA_FROM_DEVICE);
1944aef9ec39SRoland Dreier 
1945aef9ec39SRoland Dreier 	opcode = *(u8 *) iu->buf;
1946aef9ec39SRoland Dreier 
1947aef9ec39SRoland Dreier 	if (0) {
19487aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
19497aa54bd7SDavid Dillow 			     PFX "recv completion, opcode 0x%02x\n", opcode);
19507a700811SBart Van Assche 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
19517a700811SBart Van Assche 			       iu->buf, wc->byte_len, true);
1952aef9ec39SRoland Dreier 	}
1953aef9ec39SRoland Dreier 
1954aef9ec39SRoland Dreier 	switch (opcode) {
1955aef9ec39SRoland Dreier 	case SRP_RSP:
1956509c07bcSBart Van Assche 		srp_process_rsp(ch, iu->buf);
1957aef9ec39SRoland Dreier 		break;
1958aef9ec39SRoland Dreier 
1959bb12588aSDavid Dillow 	case SRP_CRED_REQ:
1960509c07bcSBart Van Assche 		srp_process_cred_req(ch, iu->buf);
1961bb12588aSDavid Dillow 		break;
1962bb12588aSDavid Dillow 
1963bb12588aSDavid Dillow 	case SRP_AER_REQ:
1964509c07bcSBart Van Assche 		srp_process_aer_req(ch, iu->buf);
1965bb12588aSDavid Dillow 		break;
1966bb12588aSDavid Dillow 
1967aef9ec39SRoland Dreier 	case SRP_T_LOGOUT:
1968aef9ec39SRoland Dreier 		/* XXX Handle target logout */
19697aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19707aa54bd7SDavid Dillow 			     PFX "Got target logout request\n");
1971aef9ec39SRoland Dreier 		break;
1972aef9ec39SRoland Dreier 
1973aef9ec39SRoland Dreier 	default:
19747aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19757aa54bd7SDavid Dillow 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1976aef9ec39SRoland Dreier 		break;
1977aef9ec39SRoland Dreier 	}
1978aef9ec39SRoland Dreier 
1979509c07bcSBart Van Assche 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
198085507bccSRalph Campbell 				      DMA_FROM_DEVICE);
1981c996bb47SBart Van Assche 
1982509c07bcSBart Van Assche 	res = srp_post_recv(ch, iu);
1983c996bb47SBart Van Assche 	if (res != 0)
1984c996bb47SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
1985c996bb47SBart Van Assche 			     PFX "Recv failed with error code %d\n", res);
1986aef9ec39SRoland Dreier }
1987aef9ec39SRoland Dreier 
1988c1120f89SBart Van Assche /**
1989c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
1990af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
1991c1120f89SBart Van Assche  *
1992c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
1993c1120f89SBart Van Assche  * hence the target->rport test.
1994c1120f89SBart Van Assche  */
1995c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
1996c1120f89SBart Van Assche {
1997c1120f89SBart Van Assche 	struct srp_target_port *target;
1998c1120f89SBart Van Assche 
1999c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
2000c1120f89SBart Van Assche 	if (target->rport)
2001c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
2002c1120f89SBart Van Assche }
2003c1120f89SBart Van Assche 
20041dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
20051dc7b1f1SChristoph Hellwig 		const char *opname)
2006948d1e88SBart Van Assche {
20071dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
20087dad6b2eSBart Van Assche 	struct srp_target_port *target = ch->target;
20097dad6b2eSBart Van Assche 
2010c014c8cdSBart Van Assche 	if (ch->connected && !target->qp_in_error) {
20115cfb1782SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
20121dc7b1f1SChristoph Hellwig 			     PFX "failed %s status %s (%d) for CQE %p\n",
20131dc7b1f1SChristoph Hellwig 			     opname, ib_wc_status_msg(wc->status), wc->status,
20141dc7b1f1SChristoph Hellwig 			     wc->wr_cqe);
2015c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
20164f0af697SBart Van Assche 	}
2017948d1e88SBart Van Assche 	target->qp_in_error = true;
2018948d1e88SBart Van Assche }
2019948d1e88SBart Van Assche 
202076c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2021aef9ec39SRoland Dreier {
202276c75b25SBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2023a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
2024509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2025aef9ec39SRoland Dreier 	struct srp_request *req;
2026aef9ec39SRoland Dreier 	struct srp_iu *iu;
2027aef9ec39SRoland Dreier 	struct srp_cmd *cmd;
202885507bccSRalph Campbell 	struct ib_device *dev;
202976c75b25SBart Van Assche 	unsigned long flags;
203077f2c1a4SBart Van Assche 	u32 tag;
203177f2c1a4SBart Van Assche 	u16 idx;
2032d1b4289eSBart Van Assche 	int len, ret;
2033a95cadb9SBart Van Assche 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2034a95cadb9SBart Van Assche 
2035a95cadb9SBart Van Assche 	/*
2036a95cadb9SBart Van Assche 	 * The SCSI EH thread is the only context from which srp_queuecommand()
2037a95cadb9SBart Van Assche 	 * can get invoked for blocked devices (SDEV_BLOCK /
2038a95cadb9SBart Van Assche 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2039a95cadb9SBart Van Assche 	 * locking the rport mutex if invoked from inside the SCSI EH.
2040a95cadb9SBart Van Assche 	 */
2041a95cadb9SBart Van Assche 	if (in_scsi_eh)
2042a95cadb9SBart Van Assche 		mutex_lock(&rport->mutex);
2043aef9ec39SRoland Dreier 
2044d1b4289eSBart Van Assche 	scmnd->result = srp_chkready(target->rport);
2045d1b4289eSBart Van Assche 	if (unlikely(scmnd->result))
2046d1b4289eSBart Van Assche 		goto err;
20472ce19e72SBart Van Assche 
204877f2c1a4SBart Van Assche 	WARN_ON_ONCE(scmnd->request->tag < 0);
204977f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2050d92c0da7SBart Van Assche 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
205177f2c1a4SBart Van Assche 	idx = blk_mq_unique_tag_to_tag(tag);
205277f2c1a4SBart Van Assche 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
205377f2c1a4SBart Van Assche 		  dev_name(&shost->shost_gendev), tag, idx,
205477f2c1a4SBart Van Assche 		  target->req_ring_size);
2055509c07bcSBart Van Assche 
2056509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
2057509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2058509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
2059aef9ec39SRoland Dreier 
206077f2c1a4SBart Van Assche 	if (!iu)
206177f2c1a4SBart Van Assche 		goto err;
206277f2c1a4SBart Van Assche 
206377f2c1a4SBart Van Assche 	req = &ch->req_ring[idx];
206405321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev->dev;
206549248644SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
206685507bccSRalph Campbell 				   DMA_TO_DEVICE);
2067aef9ec39SRoland Dreier 
2068f8b6e31eSDavid Dillow 	scmnd->host_scribble = (void *) req;
2069aef9ec39SRoland Dreier 
2070aef9ec39SRoland Dreier 	cmd = iu->buf;
2071aef9ec39SRoland Dreier 	memset(cmd, 0, sizeof *cmd);
2072aef9ec39SRoland Dreier 
2073aef9ec39SRoland Dreier 	cmd->opcode = SRP_CMD;
2074985aa495SBart Van Assche 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
207577f2c1a4SBart Van Assche 	cmd->tag    = tag;
2076aef9ec39SRoland Dreier 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2077aef9ec39SRoland Dreier 
2078aef9ec39SRoland Dreier 	req->scmnd    = scmnd;
2079aef9ec39SRoland Dreier 	req->cmd      = iu;
2080aef9ec39SRoland Dreier 
2081509c07bcSBart Van Assche 	len = srp_map_data(scmnd, ch, req);
2082aef9ec39SRoland Dreier 	if (len < 0) {
20837aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
2084d1b4289eSBart Van Assche 			     PFX "Failed to map data (%d)\n", len);
2085d1b4289eSBart Van Assche 		/*
2086d1b4289eSBart Van Assche 		 * If we ran out of memory descriptors (-ENOMEM) because an
2087d1b4289eSBart Van Assche 		 * application is queuing many requests with more than
208852ede08fSBart Van Assche 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2089d1b4289eSBart Van Assche 		 * to reduce queue depth temporarily.
2090d1b4289eSBart Van Assche 		 */
2091d1b4289eSBart Van Assche 		scmnd->result = len == -ENOMEM ?
2092d1b4289eSBart Van Assche 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
209376c75b25SBart Van Assche 		goto err_iu;
2094aef9ec39SRoland Dreier 	}
2095aef9ec39SRoland Dreier 
209649248644SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
209785507bccSRalph Campbell 				      DMA_TO_DEVICE);
2098aef9ec39SRoland Dreier 
2099509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, len)) {
21007aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2101aef9ec39SRoland Dreier 		goto err_unmap;
2102aef9ec39SRoland Dreier 	}
2103aef9ec39SRoland Dreier 
2104d1b4289eSBart Van Assche 	ret = 0;
2105d1b4289eSBart Van Assche 
2106a95cadb9SBart Van Assche unlock_rport:
2107a95cadb9SBart Van Assche 	if (in_scsi_eh)
2108a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2109a95cadb9SBart Van Assche 
2110d1b4289eSBart Van Assche 	return ret;
2111aef9ec39SRoland Dreier 
2112aef9ec39SRoland Dreier err_unmap:
2113509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
2114aef9ec39SRoland Dreier 
211576c75b25SBart Van Assche err_iu:
2116509c07bcSBart Van Assche 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
211776c75b25SBart Van Assche 
2118024ca901SBart Van Assche 	/*
2119024ca901SBart Van Assche 	 * Ensure that the loops that iterate over the request ring never
2120024ca901SBart Van Assche 	 * encounter a dangling SCSI command pointer.
2121024ca901SBart Van Assche 	 */
2122024ca901SBart Van Assche 	req->scmnd = NULL;
2123024ca901SBart Van Assche 
2124d1b4289eSBart Van Assche err:
2125d1b4289eSBart Van Assche 	if (scmnd->result) {
2126d1b4289eSBart Van Assche 		scmnd->scsi_done(scmnd);
2127d1b4289eSBart Van Assche 		ret = 0;
2128d1b4289eSBart Van Assche 	} else {
2129d1b4289eSBart Van Assche 		ret = SCSI_MLQUEUE_HOST_BUSY;
2130d1b4289eSBart Van Assche 	}
2131a95cadb9SBart Van Assche 
2132d1b4289eSBart Van Assche 	goto unlock_rport;
2133aef9ec39SRoland Dreier }
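
/*
 * Illustrative sketch (not part of the upstream source): srp_queuecommand()
 * above relies on the blk-mq "unique tag" to pick both the RDMA channel and
 * the request slot.  In the blk-mq core this value packs the hardware queue
 * index into the upper bits and the per-queue tag into the low 16 bits,
 * roughly:
 *
 *	u32 tag = blk_mq_unique_tag(scmnd->request);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(tag);   // tag >> BLK_MQ_UNIQUE_TAG_BITS
 *	u16 idx = blk_mq_unique_tag_to_tag(tag);   // tag & BLK_MQ_UNIQUE_TAG_MASK
 *
 *	ch  = &target->ch[hwq];        // one srp_rdma_ch per hardware queue
 *	req = &ch->req_ring[idx];      // per-channel request slot
 *
 * so a single u32 is enough to address every request across all channels.
 */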
2134aef9ec39SRoland Dreier 
21354d73f95fSBart Van Assche /*
21364d73f95fSBart Van Assche  * Note: the resources allocated in this function are freed in
2137509c07bcSBart Van Assche  * srp_free_ch_ib().
21384d73f95fSBart Van Assche  */
2139509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2140aef9ec39SRoland Dreier {
2141509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2142aef9ec39SRoland Dreier 	int i;
2143aef9ec39SRoland Dreier 
2144509c07bcSBart Van Assche 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
21454d73f95fSBart Van Assche 			      GFP_KERNEL);
2146509c07bcSBart Van Assche 	if (!ch->rx_ring)
21474d73f95fSBart Van Assche 		goto err_no_ring;
2148509c07bcSBart Van Assche 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
21494d73f95fSBart Van Assche 			      GFP_KERNEL);
2150509c07bcSBart Van Assche 	if (!ch->tx_ring)
21514d73f95fSBart Van Assche 		goto err_no_ring;
21524d73f95fSBart Van Assche 
21534d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2154509c07bcSBart Van Assche 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2155509c07bcSBart Van Assche 					      ch->max_ti_iu_len,
2156aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_FROM_DEVICE);
2157509c07bcSBart Van Assche 		if (!ch->rx_ring[i])
2158aef9ec39SRoland Dreier 			goto err;
2159aef9ec39SRoland Dreier 	}
2160aef9ec39SRoland Dreier 
21614d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2162509c07bcSBart Van Assche 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
216349248644SDavid Dillow 					      target->max_iu_len,
2164aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_TO_DEVICE);
2165509c07bcSBart Van Assche 		if (!ch->tx_ring[i])
2166aef9ec39SRoland Dreier 			goto err;
2167dcb4cb85SBart Van Assche 
2168509c07bcSBart Van Assche 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2169aef9ec39SRoland Dreier 	}
2170aef9ec39SRoland Dreier 
2171aef9ec39SRoland Dreier 	return 0;
2172aef9ec39SRoland Dreier 
2173aef9ec39SRoland Dreier err:
21744d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2175509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2176509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2177aef9ec39SRoland Dreier 	}
2178aef9ec39SRoland Dreier 
21794d73f95fSBart Van Assche 
21804d73f95fSBart Van Assche err_no_ring:
2181509c07bcSBart Van Assche 	kfree(ch->tx_ring);
2182509c07bcSBart Van Assche 	ch->tx_ring = NULL;
2183509c07bcSBart Van Assche 	kfree(ch->rx_ring);
2184509c07bcSBart Van Assche 	ch->rx_ring = NULL;
2185aef9ec39SRoland Dreier 
2186aef9ec39SRoland Dreier 	return -ENOMEM;
2187aef9ec39SRoland Dreier }
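
/*
 * Sizing note (informational): the receive ring IUs are sized for
 * ch->max_ti_iu_len, the maximum target-to-initiator IU length advertised in
 * the SRP_LOGIN_RSP, while the send ring IUs use target->max_iu_len, which is
 * large enough for an SRP_CMD plus its CDB and data descriptors.  Both rings
 * contain target->queue_size entries so that an IU is available in each
 * direction for every command the SCSI host is allowed to keep outstanding.
 */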
2188aef9ec39SRoland Dreier 
2189c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2190c9b03c1aSBart Van Assche {
2191c9b03c1aSBart Van Assche 	uint64_t T_tr_ns, max_compl_time_ms;
2192c9b03c1aSBart Van Assche 	uint32_t rq_tmo_jiffies;
2193c9b03c1aSBart Van Assche 
2194c9b03c1aSBart Van Assche 	/*
2195c9b03c1aSBart Van Assche 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2196c9b03c1aSBart Van Assche 	 * table 91), both the QP timeout and the retry count have to be set
2197c9b03c1aSBart Van Assche 	 * for RC QP's during the RTR to RTS transition.
2198c9b03c1aSBart Van Assche 	 */
2199c9b03c1aSBart Van Assche 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2200c9b03c1aSBart Van Assche 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2201c9b03c1aSBart Van Assche 
2202c9b03c1aSBart Van Assche 	/*
2203c9b03c1aSBart Van Assche 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2204c9b03c1aSBart Van Assche 	 * it can take before an error completion is generated. See also
2205c9b03c1aSBart Van Assche 	 * C9-140..142 in the IBTA spec for more information about how to
2206c9b03c1aSBart Van Assche 	 * convert the QP Local ACK Timeout value to nanoseconds.
2207c9b03c1aSBart Van Assche 	 */
2208c9b03c1aSBart Van Assche 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2209c9b03c1aSBart Van Assche 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2210c9b03c1aSBart Van Assche 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2211c9b03c1aSBart Van Assche 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2212c9b03c1aSBart Van Assche 
2213c9b03c1aSBart Van Assche 	return rq_tmo_jiffies;
2214c9b03c1aSBart Van Assche }
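
/*
 * Worked example with illustrative values: for qp_attr->timeout == 18 and
 * qp_attr->retry_cnt == 7, T_tr = 4096 ns * 2^18 ~= 1.07 s, the worst-case
 * completion time is about 7 * 4 * 1.07 s ~= 30 s, and the returned value
 * therefore corresponds to roughly 31 seconds.
 */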
2215c9b03c1aSBart Van Assche 
2216961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2217e6300cbdSBart Van Assche 			       const struct srp_login_rsp *lrsp,
2218509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2219961e0be8SDavid Dillow {
2220509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2221961e0be8SDavid Dillow 	struct ib_qp_attr *qp_attr = NULL;
2222961e0be8SDavid Dillow 	int attr_mask = 0;
2223961e0be8SDavid Dillow 	int ret;
2224961e0be8SDavid Dillow 	int i;
2225961e0be8SDavid Dillow 
2226961e0be8SDavid Dillow 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2227509c07bcSBart Van Assche 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2228509c07bcSBart Van Assche 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2229961e0be8SDavid Dillow 
2230961e0be8SDavid Dillow 		/*
2231961e0be8SDavid Dillow 		 * Reserve credits for task management so we don't
2232961e0be8SDavid Dillow 		 * bounce requests back to the SCSI mid-layer.
2233961e0be8SDavid Dillow 		 */
2234961e0be8SDavid Dillow 		target->scsi_host->can_queue
2235509c07bcSBart Van Assche 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2236961e0be8SDavid Dillow 			      target->scsi_host->can_queue);
22374d73f95fSBart Van Assche 		target->scsi_host->cmd_per_lun
22384d73f95fSBart Van Assche 			= min_t(int, target->scsi_host->can_queue,
22394d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun);
2240961e0be8SDavid Dillow 	} else {
2241961e0be8SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
2242961e0be8SDavid Dillow 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2243961e0be8SDavid Dillow 		ret = -ECONNRESET;
2244961e0be8SDavid Dillow 		goto error;
2245961e0be8SDavid Dillow 	}
2246961e0be8SDavid Dillow 
2247509c07bcSBart Van Assche 	if (!ch->rx_ring) {
2248509c07bcSBart Van Assche 		ret = srp_alloc_iu_bufs(ch);
2249961e0be8SDavid Dillow 		if (ret)
2250961e0be8SDavid Dillow 			goto error;
2251961e0be8SDavid Dillow 	}
2252961e0be8SDavid Dillow 
2253961e0be8SDavid Dillow 	ret = -ENOMEM;
2254961e0be8SDavid Dillow 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2255961e0be8SDavid Dillow 	if (!qp_attr)
2256961e0be8SDavid Dillow 		goto error;
2257961e0be8SDavid Dillow 
2258961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTR;
2259961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2260961e0be8SDavid Dillow 	if (ret)
2261961e0be8SDavid Dillow 		goto error_free;
2262961e0be8SDavid Dillow 
2263509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2264961e0be8SDavid Dillow 	if (ret)
2265961e0be8SDavid Dillow 		goto error_free;
2266961e0be8SDavid Dillow 
22674d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; i++) {
2268509c07bcSBart Van Assche 		struct srp_iu *iu = ch->rx_ring[i];
2269509c07bcSBart Van Assche 
2270509c07bcSBart Van Assche 		ret = srp_post_recv(ch, iu);
2271961e0be8SDavid Dillow 		if (ret)
2272961e0be8SDavid Dillow 			goto error_free;
2273961e0be8SDavid Dillow 	}
2274961e0be8SDavid Dillow 
2275961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTS;
2276961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2277961e0be8SDavid Dillow 	if (ret)
2278961e0be8SDavid Dillow 		goto error_free;
2279961e0be8SDavid Dillow 
2280c9b03c1aSBart Van Assche 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2281c9b03c1aSBart Van Assche 
2282509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2283961e0be8SDavid Dillow 	if (ret)
2284961e0be8SDavid Dillow 		goto error_free;
2285961e0be8SDavid Dillow 
2286961e0be8SDavid Dillow 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2287961e0be8SDavid Dillow 
2288961e0be8SDavid Dillow error_free:
2289961e0be8SDavid Dillow 	kfree(qp_attr);
2290961e0be8SDavid Dillow 
2291961e0be8SDavid Dillow error:
2292509c07bcSBart Van Assche 	ch->status = ret;
2293961e0be8SDavid Dillow }
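
/*
 * Example of the credit reservation in srp_cm_rep_handler() (illustrative
 * numbers, assuming SRP_TSK_MGMT_SQ_SIZE is 1 as defined in ib_srp.h): if the
 * target grants req_lim_delta == 64 at login, can_queue is clamped to at most
 * 63 so that one request limit credit is always left for task management
 * functions such as aborts and LUN resets.
 */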
2294961e0be8SDavid Dillow 
2295aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2296aef9ec39SRoland Dreier 			       struct ib_cm_event *event,
2297509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2298aef9ec39SRoland Dreier {
2299509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
23007aa54bd7SDavid Dillow 	struct Scsi_Host *shost = target->scsi_host;
2301aef9ec39SRoland Dreier 	struct ib_class_port_info *cpi;
2302aef9ec39SRoland Dreier 	int opcode;
2303aef9ec39SRoland Dreier 
2304aef9ec39SRoland Dreier 	switch (event->param.rej_rcvd.reason) {
2305aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_CM_REDIRECT:
2306aef9ec39SRoland Dreier 		cpi = event->param.rej_rcvd.ari;
2307509c07bcSBart Van Assche 		ch->path.dlid = cpi->redirect_lid;
2308509c07bcSBart Van Assche 		ch->path.pkey = cpi->redirect_pkey;
2309aef9ec39SRoland Dreier 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2310509c07bcSBart Van Assche 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2311aef9ec39SRoland Dreier 
2312509c07bcSBart Van Assche 		ch->status = ch->path.dlid ?
2313aef9ec39SRoland Dreier 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2314aef9ec39SRoland Dreier 		break;
2315aef9ec39SRoland Dreier 
2316aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_REDIRECT:
23175d7cbfd6SRoland Dreier 		if (srp_target_is_topspin(target)) {
2318aef9ec39SRoland Dreier 			/*
2319aef9ec39SRoland Dreier 			 * Topspin/Cisco SRP gateways incorrectly send
2320aef9ec39SRoland Dreier 			 * reject reason code 25 when they mean 24
2321aef9ec39SRoland Dreier 			 * (port redirect).
2322aef9ec39SRoland Dreier 			 */
2323509c07bcSBart Van Assche 			memcpy(ch->path.dgid.raw,
2324aef9ec39SRoland Dreier 			       event->param.rej_rcvd.ari, 16);
2325aef9ec39SRoland Dreier 
23267aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, shost,
23277aa54bd7SDavid Dillow 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2328509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2329509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2330aef9ec39SRoland Dreier 
2331509c07bcSBart Van Assche 			ch->status = SRP_PORT_REDIRECT;
2332aef9ec39SRoland Dreier 		} else {
23337aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23347aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2335509c07bcSBart Van Assche 			ch->status = -ECONNRESET;
2336aef9ec39SRoland Dreier 		}
2337aef9ec39SRoland Dreier 		break;
2338aef9ec39SRoland Dreier 
2339aef9ec39SRoland Dreier 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
23407aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost,
23417aa54bd7SDavid Dillow 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2342509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2343aef9ec39SRoland Dreier 		break;
2344aef9ec39SRoland Dreier 
2345aef9ec39SRoland Dreier 	case IB_CM_REJ_CONSUMER_DEFINED:
2346aef9ec39SRoland Dreier 		opcode = *(u8 *) event->private_data;
2347aef9ec39SRoland Dreier 		if (opcode == SRP_LOGIN_REJ) {
2348aef9ec39SRoland Dreier 			struct srp_login_rej *rej = event->private_data;
2349aef9ec39SRoland Dreier 			u32 reason = be32_to_cpu(rej->reason);
2350aef9ec39SRoland Dreier 
2351aef9ec39SRoland Dreier 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
23527aa54bd7SDavid Dillow 				shost_printk(KERN_WARNING, shost,
23537aa54bd7SDavid Dillow 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2354aef9ec39SRoland Dreier 			else
2355e7ffde01SBart Van Assche 				shost_printk(KERN_WARNING, shost, PFX
2356e7ffde01SBart Van Assche 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2357747fe000SBart Van Assche 					     target->sgid.raw,
2358747fe000SBart Van Assche 					     target->orig_dgid.raw, reason);
2359aef9ec39SRoland Dreier 		} else
23607aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23617aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2362aef9ec39SRoland Dreier 				     " opcode 0x%02x\n", opcode);
2363509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2364aef9ec39SRoland Dreier 		break;
2365aef9ec39SRoland Dreier 
23669fe4bcf4SDavid Dillow 	case IB_CM_REJ_STALE_CONN:
23679fe4bcf4SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2368509c07bcSBart Van Assche 		ch->status = SRP_STALE_CONN;
23699fe4bcf4SDavid Dillow 		break;
23709fe4bcf4SDavid Dillow 
2371aef9ec39SRoland Dreier 	default:
23727aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2373aef9ec39SRoland Dreier 			     event->param.rej_rcvd.reason);
2374509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2375aef9ec39SRoland Dreier 	}
2376aef9ec39SRoland Dreier }
2377aef9ec39SRoland Dreier 
2378aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2379aef9ec39SRoland Dreier {
2380509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = cm_id->context;
2381509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2382aef9ec39SRoland Dreier 	int comp = 0;
2383aef9ec39SRoland Dreier 
2384aef9ec39SRoland Dreier 	switch (event->event) {
2385aef9ec39SRoland Dreier 	case IB_CM_REQ_ERROR:
23867aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
23877aa54bd7SDavid Dillow 			     PFX "Sending CM REQ failed\n");
2388aef9ec39SRoland Dreier 		comp = 1;
2389509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2390aef9ec39SRoland Dreier 		break;
2391aef9ec39SRoland Dreier 
2392aef9ec39SRoland Dreier 	case IB_CM_REP_RECEIVED:
2393aef9ec39SRoland Dreier 		comp = 1;
2394509c07bcSBart Van Assche 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2395aef9ec39SRoland Dreier 		break;
2396aef9ec39SRoland Dreier 
2397aef9ec39SRoland Dreier 	case IB_CM_REJ_RECEIVED:
23987aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2399aef9ec39SRoland Dreier 		comp = 1;
2400aef9ec39SRoland Dreier 
2401509c07bcSBart Van Assche 		srp_cm_rej_handler(cm_id, event, ch);
2402aef9ec39SRoland Dreier 		break;
2403aef9ec39SRoland Dreier 
2404b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_RECEIVED:
24057aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24067aa54bd7SDavid Dillow 			     PFX "DREQ received - connection closed\n");
2407c014c8cdSBart Van Assche 		ch->connected = false;
2408b7ac4ab4SIshai Rabinovitz 		if (ib_send_cm_drep(cm_id, NULL, 0))
24097aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
24107aa54bd7SDavid Dillow 				     PFX "Sending CM DREP failed\n");
2411c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
2412aef9ec39SRoland Dreier 		break;
2413aef9ec39SRoland Dreier 
2414aef9ec39SRoland Dreier 	case IB_CM_TIMEWAIT_EXIT:
24157aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
24167aa54bd7SDavid Dillow 			     PFX "connection closed\n");
2417ac72d766SBart Van Assche 		comp = 1;
2418aef9ec39SRoland Dreier 
2419509c07bcSBart Van Assche 		ch->status = 0;
2420aef9ec39SRoland Dreier 		break;
2421aef9ec39SRoland Dreier 
2422b7ac4ab4SIshai Rabinovitz 	case IB_CM_MRA_RECEIVED:
2423b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_ERROR:
2424b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREP_RECEIVED:
2425b7ac4ab4SIshai Rabinovitz 		break;
2426b7ac4ab4SIshai Rabinovitz 
2427aef9ec39SRoland Dreier 	default:
24287aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24297aa54bd7SDavid Dillow 			     PFX "Unhandled CM event %d\n", event->event);
2430aef9ec39SRoland Dreier 		break;
2431aef9ec39SRoland Dreier 	}
2432aef9ec39SRoland Dreier 
2433aef9ec39SRoland Dreier 	if (comp)
2434509c07bcSBart Van Assche 		complete(&ch->done);
2435aef9ec39SRoland Dreier 
2436aef9ec39SRoland Dreier 	return 0;
2437aef9ec39SRoland Dreier }
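
/*
 * Note on the 'comp' flag above: the connect path waits on ch->done after
 * sending the CM REQ, so the REQ error, REP received, REJ received and
 * timewait-exit cases signal complete(&ch->done) and pass their result back
 * through ch->status.
 */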
2438aef9ec39SRoland Dreier 
243971444b97SJack Wang /**
244071444b97SJack Wang  * srp_change_queue_depth - set the device queue depth
244171444b97SJack Wang  * @sdev: SCSI device struct
244271444b97SJack Wang  * @qdepth: requested queue depth
244371444b97SJack Wang  *
244471444b97SJack Wang  * Returns the new queue depth.
244571444b97SJack Wang  */
244671444b97SJack Wang static int
2447db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
244871444b97SJack Wang {
244971444b97SJack Wang 	if (!sdev->tagged_supported)
24501e6f2416SChristoph Hellwig 		qdepth = 1;
2451db5ed4dfSChristoph Hellwig 	return scsi_change_queue_depth(sdev, qdepth);
245271444b97SJack Wang }
245371444b97SJack Wang 
2454985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2455985aa495SBart Van Assche 			     u8 func)
2456aef9ec39SRoland Dreier {
2457509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2458a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
245919081f31SDavid Dillow 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2460aef9ec39SRoland Dreier 	struct srp_iu *iu;
2461aef9ec39SRoland Dreier 	struct srp_tsk_mgmt *tsk_mgmt;
2462aef9ec39SRoland Dreier 
2463c014c8cdSBart Van Assche 	if (!ch->connected || target->qp_in_error)
24643780d1f0SBart Van Assche 		return -1;
24653780d1f0SBart Van Assche 
2466509c07bcSBart Van Assche 	init_completion(&ch->tsk_mgmt_done);
2467aef9ec39SRoland Dreier 
2468a95cadb9SBart Van Assche 	/*
2469509c07bcSBart Van Assche 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2470a95cadb9SBart Van Assche 	 * invoked while a task management function is being sent.
2471a95cadb9SBart Van Assche 	 */
2472a95cadb9SBart Van Assche 	mutex_lock(&rport->mutex);
2473509c07bcSBart Van Assche 	spin_lock_irq(&ch->lock);
2474509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2475509c07bcSBart Van Assche 	spin_unlock_irq(&ch->lock);
247676c75b25SBart Van Assche 
2477a95cadb9SBart Van Assche 	if (!iu) {
2478a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2479a95cadb9SBart Van Assche 
248076c75b25SBart Van Assche 		return -1;
2481a95cadb9SBart Van Assche 	}
2482aef9ec39SRoland Dreier 
248319081f31SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
248419081f31SDavid Dillow 				   DMA_TO_DEVICE);
2485aef9ec39SRoland Dreier 	tsk_mgmt = iu->buf;
2486aef9ec39SRoland Dreier 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2487aef9ec39SRoland Dreier 
2488aef9ec39SRoland Dreier 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2489985aa495SBart Van Assche 	int_to_scsilun(lun, &tsk_mgmt->lun);
2490f8b6e31eSDavid Dillow 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2491aef9ec39SRoland Dreier 	tsk_mgmt->tsk_mgmt_func = func;
2492f8b6e31eSDavid Dillow 	tsk_mgmt->task_tag	= req_tag;
2493aef9ec39SRoland Dreier 
249419081f31SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
249519081f31SDavid Dillow 				      DMA_TO_DEVICE);
2496509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2497509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2498a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2499a95cadb9SBart Van Assche 
250076c75b25SBart Van Assche 		return -1;
250176c75b25SBart Van Assche 	}
2502a95cadb9SBart Van Assche 	mutex_unlock(&rport->mutex);
2503d945e1dfSRoland Dreier 
2504509c07bcSBart Van Assche 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2505aef9ec39SRoland Dreier 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2506d945e1dfSRoland Dreier 		return -1;
2507aef9ec39SRoland Dreier 
2508d945e1dfSRoland Dreier 	return 0;
2509d945e1dfSRoland Dreier }
2510d945e1dfSRoland Dreier 
2511aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd)
2512aef9ec39SRoland Dreier {
2513d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2514f8b6e31eSDavid Dillow 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
251577f2c1a4SBart Van Assche 	u32 tag;
2516d92c0da7SBart Van Assche 	u16 ch_idx;
2517509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2518086f44f5SBart Van Assche 	int ret;
2519d945e1dfSRoland Dreier 
25207aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2521aef9ec39SRoland Dreier 
2522d92c0da7SBart Van Assche 	if (!req)
252399b6697aSBart Van Assche 		return SUCCESS;
252477f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2525d92c0da7SBart Van Assche 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2526d92c0da7SBart Van Assche 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2527d92c0da7SBart Van Assche 		return SUCCESS;
2528d92c0da7SBart Van Assche 	ch = &target->ch[ch_idx];
2529d92c0da7SBart Van Assche 	if (!srp_claim_req(ch, req, NULL, scmnd))
2530d92c0da7SBart Van Assche 		return SUCCESS;
2531d92c0da7SBart Van Assche 	shost_printk(KERN_ERR, target->scsi_host,
2532d92c0da7SBart Van Assche 		     "Sending SRP abort for tag %#x\n", tag);
253377f2c1a4SBart Van Assche 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
253480d5e8a2SBart Van Assche 			      SRP_TSK_ABORT_TASK) == 0)
2535086f44f5SBart Van Assche 		ret = SUCCESS;
2536ed9b2264SBart Van Assche 	else if (target->rport->state == SRP_RPORT_LOST)
253799e1c139SBart Van Assche 		ret = FAST_IO_FAIL;
2538086f44f5SBart Van Assche 	else
2539086f44f5SBart Van Assche 		ret = FAILED;
2540509c07bcSBart Van Assche 	srp_free_req(ch, req, scmnd, 0);
2541d945e1dfSRoland Dreier 	scmnd->result = DID_ABORT << 16;
2542d8536670SBart Van Assche 	scmnd->scsi_done(scmnd);
2543d945e1dfSRoland Dreier 
2544086f44f5SBart Van Assche 	return ret;
2545aef9ec39SRoland Dreier }
2546aef9ec39SRoland Dreier 
2547aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2548aef9ec39SRoland Dreier {
2549d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2550d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2551536ae14eSBart Van Assche 	int i, j;
2552d945e1dfSRoland Dreier 
25537aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2554aef9ec39SRoland Dreier 
2555d92c0da7SBart Van Assche 	ch = &target->ch[0];
2556509c07bcSBart Van Assche 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2557f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2558d945e1dfSRoland Dreier 		return FAILED;
2559509c07bcSBart Van Assche 	if (ch->tsk_mgmt_status)
2560d945e1dfSRoland Dreier 		return FAILED;
2561d945e1dfSRoland Dreier 
2562d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2563d92c0da7SBart Van Assche 		ch = &target->ch[i];
25644d73f95fSBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
2565509c07bcSBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
2566509c07bcSBart Van Assche 
2567509c07bcSBart Van Assche 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2568536ae14eSBart Van Assche 		}
2569d92c0da7SBart Van Assche 	}
2570d945e1dfSRoland Dreier 
2571d945e1dfSRoland Dreier 	return SUCCESS;
2572aef9ec39SRoland Dreier }
2573aef9ec39SRoland Dreier 
2574aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2575aef9ec39SRoland Dreier {
2576aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2577aef9ec39SRoland Dreier 
25787aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2579aef9ec39SRoland Dreier 
2580ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2581aef9ec39SRoland Dreier }
2582aef9ec39SRoland Dreier 
2583c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2584c9b03c1aSBart Van Assche {
2585c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2586c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2587c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2588c9b03c1aSBart Van Assche 	unsigned long timeout;
2589c9b03c1aSBart Van Assche 
2590c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2591c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2592c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2593c9b03c1aSBart Van Assche 	}
2594c9b03c1aSBart Van Assche 
2595c9b03c1aSBart Van Assche 	return 0;
2596c9b03c1aSBart Van Assche }
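
/*
 * The 30 second floor above matches the default SCSI disk command timeout;
 * raising the block layer timeout to at least target->rq_tmo_jiffies
 * (computed in srp_compute_rq_tmo()) keeps requests from being timed out by
 * the block layer before the HCA has had a chance to generate an error
 * completion for them.
 */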
2597c9b03c1aSBart Van Assche 
2598ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2599ee959b00STony Jones 			   char *buf)
26006ecb0c84SRoland Dreier {
2601ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26026ecb0c84SRoland Dreier 
260345c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
26046ecb0c84SRoland Dreier }
26056ecb0c84SRoland Dreier 
2606ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2607ee959b00STony Jones 			     char *buf)
26086ecb0c84SRoland Dreier {
2609ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26106ecb0c84SRoland Dreier 
261145c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
26126ecb0c84SRoland Dreier }
26136ecb0c84SRoland Dreier 
2614ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2615ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
26166ecb0c84SRoland Dreier {
2617ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26186ecb0c84SRoland Dreier 
261945c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
26206ecb0c84SRoland Dreier }
26216ecb0c84SRoland Dreier 
2622ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2623ee959b00STony Jones 			 char *buf)
26246ecb0c84SRoland Dreier {
2625ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26266ecb0c84SRoland Dreier 
2627747fe000SBart Van Assche 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
26286ecb0c84SRoland Dreier }
26296ecb0c84SRoland Dreier 
2630848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2631848b3082SBart Van Assche 			 char *buf)
2632848b3082SBart Van Assche {
2633848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2634848b3082SBart Van Assche 
2635747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2636848b3082SBart Van Assche }
2637848b3082SBart Van Assche 
2638ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2639ee959b00STony Jones 			 char *buf)
26406ecb0c84SRoland Dreier {
2641ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2642d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch = &target->ch[0];
26436ecb0c84SRoland Dreier 
2644509c07bcSBart Van Assche 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
26456ecb0c84SRoland Dreier }
26466ecb0c84SRoland Dreier 
2647ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2648ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
26493633b3d0SIshai Rabinovitz {
2650ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26513633b3d0SIshai Rabinovitz 
2652747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
26533633b3d0SIshai Rabinovitz }
26543633b3d0SIshai Rabinovitz 
265589de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
265689de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
265789de7486SBart Van Assche {
265889de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2659d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2660d92c0da7SBart Van Assche 	int i, req_lim = INT_MAX;
266189de7486SBart Van Assche 
2662d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2663d92c0da7SBart Van Assche 		ch = &target->ch[i];
2664d92c0da7SBart Van Assche 		req_lim = min(req_lim, ch->req_lim);
2665d92c0da7SBart Van Assche 	}
2666d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", req_lim);
266789de7486SBart Van Assche }
266889de7486SBart Van Assche 
2669ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2670ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
26716bfa24faSRoland Dreier {
2672ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26736bfa24faSRoland Dreier 
26746bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
26756bfa24faSRoland Dreier }
26766bfa24faSRoland Dreier 
2677ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2678ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2679ded7f1a1SIshai Rabinovitz {
2680ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681ded7f1a1SIshai Rabinovitz 
2682ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2683ded7f1a1SIshai Rabinovitz }
2684ded7f1a1SIshai Rabinovitz 
2685ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2686ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2687ded7f1a1SIshai Rabinovitz {
2688ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689ded7f1a1SIshai Rabinovitz 
269005321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2691ded7f1a1SIshai Rabinovitz }
2692ded7f1a1SIshai Rabinovitz 
2693d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2694d92c0da7SBart Van Assche 			     char *buf)
2695d92c0da7SBart Van Assche {
2696d92c0da7SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2697d92c0da7SBart Van Assche 
2698d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", target->ch_count);
2699d92c0da7SBart Van Assche }
2700d92c0da7SBart Van Assche 
27014b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
27024b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
27034b5e5f41SBart Van Assche {
27044b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27054b5e5f41SBart Van Assche 
27064b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
27074b5e5f41SBart Van Assche }
27084b5e5f41SBart Van Assche 
27097bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
27107bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
27117bb312e4SVu Pham {
27127bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27137bb312e4SVu Pham 
27147bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
27157bb312e4SVu Pham }
27167bb312e4SVu Pham 
271749248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
271849248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
271949248644SDavid Dillow {
272049248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
272149248644SDavid Dillow 
272249248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
272349248644SDavid Dillow }
272449248644SDavid Dillow 
2725c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2726c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2727c07d424dSDavid Dillow {
2728c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2729c07d424dSDavid Dillow 
2730c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2731c07d424dSDavid Dillow }
2732c07d424dSDavid Dillow 
2733ee959b00STony Jones static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2734ee959b00STony Jones static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2735ee959b00STony Jones static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2736ee959b00STony Jones static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2737848b3082SBart Van Assche static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2738ee959b00STony Jones static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2739ee959b00STony Jones static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
274089de7486SBart Van Assche static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2741ee959b00STony Jones static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2742ee959b00STony Jones static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2743ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2744d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
27454b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
27467bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
274749248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2748c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
27496ecb0c84SRoland Dreier 
2750ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = {
2751ee959b00STony Jones 	&dev_attr_id_ext,
2752ee959b00STony Jones 	&dev_attr_ioc_guid,
2753ee959b00STony Jones 	&dev_attr_service_id,
2754ee959b00STony Jones 	&dev_attr_pkey,
2755848b3082SBart Van Assche 	&dev_attr_sgid,
2756ee959b00STony Jones 	&dev_attr_dgid,
2757ee959b00STony Jones 	&dev_attr_orig_dgid,
275889de7486SBart Van Assche 	&dev_attr_req_lim,
2759ee959b00STony Jones 	&dev_attr_zero_req_lim,
2760ee959b00STony Jones 	&dev_attr_local_ib_port,
2761ee959b00STony Jones 	&dev_attr_local_ib_device,
2762d92c0da7SBart Van Assche 	&dev_attr_ch_count,
27634b5e5f41SBart Van Assche 	&dev_attr_comp_vector,
27647bb312e4SVu Pham 	&dev_attr_tl_retry_count,
276549248644SDavid Dillow 	&dev_attr_cmd_sg_entries,
2766c07d424dSDavid Dillow 	&dev_attr_allow_ext_sg,
27676ecb0c84SRoland Dreier 	NULL
27686ecb0c84SRoland Dreier };
27696ecb0c84SRoland Dreier 
2770aef9ec39SRoland Dreier static struct scsi_host_template srp_template = {
2771aef9ec39SRoland Dreier 	.module				= THIS_MODULE,
2772b7f008fdSRoland Dreier 	.name				= "InfiniBand SRP initiator",
2773b7f008fdSRoland Dreier 	.proc_name			= DRV_NAME,
2774c9b03c1aSBart Van Assche 	.slave_configure		= srp_slave_configure,
2775aef9ec39SRoland Dreier 	.info				= srp_target_info,
2776aef9ec39SRoland Dreier 	.queuecommand			= srp_queuecommand,
277771444b97SJack Wang 	.change_queue_depth             = srp_change_queue_depth,
2778aef9ec39SRoland Dreier 	.eh_abort_handler		= srp_abort,
2779aef9ec39SRoland Dreier 	.eh_device_reset_handler	= srp_reset_device,
2780aef9ec39SRoland Dreier 	.eh_host_reset_handler		= srp_reset_host,
27812742c1daSBart Van Assche 	.skip_settle_delay		= true,
278249248644SDavid Dillow 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
27834d73f95fSBart Van Assche 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2784aef9ec39SRoland Dreier 	.this_id			= -1,
27854d73f95fSBart Van Assche 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
27866ecb0c84SRoland Dreier 	.use_clustering			= ENABLE_CLUSTERING,
278777f2c1a4SBart Van Assche 	.shost_attrs			= srp_host_attrs,
2788c40ecc12SChristoph Hellwig 	.track_queue_depth		= 1,
2789aef9ec39SRoland Dreier };
2790aef9ec39SRoland Dreier 
279134aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host)
279234aa654eSBart Van Assche {
279334aa654eSBart Van Assche 	struct scsi_device *sdev;
279434aa654eSBart Van Assche 	int c = 0;
279534aa654eSBart Van Assche 
279634aa654eSBart Van Assche 	shost_for_each_device(sdev, host)
279734aa654eSBart Van Assche 		c++;
279834aa654eSBart Van Assche 
279934aa654eSBart Van Assche 	return c;
280034aa654eSBart Van Assche }
280134aa654eSBart Van Assche 
2802bc44bd1dSBart Van Assche /*
2803bc44bd1dSBart Van Assche  * Return values:
2804bc44bd1dSBart Van Assche  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2805bc44bd1dSBart Van Assche  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2806bc44bd1dSBart Van Assche  *    removal has been scheduled.
2807bc44bd1dSBart Van Assche  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2808bc44bd1dSBart Van Assche  */
2809aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2810aef9ec39SRoland Dreier {
28113236822bSFUJITA Tomonori 	struct srp_rport_identifiers ids;
28123236822bSFUJITA Tomonori 	struct srp_rport *rport;
28133236822bSFUJITA Tomonori 
281434aa654eSBart Van Assche 	target->state = SRP_TARGET_SCANNING;
2815aef9ec39SRoland Dreier 	sprintf(target->target_name, "SRP.T10:%016llX",
281645c37cadSBart Van Assche 		be64_to_cpu(target->id_ext));
2817aef9ec39SRoland Dreier 
281805321937SGreg Kroah-Hartman 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2819aef9ec39SRoland Dreier 		return -ENODEV;
2820aef9ec39SRoland Dreier 
28213236822bSFUJITA Tomonori 	memcpy(ids.port_id, &target->id_ext, 8);
28223236822bSFUJITA Tomonori 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2823aebd5e47SFUJITA Tomonori 	ids.roles = SRP_RPORT_ROLE_TARGET;
28243236822bSFUJITA Tomonori 	rport = srp_rport_add(target->scsi_host, &ids);
28253236822bSFUJITA Tomonori 	if (IS_ERR(rport)) {
28263236822bSFUJITA Tomonori 		scsi_remove_host(target->scsi_host);
28273236822bSFUJITA Tomonori 		return PTR_ERR(rport);
28283236822bSFUJITA Tomonori 	}
28293236822bSFUJITA Tomonori 
2830dc1bdbd9SBart Van Assche 	rport->lld_data = target;
28319dd69a60SBart Van Assche 	target->rport = rport;
2832dc1bdbd9SBart Van Assche 
2833b3589fd4SMatthew Wilcox 	spin_lock(&host->target_lock);
2834aef9ec39SRoland Dreier 	list_add_tail(&target->list, &host->target_list);
2835b3589fd4SMatthew Wilcox 	spin_unlock(&host->target_lock);
2836aef9ec39SRoland Dreier 
2837aef9ec39SRoland Dreier 	scsi_scan_target(&target->scsi_host->shost_gendev,
28381962a4a1SMatthew Wilcox 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2839aef9ec39SRoland Dreier 
2840c014c8cdSBart Van Assche 	if (srp_connected_ch(target) < target->ch_count ||
2841c014c8cdSBart Van Assche 	    target->qp_in_error) {
284234aa654eSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
284334aa654eSBart Van Assche 			     PFX "SCSI scan failed - removing SCSI host\n");
284434aa654eSBart Van Assche 		srp_queue_remove_work(target);
284534aa654eSBart Van Assche 		goto out;
284634aa654eSBart Van Assche 	}
284734aa654eSBart Van Assche 
2848cf1acab7SBart Van Assche 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
284934aa654eSBart Van Assche 		 dev_name(&target->scsi_host->shost_gendev),
285034aa654eSBart Van Assche 		 srp_sdev_count(target->scsi_host));
285134aa654eSBart Van Assche 
285234aa654eSBart Van Assche 	spin_lock_irq(&target->lock);
285334aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
285434aa654eSBart Van Assche 		target->state = SRP_TARGET_LIVE;
285534aa654eSBart Van Assche 	spin_unlock_irq(&target->lock);
285634aa654eSBart Van Assche 
285734aa654eSBart Van Assche out:
2858aef9ec39SRoland Dreier 	return 0;
2859aef9ec39SRoland Dreier }
2860aef9ec39SRoland Dreier 
2861ee959b00STony Jones static void srp_release_dev(struct device *dev)
2862aef9ec39SRoland Dreier {
2863aef9ec39SRoland Dreier 	struct srp_host *host =
2864ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2865aef9ec39SRoland Dreier 
2866aef9ec39SRoland Dreier 	complete(&host->released);
2867aef9ec39SRoland Dreier }
2868aef9ec39SRoland Dreier 
2869aef9ec39SRoland Dreier static struct class srp_class = {
2870aef9ec39SRoland Dreier 	.name    = "infiniband_srp",
2871ee959b00STony Jones 	.dev_release = srp_release_dev
2872aef9ec39SRoland Dreier };
2873aef9ec39SRoland Dreier 
287496fc248aSBart Van Assche /**
287596fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2876af24663bSBart Van Assche  * @host:   SRP host.
2877af24663bSBart Van Assche  * @target: SRP target port.
287896fc248aSBart Van Assche  */
287996fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
288096fc248aSBart Van Assche 			    struct srp_target_port *target)
288196fc248aSBart Van Assche {
288296fc248aSBart Van Assche 	struct srp_target_port *t;
288396fc248aSBart Van Assche 	bool ret = false;
288496fc248aSBart Van Assche 
288596fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
288696fc248aSBart Van Assche 		goto out;
288796fc248aSBart Van Assche 
288896fc248aSBart Van Assche 	ret = true;
288996fc248aSBart Van Assche 
289096fc248aSBart Van Assche 	spin_lock(&host->target_lock);
289196fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
289296fc248aSBart Van Assche 		if (t != target &&
289396fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
289496fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
289596fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
289696fc248aSBart Van Assche 			ret = false;
289796fc248aSBart Van Assche 			break;
289896fc248aSBart Van Assche 		}
289996fc248aSBart Van Assche 	}
290096fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
290196fc248aSBart Van Assche 
290296fc248aSBart Van Assche out:
290396fc248aSBart Van Assche 	return ret;
290496fc248aSBart Van Assche }
290596fc248aSBart Van Assche 
2906aef9ec39SRoland Dreier /*
2907aef9ec39SRoland Dreier  * Target ports are added by writing
2908aef9ec39SRoland Dreier  *
2909aef9ec39SRoland Dreier  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2910aef9ec39SRoland Dreier  *     pkey=<P_Key>,service_id=<service ID>
2911aef9ec39SRoland Dreier  *
2912aef9ec39SRoland Dreier  * to the add_target sysfs attribute.
2913aef9ec39SRoland Dreier  */
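/*
 * For example (illustrative values; the sysfs directory name depends on the
 * local HCA and port, e.g. srp-mlx4_0-1):
 *
 *     echo "id_ext=200100a0b8120001,ioc_guid=00a0b8120001,dgid=fe800000000000000002c90300a0b812,pkey=ffff,service_id=200100a0b8120001" > \
 *         /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */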
2914aef9ec39SRoland Dreier enum {
2915aef9ec39SRoland Dreier 	SRP_OPT_ERR		= 0,
2916aef9ec39SRoland Dreier 	SRP_OPT_ID_EXT		= 1 << 0,
2917aef9ec39SRoland Dreier 	SRP_OPT_IOC_GUID	= 1 << 1,
2918aef9ec39SRoland Dreier 	SRP_OPT_DGID		= 1 << 2,
2919aef9ec39SRoland Dreier 	SRP_OPT_PKEY		= 1 << 3,
2920aef9ec39SRoland Dreier 	SRP_OPT_SERVICE_ID	= 1 << 4,
2921aef9ec39SRoland Dreier 	SRP_OPT_MAX_SECT	= 1 << 5,
292252fb2b50SVu Pham 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
29230c0450dbSRamachandra K 	SRP_OPT_IO_CLASS	= 1 << 7,
292401cb9bcbSIshai Rabinovitz 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
292549248644SDavid Dillow 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2926c07d424dSDavid Dillow 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2927c07d424dSDavid Dillow 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
29284b5e5f41SBart Van Assche 	SRP_OPT_COMP_VECTOR	= 1 << 12,
29297bb312e4SVu Pham 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
29304d73f95fSBart Van Assche 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2931aef9ec39SRoland Dreier 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2932aef9ec39SRoland Dreier 				   SRP_OPT_IOC_GUID	|
2933aef9ec39SRoland Dreier 				   SRP_OPT_DGID		|
2934aef9ec39SRoland Dreier 				   SRP_OPT_PKEY		|
2935aef9ec39SRoland Dreier 				   SRP_OPT_SERVICE_ID),
2936aef9ec39SRoland Dreier };
2937aef9ec39SRoland Dreier 
2938a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = {
2939aef9ec39SRoland Dreier 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2940aef9ec39SRoland Dreier 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2941aef9ec39SRoland Dreier 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2942aef9ec39SRoland Dreier 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2943aef9ec39SRoland Dreier 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2944aef9ec39SRoland Dreier 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
294552fb2b50SVu Pham 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
29460c0450dbSRamachandra K 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
294701cb9bcbSIshai Rabinovitz 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
294849248644SDavid Dillow 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2949c07d424dSDavid Dillow 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2950c07d424dSDavid Dillow 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
29514b5e5f41SBart Van Assche 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
29527bb312e4SVu Pham 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
29534d73f95fSBart Van Assche 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2954aef9ec39SRoland Dreier 	{ SRP_OPT_ERR,			NULL 			}
2955aef9ec39SRoland Dreier };
2956aef9ec39SRoland Dreier 
2957aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target)
2958aef9ec39SRoland Dreier {
2959aef9ec39SRoland Dreier 	char *options, *sep_opt;
2960aef9ec39SRoland Dreier 	char *p;
2961aef9ec39SRoland Dreier 	char dgid[3];
2962aef9ec39SRoland Dreier 	substring_t args[MAX_OPT_ARGS];
2963aef9ec39SRoland Dreier 	int opt_mask = 0;
2964aef9ec39SRoland Dreier 	int token;
2965aef9ec39SRoland Dreier 	int ret = -EINVAL;
2966aef9ec39SRoland Dreier 	int i;
2967aef9ec39SRoland Dreier 
2968aef9ec39SRoland Dreier 	options = kstrdup(buf, GFP_KERNEL);
2969aef9ec39SRoland Dreier 	if (!options)
2970aef9ec39SRoland Dreier 		return -ENOMEM;
2971aef9ec39SRoland Dreier 
2972aef9ec39SRoland Dreier 	sep_opt = options;
29737dcf9c19SSagi Grimberg 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2974aef9ec39SRoland Dreier 		if (!*p)
2975aef9ec39SRoland Dreier 			continue;
2976aef9ec39SRoland Dreier 
2977aef9ec39SRoland Dreier 		token = match_token(p, srp_opt_tokens, args);
2978aef9ec39SRoland Dreier 		opt_mask |= token;
2979aef9ec39SRoland Dreier 
2980aef9ec39SRoland Dreier 		switch (token) {
2981aef9ec39SRoland Dreier 		case SRP_OPT_ID_EXT:
2982aef9ec39SRoland Dreier 			p = match_strdup(args);
2983a20f3a6dSIshai Rabinovitz 			if (!p) {
2984a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2985a20f3a6dSIshai Rabinovitz 				goto out;
2986a20f3a6dSIshai Rabinovitz 			}
2987aef9ec39SRoland Dreier 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2988aef9ec39SRoland Dreier 			kfree(p);
2989aef9ec39SRoland Dreier 			break;
2990aef9ec39SRoland Dreier 
2991aef9ec39SRoland Dreier 		case SRP_OPT_IOC_GUID:
2992aef9ec39SRoland Dreier 			p = match_strdup(args);
2993a20f3a6dSIshai Rabinovitz 			if (!p) {
2994a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2995a20f3a6dSIshai Rabinovitz 				goto out;
2996a20f3a6dSIshai Rabinovitz 			}
2997aef9ec39SRoland Dreier 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2998aef9ec39SRoland Dreier 			kfree(p);
2999aef9ec39SRoland Dreier 			break;
3000aef9ec39SRoland Dreier 
3001aef9ec39SRoland Dreier 		case SRP_OPT_DGID:
3002aef9ec39SRoland Dreier 			p = match_strdup(args);
3003a20f3a6dSIshai Rabinovitz 			if (!p) {
3004a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3005a20f3a6dSIshai Rabinovitz 				goto out;
3006a20f3a6dSIshai Rabinovitz 			}
3007aef9ec39SRoland Dreier 			if (strlen(p) != 32) {
3008e0bda7d8SBart Van Assche 				pr_warn("bad dest GID parameter '%s'\n", p);
3009ce1823f0SRoland Dreier 				kfree(p);
3010aef9ec39SRoland Dreier 				goto out;
3011aef9ec39SRoland Dreier 			}
3012aef9ec39SRoland Dreier 
3013aef9ec39SRoland Dreier 			for (i = 0; i < 16; ++i) {
3014747fe000SBart Van Assche 				strlcpy(dgid, p + i * 2, sizeof(dgid));
3015747fe000SBart Van Assche 				if (sscanf(dgid, "%hhx",
3016747fe000SBart Van Assche 					   &target->orig_dgid.raw[i]) < 1) {
3017747fe000SBart Van Assche 					ret = -EINVAL;
3018747fe000SBart Van Assche 					kfree(p);
3019747fe000SBart Van Assche 					goto out;
3020747fe000SBart Van Assche 				}
3021aef9ec39SRoland Dreier 			}
3022bf17c1c7SRoland Dreier 			kfree(p);
3023aef9ec39SRoland Dreier 			break;
3024aef9ec39SRoland Dreier 
3025aef9ec39SRoland Dreier 		case SRP_OPT_PKEY:
3026aef9ec39SRoland Dreier 			if (match_hex(args, &token)) {
3027e0bda7d8SBart Van Assche 				pr_warn("bad P_Key parameter '%s'\n", p);
3028aef9ec39SRoland Dreier 				goto out;
3029aef9ec39SRoland Dreier 			}
3030747fe000SBart Van Assche 			target->pkey = cpu_to_be16(token);
3031aef9ec39SRoland Dreier 			break;
3032aef9ec39SRoland Dreier 
3033aef9ec39SRoland Dreier 		case SRP_OPT_SERVICE_ID:
3034aef9ec39SRoland Dreier 			p = match_strdup(args);
3035a20f3a6dSIshai Rabinovitz 			if (!p) {
3036a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3037a20f3a6dSIshai Rabinovitz 				goto out;
3038a20f3a6dSIshai Rabinovitz 			}
3039aef9ec39SRoland Dreier 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3040aef9ec39SRoland Dreier 			kfree(p);
3041aef9ec39SRoland Dreier 			break;
3042aef9ec39SRoland Dreier 
3043aef9ec39SRoland Dreier 		case SRP_OPT_MAX_SECT:
3044aef9ec39SRoland Dreier 			if (match_int(args, &token)) {
3045e0bda7d8SBart Van Assche 				pr_warn("bad max sect parameter '%s'\n", p);
3046aef9ec39SRoland Dreier 				goto out;
3047aef9ec39SRoland Dreier 			}
3048aef9ec39SRoland Dreier 			target->scsi_host->max_sectors = token;
3049aef9ec39SRoland Dreier 			break;
3050aef9ec39SRoland Dreier 
30514d73f95fSBart Van Assche 		case SRP_OPT_QUEUE_SIZE:
30524d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
30534d73f95fSBart Van Assche 				pr_warn("bad queue_size parameter '%s'\n", p);
30544d73f95fSBart Van Assche 				goto out;
30554d73f95fSBart Van Assche 			}
30564d73f95fSBart Van Assche 			target->scsi_host->can_queue = token;
30574d73f95fSBart Van Assche 			target->queue_size = token + SRP_RSP_SQ_SIZE +
30584d73f95fSBart Van Assche 					     SRP_TSK_MGMT_SQ_SIZE;
30594d73f95fSBart Van Assche 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
30604d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun = token;
30614d73f95fSBart Van Assche 			break;
30624d73f95fSBart Van Assche 
306352fb2b50SVu Pham 		case SRP_OPT_MAX_CMD_PER_LUN:
30644d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
3065e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3066e0bda7d8SBart Van Assche 					p);
306752fb2b50SVu Pham 				goto out;
306852fb2b50SVu Pham 			}
30694d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun = token;
307052fb2b50SVu Pham 			break;
307152fb2b50SVu Pham 
30720c0450dbSRamachandra K 		case SRP_OPT_IO_CLASS:
30730c0450dbSRamachandra K 			if (match_hex(args, &token)) {
3074e0bda7d8SBart Van Assche 				pr_warn("bad IO class parameter '%s'\n", p);
30750c0450dbSRamachandra K 				goto out;
30760c0450dbSRamachandra K 			}
30770c0450dbSRamachandra K 			if (token != SRP_REV10_IB_IO_CLASS &&
30780c0450dbSRamachandra K 			    token != SRP_REV16A_IB_IO_CLASS) {
3079e0bda7d8SBart Van Assche 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3080e0bda7d8SBart Van Assche 					token, SRP_REV10_IB_IO_CLASS,
3081e0bda7d8SBart Van Assche 					SRP_REV16A_IB_IO_CLASS);
30820c0450dbSRamachandra K 				goto out;
30830c0450dbSRamachandra K 			}
30840c0450dbSRamachandra K 			target->io_class = token;
30850c0450dbSRamachandra K 			break;
30860c0450dbSRamachandra K 
308701cb9bcbSIshai Rabinovitz 		case SRP_OPT_INITIATOR_EXT:
308801cb9bcbSIshai Rabinovitz 			p = match_strdup(args);
3089a20f3a6dSIshai Rabinovitz 			if (!p) {
3090a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3091a20f3a6dSIshai Rabinovitz 				goto out;
3092a20f3a6dSIshai Rabinovitz 			}
309301cb9bcbSIshai Rabinovitz 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
309401cb9bcbSIshai Rabinovitz 			kfree(p);
309501cb9bcbSIshai Rabinovitz 			break;
309601cb9bcbSIshai Rabinovitz 
309749248644SDavid Dillow 		case SRP_OPT_CMD_SG_ENTRIES:
309849248644SDavid Dillow 			if (match_int(args, &token) || token < 1 || token > 255) {
3099e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3100e0bda7d8SBart Van Assche 					p);
310149248644SDavid Dillow 				goto out;
310249248644SDavid Dillow 			}
310349248644SDavid Dillow 			target->cmd_sg_cnt = token;
310449248644SDavid Dillow 			break;
310549248644SDavid Dillow 
3106c07d424dSDavid Dillow 		case SRP_OPT_ALLOW_EXT_SG:
3107c07d424dSDavid Dillow 			if (match_int(args, &token)) {
3108e0bda7d8SBart Van Assche 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3109c07d424dSDavid Dillow 				goto out;
3110c07d424dSDavid Dillow 			}
3111c07d424dSDavid Dillow 			target->allow_ext_sg = !!token;
3112c07d424dSDavid Dillow 			break;
3113c07d424dSDavid Dillow 
3114c07d424dSDavid Dillow 		case SRP_OPT_SG_TABLESIZE:
3115c07d424dSDavid Dillow 			if (match_int(args, &token) || token < 1 ||
3116c07d424dSDavid Dillow 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3117e0bda7d8SBart Van Assche 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3118e0bda7d8SBart Van Assche 					p);
3119c07d424dSDavid Dillow 				goto out;
3120c07d424dSDavid Dillow 			}
3121c07d424dSDavid Dillow 			target->sg_tablesize = token;
3122c07d424dSDavid Dillow 			break;
3123c07d424dSDavid Dillow 
31244b5e5f41SBart Van Assche 		case SRP_OPT_COMP_VECTOR:
31254b5e5f41SBart Van Assche 			if (match_int(args, &token) || token < 0) {
31264b5e5f41SBart Van Assche 				pr_warn("bad comp_vector parameter '%s'\n", p);
31274b5e5f41SBart Van Assche 				goto out;
31284b5e5f41SBart Van Assche 			}
31294b5e5f41SBart Van Assche 			target->comp_vector = token;
31304b5e5f41SBart Van Assche 			break;
31314b5e5f41SBart Van Assche 
31327bb312e4SVu Pham 		case SRP_OPT_TL_RETRY_COUNT:
31337bb312e4SVu Pham 			if (match_int(args, &token) || token < 2 || token > 7) {
31347bb312e4SVu Pham 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
31357bb312e4SVu Pham 					p);
31367bb312e4SVu Pham 				goto out;
31377bb312e4SVu Pham 			}
31387bb312e4SVu Pham 			target->tl_retry_count = token;
31397bb312e4SVu Pham 			break;
31407bb312e4SVu Pham 
3141aef9ec39SRoland Dreier 		default:
3142e0bda7d8SBart Van Assche 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3143e0bda7d8SBart Van Assche 				p);
3144aef9ec39SRoland Dreier 			goto out;
3145aef9ec39SRoland Dreier 		}
3146aef9ec39SRoland Dreier 	}
3147aef9ec39SRoland Dreier 
3148aef9ec39SRoland Dreier 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3149aef9ec39SRoland Dreier 		ret = 0;
3150aef9ec39SRoland Dreier 	else
3151aef9ec39SRoland Dreier 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3152aef9ec39SRoland Dreier 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3153aef9ec39SRoland Dreier 			    !(srp_opt_tokens[i].token & opt_mask))
3154e0bda7d8SBart Van Assche 				pr_warn("target creation request is missing parameter '%s'\n",
3155aef9ec39SRoland Dreier 					srp_opt_tokens[i].pattern);
3156aef9ec39SRoland Dreier 
31574d73f95fSBart Van Assche 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
31584d73f95fSBart Van Assche 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
31594d73f95fSBart Van Assche 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
31604d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun,
31614d73f95fSBart Van Assche 			target->scsi_host->can_queue);
31624d73f95fSBart Van Assche 
3163aef9ec39SRoland Dreier out:
3164aef9ec39SRoland Dreier 	kfree(options);
3165aef9ec39SRoland Dreier 	return ret;
3166aef9ec39SRoland Dreier }
3167aef9ec39SRoland Dreier 
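/*
 * srp_create_target() - "add_target" sysfs store callback.
 *
 * Allocates a SCSI host, parses the target description in @buf, creates
 * and connects one or more RDMA channels spread over the online NUMA nodes
 * and CPUs, and finally registers the new SCSI host.  Returns @count on
 * success or a negative errno on failure.
 */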
3168ee959b00STony Jones static ssize_t srp_create_target(struct device *dev,
3169ee959b00STony Jones 				 struct device_attribute *attr,
3170aef9ec39SRoland Dreier 				 const char *buf, size_t count)
3171aef9ec39SRoland Dreier {
3172aef9ec39SRoland Dreier 	struct srp_host *host =
3173ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
3174aef9ec39SRoland Dreier 	struct Scsi_Host *target_host;
3175aef9ec39SRoland Dreier 	struct srp_target_port *target;
3176509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
3177d1b4289eSBart Van Assche 	struct srp_device *srp_dev = host->srp_dev;
3178d1b4289eSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
3179d92c0da7SBart Van Assche 	int ret, node_idx, node, cpu, i;
3180d92c0da7SBart Van Assche 	bool multich = false;
3181aef9ec39SRoland Dreier 
3182aef9ec39SRoland Dreier 	target_host = scsi_host_alloc(&srp_template,
3183aef9ec39SRoland Dreier 				      sizeof (struct srp_target_port));
3184aef9ec39SRoland Dreier 	if (!target_host)
3185aef9ec39SRoland Dreier 		return -ENOMEM;
3186aef9ec39SRoland Dreier 
31873236822bSFUJITA Tomonori 	target_host->transportt  = ib_srp_transport_template;
3188fd1b6c4aSBart Van Assche 	target_host->max_channel = 0;
3189fd1b6c4aSBart Van Assche 	target_host->max_id      = 1;
3190985aa495SBart Van Assche 	target_host->max_lun     = -1LL;
31913c8edf0eSArne Redlich 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
31925f068992SRoland Dreier 
3193aef9ec39SRoland Dreier 	target = host_to_target(target_host);
3194aef9ec39SRoland Dreier 
31950c0450dbSRamachandra K 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3196aef9ec39SRoland Dreier 	target->scsi_host	= target_host;
3197aef9ec39SRoland Dreier 	target->srp_host	= host;
3198e6bf5f48SJason Gunthorpe 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
319903f6fb93SBart Van Assche 	target->global_mr	= host->srp_dev->global_mr;
320049248644SDavid Dillow 	target->cmd_sg_cnt	= cmd_sg_entries;
3201c07d424dSDavid Dillow 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3202c07d424dSDavid Dillow 	target->allow_ext_sg	= allow_ext_sg;
32037bb312e4SVu Pham 	target->tl_retry_count	= 7;
32044d73f95fSBart Van Assche 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3205aef9ec39SRoland Dreier 
320634aa654eSBart Van Assche 	/*
320734aa654eSBart Van Assche 	 * Prevent the SCSI host from being removed by srp_remove_target()
320834aa654eSBart Van Assche 	 * before this function returns.
320934aa654eSBart Van Assche 	 */
321034aa654eSBart Van Assche 	scsi_host_get(target->scsi_host);
321134aa654eSBart Van Assche 
32122d7091bcSBart Van Assche 	mutex_lock(&host->add_target_mutex);
32132d7091bcSBart Van Assche 
3214aef9ec39SRoland Dreier 	ret = srp_parse_options(buf, target);
3215aef9ec39SRoland Dreier 	if (ret)
3216fb49c8bbSBart Van Assche 		goto out;
3217aef9ec39SRoland Dreier 
32184d73f95fSBart Van Assche 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
32194d73f95fSBart Van Assche 
322096fc248aSBart Van Assche 	if (!srp_conn_unique(target->srp_host, target)) {
322196fc248aSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
322296fc248aSBart Van Assche 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
322396fc248aSBart Van Assche 			     be64_to_cpu(target->id_ext),
322496fc248aSBart Van Assche 			     be64_to_cpu(target->ioc_guid),
322596fc248aSBart Van Assche 			     be64_to_cpu(target->initiator_ext));
322696fc248aSBart Van Assche 		ret = -EEXIST;
3227fb49c8bbSBart Van Assche 		goto out;
322896fc248aSBart Van Assche 	}
322996fc248aSBart Van Assche 
32305cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3231c07d424dSDavid Dillow 	    target->cmd_sg_cnt < target->sg_tablesize) {
32325cfb1782SBart Van Assche 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3233c07d424dSDavid Dillow 		target->sg_tablesize = target->cmd_sg_cnt;
3234c07d424dSDavid Dillow 	}
3235c07d424dSDavid Dillow 
3236c07d424dSDavid Dillow 	target_host->sg_tablesize = target->sg_tablesize;
3237fa9863f8SBart Van Assche 	target->mr_pool_size = target->scsi_host->can_queue;
3238c07d424dSDavid Dillow 	target->indirect_size = target->sg_tablesize *
3239c07d424dSDavid Dillow 				sizeof (struct srp_direct_buf);
324049248644SDavid Dillow 	target->max_iu_len = sizeof (struct srp_cmd) +
324149248644SDavid Dillow 			     sizeof (struct srp_indirect_buf) +
324249248644SDavid Dillow 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
324349248644SDavid Dillow 
3244c1120f89SBart Van Assche 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3245ef6c49d8SBart Van Assche 	INIT_WORK(&target->remove_work, srp_remove_work);
32468f26c9ffSDavid Dillow 	spin_lock_init(&target->lock);
324755ee3ab2SMatan Barak 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
32482088ca66SSagi Grimberg 	if (ret)
3249fb49c8bbSBart Van Assche 		goto out;
3250d92c0da7SBart Van Assche 
3251d92c0da7SBart Van Assche 	ret = -ENOMEM;
3252d92c0da7SBart Van Assche 	target->ch_count = max_t(unsigned, num_online_nodes(),
3253d92c0da7SBart Van Assche 				 min(ch_count ? :
3254d92c0da7SBart Van Assche 				     min(4 * num_online_nodes(),
3255d92c0da7SBart Van Assche 					 ibdev->num_comp_vectors),
3256d92c0da7SBart Van Assche 				     num_online_cpus()));
3257d92c0da7SBart Van Assche 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3258d92c0da7SBart Van Assche 			     GFP_KERNEL);
3259d92c0da7SBart Van Assche 	if (!target->ch)
3260fb49c8bbSBart Van Assche 		goto out;
3261d92c0da7SBart Van Assche 
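	/*
	 * Spread the channels over the online NUMA nodes and, within each
	 * node, over that node's online CPUs.  Each channel's completion
	 * vector is taken from the node's slice of the device's completion
	 * vectors, rotated by the comp_vector login parameter.
	 */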
3262d92c0da7SBart Van Assche 	node_idx = 0;
3263d92c0da7SBart Van Assche 	for_each_online_node(node) {
3264d92c0da7SBart Van Assche 		const int ch_start = (node_idx * target->ch_count /
3265d92c0da7SBart Van Assche 				      num_online_nodes());
3266d92c0da7SBart Van Assche 		const int ch_end = ((node_idx + 1) * target->ch_count /
3267d92c0da7SBart Van Assche 				    num_online_nodes());
3268d92c0da7SBart Van Assche 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3269d92c0da7SBart Van Assche 				      num_online_nodes() + target->comp_vector)
3270d92c0da7SBart Van Assche 				     % ibdev->num_comp_vectors;
3271d92c0da7SBart Van Assche 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3272d92c0da7SBart Van Assche 				    num_online_nodes() + target->comp_vector)
3273d92c0da7SBart Van Assche 				   % ibdev->num_comp_vectors;
3274d92c0da7SBart Van Assche 		int cpu_idx = 0;
3275d92c0da7SBart Van Assche 
3276d92c0da7SBart Van Assche 		for_each_online_cpu(cpu) {
3277d92c0da7SBart Van Assche 			if (cpu_to_node(cpu) != node)
3278d92c0da7SBart Van Assche 				continue;
3279d92c0da7SBart Van Assche 			if (ch_start + cpu_idx >= ch_end)
3280d92c0da7SBart Van Assche 				continue;
3281d92c0da7SBart Van Assche 			ch = &target->ch[ch_start + cpu_idx];
3282d92c0da7SBart Van Assche 			ch->target = target;
3283d92c0da7SBart Van Assche 			ch->comp_vector = cv_start == cv_end ? cv_start :
3284d92c0da7SBart Van Assche 				cv_start + cpu_idx % (cv_end - cv_start);
3285d92c0da7SBart Van Assche 			spin_lock_init(&ch->lock);
3286d92c0da7SBart Van Assche 			INIT_LIST_HEAD(&ch->free_tx);
3287d92c0da7SBart Van Assche 			ret = srp_new_cm_id(ch);
3288d92c0da7SBart Van Assche 			if (ret)
3289d92c0da7SBart Van Assche 				goto err_disconnect;
3290aef9ec39SRoland Dreier 
3291509c07bcSBart Van Assche 			ret = srp_create_ch_ib(ch);
3292aef9ec39SRoland Dreier 			if (ret)
3293d92c0da7SBart Van Assche 				goto err_disconnect;
3294aef9ec39SRoland Dreier 
3295d92c0da7SBart Van Assche 			ret = srp_alloc_req_data(ch);
32969fe4bcf4SDavid Dillow 			if (ret)
3297d92c0da7SBart Van Assche 				goto err_disconnect;
3298aef9ec39SRoland Dreier 
3299d92c0da7SBart Van Assche 			ret = srp_connect_ch(ch, multich);
3300aef9ec39SRoland Dreier 			if (ret) {
33017aa54bd7SDavid Dillow 				shost_printk(KERN_ERR, target->scsi_host,
3302d92c0da7SBart Van Assche 					     PFX "Connection %d/%d failed\n",
3303d92c0da7SBart Van Assche 					     ch_start + cpu_idx,
3304d92c0da7SBart Van Assche 					     target->ch_count);
3305d92c0da7SBart Van Assche 				if (node_idx == 0 && cpu_idx == 0) {
3306d92c0da7SBart Van Assche 					goto err_disconnect;
3307d92c0da7SBart Van Assche 				} else {
3308d92c0da7SBart Van Assche 					srp_free_ch_ib(target, ch);
3309d92c0da7SBart Van Assche 					srp_free_req_data(target, ch);
3310d92c0da7SBart Van Assche 					target->ch_count = ch - target->ch;
3311c257ea6fSBart Van Assche 					goto connected;
3312aef9ec39SRoland Dreier 				}
3313d92c0da7SBart Van Assche 			}
3314d92c0da7SBart Van Assche 
3315d92c0da7SBart Van Assche 			multich = true;
3316d92c0da7SBart Van Assche 			cpu_idx++;
3317d92c0da7SBart Van Assche 		}
3318d92c0da7SBart Van Assche 		node_idx++;
3319d92c0da7SBart Van Assche 	}
3320d92c0da7SBart Van Assche 
3321c257ea6fSBart Van Assche connected:
3322d92c0da7SBart Van Assche 	target->scsi_host->nr_hw_queues = target->ch_count;
3323aef9ec39SRoland Dreier 
3324aef9ec39SRoland Dreier 	ret = srp_add_target(host, target);
3325aef9ec39SRoland Dreier 	if (ret)
3326aef9ec39SRoland Dreier 		goto err_disconnect;
3327aef9ec39SRoland Dreier 
332834aa654eSBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
3329e7ffde01SBart Van Assche 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3330e7ffde01SBart Van Assche 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3331e7ffde01SBart Van Assche 			     be64_to_cpu(target->id_ext),
3332e7ffde01SBart Van Assche 			     be64_to_cpu(target->ioc_guid),
3333747fe000SBart Van Assche 			     be16_to_cpu(target->pkey),
3334e7ffde01SBart Van Assche 			     be64_to_cpu(target->service_id),
3335747fe000SBart Van Assche 			     target->sgid.raw, target->orig_dgid.raw);
333634aa654eSBart Van Assche 	}
3337e7ffde01SBart Van Assche 
33382d7091bcSBart Van Assche 	ret = count;
33392d7091bcSBart Van Assche 
33402d7091bcSBart Van Assche out:
33412d7091bcSBart Van Assche 	mutex_unlock(&host->add_target_mutex);
334234aa654eSBart Van Assche 
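	/*
	 * Drop the reference taken by scsi_host_get() above.  If target
	 * creation failed, also drop the reference obtained from
	 * scsi_host_alloc() so that the SCSI host is freed.
	 */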
334334aa654eSBart Van Assche 	scsi_host_put(target->scsi_host);
3344bc44bd1dSBart Van Assche 	if (ret < 0)
3345bc44bd1dSBart Van Assche 		scsi_host_put(target->scsi_host);
334634aa654eSBart Van Assche 
33472d7091bcSBart Van Assche 	return ret;
3348aef9ec39SRoland Dreier 
3349aef9ec39SRoland Dreier err_disconnect:
3350aef9ec39SRoland Dreier 	srp_disconnect_target(target);
3351aef9ec39SRoland Dreier 
3352d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
3353d92c0da7SBart Van Assche 		ch = &target->ch[i];
3354509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
3355509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
3356d92c0da7SBart Van Assche 	}
3357d92c0da7SBart Van Assche 
3358d92c0da7SBart Van Assche 	kfree(target->ch);
33592d7091bcSBart Van Assche 	goto out;
3360aef9ec39SRoland Dreier }
3361aef9ec39SRoland Dreier 
3362ee959b00STony Jones static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3363aef9ec39SRoland Dreier 
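/* sysfs attributes reporting the underlying IB device name and port number. */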
3364ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3365ee959b00STony Jones 			  char *buf)
3366aef9ec39SRoland Dreier {
3367ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3368aef9ec39SRoland Dreier 
336905321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3370aef9ec39SRoland Dreier }
3371aef9ec39SRoland Dreier 
3372ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3373aef9ec39SRoland Dreier 
3374ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3375ee959b00STony Jones 			 char *buf)
3376aef9ec39SRoland Dreier {
3377ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3378aef9ec39SRoland Dreier 
3379aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3380aef9ec39SRoland Dreier }
3381aef9ec39SRoland Dreier 
3382ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3383aef9ec39SRoland Dreier 
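/*
 * srp_add_port() - allocate an srp_host for one HCA port and register its
 * "srp-<device>-<port>" class device along with the add_target, ibdev and
 * port sysfs attributes.
 */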
3384f5358a17SRoland Dreier static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3385aef9ec39SRoland Dreier {
3386aef9ec39SRoland Dreier 	struct srp_host *host;
3387aef9ec39SRoland Dreier 
3388aef9ec39SRoland Dreier 	host = kzalloc(sizeof *host, GFP_KERNEL);
3389aef9ec39SRoland Dreier 	if (!host)
3390aef9ec39SRoland Dreier 		return NULL;
3391aef9ec39SRoland Dreier 
3392aef9ec39SRoland Dreier 	INIT_LIST_HEAD(&host->target_list);
3393b3589fd4SMatthew Wilcox 	spin_lock_init(&host->target_lock);
3394aef9ec39SRoland Dreier 	init_completion(&host->released);
33952d7091bcSBart Van Assche 	mutex_init(&host->add_target_mutex);
339605321937SGreg Kroah-Hartman 	host->srp_dev = device;
3397aef9ec39SRoland Dreier 	host->port = port;
3398aef9ec39SRoland Dreier 
3399ee959b00STony Jones 	host->dev.class = &srp_class;
3400ee959b00STony Jones 	host->dev.parent = device->dev->dma_device;
3401d927e38cSKay Sievers 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3402aef9ec39SRoland Dreier 
3403ee959b00STony Jones 	if (device_register(&host->dev))
3404f5358a17SRoland Dreier 		goto free_host;
3405ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_add_target))
3406aef9ec39SRoland Dreier 		goto err_class;
3407ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3408aef9ec39SRoland Dreier 		goto err_class;
3409ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_port))
3410aef9ec39SRoland Dreier 		goto err_class;
3411aef9ec39SRoland Dreier 
3412aef9ec39SRoland Dreier 	return host;
3413aef9ec39SRoland Dreier 
3414aef9ec39SRoland Dreier err_class:
3415ee959b00STony Jones 	device_unregister(&host->dev);
3416aef9ec39SRoland Dreier 
3417f5358a17SRoland Dreier free_host:
3418aef9ec39SRoland Dreier 	kfree(host);
3419aef9ec39SRoland Dreier 
3420aef9ec39SRoland Dreier 	return NULL;
3421aef9ec39SRoland Dreier }
3422aef9ec39SRoland Dreier 
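/*
 * srp_add_one() - IB client "add" callback, invoked once per RDMA device.
 *
 * Detects FMR and fast registration support, derives the MR page size and
 * per-MR limits from the device attributes, allocates a PD (plus a global
 * DMA MR when register_always is not set or neither registration method is
 * available) and creates one srp_host per port.
 */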
3423aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device)
3424aef9ec39SRoland Dreier {
3425f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3426aef9ec39SRoland Dreier 	struct srp_host *host;
34274139032bSHal Rosenstock 	int mr_page_shift, p;
342852ede08fSBart Van Assche 	u64 max_pages_per_mr;
3429aef9ec39SRoland Dreier 
3430f5358a17SRoland Dreier 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3431f5358a17SRoland Dreier 	if (!srp_dev)
34324a061b28SOr Gerlitz 		return;
3433f5358a17SRoland Dreier 
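	/*
	 * Determine which memory registration methods this HCA supports;
	 * when both FMR and fast registration (FR) are available, the
	 * prefer_fr module parameter selects FR over FMR.
	 */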
3434d1b4289eSBart Van Assche 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3435d1b4289eSBart Van Assche 			    device->map_phys_fmr && device->unmap_fmr);
34364a061b28SOr Gerlitz 	srp_dev->has_fr = (device->attrs.device_cap_flags &
34375cfb1782SBart Van Assche 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
34385cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
34395cfb1782SBart Van Assche 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
34405cfb1782SBart Van Assche 
34415cfb1782SBart Van Assche 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
34425cfb1782SBart Van Assche 				 (!srp_dev->has_fmr || prefer_fr));
3443002f1567SBart Van Assche 	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3444d1b4289eSBart Van Assche 
3445f5358a17SRoland Dreier 	/*
3446f5358a17SRoland Dreier 	 * Use the smallest page size supported by the HCA, down to a
34478f26c9ffSDavid Dillow 	 * minimum of 4096 bytes. We're unlikely to build large sglists
34488f26c9ffSDavid Dillow 	 * out of smaller entries.
3449f5358a17SRoland Dreier 	 */
34504a061b28SOr Gerlitz 	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
345152ede08fSBart Van Assche 	srp_dev->mr_page_size	= 1 << mr_page_shift;
345252ede08fSBart Van Assche 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
34534a061b28SOr Gerlitz 	max_pages_per_mr	= device->attrs.max_mr_size;
345452ede08fSBart Van Assche 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
345552ede08fSBart Van Assche 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
345652ede08fSBart Van Assche 					  max_pages_per_mr);
34575cfb1782SBart Van Assche 	if (srp_dev->use_fast_reg) {
34585cfb1782SBart Van Assche 		srp_dev->max_pages_per_mr =
34595cfb1782SBart Van Assche 			min_t(u32, srp_dev->max_pages_per_mr,
34604a061b28SOr Gerlitz 			      device->attrs.max_fast_reg_page_list_len);
34615cfb1782SBart Van Assche 	}
346252ede08fSBart Van Assche 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
346352ede08fSBart Van Assche 				   srp_dev->max_pages_per_mr;
34644a061b28SOr Gerlitz 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
34654a061b28SOr Gerlitz 		 device->name, mr_page_shift, device->attrs.max_mr_size,
34664a061b28SOr Gerlitz 		 device->attrs.max_fast_reg_page_list_len,
346752ede08fSBart Van Assche 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3468f5358a17SRoland Dreier 
3469f5358a17SRoland Dreier 	INIT_LIST_HEAD(&srp_dev->dev_list);
3470f5358a17SRoland Dreier 
3471f5358a17SRoland Dreier 	srp_dev->dev = device;
3472f5358a17SRoland Dreier 	srp_dev->pd  = ib_alloc_pd(device);
3473f5358a17SRoland Dreier 	if (IS_ERR(srp_dev->pd))
3474f5358a17SRoland Dreier 		goto free_dev;
3475f5358a17SRoland Dreier 
347603f6fb93SBart Van Assche 	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
347703f6fb93SBart Van Assche 		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3478f5358a17SRoland Dreier 						   IB_ACCESS_LOCAL_WRITE |
3479f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_READ |
3480f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_WRITE);
348103f6fb93SBart Van Assche 		if (IS_ERR(srp_dev->global_mr))
3482f5358a17SRoland Dreier 			goto err_pd;
348303f6fb93SBart Van Assche 	} else {
348403f6fb93SBart Van Assche 		srp_dev->global_mr = NULL;
348503f6fb93SBart Van Assche 	}
3486f5358a17SRoland Dreier 
34874139032bSHal Rosenstock 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3488f5358a17SRoland Dreier 		host = srp_add_port(srp_dev, p);
3489aef9ec39SRoland Dreier 		if (host)
3490f5358a17SRoland Dreier 			list_add_tail(&host->list, &srp_dev->dev_list);
3491aef9ec39SRoland Dreier 	}
3492aef9ec39SRoland Dreier 
3493f5358a17SRoland Dreier 	ib_set_client_data(device, &srp_client, srp_dev);
34944a061b28SOr Gerlitz 	return;
3495f5358a17SRoland Dreier 
3496f5358a17SRoland Dreier err_pd:
3497f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3498f5358a17SRoland Dreier 
3499f5358a17SRoland Dreier free_dev:
3500f5358a17SRoland Dreier 	kfree(srp_dev);
3501aef9ec39SRoland Dreier }
3502aef9ec39SRoland Dreier 
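/*
 * srp_remove_one() - IB client "remove" callback.
 *
 * Unregisters every srp_host of the departing device, queues removal of all
 * of its target ports, waits for the removal and transport-error work to
 * finish and then releases the global MR and PD.
 */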
35037c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data)
3504aef9ec39SRoland Dreier {
3505f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3506aef9ec39SRoland Dreier 	struct srp_host *host, *tmp_host;
3507ef6c49d8SBart Van Assche 	struct srp_target_port *target;
3508aef9ec39SRoland Dreier 
35097c1eb45aSHaggai Eran 	srp_dev = client_data;
35101fe0cb84SDotan Barak 	if (!srp_dev)
35111fe0cb84SDotan Barak 		return;
3512aef9ec39SRoland Dreier 
3513f5358a17SRoland Dreier 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3514ee959b00STony Jones 		device_unregister(&host->dev);
3515aef9ec39SRoland Dreier 		/*
3516aef9ec39SRoland Dreier 		 * Wait for the sysfs entry to go away, so that no new
3517aef9ec39SRoland Dreier 		 * target ports can be created.
3518aef9ec39SRoland Dreier 		 */
3519aef9ec39SRoland Dreier 		wait_for_completion(&host->released);
3520aef9ec39SRoland Dreier 
3521aef9ec39SRoland Dreier 		/*
3522ef6c49d8SBart Van Assche 		 * Remove all target ports.
3523aef9ec39SRoland Dreier 		 */
3524b3589fd4SMatthew Wilcox 		spin_lock(&host->target_lock);
3525ef6c49d8SBart Van Assche 		list_for_each_entry(target, &host->target_list, list)
3526ef6c49d8SBart Van Assche 			srp_queue_remove_work(target);
3527b3589fd4SMatthew Wilcox 		spin_unlock(&host->target_lock);
3528aef9ec39SRoland Dreier 
3529aef9ec39SRoland Dreier 		/*
3530bcc05910SBart Van Assche 		 * Wait for tl_err and target port removal tasks.
3531aef9ec39SRoland Dreier 		 */
3532ef6c49d8SBart Van Assche 		flush_workqueue(system_long_wq);
3533bcc05910SBart Van Assche 		flush_workqueue(srp_remove_wq);
3534aef9ec39SRoland Dreier 
3535aef9ec39SRoland Dreier 		kfree(host);
3536aef9ec39SRoland Dreier 	}
3537aef9ec39SRoland Dreier 
353803f6fb93SBart Van Assche 	if (srp_dev->global_mr)
353903f6fb93SBart Van Assche 		ib_dereg_mr(srp_dev->global_mr);
3540f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3541f5358a17SRoland Dreier 
3542f5358a17SRoland Dreier 	kfree(srp_dev);
3543aef9ec39SRoland Dreier }
3544aef9ec39SRoland Dreier 
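/*
 * Callbacks plugged into the SRP transport class (scsi_transport_srp):
 * rport state handling, the reconnect/fast_io_fail/dev_loss timeouts and
 * the reconnect and I/O termination handlers used after transport errors.
 */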
35453236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = {
3546ed9b2264SBart Van Assche 	.has_rport_state	 = true,
3547ed9b2264SBart Van Assche 	.reset_timer_if_blocked	 = true,
3548a95cadb9SBart Van Assche 	.reconnect_delay	 = &srp_reconnect_delay,
3549ed9b2264SBart Van Assche 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3550ed9b2264SBart Van Assche 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3551ed9b2264SBart Van Assche 	.reconnect		 = srp_rport_reconnect,
3552dc1bdbd9SBart Van Assche 	.rport_delete		 = srp_rport_delete,
3553ed9b2264SBart Van Assche 	.terminate_rport_io	 = srp_terminate_io,
35543236822bSFUJITA Tomonori };
35553236822bSFUJITA Tomonori 
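/*
 * Module initialization: validate the scatter/gather module parameters,
 * create the srp_remove workqueue, attach the SRP transport class, register
 * the infiniband_srp device class and the SA client, and finally register
 * the IB client so that srp_add_one() runs for every existing RDMA device.
 */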
3556aef9ec39SRoland Dreier static int __init srp_init_module(void)
3557aef9ec39SRoland Dreier {
3558aef9ec39SRoland Dreier 	int ret;
3559aef9ec39SRoland Dreier 
356049248644SDavid Dillow 	if (srp_sg_tablesize) {
3561e0bda7d8SBart Van Assche 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
356249248644SDavid Dillow 		if (!cmd_sg_entries)
356349248644SDavid Dillow 			cmd_sg_entries = srp_sg_tablesize;
356449248644SDavid Dillow 	}
356549248644SDavid Dillow 
356649248644SDavid Dillow 	if (!cmd_sg_entries)
356749248644SDavid Dillow 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
356849248644SDavid Dillow 
356949248644SDavid Dillow 	if (cmd_sg_entries > 255) {
3570e0bda7d8SBart Van Assche 		pr_warn("Clamping cmd_sg_entries to 255\n");
357149248644SDavid Dillow 		cmd_sg_entries = 255;
35721e89a194SDavid Dillow 	}
35731e89a194SDavid Dillow 
3574c07d424dSDavid Dillow 	if (!indirect_sg_entries)
3575c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3576c07d424dSDavid Dillow 	else if (indirect_sg_entries < cmd_sg_entries) {
3577e0bda7d8SBart Van Assche 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3578e0bda7d8SBart Van Assche 			cmd_sg_entries);
3579c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3580c07d424dSDavid Dillow 	}
3581c07d424dSDavid Dillow 
3582bcc05910SBart Van Assche 	srp_remove_wq = create_workqueue("srp_remove");
3583da05be29SWei Yongjun 	if (!srp_remove_wq) {
3584da05be29SWei Yongjun 		ret = -ENOMEM;
3585bcc05910SBart Van Assche 		goto out;
3586bcc05910SBart Van Assche 	}
3587bcc05910SBart Van Assche 
3588bcc05910SBart Van Assche 	ret = -ENOMEM;
35893236822bSFUJITA Tomonori 	ib_srp_transport_template =
35903236822bSFUJITA Tomonori 		srp_attach_transport(&ib_srp_transport_functions);
35913236822bSFUJITA Tomonori 	if (!ib_srp_transport_template)
3592bcc05910SBart Van Assche 		goto destroy_wq;
35933236822bSFUJITA Tomonori 
3594aef9ec39SRoland Dreier 	ret = class_register(&srp_class);
3595aef9ec39SRoland Dreier 	if (ret) {
3596e0bda7d8SBart Van Assche 		pr_err("couldn't register class infiniband_srp\n");
3597bcc05910SBart Van Assche 		goto release_tr;
3598aef9ec39SRoland Dreier 	}
3599aef9ec39SRoland Dreier 
3600c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&srp_sa_client);
3601c1a0b23bSMichael S. Tsirkin 
3602aef9ec39SRoland Dreier 	ret = ib_register_client(&srp_client);
3603aef9ec39SRoland Dreier 	if (ret) {
3604e0bda7d8SBart Van Assche 		pr_err("couldn't register IB client\n");
3605bcc05910SBart Van Assche 		goto unreg_sa;
3606aef9ec39SRoland Dreier 	}
3607aef9ec39SRoland Dreier 
3608bcc05910SBart Van Assche out:
3609bcc05910SBart Van Assche 	return ret;
3610bcc05910SBart Van Assche 
3611bcc05910SBart Van Assche unreg_sa:
3612bcc05910SBart Van Assche 	ib_sa_unregister_client(&srp_sa_client);
3613bcc05910SBart Van Assche 	class_unregister(&srp_class);
3614bcc05910SBart Van Assche 
3615bcc05910SBart Van Assche release_tr:
3616bcc05910SBart Van Assche 	srp_release_transport(ib_srp_transport_template);
3617bcc05910SBart Van Assche 
3618bcc05910SBart Van Assche destroy_wq:
3619bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3620bcc05910SBart Van Assche 	goto out;
3621aef9ec39SRoland Dreier }
3622aef9ec39SRoland Dreier 
3623aef9ec39SRoland Dreier static void __exit srp_cleanup_module(void)
3624aef9ec39SRoland Dreier {
3625aef9ec39SRoland Dreier 	ib_unregister_client(&srp_client);
3626c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&srp_sa_client);
3627aef9ec39SRoland Dreier 	class_unregister(&srp_class);
36283236822bSFUJITA Tomonori 	srp_release_transport(ib_srp_transport_template);
3629bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3630aef9ec39SRoland Dreier }
3631aef9ec39SRoland Dreier 
3632aef9ec39SRoland Dreier module_init(srp_init_module);
3633aef9ec39SRoland Dreier module_exit(srp_cleanup_module);
3634