xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision 52bb8c626e0e5526c72b6ad17f1381f0bad283cc)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
4356b5390cSBart Van Assche #include <rdma/ib_cache.h>
44aef9ec39SRoland Dreier 
4560063497SArun Sharma #include <linux/atomic.h>
46aef9ec39SRoland Dreier 
47aef9ec39SRoland Dreier #include <scsi/scsi.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
5071444b97SJack Wang #include <scsi/scsi_tcq.h>
51aef9ec39SRoland Dreier #include <scsi/srp.h>
523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
53aef9ec39SRoland Dreier 
54aef9ec39SRoland Dreier #include "ib_srp.h"
55aef9ec39SRoland Dreier 
56aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
57aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
58713ef24eSBart Van Assche #define DRV_VERSION	"2.0"
59713ef24eSBart Van Assche #define DRV_RELDATE	"July 26, 2015"
60aef9ec39SRoland Dreier 
61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION);
6533ab3e5bSBart Van Assche MODULE_INFO(release_date, DRV_RELDATE);
66aef9ec39SRoland Dreier 
6749248644SDavid Dillow static unsigned int srp_sg_tablesize;
6849248644SDavid Dillow static unsigned int cmd_sg_entries;
69c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
70c07d424dSDavid Dillow static bool allow_ext_sg;
7103f6fb93SBart Van Assche static bool prefer_fr = true;
7203f6fb93SBart Van Assche static bool register_always = true;
73c222a39fSBart Van Assche static bool never_register;
74aef9ec39SRoland Dreier static int topspin_workarounds = 1;
75aef9ec39SRoland Dreier 
7649248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7749248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7849248644SDavid Dillow 
7949248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
8049248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
8149248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
8249248644SDavid Dillow 
83c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
84c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
8565e8617fSMing Lin 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
86c07d424dSDavid Dillow 
87c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
88c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
89c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
90c07d424dSDavid Dillow 
91aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
92aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
93aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
94aef9ec39SRoland Dreier 
955cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
965cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
975cfb1782SBart Van Assche "Whether to use fast registration if both FMR and fast registration are supported");
985cfb1782SBart Van Assche 
99b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
100b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
101b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
102b1b8854dSBart Van Assche 
103c222a39fSBart Van Assche module_param(never_register, bool, 0444);
104c222a39fSBart Van Assche MODULE_PARM_DESC(never_register, "Never register memory");
105c222a39fSBart Van Assche 
1069c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops;
107ed9b2264SBart Van Assche 
108a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
109a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
110a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
111a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
112a95cadb9SBart Van Assche 
113ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
114ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
115ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
116ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
117ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
118ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
119ed9b2264SBart Van Assche 		 " functionality is disabled.");
120ed9b2264SBart Van Assche 
121a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
122ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
123ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
124ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
125ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
126ed9b2264SBart Van Assche 		 " insulate transport layer errors. After this time has been"
127ed9b2264SBart Van Assche 		 " exceeded the SCSI host is removed. Should be"
128ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
129ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
130ed9b2264SBart Van Assche 		 " this functionality is disabled.");
131ed9b2264SBart Van Assche 
132d92c0da7SBart Van Assche static unsigned ch_count;
133d92c0da7SBart Van Assche module_param(ch_count, uint, 0444);
134d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count,
135d92c0da7SBart Van Assche 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
136d92c0da7SBart Van Assche 
137aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
1387c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data);
1391dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1401dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1411dc7b1f1SChristoph Hellwig 		const char *opname);
142aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
143aef9ec39SRoland Dreier 
1443236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
145bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1463236822bSFUJITA Tomonori 
147aef9ec39SRoland Dreier static struct ib_client srp_client = {
148aef9ec39SRoland Dreier 	.name   = "srp",
149aef9ec39SRoland Dreier 	.add    = srp_add_one,
150aef9ec39SRoland Dreier 	.remove = srp_remove_one
151aef9ec39SRoland Dreier };
152aef9ec39SRoland Dreier 
153c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
154c1a0b23bSMichael S. Tsirkin 
155ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
156ed9b2264SBart Van Assche {
157ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
158ed9b2264SBart Van Assche 
159ed9b2264SBart Van Assche 	if (tmo >= 0)
160ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
161ed9b2264SBart Van Assche 	else
162ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
163ed9b2264SBart Van Assche }
164ed9b2264SBart Van Assche 
165ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp)
166ed9b2264SBart Van Assche {
167ed9b2264SBart Van Assche 	int tmo, res;
168ed9b2264SBart Van Assche 
1693fdf70acSSagi Grimberg 	res = srp_parse_tmo(&tmo, val);
170ed9b2264SBart Van Assche 	if (res)
171ed9b2264SBart Van Assche 		goto out;
1723fdf70acSSagi Grimberg 
173a95cadb9SBart Van Assche 	if (kp->arg == &srp_reconnect_delay)
174a95cadb9SBart Van Assche 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
175a95cadb9SBart Van Assche 				    srp_dev_loss_tmo);
176a95cadb9SBart Van Assche 	else if (kp->arg == &srp_fast_io_fail_tmo)
177a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
178ed9b2264SBart Van Assche 	else
179a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
180a95cadb9SBart Van Assche 				    tmo);
181ed9b2264SBart Van Assche 	if (res)
182ed9b2264SBart Van Assche 		goto out;
183ed9b2264SBart Van Assche 	*(int *)kp->arg = tmo;
184ed9b2264SBart Van Assche 
185ed9b2264SBart Van Assche out:
186ed9b2264SBart Van Assche 	return res;
187ed9b2264SBart Van Assche }
188ed9b2264SBart Van Assche 
/*
 * Get/set operations shared by the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters declared above via module_param_cb().
 */
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
193ed9b2264SBart Van Assche 
194aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
195aef9ec39SRoland Dreier {
196aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
197aef9ec39SRoland Dreier }
198aef9ec39SRoland Dreier 
199aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
200aef9ec39SRoland Dreier {
201aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
202aef9ec39SRoland Dreier }
203aef9ec39SRoland Dreier 
2045d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
2055d7cbfd6SRoland Dreier {
2065d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
2073d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
2085d7cbfd6SRoland Dreier 
2095d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2103d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2113d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2125d7cbfd6SRoland Dreier }
2135d7cbfd6SRoland Dreier 
214aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
215aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
216aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
217aef9ec39SRoland Dreier {
218aef9ec39SRoland Dreier 	struct srp_iu *iu;
219aef9ec39SRoland Dreier 
220aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
221aef9ec39SRoland Dreier 	if (!iu)
222aef9ec39SRoland Dreier 		goto out;
223aef9ec39SRoland Dreier 
224aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
225aef9ec39SRoland Dreier 	if (!iu->buf)
226aef9ec39SRoland Dreier 		goto out_free_iu;
227aef9ec39SRoland Dreier 
22805321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22905321937SGreg Kroah-Hartman 				    direction);
23005321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
231aef9ec39SRoland Dreier 		goto out_free_buf;
232aef9ec39SRoland Dreier 
233aef9ec39SRoland Dreier 	iu->size      = size;
234aef9ec39SRoland Dreier 	iu->direction = direction;
235aef9ec39SRoland Dreier 
236aef9ec39SRoland Dreier 	return iu;
237aef9ec39SRoland Dreier 
238aef9ec39SRoland Dreier out_free_buf:
239aef9ec39SRoland Dreier 	kfree(iu->buf);
240aef9ec39SRoland Dreier out_free_iu:
241aef9ec39SRoland Dreier 	kfree(iu);
242aef9ec39SRoland Dreier out:
243aef9ec39SRoland Dreier 	return NULL;
244aef9ec39SRoland Dreier }
245aef9ec39SRoland Dreier 
246aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
247aef9ec39SRoland Dreier {
248aef9ec39SRoland Dreier 	if (!iu)
249aef9ec39SRoland Dreier 		return;
250aef9ec39SRoland Dreier 
25105321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
25205321937SGreg Kroah-Hartman 			    iu->direction);
253aef9ec39SRoland Dreier 	kfree(iu->buf);
254aef9ec39SRoland Dreier 	kfree(iu);
255aef9ec39SRoland Dreier }
256aef9ec39SRoland Dreier 
/*
 * Asynchronous QP event handler.  Events are only logged at debug level;
 * no recovery action is taken here.
 */
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
262aef9ec39SRoland Dreier 
/*
 * Transition a freshly created queue pair to the INIT state: look up the
 * P_Key table index for the target's P_Key on the local port and enable
 * remote read/write access on the QP.
 *
 * Returns 0 on success or a negative error code.
 */
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	/* Translate the 16-bit P_Key value into the port's P_Key table index. */
	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
295aef9ec39SRoland Dreier 
296509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch)
2979fe4bcf4SDavid Dillow {
298509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2999fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
3009fe4bcf4SDavid Dillow 
30105321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
302509c07bcSBart Van Assche 				    srp_cm_handler, ch);
3039fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
3049fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
3059fe4bcf4SDavid Dillow 
306509c07bcSBart Van Assche 	if (ch->cm_id)
307509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
308509c07bcSBart Van Assche 	ch->cm_id = new_cm_id;
309509c07bcSBart Van Assche 	ch->path.sgid = target->sgid;
310509c07bcSBart Van Assche 	ch->path.dgid = target->orig_dgid;
311509c07bcSBart Van Assche 	ch->path.pkey = target->pkey;
312509c07bcSBart Van Assche 	ch->path.service_id = target->service_id;
3139fe4bcf4SDavid Dillow 
3149fe4bcf4SDavid Dillow 	return 0;
3159fe4bcf4SDavid Dillow }
3169fe4bcf4SDavid Dillow 
317d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
318d1b4289eSBart Van Assche {
319d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
320d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
321d1b4289eSBart Van Assche 
322d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
323fa9863f8SBart Van Assche 	fmr_param.pool_size	    = target->mr_pool_size;
324d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
325d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
32652ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
32752ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
328d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
329d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
330d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
331d1b4289eSBart Van Assche 
332d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
333d1b4289eSBart Van Assche }
334d1b4289eSBart Van Assche 
3355cfb1782SBart Van Assche /**
3365cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3375cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3385cfb1782SBart Van Assche  */
3395cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3405cfb1782SBart Van Assche {
3415cfb1782SBart Van Assche 	int i;
3425cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3435cfb1782SBart Van Assche 
3445cfb1782SBart Van Assche 	if (!pool)
3455cfb1782SBart Van Assche 		return;
3465cfb1782SBart Van Assche 
3475cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3485cfb1782SBart Van Assche 		if (d->mr)
3495cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3505cfb1782SBart Van Assche 	}
3515cfb1782SBart Van Assche 	kfree(pool);
3525cfb1782SBart Van Assche }
3535cfb1782SBart Van Assche 
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 *
 * Returns the new pool or an ERR_PTR() on failure (-EINVAL for a
 * non-positive @pool_size, -ENOMEM or the ib_alloc_mr() error otherwise).
 * The caller must release the pool with srp_destroy_fr_pool().
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	/* One allocation: pool header plus @pool_size trailing descriptors. */
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	/* Allocate one memory region per descriptor; all start on free_list. */
	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	/* Frees the MRs allocated so far; NULL d->mr entries are skipped. */
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
4035cfb1782SBart Van Assche 
4045cfb1782SBart Van Assche /**
4055cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
4065cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4075cfb1782SBart Van Assche  */
4085cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4095cfb1782SBart Van Assche {
4105cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4115cfb1782SBart Van Assche 	unsigned long flags;
4125cfb1782SBart Van Assche 
4135cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4145cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4155cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4165cfb1782SBart Van Assche 		list_del(&d->entry);
4175cfb1782SBart Van Assche 	}
4185cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4195cfb1782SBart Van Assche 
4205cfb1782SBart Van Assche 	return d;
4215cfb1782SBart Van Assche }
4225cfb1782SBart Van Assche 
4235cfb1782SBart Van Assche /**
4245cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4255cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4265cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4275cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4285cfb1782SBart Van Assche  *
4295cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4305cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4315cfb1782SBart Van Assche  */
4325cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4335cfb1782SBart Van Assche 			    int n)
4345cfb1782SBart Van Assche {
4355cfb1782SBart Van Assche 	unsigned long flags;
4365cfb1782SBart Van Assche 	int i;
4375cfb1782SBart Van Assche 
4385cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4395cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4405cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4415cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4425cfb1782SBart Van Assche }
4435cfb1782SBart Van Assche 
/*
 * Create a fast registration pool sized for @target.  Pool size and maximum
 * page list length come from the target's mr_pool_size and the device limit
 * computed when the device was added.
 */
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}
4515cfb1782SBart Van Assche 
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.  Note that only the receive queue is drained here;
 * see ib_drain_rq().
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}
4657dad6b2eSBart Van Assche 
/*
 * Create the IB resources of RDMA channel @ch: a receive CQ, a send CQ, an
 * RC queue pair and, depending on the device capabilities, an FR or FMR
 * memory registration pool.  The new resources are created first and only
 * then swapped in; any resources the channel already owned are destroyed
 * before the swap, which makes this function usable for reconnects as well
 * as for initial channel creation.
 *
 * Returns 0 on success or a negative error code.
 */
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	/*
	 * Send work requests per command: one for the command itself plus,
	 * with fast registration, one registration and one invalidation WR
	 * per memory region.
	 */
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	/* Send completions are polled directly (IB_POLL_DIRECT). */
	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	/* Fast registration is preferred over FMR when both are usable. */
	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	/* All new resources exist; tear down the old ones and swap in. */
	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
572aef9ec39SRoland Dreier 
5734d73f95fSBart Van Assche /*
5744d73f95fSBart Van Assche  * Note: this function may be called without srp_alloc_iu_bufs() having been
575509c07bcSBart Van Assche  * invoked. Hence the ch->[rt]x_ring checks.
5764d73f95fSBart Van Assche  */
/*
 * Release the IB resources (CM ID, MR pools, QP, CQs) and the RX/TX IU
 * rings of one RDMA channel. Safe to call on a partially initialized
 * channel: each teardown step is guarded by a NULL check.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	/* A NULL ch->target means this channel was never set up (or was
	 * already freed by a previous call). */
	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	/* Destroy the memory registration pools before the QP they use. */
	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	/* The QP must go before the CQs it is attached to. */
	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	/* The IU rings may be absent; see the comment above this function. */
	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
631aef9ec39SRoland Dreier 
632aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
633aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
634509c07bcSBart Van Assche 				    void *ch_ptr)
635aef9ec39SRoland Dreier {
636509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
637509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
638aef9ec39SRoland Dreier 
639509c07bcSBart Van Assche 	ch->status = status;
640aef9ec39SRoland Dreier 	if (status)
6417aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
6427aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
643aef9ec39SRoland Dreier 	else
644509c07bcSBart Van Assche 		ch->path = *pathrec;
645509c07bcSBart Van Assche 	complete(&ch->done);
646aef9ec39SRoland Dreier }
647aef9ec39SRoland Dreier 
/*
 * Resolve the IB path to the target port for one channel.
 *
 * Submits an asynchronous SA path record query and sleeps interruptibly
 * until srp_path_rec_completion() signals ch->done.
 *
 * Returns 0 on success, a negative value if submitting the query failed
 * or the wait was interrupted, or the (negative) status reported by the
 * completion handler.
 */
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	/* Must be (re)initialized before each query; the completion handler
	 * calls complete(&ch->done). */
	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
683aef9ec39SRoland Dreier 
/*
 * Build and send an SRP_LOGIN_REQ to the target via the IB CM.
 *
 * @multich: selects SRP_MULTICHAN_MULTI instead of SRP_MULTICHAN_SINGLE in
 *	the login request flags, i.e. asks the target not to tear down the
 *	other channels of the same initiator/target pair.
 *
 * Returns the status of ib_send_cm_req() or -ENOMEM.
 */
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	/* PSNs are 24-bit values; mask the random seed down to that range. */
	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
773aef9ec39SRoland Dreier 
774ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
775ef6c49d8SBart Van Assche {
776ef6c49d8SBart Van Assche 	bool changed = false;
777ef6c49d8SBart Van Assche 
778ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
779ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
780ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
781ef6c49d8SBart Van Assche 		changed = true;
782ef6c49d8SBart Van Assche 	}
783ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
784ef6c49d8SBart Van Assche 
785ef6c49d8SBart Van Assche 	if (changed)
786bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
787ef6c49d8SBart Van Assche 
788ef6c49d8SBart Van Assche 	return changed;
789ef6c49d8SBart Van Assche }
790ef6c49d8SBart Van Assche 
791aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
792aef9ec39SRoland Dreier {
793d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
794d92c0da7SBart Van Assche 	int i;
795509c07bcSBart Van Assche 
796aef9ec39SRoland Dreier 	/* XXX should send SRP_I_LOGOUT request */
797aef9ec39SRoland Dreier 
798d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
799d92c0da7SBart Van Assche 		ch = &target->ch[i];
800c014c8cdSBart Van Assche 		ch->connected = false;
801d92c0da7SBart Van Assche 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
8027aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
8037aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
804aef9ec39SRoland Dreier 		}
805294c875aSBart Van Assche 	}
806294c875aSBart Van Assche }
807aef9ec39SRoland Dreier 
/*
 * Free the per-request resources of one channel: the MR lists, the
 * indirect descriptor buffers and their DMA mappings, and finally the
 * request ring itself. Tolerates a partially initialized ring because
 * the ring is zero-allocated and kfree(NULL) is a no-op.
 */
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	/* Nothing to do if srp_alloc_req_data() never ran or failed early. */
	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		/* Fast registration and FMR use different per-request lists. */
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		/* A zero DMA address means the descriptor was never mapped. */
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}
8388f26c9ffSDavid Dillow 
839509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch)
840b81d00bdSBart Van Assche {
841509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
842b81d00bdSBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
843b81d00bdSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
844b81d00bdSBart Van Assche 	struct srp_request *req;
8455cfb1782SBart Van Assche 	void *mr_list;
846b81d00bdSBart Van Assche 	dma_addr_t dma_addr;
847b81d00bdSBart Van Assche 	int i, ret = -ENOMEM;
848b81d00bdSBart Van Assche 
849509c07bcSBart Van Assche 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
850509c07bcSBart Van Assche 			       GFP_KERNEL);
851509c07bcSBart Van Assche 	if (!ch->req_ring)
8524d73f95fSBart Van Assche 		goto out;
8534d73f95fSBart Van Assche 
8544d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
855509c07bcSBart Van Assche 		req = &ch->req_ring[i];
856509c5f33SBart Van Assche 		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
857b81d00bdSBart Van Assche 				  GFP_KERNEL);
8585cfb1782SBart Van Assche 		if (!mr_list)
8595cfb1782SBart Van Assche 			goto out;
8609a21be53SSagi Grimberg 		if (srp_dev->use_fast_reg) {
8615cfb1782SBart Van Assche 			req->fr_list = mr_list;
8629a21be53SSagi Grimberg 		} else {
8635cfb1782SBart Van Assche 			req->fmr_list = mr_list;
86452ede08fSBart Van Assche 			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
865d1b4289eSBart Van Assche 						sizeof(void *), GFP_KERNEL);
8665cfb1782SBart Van Assche 			if (!req->map_page)
8675cfb1782SBart Van Assche 				goto out;
8689a21be53SSagi Grimberg 		}
869b81d00bdSBart Van Assche 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
8705cfb1782SBart Van Assche 		if (!req->indirect_desc)
871b81d00bdSBart Van Assche 			goto out;
872b81d00bdSBart Van Assche 
873b81d00bdSBart Van Assche 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
874b81d00bdSBart Van Assche 					     target->indirect_size,
875b81d00bdSBart Van Assche 					     DMA_TO_DEVICE);
876b81d00bdSBart Van Assche 		if (ib_dma_mapping_error(ibdev, dma_addr))
877b81d00bdSBart Van Assche 			goto out;
878b81d00bdSBart Van Assche 
879b81d00bdSBart Van Assche 		req->indirect_dma_addr = dma_addr;
880b81d00bdSBart Van Assche 	}
881b81d00bdSBart Van Assche 	ret = 0;
882b81d00bdSBart Van Assche 
883b81d00bdSBart Van Assche out:
884b81d00bdSBart Van Assche 	return ret;
885b81d00bdSBart Van Assche }
886b81d00bdSBart Van Assche 
887683b159aSBart Van Assche /**
888683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
889683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
890683b159aSBart Van Assche  *
891683b159aSBart Van Assche  * Note: Any attributes defined in the host template and that did not exist
892683b159aSBart Van Assche  * before invocation of this function will be ignored.
893683b159aSBart Van Assche  */
894683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
895683b159aSBart Van Assche {
896683b159aSBart Van Assche 	struct device_attribute **attr;
897683b159aSBart Van Assche 
898683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
899683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
900683b159aSBart Van Assche }
901683b159aSBart Van Assche 
/*
 * Tear down a target port whose state has already been set to
 * SRP_TARGET_REMOVED: unregister it from the SCSI and SRP transport
 * layers, disconnect and free all channels, and drop the final
 * scsi_host reference. The order of the steps below matters.
 */
static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	/* Pin the rport across the teardown; released by srp_rport_put()
	 * further down. */
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	/* Release the IB resources of every channel first ... */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	/* ... then the per-channel request rings. */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	/* Unlink the target from its host's target list. */
	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
934ee12d6a8SBart Van Assche 
935c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
936aef9ec39SRoland Dreier {
937c4028958SDavid Howells 	struct srp_target_port *target =
938ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
939aef9ec39SRoland Dreier 
940ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
941aef9ec39SRoland Dreier 
94296fc248aSBart Van Assche 	srp_remove_target(target);
943aef9ec39SRoland Dreier }
944aef9ec39SRoland Dreier 
945dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport)
946dc1bdbd9SBart Van Assche {
947dc1bdbd9SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
948dc1bdbd9SBart Van Assche 
949dc1bdbd9SBart Van Assche 	srp_queue_remove_work(target);
950dc1bdbd9SBart Van Assche }
951dc1bdbd9SBart Van Assche 
952c014c8cdSBart Van Assche /**
953c014c8cdSBart Van Assche  * srp_connected_ch() - number of connected channels
954c014c8cdSBart Van Assche  * @target: SRP target port.
955c014c8cdSBart Van Assche  */
956c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target)
957c014c8cdSBart Van Assche {
958c014c8cdSBart Van Assche 	int i, c = 0;
959c014c8cdSBart Van Assche 
960c014c8cdSBart Van Assche 	for (i = 0; i < target->ch_count; i++)
961c014c8cdSBart Van Assche 		c += target->ch[i].connected;
962c014c8cdSBart Van Assche 
963c014c8cdSBart Van Assche 	return c;
964c014c8cdSBart Van Assche }
965c014c8cdSBart Van Assche 
/*
 * Establish a connection for one RDMA channel: resolve the path, then
 * send login requests in a loop, retrying as long as the CM event
 * handler reports a port or LID/QP redirect.
 *
 * Returns 0 on success and a negative value on failure.
 */
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	/* Only the first channel may be connected while no other channel
	 * of the target is. */
	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		/* Sleep until the CM event handler signals ch->done. */
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			/* Redo the path lookup before retrying the login. */
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	/* Map positive ch->status values onto -ENODEV. */
	return ret <= 0 ? ret : -ENODEV;
}
1021aef9ec39SRoland Dreier 
/* Completion callback for the IB_WR_LOCAL_INV work requests posted by
 * srp_inv_rkey(); forwards errors to the common QP error handler. */
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}
10261dc7b1f1SChristoph Hellwig 
10271dc7b1f1SChristoph Hellwig static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
10281dc7b1f1SChristoph Hellwig 		u32 rkey)
10295cfb1782SBart Van Assche {
10305cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
10315cfb1782SBart Van Assche 	struct ib_send_wr wr = {
10325cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
10335cfb1782SBart Van Assche 		.next		    = NULL,
10345cfb1782SBart Van Assche 		.num_sge	    = 0,
10355cfb1782SBart Van Assche 		.send_flags	    = 0,
10365cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
10375cfb1782SBart Van Assche 	};
10385cfb1782SBart Van Assche 
10391dc7b1f1SChristoph Hellwig 	wr.wr_cqe = &req->reg_cqe;
10401dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_inv_rkey_err_done;
1041509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
10425cfb1782SBart Van Assche }
10435cfb1782SBart Van Assche 
/*
 * Undo the memory registrations and DMA mappings associated with the data
 * buffers of a SCSI command: invalidate and return fast-registration
 * descriptors (or unmap FMRs), then unmap the scatterlist.
 */
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	/* Nothing was mapped for commands without a data transfer. */
	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		/* Queue an rkey invalidation for each descriptor used. */
		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
				  "Queueing INV WR for rkey %#x failed (%d)\n",
				  (*pfr)->mr->rkey, res);
				/* Schedule transport error handling if the
				 * invalidate WR cannot even be posted. */
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		/* Return the descriptors to the channel's pool. */
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
1084d945e1dfSRoland Dreier 
108522032991SBart Van Assche /**
108622032991SBart Van Assche  * srp_claim_req - Take ownership of the scmnd associated with a request.
1087509c07bcSBart Van Assche  * @ch: SRP RDMA channel.
108822032991SBart Van Assche  * @req: SRP request.
1089b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership for this SCSI device.
109022032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
109122032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
109222032991SBart Van Assche  *
109322032991SBart Van Assche  * Return value:
109422032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
109522032991SBart Van Assche  */
1096509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
109722032991SBart Van Assche 				       struct srp_request *req,
1098b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
109922032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1100526b4caaSIshai Rabinovitz {
110194a9174cSBart Van Assche 	unsigned long flags;
110294a9174cSBart Van Assche 
1103509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1104b3fe628dSBart Van Assche 	if (req->scmnd &&
1105b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1106b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
110722032991SBart Van Assche 		scmnd = req->scmnd;
110822032991SBart Van Assche 		req->scmnd = NULL;
110922032991SBart Van Assche 	} else {
111022032991SBart Van Assche 		scmnd = NULL;
111122032991SBart Van Assche 	}
1112509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
111322032991SBart Van Assche 
111422032991SBart Van Assche 	return scmnd;
111522032991SBart Van Assche }
111622032991SBart Van Assche 
/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @ch->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	/* Credit the request limit back under the channel lock. */
	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}
1135526b4caaSIshai Rabinovitz 
1136509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1137509c07bcSBart Van Assche 			   struct scsi_device *sdev, int result)
1138526b4caaSIshai Rabinovitz {
1139509c07bcSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
114022032991SBart Van Assche 
114122032991SBart Van Assche 	if (scmnd) {
1142509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd, 0);
1143ed9b2264SBart Van Assche 		scmnd->result = result;
114422032991SBart Van Assche 		scmnd->scsi_done(scmnd);
114522032991SBart Van Assche 	}
1146526b4caaSIshai Rabinovitz }
1147526b4caaSIshai Rabinovitz 
/*
 * SRP transport callback that fails all outstanding requests: every
 * request on every channel is completed with DID_TRANSPORT_FAILFAST.
 */
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}
1174ed9b2264SBart Van Assche 
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 *
 * Returns 0 if all channels reconnected successfully or a negative error
 * code otherwise.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	/* Do not reconnect while the initial target scan is still running. */
	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/* Accumulate failures; ret is checked before connecting below. */
		ret += srp_new_cm_id(ch);
	}
	/* Terminate every outstanding request on every channel. */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		/* Rebuild the TX IU free list from the channel's tx_ring. */
		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	/* Only attempt to connect if all preparation steps succeeded. */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		/* Only the first channel is connected with multich == false. */
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
1243aef9ec39SRoland Dreier 
12448f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
12458f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1246f5358a17SRoland Dreier {
12478f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
12488f26c9ffSDavid Dillow 
12493ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
12503ae95da8SBart Van Assche 
12518f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
12528f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
12538f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
12548f26c9ffSDavid Dillow 
12558f26c9ffSDavid Dillow 	state->total_len += dma_len;
12568f26c9ffSDavid Dillow 	state->desc++;
12578f26c9ffSDavid Dillow 	state->ndesc++;
12588f26c9ffSDavid Dillow }
12598f26c9ffSDavid Dillow 
/*
 * Register the pages accumulated in @state through the channel's FMR pool
 * and append a memory descriptor for the mapped region. Resets the page
 * accumulator (npages / dma_len) afterwards.
 *
 * Returns 0 on success or a negative error code.
 */
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	/* All FMR slots for this request are already in use. */
	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fmr);

	/* Nothing has been accumulated since the last flush. */
	if (state->npages == 0)
		return 0;

	/*
	 * A single page covered by the global rkey does not need an FMR
	 * mapping at all; emit a direct descriptor instead.
	 */
	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* Remember the mapping so srp_unmap_data() can release it later. */
	*state->fmr.next++ = fmr;
	state->nmdesc++;

	/* The descriptor starts at the in-page offset of the first fragment. */
	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}
13008f26c9ffSDavid Dillow 
/*
 * Completion callback for the IB_WR_REG_MR work request posted by
 * srp_map_finish_fr(); forwards to the generic QP error handler.
 */
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}
13051dc7b1f1SChristoph Hellwig 
/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 *
 * Returns the number of scatterlist elements that have been mapped or a
 * negative error code.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	/* All fast registration descriptors for this request are in use. */
	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	WARN_ON_ONCE(!dev->use_fast_reg);

	/*
	 * A single element covered by the global rkey does not need memory
	 * registration; emit a direct descriptor instead.
	 */
	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	/* Give the MR a fresh rkey before reusing it. */
	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		/* Mapping failed: return the descriptor to the pool. */
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	/* Build and post the fast registration work request. */
	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	/* Remember the descriptor so srp_unmap_data() can release it later. */
	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}
13885cfb1782SBart Van Assche 
/*
 * Add a single scatterlist element to the page accumulator in @state,
 * splitting it into mr_page_size chunks. The accumulated pages are flushed
 * into an FMR mapping whenever the page list is full or a chunk does not
 * start on a page boundary, since mappings can only merge at page
 * boundaries.
 *
 * Returns 0 on success or a negative error code.
 */
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		/*
		 * Flush the current mapping if the page list is full or if
		 * this chunk does not start on a page boundary.
		 */
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}
1431f5358a17SRoland Dreier 
143226630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
143326630e8aSSagi Grimberg 			  struct srp_request *req, struct scatterlist *scat,
143426630e8aSSagi Grimberg 			  int count)
143526630e8aSSagi Grimberg {
143626630e8aSSagi Grimberg 	struct scatterlist *sg;
143726630e8aSSagi Grimberg 	int i, ret;
143826630e8aSSagi Grimberg 
143926630e8aSSagi Grimberg 	state->pages = req->map_page;
144026630e8aSSagi Grimberg 	state->fmr.next = req->fmr_list;
1441509c5f33SBart Van Assche 	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
144226630e8aSSagi Grimberg 
144326630e8aSSagi Grimberg 	for_each_sg(scat, sg, count, i) {
1444*52bb8c62SBart Van Assche 		ret = srp_map_sg_entry(state, ch, sg);
144526630e8aSSagi Grimberg 		if (ret)
144626630e8aSSagi Grimberg 			return ret;
144726630e8aSSagi Grimberg 	}
144826630e8aSSagi Grimberg 
1449f7f7aab1SSagi Grimberg 	ret = srp_map_finish_fmr(state, ch);
145026630e8aSSagi Grimberg 	if (ret)
145126630e8aSSagi Grimberg 		return ret;
145226630e8aSSagi Grimberg 
145326630e8aSSagi Grimberg 	return 0;
145426630e8aSSagi Grimberg }
145526630e8aSSagi Grimberg 
145626630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
145726630e8aSSagi Grimberg 			 struct srp_request *req, struct scatterlist *scat,
145826630e8aSSagi Grimberg 			 int count)
145926630e8aSSagi Grimberg {
1460509c5f33SBart Van Assche 	unsigned int sg_offset = 0;
1461509c5f33SBart Van Assche 
1462f7f7aab1SSagi Grimberg 	state->fr.next = req->fr_list;
1463509c5f33SBart Van Assche 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1464f7f7aab1SSagi Grimberg 	state->sg = scat;
146526630e8aSSagi Grimberg 
14663b59b7a6SBart Van Assche 	if (count == 0)
14673b59b7a6SBart Van Assche 		return 0;
14683b59b7a6SBart Van Assche 
146957b0be9cSBart Van Assche 	while (count) {
1470f7f7aab1SSagi Grimberg 		int i, n;
1471f7f7aab1SSagi Grimberg 
1472509c5f33SBart Van Assche 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1473f7f7aab1SSagi Grimberg 		if (unlikely(n < 0))
1474f7f7aab1SSagi Grimberg 			return n;
1475f7f7aab1SSagi Grimberg 
147657b0be9cSBart Van Assche 		count -= n;
1477f7f7aab1SSagi Grimberg 		for (i = 0; i < n; i++)
1478f7f7aab1SSagi Grimberg 			state->sg = sg_next(state->sg);
147926630e8aSSagi Grimberg 	}
148026630e8aSSagi Grimberg 
148126630e8aSSagi Grimberg 	return 0;
148226630e8aSSagi Grimberg }
148326630e8aSSagi Grimberg 
148426630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1485509c07bcSBart Van Assche 			  struct srp_request *req, struct scatterlist *scat,
1486509c07bcSBart Van Assche 			  int count)
148776bc1e1dSBart Van Assche {
1488509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
148976bc1e1dSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
149076bc1e1dSBart Van Assche 	struct scatterlist *sg;
149126630e8aSSagi Grimberg 	int i;
149276bc1e1dSBart Van Assche 
14933ae95da8SBart Van Assche 	for_each_sg(scat, sg, count, i) {
14943ae95da8SBart Van Assche 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
149503f6fb93SBart Van Assche 			     ib_sg_dma_len(dev->dev, sg),
14965f071777SChristoph Hellwig 			     target->pd->unsafe_global_rkey);
14973ae95da8SBart Van Assche 	}
149876bc1e1dSBart Van Assche 
149926630e8aSSagi Grimberg 	return 0;
150076bc1e1dSBart Van Assche }
150176bc1e1dSBart Van Assche 
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 *
 * On success *idb_rkey is set to the (big-endian) rkey to use for the
 * indirect descriptor table. Returns 0 on success or a negative error code.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	/* Set up a single-descriptor mapping for the indirect buffer. */
	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		/* Pre-fill the DMA fields since the sg is not ib_dma_map_sg()'d. */
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		/* Neither registration scheme is available. */
		return -EINVAL;
	}

	/* The mapping filled in idb_desc; hand the key back to the caller. */
	*idb_rkey = idb_desc.key;

	return 0;
}
1556330179f2SBart Van Assche 
/*
 * Use DYNAMIC_DEBUG here so that this guard matches the one around the only
 * call site in srp_map_data(). The previous guard tested DYNAMIC_DATA_DEBUG,
 * which made the build fail with an undeclared function when only
 * DYNAMIC_DEBUG was defined (and left this function unused in the opposite
 * case).
 */
#if defined(DYNAMIC_DEBUG)
/*
 * Consistency check of a memory mapping: verify that the sum of the lengths
 * of the emitted descriptors equals the SCSI buffer length and that the
 * total registered MR length does not exceed it. Logs an error on mismatch;
 * has no other side effects.
 */
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}
#endif
1582509c5f33SBart Van Assche 
/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct ib_pd *pd = target->pd;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	/* Commands without a data buffer need no data descriptors at all. */
	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	/* Default: a single direct descriptor following the SRP_CMD. */
	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	/* Pick the memory registration scheme supported by the device. */
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	/* Record nmdesc even on failure so srp_unmap_data() can clean up. */
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

#if defined(DYNAMIC_DEBUG)
	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
			srp_check_mapping(&state, ch, req, scat, count);
	}
#endif

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	/*
	 * If the global rkey may not be used, register the indirect table
	 * itself so the target can fetch it via RDMA.
	 */
	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	/* The buffer format field holds data-out info in the high nibble. */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	/* Out of MRs with the pool exhausted: the request is simply too big. */
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}
1752aef9ec39SRoland Dreier 
175305a1d750SDavid Dillow /*
175476c75b25SBart Van Assche  * Return an IU and possible credit to the free pool
175576c75b25SBart Van Assche  */
1756509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
175776c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
175876c75b25SBart Van Assche {
175976c75b25SBart Van Assche 	unsigned long flags;
176076c75b25SBart Van Assche 
1761509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1762509c07bcSBart Van Assche 	list_add(&iu->list, &ch->free_tx);
176376c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
1764509c07bcSBart Van Assche 		++ch->req_lim;
1765509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
176676c75b25SBart Van Assche }
176776c75b25SBart Van Assche 
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Returns a free IU or NULL if either no IU is available or the request
 * limit would be exceeded.
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	/* Credits held in reserve for task management (none for TSK_MGMT itself). */
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	/* Reap send completions so finished IUs are back on the free list. */
	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			/* Statistics counter: request limit reached. */
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
180705a1d750SDavid Dillow 
18081dc7b1f1SChristoph Hellwig static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
18091dc7b1f1SChristoph Hellwig {
18101dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
18111dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
18121dc7b1f1SChristoph Hellwig 
18131dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
18141dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "SEND");
18151dc7b1f1SChristoph Hellwig 		return;
18161dc7b1f1SChristoph Hellwig 	}
18171dc7b1f1SChristoph Hellwig 
18181dc7b1f1SChristoph Hellwig 	list_add(&iu->list, &ch->free_tx);
18191dc7b1f1SChristoph Hellwig }
18201dc7b1f1SChristoph Hellwig 
1821509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
182205a1d750SDavid Dillow {
1823509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
182405a1d750SDavid Dillow 	struct ib_sge list;
182505a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
182605a1d750SDavid Dillow 
182705a1d750SDavid Dillow 	list.addr   = iu->dma;
182805a1d750SDavid Dillow 	list.length = len;
18299af76271SDavid Dillow 	list.lkey   = target->lkey;
183005a1d750SDavid Dillow 
18311dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_send_done;
18321dc7b1f1SChristoph Hellwig 
183305a1d750SDavid Dillow 	wr.next       = NULL;
18341dc7b1f1SChristoph Hellwig 	wr.wr_cqe     = &iu->cqe;
183505a1d750SDavid Dillow 	wr.sg_list    = &list;
183605a1d750SDavid Dillow 	wr.num_sge    = 1;
183705a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
183805a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
183905a1d750SDavid Dillow 
1840509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
184105a1d750SDavid Dillow }
184205a1d750SDavid Dillow 
1843509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1844c996bb47SBart Van Assche {
1845509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1846c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1847dcb4cb85SBart Van Assche 	struct ib_sge list;
1848c996bb47SBart Van Assche 
1849c996bb47SBart Van Assche 	list.addr   = iu->dma;
1850c996bb47SBart Van Assche 	list.length = iu->size;
18519af76271SDavid Dillow 	list.lkey   = target->lkey;
1852c996bb47SBart Van Assche 
18531dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_recv_done;
18541dc7b1f1SChristoph Hellwig 
1855c996bb47SBart Van Assche 	wr.next     = NULL;
18561dc7b1f1SChristoph Hellwig 	wr.wr_cqe   = &iu->cqe;
1857c996bb47SBart Van Assche 	wr.sg_list  = &list;
1858c996bb47SBart Van Assche 	wr.num_sge  = 1;
1859c996bb47SBart Van Assche 
1860509c07bcSBart Van Assche 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1861c996bb47SBart Van Assche }
1862c996bb47SBart Van Assche 
/*
 * Process an SRP_RSP information unit received from the target: either
 * complete a task management request or finish the SCSI command that the
 * response tag refers to.
 */
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		/*
		 * Task management response: credit the request-limit delta,
		 * record the status byte (if the target supplied response
		 * data) and wake up the waiter in the task management path.
		 */
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		/* Look up the SCSI command by tag and take ownership of it. */
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			/*
			 * No matching command (e.g. already aborted). Still
			 * honor the credit delta so flow control stays correct.
			 */
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		/* Sense data, when present, follows the response data. */
		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		/*
		 * Propagate data-in/data-out residual counts; over-runs are
		 * reported as negative residuals.
		 */
		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		/* Release the request slot and apply the credit delta. */
		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
1921aef9ec39SRoland Dreier 
1922509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1923bb12588aSDavid Dillow 			       void *rsp, int len)
1924bb12588aSDavid Dillow {
1925509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
192676c75b25SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1927bb12588aSDavid Dillow 	unsigned long flags;
1928bb12588aSDavid Dillow 	struct srp_iu *iu;
192976c75b25SBart Van Assche 	int err;
1930bb12588aSDavid Dillow 
1931509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1932509c07bcSBart Van Assche 	ch->req_lim += req_delta;
1933509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1934509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
193576c75b25SBart Van Assche 
1936bb12588aSDavid Dillow 	if (!iu) {
1937bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1938bb12588aSDavid Dillow 			     "no IU available to send response\n");
193976c75b25SBart Van Assche 		return 1;
1940bb12588aSDavid Dillow 	}
1941bb12588aSDavid Dillow 
1942bb12588aSDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1943bb12588aSDavid Dillow 	memcpy(iu->buf, rsp, len);
1944bb12588aSDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1945bb12588aSDavid Dillow 
1946509c07bcSBart Van Assche 	err = srp_post_send(ch, iu, len);
194776c75b25SBart Van Assche 	if (err) {
1948bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1949bb12588aSDavid Dillow 			     "unable to post response: %d\n", err);
1950509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
195176c75b25SBart Van Assche 	}
1952bb12588aSDavid Dillow 
1953bb12588aSDavid Dillow 	return err;
1954bb12588aSDavid Dillow }
1955bb12588aSDavid Dillow 
1956509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch,
1957bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1958bb12588aSDavid Dillow {
1959bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1960bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1961bb12588aSDavid Dillow 		.tag = req->tag,
1962bb12588aSDavid Dillow 	};
1963bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1964bb12588aSDavid Dillow 
1965509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1966509c07bcSBart Van Assche 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1967bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1968bb12588aSDavid Dillow }
1969bb12588aSDavid Dillow 
1970509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch,
1971bb12588aSDavid Dillow 				struct srp_aer_req *req)
1972bb12588aSDavid Dillow {
1973509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1974bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1975bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1976bb12588aSDavid Dillow 		.tag = req->tag,
1977bb12588aSDavid Dillow 	};
1978bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1979bb12588aSDavid Dillow 
1980bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1981985aa495SBart Van Assche 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1982bb12588aSDavid Dillow 
1983509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1984bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1985bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1986bb12588aSDavid Dillow }
1987bb12588aSDavid Dillow 
/*
 * Receive completion handler: dispatch an incoming information unit by
 * opcode and repost the receive buffer.
 */
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	/* Make the DMA'd data visible to the CPU before reading it. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	/* The first byte of every SRP IU is its opcode. */
	opcode = *(u8 *) iu->buf;

	/* Debugging aid: change 0 to 1 to hex-dump every received IU. */
	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	/* Give the buffer back to the device and repost it for receive. */
	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
2047aef9ec39SRoland Dreier 
2048c1120f89SBart Van Assche /**
2049c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
2050af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
2051c1120f89SBart Van Assche  *
2052c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
2053c1120f89SBart Van Assche  * hence the target->rport test.
2054c1120f89SBart Van Assche  */
2055c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
2056c1120f89SBart Van Assche {
2057c1120f89SBart Van Assche 	struct srp_target_port *target;
2058c1120f89SBart Van Assche 
2059c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
2060c1120f89SBart Van Assche 	if (target->rport)
2061c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
2062c1120f89SBart Van Assche }
2063c1120f89SBart Van Assche 
20641dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
20651dc7b1f1SChristoph Hellwig 		const char *opname)
2066948d1e88SBart Van Assche {
20671dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
20687dad6b2eSBart Van Assche 	struct srp_target_port *target = ch->target;
20697dad6b2eSBart Van Assche 
2070c014c8cdSBart Van Assche 	if (ch->connected && !target->qp_in_error) {
20715cfb1782SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
20721dc7b1f1SChristoph Hellwig 			     PFX "failed %s status %s (%d) for CQE %p\n",
20731dc7b1f1SChristoph Hellwig 			     opname, ib_wc_status_msg(wc->status), wc->status,
20741dc7b1f1SChristoph Hellwig 			     wc->wr_cqe);
2075c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
20764f0af697SBart Van Assche 	}
2077948d1e88SBart Van Assche 	target->qp_in_error = true;
2078948d1e88SBart Van Assche }
2079948d1e88SBart Van Assche 
/*
 * SCSI mid-layer queuecommand entry point: build an SRP_CMD information
 * unit for @scmnd, map its data buffers and post it on the RDMA channel
 * selected by the block-layer hardware queue. Returns 0 when the command
 * has been accepted (or completed with an error) and
 * SCSI_MLQUEUE_HOST_BUSY when the mid-layer should retry later.
 */
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	/* Fail fast if the rport is blocked or lost. */
	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	/* The blk-mq tag encodes both the channel (hwq) and the ring index. */
	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	/* No IU / no credit: report busy so the mid-layer retries. */
	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	/* Remember the request so the response path can find it by tag. */
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	/*
	 * A set result means the command failed for a definite reason:
	 * complete it; otherwise ask the mid-layer to requeue it.
	 */
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
2194aef9ec39SRoland Dreier 
21954d73f95fSBart Van Assche /*
21964d73f95fSBart Van Assche  * Note: the resources allocated in this function are freed in
2197509c07bcSBart Van Assche  * srp_free_ch_ib().
21984d73f95fSBart Van Assche  */
/*
 * Allocate the receive and transmit information-unit rings for @ch and put
 * all transmit IUs on the free list. Returns 0 or -ENOMEM.
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	/* kcalloc() zero-fills, so unallocated ring slots are NULL. */
	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	/* Receive IUs must be large enough for the target's max IU length. */
	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	/*
	 * Both rings are walked in full; slots that were never allocated are
	 * still NULL. NOTE(review): this presumes srp_free_iu() tolerates a
	 * NULL iu — confirm against its definition elsewhere in this file.
	 */
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}


err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
2248aef9ec39SRoland Dreier 
2249c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2250c9b03c1aSBart Van Assche {
2251c9b03c1aSBart Van Assche 	uint64_t T_tr_ns, max_compl_time_ms;
2252c9b03c1aSBart Van Assche 	uint32_t rq_tmo_jiffies;
2253c9b03c1aSBart Van Assche 
2254c9b03c1aSBart Van Assche 	/*
2255c9b03c1aSBart Van Assche 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2256c9b03c1aSBart Van Assche 	 * table 91), both the QP timeout and the retry count have to be set
2257c9b03c1aSBart Van Assche 	 * for RC QP's during the RTR to RTS transition.
2258c9b03c1aSBart Van Assche 	 */
2259c9b03c1aSBart Van Assche 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2260c9b03c1aSBart Van Assche 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2261c9b03c1aSBart Van Assche 
2262c9b03c1aSBart Van Assche 	/*
2263c9b03c1aSBart Van Assche 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2264c9b03c1aSBart Van Assche 	 * it can take before an error completion is generated. See also
2265c9b03c1aSBart Van Assche 	 * C9-140..142 in the IBTA spec for more information about how to
2266c9b03c1aSBart Van Assche 	 * convert the QP Local ACK Timeout value to nanoseconds.
2267c9b03c1aSBart Van Assche 	 */
2268c9b03c1aSBart Van Assche 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2269c9b03c1aSBart Van Assche 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2270c9b03c1aSBart Van Assche 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2271c9b03c1aSBart Van Assche 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2272c9b03c1aSBart Van Assche 
2273c9b03c1aSBart Van Assche 	return rq_tmo_jiffies;
2274c9b03c1aSBart Van Assche }
2275c9b03c1aSBart Van Assche 
/*
 * Handle an IB CM REP (login response): record the target's limits, move
 * the QP through RTR and RTS, post the initial receive buffers and send
 * the RTU. The outcome is stored in ch->status (0 or a negative errno).
 */
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	/* First login on this channel: allocate the IU rings. */
	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	/* Transition the QP to Ready-To-Receive. */
	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	/* Post all receive buffers before enabling sends. */
	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	/* Transition the QP to Ready-To-Send. */
	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	/* Derive the reconnect timeout from the RTS timeout/retry values. */
	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
2354961e0be8SDavid Dillow 
/*
 * Handle an IB CM REJ (connection rejected) event for one RDMA channel.
 *
 * Translates the reject reason into ch->status: a positive SRP_* code
 * (SRP_PORT_REDIRECT, SRP_DLID_REDIRECT, SRP_STALE_CONN) when the caller
 * should retry the connection, or a negative errno (-ECONNRESET) when the
 * login attempt has definitively failed.
 */
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		/* Update the path with the redirect info from the ARI. */
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		/* A zero DLID means only the GID was redirected. */
		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target may have embedded an SRP login reject IU. */
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		/* Caller may retry after the stale connection is torn down. */
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}
2437aef9ec39SRoland Dreier 
/*
 * IB connection manager callback for an SRP RDMA channel.
 *
 * Runs in IB CM callback context. For events that the connect path waits
 * on, sets ch->status and signals ch->done (comp = 1). Always returns 0,
 * i.e. the cm_id is never destroyed from inside the callback.
 */
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		/* srp_cm_rep_handler() sets ch->status itself. */
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		/* Remote side closed the connection; acknowledge with DREP
		 * and schedule transport-error handling. */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		/* Nothing to do for these events. */
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}
2498aef9ec39SRoland Dreier 
249971444b97SJack Wang /**
250071444b97SJack Wang  * srp_change_queue_depth - setting device queue depth
250171444b97SJack Wang  * @sdev: scsi device struct
250271444b97SJack Wang  * @qdepth: requested queue depth
250371444b97SJack Wang  *
250471444b97SJack Wang  * Returns queue depth.
250571444b97SJack Wang  */
250671444b97SJack Wang static int
2507db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
250871444b97SJack Wang {
250971444b97SJack Wang 	if (!sdev->tagged_supported)
25101e6f2416SChristoph Hellwig 		qdepth = 1;
2511db5ed4dfSChristoph Hellwig 	return scsi_change_queue_depth(sdev, qdepth);
251271444b97SJack Wang }
251371444b97SJack Wang 
/*
 * Send an SRP task management IU (e.g. ABORT TASK or LUN RESET) over @ch
 * and wait for the response.
 *
 * @req_tag: tag of the command the TMF refers to, or SRP_TAG_NO_REQ.
 * @lun:     logical unit number the TMF applies to.
 * @func:    SRP task management function code (SRP_TSK_*).
 *
 * Returns 0 when a response was received within SRP_ABORT_TIMEOUT_MS and
 * -1 on any failure (channel down, no free IU, post failure or timeout).
 * On success the response status is available in ch->tsk_mgmt_status.
 */
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	/* ch->lock protects the TX IU free list. */
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	/* Give the CPU ownership of the IU buffer before filling it in. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	/* SRP_TAG_TSK_MGMT marks the tag as belonging to a TMF response. */
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		/* Posting failed: return the IU to the free list. */
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
2570d945e1dfSRoland Dreier 
/*
 * SCSI EH abort handler: try to abort @scmnd by sending an SRP ABORT TASK
 * task management function over the channel the command was issued on.
 *
 * Returns SUCCESS when the command was not owned by this driver or the
 * abort was delivered, FAST_IO_FAIL when the rport has been lost, and
 * FAILED otherwise. On SUCCESS/FAST_IO_FAIL the command is completed with
 * DID_ABORT.
 */
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	/* The blk-mq tag encodes the hardware queue, i.e. the channel. */
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	/* If the request cannot be claimed it has already completed. */
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
2606aef9ec39SRoland Dreier 
2607aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2608aef9ec39SRoland Dreier {
2609d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2610d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2611536ae14eSBart Van Assche 	int i;
2612d945e1dfSRoland Dreier 
26137aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2614aef9ec39SRoland Dreier 
2615d92c0da7SBart Van Assche 	ch = &target->ch[0];
2616509c07bcSBart Van Assche 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2617f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2618d945e1dfSRoland Dreier 		return FAILED;
2619509c07bcSBart Van Assche 	if (ch->tsk_mgmt_status)
2620d945e1dfSRoland Dreier 		return FAILED;
2621d945e1dfSRoland Dreier 
2622d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2623d92c0da7SBart Van Assche 		ch = &target->ch[i];
26244d73f95fSBart Van Assche 		for (i = 0; i < target->req_ring_size; ++i) {
2625509c07bcSBart Van Assche 			struct srp_request *req = &ch->req_ring[i];
2626509c07bcSBart Van Assche 
2627509c07bcSBart Van Assche 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2628536ae14eSBart Van Assche 		}
2629d92c0da7SBart Van Assche 	}
2630d945e1dfSRoland Dreier 
2631d945e1dfSRoland Dreier 	return SUCCESS;
2632aef9ec39SRoland Dreier }
2633aef9ec39SRoland Dreier 
2634aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2635aef9ec39SRoland Dreier {
2636aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2637aef9ec39SRoland Dreier 
26387aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2639aef9ec39SRoland Dreier 
2640ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2641aef9ec39SRoland Dreier }
2642aef9ec39SRoland Dreier 
2643509c5f33SBart Van Assche static int srp_slave_alloc(struct scsi_device *sdev)
2644509c5f33SBart Van Assche {
2645509c5f33SBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2646509c5f33SBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2647509c5f33SBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
2648509c5f33SBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
2649509c5f33SBart Van Assche 
2650509c5f33SBart Van Assche 	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
2651509c5f33SBart Van Assche 		blk_queue_virt_boundary(sdev->request_queue,
2652509c5f33SBart Van Assche 					~srp_dev->mr_page_mask);
2653509c5f33SBart Van Assche 
2654509c5f33SBart Van Assche 	return 0;
2655509c5f33SBart Van Assche }
2656509c5f33SBart Van Assche 
2657c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2658c9b03c1aSBart Van Assche {
2659c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2660c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2661c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2662c9b03c1aSBart Van Assche 	unsigned long timeout;
2663c9b03c1aSBart Van Assche 
2664c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2665c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2666c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2667c9b03c1aSBart Van Assche 	}
2668c9b03c1aSBart Van Assche 
2669c9b03c1aSBart Van Assche 	return 0;
2670c9b03c1aSBart Van Assche }
2671c9b03c1aSBart Van Assche 
2672ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2673ee959b00STony Jones 			   char *buf)
26746ecb0c84SRoland Dreier {
2675ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26766ecb0c84SRoland Dreier 
267745c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
26786ecb0c84SRoland Dreier }
26796ecb0c84SRoland Dreier 
2680ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2681ee959b00STony Jones 			     char *buf)
26826ecb0c84SRoland Dreier {
2683ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26846ecb0c84SRoland Dreier 
268545c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
26866ecb0c84SRoland Dreier }
26876ecb0c84SRoland Dreier 
2688ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2689ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
26906ecb0c84SRoland Dreier {
2691ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26926ecb0c84SRoland Dreier 
269345c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
26946ecb0c84SRoland Dreier }
26956ecb0c84SRoland Dreier 
2696ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2697ee959b00STony Jones 			 char *buf)
26986ecb0c84SRoland Dreier {
2699ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27006ecb0c84SRoland Dreier 
2701747fe000SBart Van Assche 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
27026ecb0c84SRoland Dreier }
27036ecb0c84SRoland Dreier 
2704848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2705848b3082SBart Van Assche 			 char *buf)
2706848b3082SBart Van Assche {
2707848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2708848b3082SBart Van Assche 
2709747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2710848b3082SBart Van Assche }
2711848b3082SBart Van Assche 
2712ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2713ee959b00STony Jones 			 char *buf)
27146ecb0c84SRoland Dreier {
2715ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2716d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch = &target->ch[0];
27176ecb0c84SRoland Dreier 
2718509c07bcSBart Van Assche 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
27196ecb0c84SRoland Dreier }
27206ecb0c84SRoland Dreier 
2721ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2722ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
27233633b3d0SIshai Rabinovitz {
2724ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27253633b3d0SIshai Rabinovitz 
2726747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
27273633b3d0SIshai Rabinovitz }
27283633b3d0SIshai Rabinovitz 
272989de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
273089de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
273189de7486SBart Van Assche {
273289de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2733d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2734d92c0da7SBart Van Assche 	int i, req_lim = INT_MAX;
273589de7486SBart Van Assche 
2736d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2737d92c0da7SBart Van Assche 		ch = &target->ch[i];
2738d92c0da7SBart Van Assche 		req_lim = min(req_lim, ch->req_lim);
2739d92c0da7SBart Van Assche 	}
2740d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", req_lim);
274189de7486SBart Van Assche }
274289de7486SBart Van Assche 
2743ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2744ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
27456bfa24faSRoland Dreier {
2746ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27476bfa24faSRoland Dreier 
27486bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
27496bfa24faSRoland Dreier }
27506bfa24faSRoland Dreier 
2751ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2752ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2753ded7f1a1SIshai Rabinovitz {
2754ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2755ded7f1a1SIshai Rabinovitz 
2756ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2757ded7f1a1SIshai Rabinovitz }
2758ded7f1a1SIshai Rabinovitz 
2759ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2760ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2761ded7f1a1SIshai Rabinovitz {
2762ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2763ded7f1a1SIshai Rabinovitz 
276405321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2765ded7f1a1SIshai Rabinovitz }
2766ded7f1a1SIshai Rabinovitz 
2767d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2768d92c0da7SBart Van Assche 			     char *buf)
2769d92c0da7SBart Van Assche {
2770d92c0da7SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2771d92c0da7SBart Van Assche 
2772d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", target->ch_count);
2773d92c0da7SBart Van Assche }
2774d92c0da7SBart Van Assche 
27754b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
27764b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
27774b5e5f41SBart Van Assche {
27784b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27794b5e5f41SBart Van Assche 
27804b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
27814b5e5f41SBart Van Assche }
27824b5e5f41SBart Van Assche 
27837bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
27847bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
27857bb312e4SVu Pham {
27867bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27877bb312e4SVu Pham 
27887bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
27897bb312e4SVu Pham }
27907bb312e4SVu Pham 
279149248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
279249248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
279349248644SDavid Dillow {
279449248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
279549248644SDavid Dillow 
279649248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
279749248644SDavid Dillow }
279849248644SDavid Dillow 
2799c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2800c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2801c07d424dSDavid Dillow {
2802c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2803c07d424dSDavid Dillow 
2804c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2805c07d424dSDavid Dillow }
2806c07d424dSDavid Dillow 
/* Read-only sysfs attributes exposed for every SRP SCSI host. */
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
28236ecb0c84SRoland Dreier 
/* NULL-terminated attribute list hooked into srp_template.shost_attrs. */
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
28436ecb0c84SRoland Dreier 
/* SCSI host template: wires the SRP initiator's entry points into the
 * SCSI midlayer (queueing, error handling, sysfs attributes). */
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_alloc			= srp_slave_alloc,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};
2865aef9ec39SRoland Dreier 
286634aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host)
286734aa654eSBart Van Assche {
286834aa654eSBart Van Assche 	struct scsi_device *sdev;
286934aa654eSBart Van Assche 	int c = 0;
287034aa654eSBart Van Assche 
287134aa654eSBart Van Assche 	shost_for_each_device(sdev, host)
287234aa654eSBart Van Assche 		c++;
287334aa654eSBart Van Assche 
287434aa654eSBart Van Assche 	return c;
287534aa654eSBart Van Assche }
287634aa654eSBart Van Assche 
2877bc44bd1dSBart Van Assche /*
2878bc44bd1dSBart Van Assche  * Return values:
2879bc44bd1dSBart Van Assche  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2880bc44bd1dSBart Van Assche  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2881bc44bd1dSBart Van Assche  *    removal has been scheduled.
2882bc44bd1dSBart Van Assche  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2883bc44bd1dSBart Van Assche  */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	/* SCANNING blocks concurrent removal until the scan has finished. */
	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	/* The rport port ID is the concatenation of id_ext and ioc_guid. */
	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);

	/* If the connection dropped during the scan, schedule asynchronous
	 * removal; per the contract above this still returns 0. */
	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	/* Only transition to LIVE if no removal raced with the scan. */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}
2935aef9ec39SRoland Dreier 
2936ee959b00STony Jones static void srp_release_dev(struct device *dev)
2937aef9ec39SRoland Dreier {
2938aef9ec39SRoland Dreier 	struct srp_host *host =
2939ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2940aef9ec39SRoland Dreier 
2941aef9ec39SRoland Dreier 	complete(&host->released);
2942aef9ec39SRoland Dreier }
2943aef9ec39SRoland Dreier 
/* Device class under which one "srp-<hca>-<port>" device is registered per port. */
static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
2948aef9ec39SRoland Dreier 
294996fc248aSBart Van Assche /**
295096fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2951af24663bSBart Van Assche  * @host:   SRP host.
2952af24663bSBart Van Assche  * @target: SRP target port.
295396fc248aSBart Van Assche  */
295496fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
295596fc248aSBart Van Assche 			    struct srp_target_port *target)
295696fc248aSBart Van Assche {
295796fc248aSBart Van Assche 	struct srp_target_port *t;
295896fc248aSBart Van Assche 	bool ret = false;
295996fc248aSBart Van Assche 
296096fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
296196fc248aSBart Van Assche 		goto out;
296296fc248aSBart Van Assche 
296396fc248aSBart Van Assche 	ret = true;
296496fc248aSBart Van Assche 
296596fc248aSBart Van Assche 	spin_lock(&host->target_lock);
296696fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
296796fc248aSBart Van Assche 		if (t != target &&
296896fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
296996fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
297096fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
297196fc248aSBart Van Assche 			ret = false;
297296fc248aSBart Van Assche 			break;
297396fc248aSBart Van Assche 		}
297496fc248aSBart Van Assche 	}
297596fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
297696fc248aSBart Van Assche 
297796fc248aSBart Van Assche out:
297896fc248aSBart Van Assche 	return ret;
297996fc248aSBart Van Assche }
298096fc248aSBart Van Assche 
2981aef9ec39SRoland Dreier /*
2982aef9ec39SRoland Dreier  * Target ports are added by writing
2983aef9ec39SRoland Dreier  *
2984aef9ec39SRoland Dreier  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2985aef9ec39SRoland Dreier  *     pkey=<P_Key>,service_id=<service ID>
2986aef9ec39SRoland Dreier  *
2987aef9ec39SRoland Dreier  * to the add_target sysfs attribute.
2988aef9ec39SRoland Dreier  */
/*
 * Bit flags for the options accepted by srp_parse_options().  Each token
 * parsed from the add_target string is OR'ed into opt_mask; SRP_OPT_ALL
 * is the set of options that must all be present for target creation to
 * succeed.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	/* Options that are mandatory in the add_target string. */
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
3012aef9ec39SRoland Dreier 
/*
 * Token table for match_token(); maps each "name=value" pattern in the
 * add_target string to an SRP_OPT_* flag.  The SRP_OPT_ERR/NULL entry
 * terminates the table and is returned for unrecognized input.
 */
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL 			}
};
3031aef9ec39SRoland Dreier 
/**
 * srp_parse_options() - parse an add_target option string
 * @buf:    Option string as written to the add_target sysfs attribute
 *          ("key=value" pairs separated by ',' or '\n').
 * @target: Target port whose fields are filled in from the options.
 *
 * Return: 0 on success; -ENOMEM if duplicating the input or an option
 * value fails; -EINVAL for malformed input or when one of the mandatory
 * options (SRP_OPT_ALL) is missing.
 *
 * NOTE(review): the simple_strtoull() calls below do not report parse
 * errors, so e.g. "id_ext=xyz" is silently accepted as 0 — presumably
 * tolerated here; newer kernels use kstrtoull() instead. TODO confirm.
 */
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];	/* scratch buffer: two hex digits + NUL per GID byte */
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	/* Work on a private copy since strsep() modifies the string. */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		/* Record which options were seen for the mandatory check below. */
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			/* The destination GID must be exactly 32 hex digits (16 bytes). */
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert two hex characters at a time into one GID byte. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			/*
			 * queue_size also reserves room for RSP and task
			 * management IUs, and doubles as the default
			 * cmd_per_lun unless max_cmd_per_lun was given too.
			 */
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			/* Only the SRP rev10 and rev16a IB I/O classes are supported. */
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SG_MAX_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	/* All mandatory options must have been seen; name the missing ones. */
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	/* An explicit cmd_per_lun above the queue depth is suspicious; warn only. */
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
3242aef9ec39SRoland Dreier 
/**
 * srp_create_target() - sysfs add_target store handler
 * @dev:   The srp_host class device the attribute belongs to.
 * @attr:  The add_target device attribute (unused).
 * @buf:   Option string written by user space.
 * @count: Length of @buf.
 *
 * Parses the option string, allocates a SCSI host and one or more RDMA
 * channels, connects them and registers the target with the SCSI core.
 *
 * Return: @count on success or a negative errno on failure.
 */
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	/* Maximum CDB length is bounded by the size of the SRP_CMD CDB field. */
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	/* Defaults; most of these may be overridden by srp_parse_options(). */
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->pd		= host->srp_dev->pd;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	/* Refuse duplicate connections to the same target port. */
	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	/* Without an MR pool, large SG lists need external indirect descriptors. */
	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
		/*
		 * FR and FMR can only map one HCA page per entry. If the
		 * start address is not aligned on a HCA page boundary two
		 * entries will be used for the head and the tail although
		 * these two entries combined contain at most one HCA page of
		 * data. Hence the "+ 1" in the calculation below.
		 *
		 * The indirect data buffer descriptor is contiguous so the
		 * memory for that buffer will only be registered if
		 * register_always is true. Hence add one to mr_per_cmd if
		 * register_always has been set.
		 */
		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
				  (ilog2(srp_dev->mr_page_size) - 9);
		mr_per_cmd = register_always +
			(target->scsi_host->max_sectors + 1 +
			 max_sectors_per_mr - 1) / max_sectors_per_mr;
		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
			 target->scsi_host->max_sectors,
			 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
			 max_sectors_per_mr, mr_per_cmd);
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;

	ret = -ENOMEM;
	/*
	 * Use at least one channel per NUMA node, at most one per online
	 * CPU, bounded by ch_count (if set) or 4x nodes / completion
	 * vector count.
	 */
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	/*
	 * Spread channels and completion vectors evenly across NUMA nodes
	 * and the CPUs on each node.
	 */
	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				   % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				/*
				 * Fail hard only if the first channel could
				 * not be connected; otherwise continue with
				 * the channels that did connect.
				 */
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

	/*
	 * The first put balances the scsi_host_get() above; on failure a
	 * second put drops the allocation reference so the host is freed.
	 */
	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3464aef9ec39SRoland Dreier 
3465ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3466ee959b00STony Jones 			  char *buf)
3467aef9ec39SRoland Dreier {
3468ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3469aef9ec39SRoland Dreier 
347005321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3471aef9ec39SRoland Dreier }
3472aef9ec39SRoland Dreier 
3473ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3474aef9ec39SRoland Dreier 
3475ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3476ee959b00STony Jones 			 char *buf)
3477aef9ec39SRoland Dreier {
3478ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3479aef9ec39SRoland Dreier 
3480aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3481aef9ec39SRoland Dreier }
3482aef9ec39SRoland Dreier 
3483ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3484aef9ec39SRoland Dreier 
/**
 * srp_add_port() - allocate and register an srp_host for one IB port
 * @device: SRP device the port belongs to.
 * @port:   One-based IB port number.
 *
 * Registers an "srp-<ibdev>-<port>" class device with the add_target,
 * ibdev and port attributes.  Returns the new host or NULL on failure.
 *
 * NOTE(review): per the driver core documentation, once device_register()
 * has been called (even if it fails) the caller is supposed to drop the
 * reference with put_device() instead of freeing the memory directly;
 * the kfree() on the free_host path after a device_register() failure
 * looks questionable — confirm against the driver-core lifetime rules.
 */
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	/* srp_release_dev() completes host->released when the ref is dropped. */
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
3523aef9ec39SRoland Dreier 
/*
 * srp_add_one - IB client "add" callback, invoked once per RDMA device.
 *
 * Allocates a struct srp_device for @device, derives the memory
 * registration parameters from the HCA attributes, allocates a
 * protection domain, and registers one srp_host per physical port.
 * The result is attached to @device as IB client data so that
 * srp_remove_one() can find it later.  On any failure the function
 * returns without registering anything for this device.
 */
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;	/* extra ib_alloc_pd() flags */

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	/* How many MR pages fit in the largest memory region the HCA allows. */
	max_pages_per_mr	= device->attrs.max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 device->attrs.max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	/* FMR support requires the device to implement all four FMR verbs. */
	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (device->attrs.device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
	} else if (!never_register &&
		   device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
		/*
		 * Pick fast registration over FMR when available; the
		 * prefer_fr module parameter breaks the tie if both exist.
		 */
		srp_dev->use_fast_reg = (srp_dev->has_fr &&
					 (!srp_dev->has_fmr || prefer_fr));
		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
	}

	/*
	 * Without memory registration (disabled by module parameters or
	 * unsupported by the HCA) fall back on the global unsafe rkey.
	 */
	if (never_register || !register_always ||
	    (!srp_dev->has_fmr && !srp_dev->has_fr))
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		/* FR additionally caps the number of pages per work request. */
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      device->attrs.max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				   srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, device->attrs.max_mr_size,
		 device->attrs.max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;


	/* One srp_host (and sysfs add_target entry) per physical port. */
	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return;

free_dev:
	kfree(srp_dev);
}
3601aef9ec39SRoland Dreier 
/*
 * srp_remove_one - IB client "remove" callback, invoked when an RDMA
 * device goes away.
 *
 * Tears down everything srp_add_one() created for the device, in
 * order: unregister each port's sysfs device (preventing new targets),
 * wait until the sysfs entry is gone, queue removal of all existing
 * target ports, wait for the removal work to finish, then free the
 * hosts, the PD and the srp_device itself.
 */
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	/* srp_add_one() may have bailed out before setting client data. */
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
3641aef9ec39SRoland Dreier 
/*
 * Callbacks and tunables handed to the SCSI SRP transport class.  The
 * timeout pointers reference the module parameters so that changes made
 * via /sys/module/ib_srp/parameters take effect on new rports.
 */
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
36523236822bSFUJITA Tomonori 
3653aef9ec39SRoland Dreier static int __init srp_init_module(void)
3654aef9ec39SRoland Dreier {
3655aef9ec39SRoland Dreier 	int ret;
3656aef9ec39SRoland Dreier 
365749248644SDavid Dillow 	if (srp_sg_tablesize) {
3658e0bda7d8SBart Van Assche 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
365949248644SDavid Dillow 		if (!cmd_sg_entries)
366049248644SDavid Dillow 			cmd_sg_entries = srp_sg_tablesize;
366149248644SDavid Dillow 	}
366249248644SDavid Dillow 
366349248644SDavid Dillow 	if (!cmd_sg_entries)
366449248644SDavid Dillow 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
366549248644SDavid Dillow 
366649248644SDavid Dillow 	if (cmd_sg_entries > 255) {
3667e0bda7d8SBart Van Assche 		pr_warn("Clamping cmd_sg_entries to 255\n");
366849248644SDavid Dillow 		cmd_sg_entries = 255;
36691e89a194SDavid Dillow 	}
36701e89a194SDavid Dillow 
3671c07d424dSDavid Dillow 	if (!indirect_sg_entries)
3672c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3673c07d424dSDavid Dillow 	else if (indirect_sg_entries < cmd_sg_entries) {
3674e0bda7d8SBart Van Assche 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3675e0bda7d8SBart Van Assche 			cmd_sg_entries);
3676c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3677c07d424dSDavid Dillow 	}
3678c07d424dSDavid Dillow 
3679bcc05910SBart Van Assche 	srp_remove_wq = create_workqueue("srp_remove");
3680da05be29SWei Yongjun 	if (!srp_remove_wq) {
3681da05be29SWei Yongjun 		ret = -ENOMEM;
3682bcc05910SBart Van Assche 		goto out;
3683bcc05910SBart Van Assche 	}
3684bcc05910SBart Van Assche 
3685bcc05910SBart Van Assche 	ret = -ENOMEM;
36863236822bSFUJITA Tomonori 	ib_srp_transport_template =
36873236822bSFUJITA Tomonori 		srp_attach_transport(&ib_srp_transport_functions);
36883236822bSFUJITA Tomonori 	if (!ib_srp_transport_template)
3689bcc05910SBart Van Assche 		goto destroy_wq;
36903236822bSFUJITA Tomonori 
3691aef9ec39SRoland Dreier 	ret = class_register(&srp_class);
3692aef9ec39SRoland Dreier 	if (ret) {
3693e0bda7d8SBart Van Assche 		pr_err("couldn't register class infiniband_srp\n");
3694bcc05910SBart Van Assche 		goto release_tr;
3695aef9ec39SRoland Dreier 	}
3696aef9ec39SRoland Dreier 
3697c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&srp_sa_client);
3698c1a0b23bSMichael S. Tsirkin 
3699aef9ec39SRoland Dreier 	ret = ib_register_client(&srp_client);
3700aef9ec39SRoland Dreier 	if (ret) {
3701e0bda7d8SBart Van Assche 		pr_err("couldn't register IB client\n");
3702bcc05910SBart Van Assche 		goto unreg_sa;
3703aef9ec39SRoland Dreier 	}
3704aef9ec39SRoland Dreier 
3705bcc05910SBart Van Assche out:
3706bcc05910SBart Van Assche 	return ret;
3707bcc05910SBart Van Assche 
3708bcc05910SBart Van Assche unreg_sa:
3709bcc05910SBart Van Assche 	ib_sa_unregister_client(&srp_sa_client);
3710bcc05910SBart Van Assche 	class_unregister(&srp_class);
3711bcc05910SBart Van Assche 
3712bcc05910SBart Van Assche release_tr:
3713bcc05910SBart Van Assche 	srp_release_transport(ib_srp_transport_template);
3714bcc05910SBart Van Assche 
3715bcc05910SBart Van Assche destroy_wq:
3716bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3717bcc05910SBart Van Assche 	goto out;
3718aef9ec39SRoland Dreier }
3719aef9ec39SRoland Dreier 
/*
 * Module teardown: unregister everything srp_init_module() registered,
 * in the reverse order of initialization.  Unregistering the IB client
 * first triggers srp_remove_one() for every device, which is why the
 * remove workqueue must still exist until the very end.
 */
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}
3728aef9ec39SRoland Dreier 
/* Wire the init/exit handlers into the module loader. */
module_init(srp_init_module);
module_exit(srp_cleanup_module);
3731