xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision 4139032b4860c06ff3a7687041f06535fed901ed)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
4356b5390cSBart Van Assche #include <rdma/ib_cache.h>
44aef9ec39SRoland Dreier 
4560063497SArun Sharma #include <linux/atomic.h>
46aef9ec39SRoland Dreier 
47aef9ec39SRoland Dreier #include <scsi/scsi.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
5071444b97SJack Wang #include <scsi/scsi_tcq.h>
51aef9ec39SRoland Dreier #include <scsi/srp.h>
523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
53aef9ec39SRoland Dreier 
54aef9ec39SRoland Dreier #include "ib_srp.h"
55aef9ec39SRoland Dreier 
56aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
57aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
58e8ca4135SVu Pham #define DRV_VERSION	"1.0"
59e8ca4135SVu Pham #define DRV_RELDATE	"July 1, 2013"
60aef9ec39SRoland Dreier 
61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION);
6533ab3e5bSBart Van Assche MODULE_INFO(release_date, DRV_RELDATE);
66aef9ec39SRoland Dreier 
6749248644SDavid Dillow static unsigned int srp_sg_tablesize;
6849248644SDavid Dillow static unsigned int cmd_sg_entries;
69c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
70c07d424dSDavid Dillow static bool allow_ext_sg;
715cfb1782SBart Van Assche static bool prefer_fr;
72b1b8854dSBart Van Assche static bool register_always;
73aef9ec39SRoland Dreier static int topspin_workarounds = 1;
74aef9ec39SRoland Dreier 
7549248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7649248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7749248644SDavid Dillow 
7849248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
7949248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
8049248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
8149248644SDavid Dillow 
82c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
83c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
84c07d424dSDavid Dillow 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85c07d424dSDavid Dillow 
86c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
87c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
88c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89c07d424dSDavid Dillow 
90aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
91aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
92aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93aef9ec39SRoland Dreier 
945cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
955cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
965cfb1782SBart Van Assche "Whether to use fast registration if both FMR and fast registration are supported");
975cfb1782SBart Van Assche 
98b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
99b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
100b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
101b1b8854dSBart Van Assche 
1029c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops;
103ed9b2264SBart Van Assche 
104a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
105a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
107a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108a95cadb9SBart Van Assche 
109ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
110ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
112ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
113ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
114ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
115ed9b2264SBart Van Assche 		 " functionality is disabled.");
116ed9b2264SBart Van Assche 
117a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
118ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
120ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
121ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
122ed9b2264SBart Van Assche 		 " insulate transport layer errors. After this time has been"
123ed9b2264SBart Van Assche 		 " exceeded the SCSI host is removed. Should be"
124ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
126ed9b2264SBart Van Assche 		 " this functionality is disabled.");
127ed9b2264SBart Van Assche 
128d92c0da7SBart Van Assche static unsigned ch_count;
129d92c0da7SBart Van Assche module_param(ch_count, uint, 0444);
130d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count,
131d92c0da7SBart Van Assche 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132d92c0da7SBart Van Assche 
133aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
134aef9ec39SRoland Dreier static void srp_remove_one(struct ib_device *device);
135509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
137aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
138aef9ec39SRoland Dreier 
1393236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
140bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1413236822bSFUJITA Tomonori 
142aef9ec39SRoland Dreier static struct ib_client srp_client = {
143aef9ec39SRoland Dreier 	.name   = "srp",
144aef9ec39SRoland Dreier 	.add    = srp_add_one,
145aef9ec39SRoland Dreier 	.remove = srp_remove_one
146aef9ec39SRoland Dreier };
147aef9ec39SRoland Dreier 
148c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
149c1a0b23bSMichael S. Tsirkin 
150ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
151ed9b2264SBart Van Assche {
152ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
153ed9b2264SBart Van Assche 
154ed9b2264SBart Van Assche 	if (tmo >= 0)
155ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
156ed9b2264SBart Van Assche 	else
157ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
158ed9b2264SBart Van Assche }
159ed9b2264SBart Van Assche 
160ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161ed9b2264SBart Van Assche {
162ed9b2264SBart Van Assche 	int tmo, res;
163ed9b2264SBart Van Assche 
164ed9b2264SBart Van Assche 	if (strncmp(val, "off", 3) != 0) {
165ed9b2264SBart Van Assche 		res = kstrtoint(val, 0, &tmo);
166ed9b2264SBart Van Assche 		if (res)
167ed9b2264SBart Van Assche 			goto out;
168ed9b2264SBart Van Assche 	} else {
169ed9b2264SBart Van Assche 		tmo = -1;
170ed9b2264SBart Van Assche 	}
171a95cadb9SBart Van Assche 	if (kp->arg == &srp_reconnect_delay)
172a95cadb9SBart Van Assche 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173a95cadb9SBart Van Assche 				    srp_dev_loss_tmo);
174a95cadb9SBart Van Assche 	else if (kp->arg == &srp_fast_io_fail_tmo)
175a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
176ed9b2264SBart Van Assche 	else
177a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
178a95cadb9SBart Van Assche 				    tmo);
179ed9b2264SBart Van Assche 	if (res)
180ed9b2264SBart Van Assche 		goto out;
181ed9b2264SBart Van Assche 	*(int *)kp->arg = tmo;
182ed9b2264SBart Van Assche 
183ed9b2264SBart Van Assche out:
184ed9b2264SBart Van Assche 	return res;
185ed9b2264SBart Van Assche }
186ed9b2264SBart Van Assche 
1879c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops = {
188ed9b2264SBart Van Assche 	.get = srp_tmo_get,
189ed9b2264SBart Van Assche 	.set = srp_tmo_set,
190ed9b2264SBart Van Assche };
191ed9b2264SBart Van Assche 
192aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
193aef9ec39SRoland Dreier {
194aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
195aef9ec39SRoland Dreier }
196aef9ec39SRoland Dreier 
197aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
198aef9ec39SRoland Dreier {
199aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
200aef9ec39SRoland Dreier }
201aef9ec39SRoland Dreier 
2025d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
2035d7cbfd6SRoland Dreier {
2045d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
2053d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
2065d7cbfd6SRoland Dreier 
2075d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2083d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2093d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2105d7cbfd6SRoland Dreier }
2115d7cbfd6SRoland Dreier 
212aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
213aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
214aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
215aef9ec39SRoland Dreier {
216aef9ec39SRoland Dreier 	struct srp_iu *iu;
217aef9ec39SRoland Dreier 
218aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
219aef9ec39SRoland Dreier 	if (!iu)
220aef9ec39SRoland Dreier 		goto out;
221aef9ec39SRoland Dreier 
222aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
223aef9ec39SRoland Dreier 	if (!iu->buf)
224aef9ec39SRoland Dreier 		goto out_free_iu;
225aef9ec39SRoland Dreier 
22605321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22705321937SGreg Kroah-Hartman 				    direction);
22805321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
229aef9ec39SRoland Dreier 		goto out_free_buf;
230aef9ec39SRoland Dreier 
231aef9ec39SRoland Dreier 	iu->size      = size;
232aef9ec39SRoland Dreier 	iu->direction = direction;
233aef9ec39SRoland Dreier 
234aef9ec39SRoland Dreier 	return iu;
235aef9ec39SRoland Dreier 
236aef9ec39SRoland Dreier out_free_buf:
237aef9ec39SRoland Dreier 	kfree(iu->buf);
238aef9ec39SRoland Dreier out_free_iu:
239aef9ec39SRoland Dreier 	kfree(iu);
240aef9ec39SRoland Dreier out:
241aef9ec39SRoland Dreier 	return NULL;
242aef9ec39SRoland Dreier }
243aef9ec39SRoland Dreier 
244aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
245aef9ec39SRoland Dreier {
246aef9ec39SRoland Dreier 	if (!iu)
247aef9ec39SRoland Dreier 		return;
248aef9ec39SRoland Dreier 
24905321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
25005321937SGreg Kroah-Hartman 			    iu->direction);
251aef9ec39SRoland Dreier 	kfree(iu->buf);
252aef9ec39SRoland Dreier 	kfree(iu);
253aef9ec39SRoland Dreier }
254aef9ec39SRoland Dreier 
255aef9ec39SRoland Dreier static void srp_qp_event(struct ib_event *event, void *context)
256aef9ec39SRoland Dreier {
25757363d98SSagi Grimberg 	pr_debug("QP event %s (%d)\n",
25857363d98SSagi Grimberg 		 ib_event_msg(event->event), event->event);
259aef9ec39SRoland Dreier }
260aef9ec39SRoland Dreier 
261aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target,
262aef9ec39SRoland Dreier 		       struct ib_qp *qp)
263aef9ec39SRoland Dreier {
264aef9ec39SRoland Dreier 	struct ib_qp_attr *attr;
265aef9ec39SRoland Dreier 	int ret;
266aef9ec39SRoland Dreier 
267aef9ec39SRoland Dreier 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
268aef9ec39SRoland Dreier 	if (!attr)
269aef9ec39SRoland Dreier 		return -ENOMEM;
270aef9ec39SRoland Dreier 
27156b5390cSBart Van Assche 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
272aef9ec39SRoland Dreier 				  target->srp_host->port,
273747fe000SBart Van Assche 				  be16_to_cpu(target->pkey),
274aef9ec39SRoland Dreier 				  &attr->pkey_index);
275aef9ec39SRoland Dreier 	if (ret)
276aef9ec39SRoland Dreier 		goto out;
277aef9ec39SRoland Dreier 
278aef9ec39SRoland Dreier 	attr->qp_state        = IB_QPS_INIT;
279aef9ec39SRoland Dreier 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
280aef9ec39SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
281aef9ec39SRoland Dreier 	attr->port_num        = target->srp_host->port;
282aef9ec39SRoland Dreier 
283aef9ec39SRoland Dreier 	ret = ib_modify_qp(qp, attr,
284aef9ec39SRoland Dreier 			   IB_QP_STATE		|
285aef9ec39SRoland Dreier 			   IB_QP_PKEY_INDEX	|
286aef9ec39SRoland Dreier 			   IB_QP_ACCESS_FLAGS	|
287aef9ec39SRoland Dreier 			   IB_QP_PORT);
288aef9ec39SRoland Dreier 
289aef9ec39SRoland Dreier out:
290aef9ec39SRoland Dreier 	kfree(attr);
291aef9ec39SRoland Dreier 	return ret;
292aef9ec39SRoland Dreier }
293aef9ec39SRoland Dreier 
294509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch)
2959fe4bcf4SDavid Dillow {
296509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2979fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
2989fe4bcf4SDavid Dillow 
29905321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
300509c07bcSBart Van Assche 				    srp_cm_handler, ch);
3019fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
3029fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
3039fe4bcf4SDavid Dillow 
304509c07bcSBart Van Assche 	if (ch->cm_id)
305509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
306509c07bcSBart Van Assche 	ch->cm_id = new_cm_id;
307509c07bcSBart Van Assche 	ch->path.sgid = target->sgid;
308509c07bcSBart Van Assche 	ch->path.dgid = target->orig_dgid;
309509c07bcSBart Van Assche 	ch->path.pkey = target->pkey;
310509c07bcSBart Van Assche 	ch->path.service_id = target->service_id;
3119fe4bcf4SDavid Dillow 
3129fe4bcf4SDavid Dillow 	return 0;
3139fe4bcf4SDavid Dillow }
3149fe4bcf4SDavid Dillow 
315d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
316d1b4289eSBart Van Assche {
317d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
318d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
319d1b4289eSBart Van Assche 
320d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
321d1b4289eSBart Van Assche 	fmr_param.pool_size	    = target->scsi_host->can_queue;
322d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
323d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
32452ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
32552ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
326d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
327d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
328d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
329d1b4289eSBart Van Assche 
330d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
331d1b4289eSBart Van Assche }
332d1b4289eSBart Van Assche 
3335cfb1782SBart Van Assche /**
3345cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3355cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3365cfb1782SBart Van Assche  */
3375cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3385cfb1782SBart Van Assche {
3395cfb1782SBart Van Assche 	int i;
3405cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3415cfb1782SBart Van Assche 
3425cfb1782SBart Van Assche 	if (!pool)
3435cfb1782SBart Van Assche 		return;
3445cfb1782SBart Van Assche 
3455cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3465cfb1782SBart Van Assche 		if (d->frpl)
3475cfb1782SBart Van Assche 			ib_free_fast_reg_page_list(d->frpl);
3485cfb1782SBart Van Assche 		if (d->mr)
3495cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3505cfb1782SBart Van Assche 	}
3515cfb1782SBart Van Assche 	kfree(pool);
3525cfb1782SBart Van Assche }
3535cfb1782SBart Van Assche 
3545cfb1782SBart Van Assche /**
3555cfb1782SBart Van Assche  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
3565cfb1782SBart Van Assche  * @device:            IB device to allocate fast registration descriptors for.
3575cfb1782SBart Van Assche  * @pd:                Protection domain associated with the FR descriptors.
3585cfb1782SBart Van Assche  * @pool_size:         Number of descriptors to allocate.
3595cfb1782SBart Van Assche  * @max_page_list_len: Maximum fast registration work request page list length.
3605cfb1782SBart Van Assche  */
3615cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
3625cfb1782SBart Van Assche 					      struct ib_pd *pd, int pool_size,
3635cfb1782SBart Van Assche 					      int max_page_list_len)
3645cfb1782SBart Van Assche {
3655cfb1782SBart Van Assche 	struct srp_fr_pool *pool;
3665cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3675cfb1782SBart Van Assche 	struct ib_mr *mr;
3685cfb1782SBart Van Assche 	struct ib_fast_reg_page_list *frpl;
3695cfb1782SBart Van Assche 	int i, ret = -EINVAL;
3705cfb1782SBart Van Assche 
3715cfb1782SBart Van Assche 	if (pool_size <= 0)
3725cfb1782SBart Van Assche 		goto err;
3735cfb1782SBart Van Assche 	ret = -ENOMEM;
3745cfb1782SBart Van Assche 	pool = kzalloc(sizeof(struct srp_fr_pool) +
3755cfb1782SBart Van Assche 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
3765cfb1782SBart Van Assche 	if (!pool)
3775cfb1782SBart Van Assche 		goto err;
3785cfb1782SBart Van Assche 	pool->size = pool_size;
3795cfb1782SBart Van Assche 	pool->max_page_list_len = max_page_list_len;
3805cfb1782SBart Van Assche 	spin_lock_init(&pool->lock);
3815cfb1782SBart Van Assche 	INIT_LIST_HEAD(&pool->free_list);
3825cfb1782SBart Van Assche 
3835cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3845cfb1782SBart Van Assche 		mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
3855cfb1782SBart Van Assche 		if (IS_ERR(mr)) {
3865cfb1782SBart Van Assche 			ret = PTR_ERR(mr);
3875cfb1782SBart Van Assche 			goto destroy_pool;
3885cfb1782SBart Van Assche 		}
3895cfb1782SBart Van Assche 		d->mr = mr;
3905cfb1782SBart Van Assche 		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
3915cfb1782SBart Van Assche 		if (IS_ERR(frpl)) {
3925cfb1782SBart Van Assche 			ret = PTR_ERR(frpl);
3935cfb1782SBart Van Assche 			goto destroy_pool;
3945cfb1782SBart Van Assche 		}
3955cfb1782SBart Van Assche 		d->frpl = frpl;
3965cfb1782SBart Van Assche 		list_add_tail(&d->entry, &pool->free_list);
3975cfb1782SBart Van Assche 	}
3985cfb1782SBart Van Assche 
3995cfb1782SBart Van Assche out:
4005cfb1782SBart Van Assche 	return pool;
4015cfb1782SBart Van Assche 
4025cfb1782SBart Van Assche destroy_pool:
4035cfb1782SBart Van Assche 	srp_destroy_fr_pool(pool);
4045cfb1782SBart Van Assche 
4055cfb1782SBart Van Assche err:
4065cfb1782SBart Van Assche 	pool = ERR_PTR(ret);
4075cfb1782SBart Van Assche 	goto out;
4085cfb1782SBart Van Assche }
4095cfb1782SBart Van Assche 
4105cfb1782SBart Van Assche /**
4115cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
4125cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4135cfb1782SBart Van Assche  */
4145cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4155cfb1782SBart Van Assche {
4165cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4175cfb1782SBart Van Assche 	unsigned long flags;
4185cfb1782SBart Van Assche 
4195cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4205cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4215cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4225cfb1782SBart Van Assche 		list_del(&d->entry);
4235cfb1782SBart Van Assche 	}
4245cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4255cfb1782SBart Van Assche 
4265cfb1782SBart Van Assche 	return d;
4275cfb1782SBart Van Assche }
4285cfb1782SBart Van Assche 
4295cfb1782SBart Van Assche /**
4305cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4315cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4325cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4335cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4345cfb1782SBart Van Assche  *
4355cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4365cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4375cfb1782SBart Van Assche  */
4385cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4395cfb1782SBart Van Assche 			    int n)
4405cfb1782SBart Van Assche {
4415cfb1782SBart Van Assche 	unsigned long flags;
4425cfb1782SBart Van Assche 	int i;
4435cfb1782SBart Van Assche 
4445cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4455cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4465cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4475cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4485cfb1782SBart Van Assche }
4495cfb1782SBart Van Assche 
4505cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
4515cfb1782SBart Van Assche {
4525cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
4535cfb1782SBart Van Assche 
4545cfb1782SBart Van Assche 	return srp_create_fr_pool(dev->dev, dev->pd,
4555cfb1782SBart Van Assche 				  target->scsi_host->can_queue,
4565cfb1782SBart Van Assche 				  dev->max_pages_per_mr);
4575cfb1782SBart Van Assche }
4585cfb1782SBart Van Assche 
4597dad6b2eSBart Van Assche /**
4607dad6b2eSBart Van Assche  * srp_destroy_qp() - destroy an RDMA queue pair
4617dad6b2eSBart Van Assche  * @ch: SRP RDMA channel.
4627dad6b2eSBart Van Assche  *
4637dad6b2eSBart Van Assche  * Change a queue pair into the error state and wait until all receive
4647dad6b2eSBart Van Assche  * completions have been processed before destroying it. This avoids that
4657dad6b2eSBart Van Assche  * the receive completion handler can access the queue pair while it is
4667dad6b2eSBart Van Assche  * being destroyed.
4677dad6b2eSBart Van Assche  */
4687dad6b2eSBart Van Assche static void srp_destroy_qp(struct srp_rdma_ch *ch)
4697dad6b2eSBart Van Assche {
4707dad6b2eSBart Van Assche 	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
4717dad6b2eSBart Van Assche 	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
4727dad6b2eSBart Van Assche 	struct ib_recv_wr *bad_wr;
4737dad6b2eSBart Van Assche 	int ret;
4747dad6b2eSBart Van Assche 
4757dad6b2eSBart Van Assche 	/* Destroying a QP and reusing ch->done is only safe if not connected */
476c014c8cdSBart Van Assche 	WARN_ON_ONCE(ch->connected);
4777dad6b2eSBart Van Assche 
4787dad6b2eSBart Van Assche 	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
4797dad6b2eSBart Van Assche 	WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
4807dad6b2eSBart Van Assche 	if (ret)
4817dad6b2eSBart Van Assche 		goto out;
4827dad6b2eSBart Van Assche 
4837dad6b2eSBart Van Assche 	init_completion(&ch->done);
4847dad6b2eSBart Van Assche 	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
4857dad6b2eSBart Van Assche 	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
4867dad6b2eSBart Van Assche 	if (ret == 0)
4877dad6b2eSBart Van Assche 		wait_for_completion(&ch->done);
4887dad6b2eSBart Van Assche 
4897dad6b2eSBart Van Assche out:
4907dad6b2eSBart Van Assche 	ib_destroy_qp(ch->qp);
4917dad6b2eSBart Van Assche }
4927dad6b2eSBart Van Assche 
493509c07bcSBart Van Assche static int srp_create_ch_ib(struct srp_rdma_ch *ch)
494aef9ec39SRoland Dreier {
495509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
49662154b2eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
497aef9ec39SRoland Dreier 	struct ib_qp_init_attr *init_attr;
49873aa89edSIshai Rabinovitz 	struct ib_cq *recv_cq, *send_cq;
49973aa89edSIshai Rabinovitz 	struct ib_qp *qp;
500d1b4289eSBart Van Assche 	struct ib_fmr_pool *fmr_pool = NULL;
5015cfb1782SBart Van Assche 	struct srp_fr_pool *fr_pool = NULL;
5025cfb1782SBart Van Assche 	const int m = 1 + dev->use_fast_reg;
5038e37210bSMatan Barak 	struct ib_cq_init_attr cq_attr = {};
504aef9ec39SRoland Dreier 	int ret;
505aef9ec39SRoland Dreier 
506aef9ec39SRoland Dreier 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
507aef9ec39SRoland Dreier 	if (!init_attr)
508aef9ec39SRoland Dreier 		return -ENOMEM;
509aef9ec39SRoland Dreier 
5107dad6b2eSBart Van Assche 	/* + 1 for SRP_LAST_WR_ID */
5118e37210bSMatan Barak 	cq_attr.cqe = target->queue_size + 1;
5128e37210bSMatan Barak 	cq_attr.comp_vector = ch->comp_vector;
513509c07bcSBart Van Assche 	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
5148e37210bSMatan Barak 			       &cq_attr);
51573aa89edSIshai Rabinovitz 	if (IS_ERR(recv_cq)) {
51673aa89edSIshai Rabinovitz 		ret = PTR_ERR(recv_cq);
517da9d2f07SRoland Dreier 		goto err;
518aef9ec39SRoland Dreier 	}
519aef9ec39SRoland Dreier 
5208e37210bSMatan Barak 	cq_attr.cqe = m * target->queue_size;
5218e37210bSMatan Barak 	cq_attr.comp_vector = ch->comp_vector;
522509c07bcSBart Van Assche 	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
5238e37210bSMatan Barak 			       &cq_attr);
52473aa89edSIshai Rabinovitz 	if (IS_ERR(send_cq)) {
52573aa89edSIshai Rabinovitz 		ret = PTR_ERR(send_cq);
526da9d2f07SRoland Dreier 		goto err_recv_cq;
5279c03dc9fSBart Van Assche 	}
5289c03dc9fSBart Van Assche 
52973aa89edSIshai Rabinovitz 	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
530aef9ec39SRoland Dreier 
531aef9ec39SRoland Dreier 	init_attr->event_handler       = srp_qp_event;
5325cfb1782SBart Van Assche 	init_attr->cap.max_send_wr     = m * target->queue_size;
5337dad6b2eSBart Van Assche 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
534aef9ec39SRoland Dreier 	init_attr->cap.max_recv_sge    = 1;
535aef9ec39SRoland Dreier 	init_attr->cap.max_send_sge    = 1;
5365cfb1782SBart Van Assche 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
537aef9ec39SRoland Dreier 	init_attr->qp_type             = IB_QPT_RC;
53873aa89edSIshai Rabinovitz 	init_attr->send_cq             = send_cq;
53973aa89edSIshai Rabinovitz 	init_attr->recv_cq             = recv_cq;
540aef9ec39SRoland Dreier 
54162154b2eSBart Van Assche 	qp = ib_create_qp(dev->pd, init_attr);
54273aa89edSIshai Rabinovitz 	if (IS_ERR(qp)) {
54373aa89edSIshai Rabinovitz 		ret = PTR_ERR(qp);
544da9d2f07SRoland Dreier 		goto err_send_cq;
545aef9ec39SRoland Dreier 	}
546aef9ec39SRoland Dreier 
54773aa89edSIshai Rabinovitz 	ret = srp_init_qp(target, qp);
548da9d2f07SRoland Dreier 	if (ret)
549da9d2f07SRoland Dreier 		goto err_qp;
550aef9ec39SRoland Dreier 
5515cfb1782SBart Van Assche 	if (dev->use_fast_reg && dev->has_fr) {
5525cfb1782SBart Van Assche 		fr_pool = srp_alloc_fr_pool(target);
5535cfb1782SBart Van Assche 		if (IS_ERR(fr_pool)) {
5545cfb1782SBart Van Assche 			ret = PTR_ERR(fr_pool);
5555cfb1782SBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
5565cfb1782SBart Van Assche 				     "FR pool allocation failed (%d)\n", ret);
5575cfb1782SBart Van Assche 			goto err_qp;
5585cfb1782SBart Van Assche 		}
559509c07bcSBart Van Assche 		if (ch->fr_pool)
560509c07bcSBart Van Assche 			srp_destroy_fr_pool(ch->fr_pool);
561509c07bcSBart Van Assche 		ch->fr_pool = fr_pool;
5625cfb1782SBart Van Assche 	} else if (!dev->use_fast_reg && dev->has_fmr) {
563d1b4289eSBart Van Assche 		fmr_pool = srp_alloc_fmr_pool(target);
564d1b4289eSBart Van Assche 		if (IS_ERR(fmr_pool)) {
565d1b4289eSBart Van Assche 			ret = PTR_ERR(fmr_pool);
566d1b4289eSBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
567d1b4289eSBart Van Assche 				     "FMR pool allocation failed (%d)\n", ret);
568d1b4289eSBart Van Assche 			goto err_qp;
569d1b4289eSBart Van Assche 		}
570509c07bcSBart Van Assche 		if (ch->fmr_pool)
571509c07bcSBart Van Assche 			ib_destroy_fmr_pool(ch->fmr_pool);
572509c07bcSBart Van Assche 		ch->fmr_pool = fmr_pool;
573d1b4289eSBart Van Assche 	}
574d1b4289eSBart Van Assche 
575509c07bcSBart Van Assche 	if (ch->qp)
5767dad6b2eSBart Van Assche 		srp_destroy_qp(ch);
577509c07bcSBart Van Assche 	if (ch->recv_cq)
578509c07bcSBart Van Assche 		ib_destroy_cq(ch->recv_cq);
579509c07bcSBart Van Assche 	if (ch->send_cq)
580509c07bcSBart Van Assche 		ib_destroy_cq(ch->send_cq);
58173aa89edSIshai Rabinovitz 
582509c07bcSBart Van Assche 	ch->qp = qp;
583509c07bcSBart Van Assche 	ch->recv_cq = recv_cq;
584509c07bcSBart Van Assche 	ch->send_cq = send_cq;
58573aa89edSIshai Rabinovitz 
586da9d2f07SRoland Dreier 	kfree(init_attr);
587da9d2f07SRoland Dreier 	return 0;
588da9d2f07SRoland Dreier 
589da9d2f07SRoland Dreier err_qp:
59073aa89edSIshai Rabinovitz 	ib_destroy_qp(qp);
591da9d2f07SRoland Dreier 
592da9d2f07SRoland Dreier err_send_cq:
59373aa89edSIshai Rabinovitz 	ib_destroy_cq(send_cq);
594da9d2f07SRoland Dreier 
595da9d2f07SRoland Dreier err_recv_cq:
59673aa89edSIshai Rabinovitz 	ib_destroy_cq(recv_cq);
597da9d2f07SRoland Dreier 
598da9d2f07SRoland Dreier err:
599aef9ec39SRoland Dreier 	kfree(init_attr);
600aef9ec39SRoland Dreier 	return ret;
601aef9ec39SRoland Dreier }
602aef9ec39SRoland Dreier 
6034d73f95fSBart Van Assche /*
6044d73f95fSBart Van Assche  * Note: this function may be called without srp_alloc_iu_bufs() having been
605509c07bcSBart Van Assche  * invoked. Hence the ch->[rt]x_ring checks.
6064d73f95fSBart Van Assche  */
607509c07bcSBart Van Assche static void srp_free_ch_ib(struct srp_target_port *target,
608509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch)
609aef9ec39SRoland Dreier {
6105cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
611aef9ec39SRoland Dreier 	int i;
612aef9ec39SRoland Dreier 
613d92c0da7SBart Van Assche 	if (!ch->target)
614d92c0da7SBart Van Assche 		return;
615d92c0da7SBart Van Assche 
616509c07bcSBart Van Assche 	if (ch->cm_id) {
617509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
618509c07bcSBart Van Assche 		ch->cm_id = NULL;
619394c595eSBart Van Assche 	}
620394c595eSBart Van Assche 
621d92c0da7SBart Van Assche 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
622d92c0da7SBart Van Assche 	if (!ch->qp)
623d92c0da7SBart Van Assche 		return;
624d92c0da7SBart Van Assche 
6255cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
626509c07bcSBart Van Assche 		if (ch->fr_pool)
627509c07bcSBart Van Assche 			srp_destroy_fr_pool(ch->fr_pool);
6285cfb1782SBart Van Assche 	} else {
629509c07bcSBart Van Assche 		if (ch->fmr_pool)
630509c07bcSBart Van Assche 			ib_destroy_fmr_pool(ch->fmr_pool);
6315cfb1782SBart Van Assche 	}
6327dad6b2eSBart Van Assche 	srp_destroy_qp(ch);
633509c07bcSBart Van Assche 	ib_destroy_cq(ch->send_cq);
634509c07bcSBart Van Assche 	ib_destroy_cq(ch->recv_cq);
635aef9ec39SRoland Dreier 
636d92c0da7SBart Van Assche 	/*
637d92c0da7SBart Van Assche 	 * Avoid that the SCSI error handler tries to use this channel after
638d92c0da7SBart Van Assche 	 * it has been freed. The SCSI error handler can namely continue
639d92c0da7SBart Van Assche 	 * trying to perform recovery actions after scsi_remove_host()
640d92c0da7SBart Van Assche 	 * returned.
641d92c0da7SBart Van Assche 	 */
642d92c0da7SBart Van Assche 	ch->target = NULL;
643d92c0da7SBart Van Assche 
644509c07bcSBart Van Assche 	ch->qp = NULL;
645509c07bcSBart Van Assche 	ch->send_cq = ch->recv_cq = NULL;
64673aa89edSIshai Rabinovitz 
647509c07bcSBart Van Assche 	if (ch->rx_ring) {
6484d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
649509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
650509c07bcSBart Van Assche 		kfree(ch->rx_ring);
651509c07bcSBart Van Assche 		ch->rx_ring = NULL;
6524d73f95fSBart Van Assche 	}
653509c07bcSBart Van Assche 	if (ch->tx_ring) {
6544d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
655509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
656509c07bcSBart Van Assche 		kfree(ch->tx_ring);
657509c07bcSBart Van Assche 		ch->tx_ring = NULL;
6584d73f95fSBart Van Assche 	}
659aef9ec39SRoland Dreier }
660aef9ec39SRoland Dreier 
661aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
662aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
663509c07bcSBart Van Assche 				    void *ch_ptr)
664aef9ec39SRoland Dreier {
665509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
666509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
667aef9ec39SRoland Dreier 
668509c07bcSBart Van Assche 	ch->status = status;
669aef9ec39SRoland Dreier 	if (status)
6707aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
6717aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
672aef9ec39SRoland Dreier 	else
673509c07bcSBart Van Assche 		ch->path = *pathrec;
674509c07bcSBart Van Assche 	complete(&ch->done);
675aef9ec39SRoland Dreier }
676aef9ec39SRoland Dreier 
677509c07bcSBart Van Assche static int srp_lookup_path(struct srp_rdma_ch *ch)
678aef9ec39SRoland Dreier {
679509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
680a702adceSBart Van Assche 	int ret;
681a702adceSBart Van Assche 
682509c07bcSBart Van Assche 	ch->path.numb_path = 1;
683aef9ec39SRoland Dreier 
684509c07bcSBart Van Assche 	init_completion(&ch->done);
685aef9ec39SRoland Dreier 
686509c07bcSBart Van Assche 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
68705321937SGreg Kroah-Hartman 					       target->srp_host->srp_dev->dev,
688aef9ec39SRoland Dreier 					       target->srp_host->port,
689509c07bcSBart Van Assche 					       &ch->path,
690247e020eSSean Hefty 					       IB_SA_PATH_REC_SERVICE_ID |
691aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_DGID	 |
692aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_SGID	 |
693aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_NUMB_PATH	 |
694aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_PKEY,
695aef9ec39SRoland Dreier 					       SRP_PATH_REC_TIMEOUT_MS,
696aef9ec39SRoland Dreier 					       GFP_KERNEL,
697aef9ec39SRoland Dreier 					       srp_path_rec_completion,
698509c07bcSBart Van Assche 					       ch, &ch->path_query);
699509c07bcSBart Van Assche 	if (ch->path_query_id < 0)
700509c07bcSBart Van Assche 		return ch->path_query_id;
701aef9ec39SRoland Dreier 
702509c07bcSBart Van Assche 	ret = wait_for_completion_interruptible(&ch->done);
703a702adceSBart Van Assche 	if (ret < 0)
704a702adceSBart Van Assche 		return ret;
705aef9ec39SRoland Dreier 
706509c07bcSBart Van Assche 	if (ch->status < 0)
7077aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
7087aa54bd7SDavid Dillow 			     PFX "Path record query failed\n");
709aef9ec39SRoland Dreier 
710509c07bcSBart Van Assche 	return ch->status;
711aef9ec39SRoland Dreier }
712aef9ec39SRoland Dreier 
713d92c0da7SBart Van Assche static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
714aef9ec39SRoland Dreier {
715509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
716aef9ec39SRoland Dreier 	struct {
717aef9ec39SRoland Dreier 		struct ib_cm_req_param param;
718aef9ec39SRoland Dreier 		struct srp_login_req   priv;
719aef9ec39SRoland Dreier 	} *req = NULL;
720aef9ec39SRoland Dreier 	int status;
721aef9ec39SRoland Dreier 
722aef9ec39SRoland Dreier 	req = kzalloc(sizeof *req, GFP_KERNEL);
723aef9ec39SRoland Dreier 	if (!req)
724aef9ec39SRoland Dreier 		return -ENOMEM;
725aef9ec39SRoland Dreier 
726509c07bcSBart Van Assche 	req->param.primary_path		      = &ch->path;
727aef9ec39SRoland Dreier 	req->param.alternate_path 	      = NULL;
728aef9ec39SRoland Dreier 	req->param.service_id 		      = target->service_id;
729509c07bcSBart Van Assche 	req->param.qp_num		      = ch->qp->qp_num;
730509c07bcSBart Van Assche 	req->param.qp_type		      = ch->qp->qp_type;
731aef9ec39SRoland Dreier 	req->param.private_data 	      = &req->priv;
732aef9ec39SRoland Dreier 	req->param.private_data_len 	      = sizeof req->priv;
733aef9ec39SRoland Dreier 	req->param.flow_control 	      = 1;
734aef9ec39SRoland Dreier 
735aef9ec39SRoland Dreier 	get_random_bytes(&req->param.starting_psn, 4);
736aef9ec39SRoland Dreier 	req->param.starting_psn 	     &= 0xffffff;
737aef9ec39SRoland Dreier 
738aef9ec39SRoland Dreier 	/*
739aef9ec39SRoland Dreier 	 * Pick some arbitrary defaults here; we could make these
740aef9ec39SRoland Dreier 	 * module parameters if anyone cared about setting them.
741aef9ec39SRoland Dreier 	 */
742aef9ec39SRoland Dreier 	req->param.responder_resources	      = 4;
743aef9ec39SRoland Dreier 	req->param.remote_cm_response_timeout = 20;
744aef9ec39SRoland Dreier 	req->param.local_cm_response_timeout  = 20;
7457bb312e4SVu Pham 	req->param.retry_count                = target->tl_retry_count;
746aef9ec39SRoland Dreier 	req->param.rnr_retry_count 	      = 7;
747aef9ec39SRoland Dreier 	req->param.max_cm_retries 	      = 15;
748aef9ec39SRoland Dreier 
749aef9ec39SRoland Dreier 	req->priv.opcode     	= SRP_LOGIN_REQ;
750aef9ec39SRoland Dreier 	req->priv.tag        	= 0;
75149248644SDavid Dillow 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
752aef9ec39SRoland Dreier 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
753aef9ec39SRoland Dreier 					      SRP_BUF_FORMAT_INDIRECT);
754d92c0da7SBart Van Assche 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
755d92c0da7SBart Van Assche 				   SRP_MULTICHAN_SINGLE);
7560c0450dbSRamachandra K 	/*
7570c0450dbSRamachandra K 	 * In the published SRP specification (draft rev. 16a), the
7580c0450dbSRamachandra K 	 * port identifier format is 8 bytes of ID extension followed
7590c0450dbSRamachandra K 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
7600c0450dbSRamachandra K 	 * opposite order, so that the GUID comes first.
7610c0450dbSRamachandra K 	 *
7620c0450dbSRamachandra K 	 * Targets conforming to these obsolete drafts can be
7630c0450dbSRamachandra K 	 * recognized by the I/O Class they report.
7640c0450dbSRamachandra K 	 */
7650c0450dbSRamachandra K 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
7660c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
767747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7680c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id + 8,
76901cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
7700c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
7710c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
7720c0450dbSRamachandra K 	} else {
7730c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
77401cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
77501cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
776747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7770c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
7780c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
7790c0450dbSRamachandra K 	}
7800c0450dbSRamachandra K 
781aef9ec39SRoland Dreier 	/*
782aef9ec39SRoland Dreier 	 * Topspin/Cisco SRP targets will reject our login unless we
78301cb9bcbSIshai Rabinovitz 	 * zero out the first 8 bytes of our initiator port ID and set
78401cb9bcbSIshai Rabinovitz 	 * the second 8 bytes to the local node GUID.
785aef9ec39SRoland Dreier 	 */
7865d7cbfd6SRoland Dreier 	if (srp_target_is_topspin(target)) {
7877aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
7887aa54bd7SDavid Dillow 			     PFX "Topspin/Cisco initiator port ID workaround "
789aef9ec39SRoland Dreier 			     "activated for target GUID %016llx\n",
79045c37cadSBart Van Assche 			     be64_to_cpu(target->ioc_guid));
791aef9ec39SRoland Dreier 		memset(req->priv.initiator_port_id, 0, 8);
79201cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
79305321937SGreg Kroah-Hartman 		       &target->srp_host->srp_dev->dev->node_guid, 8);
794aef9ec39SRoland Dreier 	}
795aef9ec39SRoland Dreier 
796509c07bcSBart Van Assche 	status = ib_send_cm_req(ch->cm_id, &req->param);
797aef9ec39SRoland Dreier 
798aef9ec39SRoland Dreier 	kfree(req);
799aef9ec39SRoland Dreier 
800aef9ec39SRoland Dreier 	return status;
801aef9ec39SRoland Dreier }
802aef9ec39SRoland Dreier 
803ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
804ef6c49d8SBart Van Assche {
805ef6c49d8SBart Van Assche 	bool changed = false;
806ef6c49d8SBart Van Assche 
807ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
808ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
809ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
810ef6c49d8SBart Van Assche 		changed = true;
811ef6c49d8SBart Van Assche 	}
812ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
813ef6c49d8SBart Van Assche 
814ef6c49d8SBart Van Assche 	if (changed)
815bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
816ef6c49d8SBart Van Assche 
817ef6c49d8SBart Van Assche 	return changed;
818ef6c49d8SBart Van Assche }
819ef6c49d8SBart Van Assche 
820aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
821aef9ec39SRoland Dreier {
822d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
823d92c0da7SBart Van Assche 	int i;
824509c07bcSBart Van Assche 
825aef9ec39SRoland Dreier 	/* XXX should send SRP_I_LOGOUT request */
826aef9ec39SRoland Dreier 
827d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
828d92c0da7SBart Van Assche 		ch = &target->ch[i];
829c014c8cdSBart Van Assche 		ch->connected = false;
830d92c0da7SBart Van Assche 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
8317aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
8327aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
833aef9ec39SRoland Dreier 		}
834294c875aSBart Van Assche 	}
835294c875aSBart Van Assche }
836aef9ec39SRoland Dreier 
837509c07bcSBart Van Assche static void srp_free_req_data(struct srp_target_port *target,
838509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
8398f26c9ffSDavid Dillow {
8405cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
8415cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
8428f26c9ffSDavid Dillow 	struct srp_request *req;
8438f26c9ffSDavid Dillow 	int i;
8448f26c9ffSDavid Dillow 
84547513cf4SBart Van Assche 	if (!ch->req_ring)
8464d73f95fSBart Van Assche 		return;
8474d73f95fSBart Van Assche 
8484d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
849509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8505cfb1782SBart Van Assche 		if (dev->use_fast_reg)
8515cfb1782SBart Van Assche 			kfree(req->fr_list);
8525cfb1782SBart Van Assche 		else
8538f26c9ffSDavid Dillow 			kfree(req->fmr_list);
8548f26c9ffSDavid Dillow 		kfree(req->map_page);
855c07d424dSDavid Dillow 		if (req->indirect_dma_addr) {
856c07d424dSDavid Dillow 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
857c07d424dSDavid Dillow 					    target->indirect_size,
858c07d424dSDavid Dillow 					    DMA_TO_DEVICE);
859c07d424dSDavid Dillow 		}
860c07d424dSDavid Dillow 		kfree(req->indirect_desc);
8618f26c9ffSDavid Dillow 	}
8624d73f95fSBart Van Assche 
863509c07bcSBart Van Assche 	kfree(ch->req_ring);
864509c07bcSBart Van Assche 	ch->req_ring = NULL;
8658f26c9ffSDavid Dillow }
8668f26c9ffSDavid Dillow 
867509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch)
868b81d00bdSBart Van Assche {
869509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
870b81d00bdSBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
871b81d00bdSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
872b81d00bdSBart Van Assche 	struct srp_request *req;
8735cfb1782SBart Van Assche 	void *mr_list;
874b81d00bdSBart Van Assche 	dma_addr_t dma_addr;
875b81d00bdSBart Van Assche 	int i, ret = -ENOMEM;
876b81d00bdSBart Van Assche 
877509c07bcSBart Van Assche 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
878509c07bcSBart Van Assche 			       GFP_KERNEL);
879509c07bcSBart Van Assche 	if (!ch->req_ring)
8804d73f95fSBart Van Assche 		goto out;
8814d73f95fSBart Van Assche 
8824d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
883509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8845cfb1782SBart Van Assche 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
885b81d00bdSBart Van Assche 				  GFP_KERNEL);
8865cfb1782SBart Van Assche 		if (!mr_list)
8875cfb1782SBart Van Assche 			goto out;
8885cfb1782SBart Van Assche 		if (srp_dev->use_fast_reg)
8895cfb1782SBart Van Assche 			req->fr_list = mr_list;
8905cfb1782SBart Van Assche 		else
8915cfb1782SBart Van Assche 			req->fmr_list = mr_list;
89252ede08fSBart Van Assche 		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
893d1b4289eSBart Van Assche 					sizeof(void *), GFP_KERNEL);
8945cfb1782SBart Van Assche 		if (!req->map_page)
8955cfb1782SBart Van Assche 			goto out;
896b81d00bdSBart Van Assche 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
8975cfb1782SBart Van Assche 		if (!req->indirect_desc)
898b81d00bdSBart Van Assche 			goto out;
899b81d00bdSBart Van Assche 
900b81d00bdSBart Van Assche 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
901b81d00bdSBart Van Assche 					     target->indirect_size,
902b81d00bdSBart Van Assche 					     DMA_TO_DEVICE);
903b81d00bdSBart Van Assche 		if (ib_dma_mapping_error(ibdev, dma_addr))
904b81d00bdSBart Van Assche 			goto out;
905b81d00bdSBart Van Assche 
906b81d00bdSBart Van Assche 		req->indirect_dma_addr = dma_addr;
907b81d00bdSBart Van Assche 	}
908b81d00bdSBart Van Assche 	ret = 0;
909b81d00bdSBart Van Assche 
910b81d00bdSBart Van Assche out:
911b81d00bdSBart Van Assche 	return ret;
912b81d00bdSBart Van Assche }
913b81d00bdSBart Van Assche 
914683b159aSBart Van Assche /**
915683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
916683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
917683b159aSBart Van Assche  *
918683b159aSBart Van Assche  * Note: Any attributes defined in the host template and that did not exist
919683b159aSBart Van Assche  * before invocation of this function will be ignored.
920683b159aSBart Van Assche  */
921683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
922683b159aSBart Van Assche {
923683b159aSBart Van Assche 	struct device_attribute **attr;
924683b159aSBart Van Assche 
925683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
926683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
927683b159aSBart Van Assche }
928683b159aSBart Van Assche 
929ee12d6a8SBart Van Assche static void srp_remove_target(struct srp_target_port *target)
930ee12d6a8SBart Van Assche {
931d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
932d92c0da7SBart Van Assche 	int i;
933509c07bcSBart Van Assche 
934ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
935ef6c49d8SBart Van Assche 
936ee12d6a8SBart Van Assche 	srp_del_scsi_host_attr(target->scsi_host);
9379dd69a60SBart Van Assche 	srp_rport_get(target->rport);
938ee12d6a8SBart Van Assche 	srp_remove_host(target->scsi_host);
939ee12d6a8SBart Van Assche 	scsi_remove_host(target->scsi_host);
94093079162SBart Van Assche 	srp_stop_rport_timers(target->rport);
941ef6c49d8SBart Van Assche 	srp_disconnect_target(target);
942d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
943d92c0da7SBart Van Assche 		ch = &target->ch[i];
944509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
945d92c0da7SBart Van Assche 	}
946c1120f89SBart Van Assche 	cancel_work_sync(&target->tl_err_work);
9479dd69a60SBart Van Assche 	srp_rport_put(target->rport);
948d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
949d92c0da7SBart Van Assche 		ch = &target->ch[i];
950509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
951d92c0da7SBart Van Assche 	}
952d92c0da7SBart Van Assche 	kfree(target->ch);
953d92c0da7SBart Van Assche 	target->ch = NULL;
95465d7dd2fSVu Pham 
95565d7dd2fSVu Pham 	spin_lock(&target->srp_host->target_lock);
95665d7dd2fSVu Pham 	list_del(&target->list);
95765d7dd2fSVu Pham 	spin_unlock(&target->srp_host->target_lock);
95865d7dd2fSVu Pham 
959ee12d6a8SBart Van Assche 	scsi_host_put(target->scsi_host);
960ee12d6a8SBart Van Assche }
961ee12d6a8SBart Van Assche 
962c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
963aef9ec39SRoland Dreier {
964c4028958SDavid Howells 	struct srp_target_port *target =
965ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
966aef9ec39SRoland Dreier 
967ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
968aef9ec39SRoland Dreier 
96996fc248aSBart Van Assche 	srp_remove_target(target);
970aef9ec39SRoland Dreier }
971aef9ec39SRoland Dreier 
972dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport)
973dc1bdbd9SBart Van Assche {
974dc1bdbd9SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
975dc1bdbd9SBart Van Assche 
976dc1bdbd9SBart Van Assche 	srp_queue_remove_work(target);
977dc1bdbd9SBart Van Assche }
978dc1bdbd9SBart Van Assche 
979c014c8cdSBart Van Assche /**
980c014c8cdSBart Van Assche  * srp_connected_ch() - number of connected channels
981c014c8cdSBart Van Assche  * @target: SRP target port.
982c014c8cdSBart Van Assche  */
983c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target)
984c014c8cdSBart Van Assche {
985c014c8cdSBart Van Assche 	int i, c = 0;
986c014c8cdSBart Van Assche 
987c014c8cdSBart Van Assche 	for (i = 0; i < target->ch_count; i++)
988c014c8cdSBart Van Assche 		c += target->ch[i].connected;
989c014c8cdSBart Van Assche 
990c014c8cdSBart Van Assche 	return c;
991c014c8cdSBart Van Assche }
992c014c8cdSBart Van Assche 
993d92c0da7SBart Van Assche static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
994aef9ec39SRoland Dreier {
995509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
996aef9ec39SRoland Dreier 	int ret;
997aef9ec39SRoland Dreier 
998c014c8cdSBart Van Assche 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
999294c875aSBart Van Assche 
1000509c07bcSBart Van Assche 	ret = srp_lookup_path(ch);
1001aef9ec39SRoland Dreier 	if (ret)
1002aef9ec39SRoland Dreier 		return ret;
1003aef9ec39SRoland Dreier 
1004aef9ec39SRoland Dreier 	while (1) {
1005509c07bcSBart Van Assche 		init_completion(&ch->done);
1006d92c0da7SBart Van Assche 		ret = srp_send_req(ch, multich);
1007aef9ec39SRoland Dreier 		if (ret)
1008aef9ec39SRoland Dreier 			return ret;
1009509c07bcSBart Van Assche 		ret = wait_for_completion_interruptible(&ch->done);
1010a702adceSBart Van Assche 		if (ret < 0)
1011a702adceSBart Van Assche 			return ret;
1012aef9ec39SRoland Dreier 
1013aef9ec39SRoland Dreier 		/*
1014aef9ec39SRoland Dreier 		 * The CM event handling code will set status to
1015aef9ec39SRoland Dreier 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1016aef9ec39SRoland Dreier 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1017aef9ec39SRoland Dreier 		 * redirect REJ back.
1018aef9ec39SRoland Dreier 		 */
1019509c07bcSBart Van Assche 		switch (ch->status) {
1020aef9ec39SRoland Dreier 		case 0:
1021c014c8cdSBart Van Assche 			ch->connected = true;
1022aef9ec39SRoland Dreier 			return 0;
1023aef9ec39SRoland Dreier 
1024aef9ec39SRoland Dreier 		case SRP_PORT_REDIRECT:
1025509c07bcSBart Van Assche 			ret = srp_lookup_path(ch);
1026aef9ec39SRoland Dreier 			if (ret)
1027aef9ec39SRoland Dreier 				return ret;
1028aef9ec39SRoland Dreier 			break;
1029aef9ec39SRoland Dreier 
1030aef9ec39SRoland Dreier 		case SRP_DLID_REDIRECT:
1031aef9ec39SRoland Dreier 			break;
1032aef9ec39SRoland Dreier 
10339fe4bcf4SDavid Dillow 		case SRP_STALE_CONN:
10349fe4bcf4SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host, PFX
10359fe4bcf4SDavid Dillow 				     "giving up on stale connection\n");
1036509c07bcSBart Van Assche 			ch->status = -ECONNRESET;
1037509c07bcSBart Van Assche 			return ch->status;
10389fe4bcf4SDavid Dillow 
1039aef9ec39SRoland Dreier 		default:
1040509c07bcSBart Van Assche 			return ch->status;
1041aef9ec39SRoland Dreier 		}
1042aef9ec39SRoland Dreier 	}
1043aef9ec39SRoland Dreier }
1044aef9ec39SRoland Dreier 
1045509c07bcSBart Van Assche static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
10465cfb1782SBart Van Assche {
10475cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
10485cfb1782SBart Van Assche 	struct ib_send_wr wr = {
10495cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
10505cfb1782SBart Van Assche 		.wr_id		    = LOCAL_INV_WR_ID_MASK,
10515cfb1782SBart Van Assche 		.next		    = NULL,
10525cfb1782SBart Van Assche 		.num_sge	    = 0,
10535cfb1782SBart Van Assche 		.send_flags	    = 0,
10545cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
10555cfb1782SBart Van Assche 	};
10565cfb1782SBart Van Assche 
1057509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
10585cfb1782SBart Van Assche }
10595cfb1782SBart Van Assche 
1060d945e1dfSRoland Dreier static void srp_unmap_data(struct scsi_cmnd *scmnd,
1061509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch,
1062d945e1dfSRoland Dreier 			   struct srp_request *req)
1063d945e1dfSRoland Dreier {
1064509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
10655cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
10665cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
10675cfb1782SBart Van Assche 	int i, res;
10688f26c9ffSDavid Dillow 
1069bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) ||
1070d945e1dfSRoland Dreier 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1071d945e1dfSRoland Dreier 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1072d945e1dfSRoland Dreier 		return;
1073d945e1dfSRoland Dreier 
10745cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
10755cfb1782SBart Van Assche 		struct srp_fr_desc **pfr;
10765cfb1782SBart Van Assche 
10775cfb1782SBart Van Assche 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1078509c07bcSBart Van Assche 			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
10795cfb1782SBart Van Assche 			if (res < 0) {
10805cfb1782SBart Van Assche 				shost_printk(KERN_ERR, target->scsi_host, PFX
10815cfb1782SBart Van Assche 				  "Queueing INV WR for rkey %#x failed (%d)\n",
10825cfb1782SBart Van Assche 				  (*pfr)->mr->rkey, res);
10835cfb1782SBart Van Assche 				queue_work(system_long_wq,
10845cfb1782SBart Van Assche 					   &target->tl_err_work);
10855cfb1782SBart Van Assche 			}
10865cfb1782SBart Van Assche 		}
10875cfb1782SBart Van Assche 		if (req->nmdesc)
1088509c07bcSBart Van Assche 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
10895cfb1782SBart Van Assche 					req->nmdesc);
10905cfb1782SBart Van Assche 	} else {
10915cfb1782SBart Van Assche 		struct ib_pool_fmr **pfmr;
10925cfb1782SBart Van Assche 
10935cfb1782SBart Van Assche 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
10945cfb1782SBart Van Assche 			ib_fmr_pool_unmap(*pfmr);
10955cfb1782SBart Van Assche 	}
1096f5358a17SRoland Dreier 
10978f26c9ffSDavid Dillow 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
10988f26c9ffSDavid Dillow 			scmnd->sc_data_direction);
1099d945e1dfSRoland Dreier }
1100d945e1dfSRoland Dreier 
110122032991SBart Van Assche /**
110222032991SBart Van Assche  * srp_claim_req - Take ownership of the scmnd associated with a request.
1103509c07bcSBart Van Assche  * @ch: SRP RDMA channel.
110422032991SBart Van Assche  * @req: SRP request.
1105b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership for this SCSI device.
110622032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
110722032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
110822032991SBart Van Assche  *
110922032991SBart Van Assche  * Return value:
111022032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
111122032991SBart Van Assche  */
1112509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
111322032991SBart Van Assche 				       struct srp_request *req,
1114b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
111522032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1116526b4caaSIshai Rabinovitz {
111794a9174cSBart Van Assche 	unsigned long flags;
111894a9174cSBart Van Assche 
1119509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1120b3fe628dSBart Van Assche 	if (req->scmnd &&
1121b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1122b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
112322032991SBart Van Assche 		scmnd = req->scmnd;
112422032991SBart Van Assche 		req->scmnd = NULL;
112522032991SBart Van Assche 	} else {
112622032991SBart Van Assche 		scmnd = NULL;
112722032991SBart Van Assche 	}
1128509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
112922032991SBart Van Assche 
113022032991SBart Van Assche 	return scmnd;
113122032991SBart Van Assche }
113222032991SBart Van Assche 
113322032991SBart Van Assche /**
113422032991SBart Van Assche  * srp_free_req() - Unmap data and add request to the free request list.
1135509c07bcSBart Van Assche  * @ch:     SRP RDMA channel.
1136af24663bSBart Van Assche  * @req:    Request to be freed.
1137af24663bSBart Van Assche  * @scmnd:  SCSI command associated with @req.
1138af24663bSBart Van Assche  * @req_lim_delta: Amount to be added to @target->req_lim.
113922032991SBart Van Assche  */
1140509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1141509c07bcSBart Van Assche 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
114222032991SBart Van Assche {
114322032991SBart Van Assche 	unsigned long flags;
114422032991SBart Van Assche 
1145509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
114622032991SBart Van Assche 
1147509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1148509c07bcSBart Van Assche 	ch->req_lim += req_lim_delta;
1149509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
1150526b4caaSIshai Rabinovitz }
1151526b4caaSIshai Rabinovitz 
1152509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1153509c07bcSBart Van Assche 			   struct scsi_device *sdev, int result)
1154526b4caaSIshai Rabinovitz {
1155509c07bcSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
115622032991SBart Van Assche 
115722032991SBart Van Assche 	if (scmnd) {
1158509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd, 0);
1159ed9b2264SBart Van Assche 		scmnd->result = result;
116022032991SBart Van Assche 		scmnd->scsi_done(scmnd);
116122032991SBart Van Assche 	}
1162526b4caaSIshai Rabinovitz }
1163526b4caaSIshai Rabinovitz 
1164ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport)
1165aef9ec39SRoland Dreier {
1166ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1167d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1168b3fe628dSBart Van Assche 	struct Scsi_Host *shost = target->scsi_host;
1169b3fe628dSBart Van Assche 	struct scsi_device *sdev;
1170d92c0da7SBart Van Assche 	int i, j;
1171aef9ec39SRoland Dreier 
1172b3fe628dSBart Van Assche 	/*
1173b3fe628dSBart Van Assche 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1174b3fe628dSBart Van Assche 	 * is not safe. Hence the warning statement below.
1175b3fe628dSBart Van Assche 	 */
1176b3fe628dSBart Van Assche 	shost_for_each_device(sdev, shost)
1177b3fe628dSBart Van Assche 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1178b3fe628dSBart Van Assche 
1179d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1180d92c0da7SBart Van Assche 		ch = &target->ch[i];
1181509c07bcSBart Van Assche 
1182d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1183d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1184d92c0da7SBart Van Assche 
1185d92c0da7SBart Van Assche 			srp_finish_req(ch, req, NULL,
1186d92c0da7SBart Van Assche 				       DID_TRANSPORT_FAILFAST << 16);
1187d92c0da7SBart Van Assche 		}
1188ed9b2264SBart Van Assche 	}
1189ed9b2264SBart Van Assche }
1190ed9b2264SBart Van Assche 
1191ed9b2264SBart Van Assche /*
1192ed9b2264SBart Van Assche  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1193ed9b2264SBart Van Assche  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1194ed9b2264SBart Van Assche  * srp_reset_device() or srp_reset_host() calls will occur while this function
1195ed9b2264SBart Van Assche  * is in progress. One way to realize that is not to call this function
1196ed9b2264SBart Van Assche  * directly but to call srp_reconnect_rport() instead since that last function
1197ed9b2264SBart Van Assche  * serializes calls of this function via rport->mutex and also blocks
1198ed9b2264SBart Van Assche  * srp_queuecommand() calls before invoking this function.
1199ed9b2264SBart Van Assche  */
1200ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport)
1201ed9b2264SBart Van Assche {
1202ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1203d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1204d92c0da7SBart Van Assche 	int i, j, ret = 0;
1205d92c0da7SBart Van Assche 	bool multich = false;
120609be70a2SBart Van Assche 
1207aef9ec39SRoland Dreier 	srp_disconnect_target(target);
120834aa654eSBart Van Assche 
120934aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
121034aa654eSBart Van Assche 		return -ENODEV;
121134aa654eSBart Van Assche 
1212aef9ec39SRoland Dreier 	/*
1213c7c4e7ffSBart Van Assche 	 * Now get a new local CM ID so that we avoid confusing the target in
1214c7c4e7ffSBart Van Assche 	 * case things are really fouled up. Doing so also ensures that all CM
1215c7c4e7ffSBart Van Assche 	 * callbacks will have finished before a new QP is allocated.
1216aef9ec39SRoland Dreier 	 */
1217d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1218d92c0da7SBart Van Assche 		ch = &target->ch[i];
1219d92c0da7SBart Van Assche 		ret += srp_new_cm_id(ch);
1220d92c0da7SBart Van Assche 	}
1221d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1222d92c0da7SBart Van Assche 		ch = &target->ch[i];
1223d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1224d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1225509c07bcSBart Van Assche 
1226509c07bcSBart Van Assche 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1227536ae14eSBart Van Assche 		}
1228d92c0da7SBart Van Assche 	}
1229d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1230d92c0da7SBart Van Assche 		ch = &target->ch[i];
12315cfb1782SBart Van Assche 		/*
12325cfb1782SBart Van Assche 		 * Whether or not creating a new CM ID succeeded, create a new
1233d92c0da7SBart Van Assche 		 * QP. This guarantees that all completion callback function
1234d92c0da7SBart Van Assche 		 * invocations have finished before request resetting starts.
12355cfb1782SBart Van Assche 		 */
1236509c07bcSBart Van Assche 		ret += srp_create_ch_ib(ch);
12375cfb1782SBart Van Assche 
1238509c07bcSBart Van Assche 		INIT_LIST_HEAD(&ch->free_tx);
1239d92c0da7SBart Van Assche 		for (j = 0; j < target->queue_size; ++j)
1240d92c0da7SBart Van Assche 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1241d92c0da7SBart Van Assche 	}
12428de9fe3aSBart Van Assche 
12438de9fe3aSBart Van Assche 	target->qp_in_error = false;
12448de9fe3aSBart Van Assche 
1245d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1246d92c0da7SBart Van Assche 		ch = &target->ch[i];
1247bbac5ccfSBart Van Assche 		if (ret)
1248d92c0da7SBart Van Assche 			break;
1249d92c0da7SBart Van Assche 		ret = srp_connect_ch(ch, multich);
1250d92c0da7SBart Van Assche 		multich = true;
1251d92c0da7SBart Van Assche 	}
125209be70a2SBart Van Assche 
1253ed9b2264SBart Van Assche 	if (ret == 0)
1254ed9b2264SBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
1255ed9b2264SBart Van Assche 			     PFX "reconnect succeeded\n");
1256aef9ec39SRoland Dreier 
1257aef9ec39SRoland Dreier 	return ret;
1258aef9ec39SRoland Dreier }
1259aef9ec39SRoland Dreier 
12608f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
12618f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1262f5358a17SRoland Dreier {
12638f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
12648f26c9ffSDavid Dillow 
12658f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
12668f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
12678f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
12688f26c9ffSDavid Dillow 
12698f26c9ffSDavid Dillow 	state->total_len += dma_len;
12708f26c9ffSDavid Dillow 	state->desc++;
12718f26c9ffSDavid Dillow 	state->ndesc++;
12728f26c9ffSDavid Dillow }
12738f26c9ffSDavid Dillow 
12748f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state,
1275509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
12768f26c9ffSDavid Dillow {
12778f26c9ffSDavid Dillow 	struct ib_pool_fmr *fmr;
1278f5358a17SRoland Dreier 	u64 io_addr = 0;
12798f26c9ffSDavid Dillow 
1280509c07bcSBart Van Assche 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
12818f26c9ffSDavid Dillow 				   state->npages, io_addr);
12828f26c9ffSDavid Dillow 	if (IS_ERR(fmr))
12838f26c9ffSDavid Dillow 		return PTR_ERR(fmr);
12848f26c9ffSDavid Dillow 
12858f26c9ffSDavid Dillow 	*state->next_fmr++ = fmr;
128652ede08fSBart Van Assche 	state->nmdesc++;
12878f26c9ffSDavid Dillow 
128852ede08fSBart Van Assche 	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
1289539dde6fSBart Van Assche 
12908f26c9ffSDavid Dillow 	return 0;
12918f26c9ffSDavid Dillow }
12928f26c9ffSDavid Dillow 
12935cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state,
1294509c07bcSBart Van Assche 			     struct srp_rdma_ch *ch)
12955cfb1782SBart Van Assche {
1296509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
12975cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
12985cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
12995cfb1782SBart Van Assche 	struct ib_send_wr wr;
13005cfb1782SBart Van Assche 	struct srp_fr_desc *desc;
13015cfb1782SBart Van Assche 	u32 rkey;
13025cfb1782SBart Van Assche 
1303509c07bcSBart Van Assche 	desc = srp_fr_pool_get(ch->fr_pool);
13045cfb1782SBart Van Assche 	if (!desc)
13055cfb1782SBart Van Assche 		return -ENOMEM;
13065cfb1782SBart Van Assche 
13075cfb1782SBart Van Assche 	rkey = ib_inc_rkey(desc->mr->rkey);
13085cfb1782SBart Van Assche 	ib_update_fast_reg_key(desc->mr, rkey);
13095cfb1782SBart Van Assche 
13105cfb1782SBart Van Assche 	memcpy(desc->frpl->page_list, state->pages,
13115cfb1782SBart Van Assche 	       sizeof(state->pages[0]) * state->npages);
13125cfb1782SBart Van Assche 
13135cfb1782SBart Van Assche 	memset(&wr, 0, sizeof(wr));
13145cfb1782SBart Van Assche 	wr.opcode = IB_WR_FAST_REG_MR;
13155cfb1782SBart Van Assche 	wr.wr_id = FAST_REG_WR_ID_MASK;
13165cfb1782SBart Van Assche 	wr.wr.fast_reg.iova_start = state->base_dma_addr;
13175cfb1782SBart Van Assche 	wr.wr.fast_reg.page_list = desc->frpl;
13185cfb1782SBart Van Assche 	wr.wr.fast_reg.page_list_len = state->npages;
13195cfb1782SBart Van Assche 	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
13205cfb1782SBart Van Assche 	wr.wr.fast_reg.length = state->dma_len;
13215cfb1782SBart Van Assche 	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
13225cfb1782SBart Van Assche 				       IB_ACCESS_REMOTE_READ |
13235cfb1782SBart Van Assche 				       IB_ACCESS_REMOTE_WRITE);
13245cfb1782SBart Van Assche 	wr.wr.fast_reg.rkey = desc->mr->lkey;
13255cfb1782SBart Van Assche 
13265cfb1782SBart Van Assche 	*state->next_fr++ = desc;
13275cfb1782SBart Van Assche 	state->nmdesc++;
13285cfb1782SBart Van Assche 
13295cfb1782SBart Van Assche 	srp_map_desc(state, state->base_dma_addr, state->dma_len,
13305cfb1782SBart Van Assche 		     desc->mr->rkey);
13315cfb1782SBart Van Assche 
1332509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
13335cfb1782SBart Van Assche }
13345cfb1782SBart Van Assche 
1335539dde6fSBart Van Assche static int srp_finish_mapping(struct srp_map_state *state,
1336509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
1337539dde6fSBart Van Assche {
1338509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1339539dde6fSBart Van Assche 	int ret = 0;
1340539dde6fSBart Van Assche 
1341539dde6fSBart Van Assche 	if (state->npages == 0)
1342539dde6fSBart Van Assche 		return 0;
1343539dde6fSBart Van Assche 
1344b1b8854dSBart Van Assche 	if (state->npages == 1 && !register_always)
134552ede08fSBart Van Assche 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
1346539dde6fSBart Van Assche 			     target->rkey);
1347539dde6fSBart Van Assche 	else
13485cfb1782SBart Van Assche 		ret = target->srp_host->srp_dev->use_fast_reg ?
1349509c07bcSBart Van Assche 			srp_map_finish_fr(state, ch) :
1350509c07bcSBart Van Assche 			srp_map_finish_fmr(state, ch);
1351539dde6fSBart Van Assche 
1352539dde6fSBart Van Assche 	if (ret == 0) {
1353539dde6fSBart Van Assche 		state->npages = 0;
135452ede08fSBart Van Assche 		state->dma_len = 0;
1355539dde6fSBart Van Assche 	}
1356539dde6fSBart Van Assche 
1357539dde6fSBart Van Assche 	return ret;
1358539dde6fSBart Van Assche }
1359539dde6fSBart Van Assche 
13608f26c9ffSDavid Dillow static void srp_map_update_start(struct srp_map_state *state,
13618f26c9ffSDavid Dillow 				 struct scatterlist *sg, int sg_index,
13628f26c9ffSDavid Dillow 				 dma_addr_t dma_addr)
13638f26c9ffSDavid Dillow {
13648f26c9ffSDavid Dillow 	state->unmapped_sg = sg;
13658f26c9ffSDavid Dillow 	state->unmapped_index = sg_index;
13668f26c9ffSDavid Dillow 	state->unmapped_addr = dma_addr;
13678f26c9ffSDavid Dillow }
13688f26c9ffSDavid Dillow 
13698f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state,
1370509c07bcSBart Van Assche 			    struct srp_rdma_ch *ch,
13718f26c9ffSDavid Dillow 			    struct scatterlist *sg, int sg_index,
13725cfb1782SBart Van Assche 			    bool use_mr)
13738f26c9ffSDavid Dillow {
1374509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
137505321937SGreg Kroah-Hartman 	struct srp_device *dev = target->srp_host->srp_dev;
137685507bccSRalph Campbell 	struct ib_device *ibdev = dev->dev;
13778f26c9ffSDavid Dillow 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1378bb350d1dSFUJITA Tomonori 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
13798f26c9ffSDavid Dillow 	unsigned int len;
13808f26c9ffSDavid Dillow 	int ret;
138185507bccSRalph Campbell 
13828f26c9ffSDavid Dillow 	if (!dma_len)
13838f26c9ffSDavid Dillow 		return 0;
13848f26c9ffSDavid Dillow 
13855cfb1782SBart Van Assche 	if (!use_mr) {
13865cfb1782SBart Van Assche 		/*
13875cfb1782SBart Van Assche 		 * Once we're in direct map mode for a request, we don't
13885cfb1782SBart Van Assche 		 * go back to FMR or FR mode, so no need to update anything
13898f26c9ffSDavid Dillow 		 * other than the descriptor.
13908f26c9ffSDavid Dillow 		 */
13918f26c9ffSDavid Dillow 		srp_map_desc(state, dma_addr, dma_len, target->rkey);
13928f26c9ffSDavid Dillow 		return 0;
1393f5358a17SRoland Dreier 	}
1394f5358a17SRoland Dreier 
13955cfb1782SBart Van Assche 	/*
13965cfb1782SBart Van Assche 	 * Since not all RDMA HW drivers support non-zero page offsets for
13975cfb1782SBart Van Assche 	 * FMR, if we start at an offset into a page, don't merge into the
13985cfb1782SBart Van Assche 	 * current FMR mapping. Finish it out, and use the kernel's MR for
13995cfb1782SBart Van Assche 	 * this sg entry.
14008f26c9ffSDavid Dillow 	 */
14015cfb1782SBart Van Assche 	if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
14025cfb1782SBart Van Assche 	    dma_len > dev->mr_max_size) {
1403509c07bcSBart Van Assche 		ret = srp_finish_mapping(state, ch);
14048f26c9ffSDavid Dillow 		if (ret)
14058f26c9ffSDavid Dillow 			return ret;
14068f26c9ffSDavid Dillow 
14078f26c9ffSDavid Dillow 		srp_map_desc(state, dma_addr, dma_len, target->rkey);
14088f26c9ffSDavid Dillow 		srp_map_update_start(state, NULL, 0, 0);
14098f26c9ffSDavid Dillow 		return 0;
1410f5358a17SRoland Dreier 	}
1411f5358a17SRoland Dreier 
14125cfb1782SBart Van Assche 	/*
14135cfb1782SBart Van Assche 	 * If this is the first sg that will be mapped via FMR or via FR, save
14145cfb1782SBart Van Assche 	 * our position. We need to know the first unmapped entry, its index,
14155cfb1782SBart Van Assche 	 * and the first unmapped address within that entry to be able to
14165cfb1782SBart Van Assche 	 * restart mapping after an error.
14178f26c9ffSDavid Dillow 	 */
14188f26c9ffSDavid Dillow 	if (!state->unmapped_sg)
14198f26c9ffSDavid Dillow 		srp_map_update_start(state, sg, sg_index, dma_addr);
1420f5358a17SRoland Dreier 
14218f26c9ffSDavid Dillow 	while (dma_len) {
14225cfb1782SBart Van Assche 		unsigned offset = dma_addr & ~dev->mr_page_mask;
14235cfb1782SBart Van Assche 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1424509c07bcSBart Van Assche 			ret = srp_finish_mapping(state, ch);
14258f26c9ffSDavid Dillow 			if (ret)
14268f26c9ffSDavid Dillow 				return ret;
1427f5358a17SRoland Dreier 
14288f26c9ffSDavid Dillow 			srp_map_update_start(state, sg, sg_index, dma_addr);
142985507bccSRalph Campbell 		}
1430f5358a17SRoland Dreier 
14315cfb1782SBart Van Assche 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
14328f26c9ffSDavid Dillow 
14338f26c9ffSDavid Dillow 		if (!state->npages)
14348f26c9ffSDavid Dillow 			state->base_dma_addr = dma_addr;
14355cfb1782SBart Van Assche 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
143652ede08fSBart Van Assche 		state->dma_len += len;
14378f26c9ffSDavid Dillow 		dma_addr += len;
14388f26c9ffSDavid Dillow 		dma_len -= len;
1439f5358a17SRoland Dreier 	}
1440f5358a17SRoland Dreier 
14415cfb1782SBart Van Assche 	/*
14425cfb1782SBart Van Assche 	 * If the last entry of the MR wasn't a full page, then we need to
14438f26c9ffSDavid Dillow 	 * close it out and start a new one -- we can only merge at page
14448f26c9ffSDavid Dillow 	 * boundries.
14458f26c9ffSDavid Dillow 	 */
1446f5358a17SRoland Dreier 	ret = 0;
144752ede08fSBart Van Assche 	if (len != dev->mr_page_size) {
1448509c07bcSBart Van Assche 		ret = srp_finish_mapping(state, ch);
14498f26c9ffSDavid Dillow 		if (!ret)
14508f26c9ffSDavid Dillow 			srp_map_update_start(state, NULL, 0, 0);
14518f26c9ffSDavid Dillow 	}
1452f5358a17SRoland Dreier 	return ret;
1453f5358a17SRoland Dreier }
1454f5358a17SRoland Dreier 
1455509c07bcSBart Van Assche static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1456509c07bcSBart Van Assche 		      struct srp_request *req, struct scatterlist *scat,
1457509c07bcSBart Van Assche 		      int count)
145876bc1e1dSBart Van Assche {
1459509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
146076bc1e1dSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
146176bc1e1dSBart Van Assche 	struct ib_device *ibdev = dev->dev;
146276bc1e1dSBart Van Assche 	struct scatterlist *sg;
14635cfb1782SBart Van Assche 	int i;
14645cfb1782SBart Van Assche 	bool use_mr;
146576bc1e1dSBart Van Assche 
146676bc1e1dSBart Van Assche 	state->desc	= req->indirect_desc;
146776bc1e1dSBart Van Assche 	state->pages	= req->map_page;
14685cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
14695cfb1782SBart Van Assche 		state->next_fr = req->fr_list;
1470509c07bcSBart Van Assche 		use_mr = !!ch->fr_pool;
14715cfb1782SBart Van Assche 	} else {
147276bc1e1dSBart Van Assche 		state->next_fmr = req->fmr_list;
1473509c07bcSBart Van Assche 		use_mr = !!ch->fmr_pool;
14745cfb1782SBart Van Assche 	}
147576bc1e1dSBart Van Assche 
147676bc1e1dSBart Van Assche 	for_each_sg(scat, sg, count, i) {
1477509c07bcSBart Van Assche 		if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
14785cfb1782SBart Van Assche 			/*
14795cfb1782SBart Van Assche 			 * Memory registration failed, so backtrack to the
14805cfb1782SBart Van Assche 			 * first unmapped entry and continue on without using
14815cfb1782SBart Van Assche 			 * memory registration.
148276bc1e1dSBart Van Assche 			 */
148376bc1e1dSBart Van Assche 			dma_addr_t dma_addr;
148476bc1e1dSBart Van Assche 			unsigned int dma_len;
148576bc1e1dSBart Van Assche 
148676bc1e1dSBart Van Assche backtrack:
148776bc1e1dSBart Van Assche 			sg = state->unmapped_sg;
148876bc1e1dSBart Van Assche 			i = state->unmapped_index;
148976bc1e1dSBart Van Assche 
149076bc1e1dSBart Van Assche 			dma_addr = ib_sg_dma_address(ibdev, sg);
149176bc1e1dSBart Van Assche 			dma_len = ib_sg_dma_len(ibdev, sg);
149276bc1e1dSBart Van Assche 			dma_len -= (state->unmapped_addr - dma_addr);
149376bc1e1dSBart Van Assche 			dma_addr = state->unmapped_addr;
14945cfb1782SBart Van Assche 			use_mr = false;
149576bc1e1dSBart Van Assche 			srp_map_desc(state, dma_addr, dma_len, target->rkey);
149676bc1e1dSBart Van Assche 		}
149776bc1e1dSBart Van Assche 	}
149876bc1e1dSBart Van Assche 
1499509c07bcSBart Van Assche 	if (use_mr && srp_finish_mapping(state, ch))
150076bc1e1dSBart Van Assche 		goto backtrack;
150176bc1e1dSBart Van Assche 
150252ede08fSBart Van Assche 	req->nmdesc = state->nmdesc;
15035cfb1782SBart Van Assche 
15045cfb1782SBart Van Assche 	return 0;
150576bc1e1dSBart Van Assche }
150676bc1e1dSBart Van Assche 
1507509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1508aef9ec39SRoland Dreier 			struct srp_request *req)
1509aef9ec39SRoland Dreier {
1510509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
151176bc1e1dSBart Van Assche 	struct scatterlist *scat;
1512aef9ec39SRoland Dreier 	struct srp_cmd *cmd = req->cmd->buf;
151376bc1e1dSBart Van Assche 	int len, nents, count;
151485507bccSRalph Campbell 	struct srp_device *dev;
151585507bccSRalph Campbell 	struct ib_device *ibdev;
15168f26c9ffSDavid Dillow 	struct srp_map_state state;
15178f26c9ffSDavid Dillow 	struct srp_indirect_buf *indirect_hdr;
15188f26c9ffSDavid Dillow 	u32 table_len;
15198f26c9ffSDavid Dillow 	u8 fmt;
1520aef9ec39SRoland Dreier 
1521bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1522aef9ec39SRoland Dreier 		return sizeof (struct srp_cmd);
1523aef9ec39SRoland Dreier 
1524aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1525aef9ec39SRoland Dreier 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
15267aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
15277aa54bd7SDavid Dillow 			     PFX "Unhandled data direction %d\n",
1528aef9ec39SRoland Dreier 			     scmnd->sc_data_direction);
1529aef9ec39SRoland Dreier 		return -EINVAL;
1530aef9ec39SRoland Dreier 	}
1531aef9ec39SRoland Dreier 
1532bb350d1dSFUJITA Tomonori 	nents = scsi_sg_count(scmnd);
1533bb350d1dSFUJITA Tomonori 	scat  = scsi_sglist(scmnd);
1534aef9ec39SRoland Dreier 
153505321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev;
153685507bccSRalph Campbell 	ibdev = dev->dev;
153785507bccSRalph Campbell 
153885507bccSRalph Campbell 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
15398f26c9ffSDavid Dillow 	if (unlikely(count == 0))
15408f26c9ffSDavid Dillow 		return -EIO;
1541aef9ec39SRoland Dreier 
1542aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_DIRECT;
1543f5358a17SRoland Dreier 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1544f5358a17SRoland Dreier 
1545b1b8854dSBart Van Assche 	if (count == 1 && !register_always) {
1546f5358a17SRoland Dreier 		/*
1547f5358a17SRoland Dreier 		 * The midlayer only generated a single gather/scatter
1548f5358a17SRoland Dreier 		 * entry, or DMA mapping coalesced everything to a
1549f5358a17SRoland Dreier 		 * single entry.  So a direct descriptor along with
1550f5358a17SRoland Dreier 		 * the DMA MR suffices.
1551f5358a17SRoland Dreier 		 */
1552f5358a17SRoland Dreier 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1553aef9ec39SRoland Dreier 
155485507bccSRalph Campbell 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
15559af76271SDavid Dillow 		buf->key = cpu_to_be32(target->rkey);
155685507bccSRalph Campbell 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
15578f26c9ffSDavid Dillow 
155852ede08fSBart Van Assche 		req->nmdesc = 0;
15598f26c9ffSDavid Dillow 		goto map_complete;
15608f26c9ffSDavid Dillow 	}
15618f26c9ffSDavid Dillow 
15625cfb1782SBart Van Assche 	/*
15635cfb1782SBart Van Assche 	 * We have more than one scatter/gather entry, so build our indirect
15645cfb1782SBart Van Assche 	 * descriptor table, trying to merge as many entries as we can.
1565f5358a17SRoland Dreier 	 */
15668f26c9ffSDavid Dillow 	indirect_hdr = (void *) cmd->add_data;
15678f26c9ffSDavid Dillow 
1568c07d424dSDavid Dillow 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1569c07d424dSDavid Dillow 				   target->indirect_size, DMA_TO_DEVICE);
1570c07d424dSDavid Dillow 
15718f26c9ffSDavid Dillow 	memset(&state, 0, sizeof(state));
1572509c07bcSBart Van Assche 	srp_map_sg(&state, ch, req, scat, count);
15738f26c9ffSDavid Dillow 
1574c07d424dSDavid Dillow 	/* We've mapped the request, now pull as much of the indirect
1575c07d424dSDavid Dillow 	 * descriptor table as we can into the command buffer. If this
1576c07d424dSDavid Dillow 	 * target is not using an external indirect table, we are
1577c07d424dSDavid Dillow 	 * guaranteed to fit into the command, as the SCSI layer won't
1578c07d424dSDavid Dillow 	 * give us more S/G entries than we allow.
15798f26c9ffSDavid Dillow 	 */
15808f26c9ffSDavid Dillow 	if (state.ndesc == 1) {
15815cfb1782SBart Van Assche 		/*
15825cfb1782SBart Van Assche 		 * Memory registration collapsed the sg-list into one entry,
15838f26c9ffSDavid Dillow 		 * so use a direct descriptor.
15848f26c9ffSDavid Dillow 		 */
15858f26c9ffSDavid Dillow 		struct srp_direct_buf *buf = (void *) cmd->add_data;
15868f26c9ffSDavid Dillow 
1587c07d424dSDavid Dillow 		*buf = req->indirect_desc[0];
15888f26c9ffSDavid Dillow 		goto map_complete;
15898f26c9ffSDavid Dillow 	}
15908f26c9ffSDavid Dillow 
1591c07d424dSDavid Dillow 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1592c07d424dSDavid Dillow 						!target->allow_ext_sg)) {
1593c07d424dSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
1594c07d424dSDavid Dillow 			     "Could not fit S/G list into SRP_CMD\n");
1595c07d424dSDavid Dillow 		return -EIO;
1596c07d424dSDavid Dillow 	}
1597c07d424dSDavid Dillow 
1598c07d424dSDavid Dillow 	count = min(state.ndesc, target->cmd_sg_cnt);
15998f26c9ffSDavid Dillow 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1600aef9ec39SRoland Dreier 
1601aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_INDIRECT;
16028f26c9ffSDavid Dillow 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1603c07d424dSDavid Dillow 	len += count * sizeof (struct srp_direct_buf);
1604f5358a17SRoland Dreier 
1605c07d424dSDavid Dillow 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1606c07d424dSDavid Dillow 	       count * sizeof (struct srp_direct_buf));
160785507bccSRalph Campbell 
1608c07d424dSDavid Dillow 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
16098f26c9ffSDavid Dillow 	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
16108f26c9ffSDavid Dillow 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
16118f26c9ffSDavid Dillow 	indirect_hdr->len = cpu_to_be32(state.total_len);
1612aef9ec39SRoland Dreier 
1613aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1614c07d424dSDavid Dillow 		cmd->data_out_desc_cnt = count;
1615aef9ec39SRoland Dreier 	else
1616c07d424dSDavid Dillow 		cmd->data_in_desc_cnt = count;
1617c07d424dSDavid Dillow 
1618c07d424dSDavid Dillow 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1619c07d424dSDavid Dillow 				      DMA_TO_DEVICE);
1620aef9ec39SRoland Dreier 
16218f26c9ffSDavid Dillow map_complete:
1622aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1623aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt << 4;
1624aef9ec39SRoland Dreier 	else
1625aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt;
1626aef9ec39SRoland Dreier 
1627aef9ec39SRoland Dreier 	return len;
1628aef9ec39SRoland Dreier }
1629aef9ec39SRoland Dreier 
163005a1d750SDavid Dillow /*
163176c75b25SBart Van Assche  * Return an IU and possible credit to the free pool
163276c75b25SBart Van Assche  */
1633509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
163476c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
163576c75b25SBart Van Assche {
163676c75b25SBart Van Assche 	unsigned long flags;
163776c75b25SBart Van Assche 
1638509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1639509c07bcSBart Van Assche 	list_add(&iu->list, &ch->free_tx);
164076c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
1641509c07bcSBart Van Assche 		++ch->req_lim;
1642509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
164376c75b25SBart Van Assche }
164476c75b25SBart Van Assche 
164576c75b25SBart Van Assche /*
1646509c07bcSBart Van Assche  * Must be called with ch->lock held to protect req_lim and free_tx.
1647e9684678SBart Van Assche  * If IU is not sent, it must be returned using srp_put_tx_iu().
164805a1d750SDavid Dillow  *
164905a1d750SDavid Dillow  * Note:
165005a1d750SDavid Dillow  * An upper limit for the number of allocated information units for each
165105a1d750SDavid Dillow  * request type is:
165205a1d750SDavid Dillow  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
165305a1d750SDavid Dillow  *   more than Scsi_Host.can_queue requests.
165405a1d750SDavid Dillow  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
165505a1d750SDavid Dillow  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
165605a1d750SDavid Dillow  *   one unanswered SRP request to an initiator.
165705a1d750SDavid Dillow  */
1658509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
165905a1d750SDavid Dillow 				      enum srp_iu_type iu_type)
166005a1d750SDavid Dillow {
1661509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
166205a1d750SDavid Dillow 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
166305a1d750SDavid Dillow 	struct srp_iu *iu;
166405a1d750SDavid Dillow 
1665509c07bcSBart Van Assche 	srp_send_completion(ch->send_cq, ch);
166605a1d750SDavid Dillow 
1667509c07bcSBart Van Assche 	if (list_empty(&ch->free_tx))
166805a1d750SDavid Dillow 		return NULL;
166905a1d750SDavid Dillow 
167005a1d750SDavid Dillow 	/* Initiator responses to target requests do not consume credits */
167176c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP) {
1672509c07bcSBart Van Assche 		if (ch->req_lim <= rsv) {
167305a1d750SDavid Dillow 			++target->zero_req_lim;
167405a1d750SDavid Dillow 			return NULL;
167505a1d750SDavid Dillow 		}
167605a1d750SDavid Dillow 
1677509c07bcSBart Van Assche 		--ch->req_lim;
167876c75b25SBart Van Assche 	}
167976c75b25SBart Van Assche 
1680509c07bcSBart Van Assche 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
168176c75b25SBart Van Assche 	list_del(&iu->list);
168205a1d750SDavid Dillow 	return iu;
168305a1d750SDavid Dillow }
168405a1d750SDavid Dillow 
1685509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
168605a1d750SDavid Dillow {
1687509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
168805a1d750SDavid Dillow 	struct ib_sge list;
168905a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
169005a1d750SDavid Dillow 
169105a1d750SDavid Dillow 	list.addr   = iu->dma;
169205a1d750SDavid Dillow 	list.length = len;
16939af76271SDavid Dillow 	list.lkey   = target->lkey;
169405a1d750SDavid Dillow 
169505a1d750SDavid Dillow 	wr.next       = NULL;
1696dcb4cb85SBart Van Assche 	wr.wr_id      = (uintptr_t) iu;
169705a1d750SDavid Dillow 	wr.sg_list    = &list;
169805a1d750SDavid Dillow 	wr.num_sge    = 1;
169905a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
170005a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
170105a1d750SDavid Dillow 
1702509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
170305a1d750SDavid Dillow }
170405a1d750SDavid Dillow 
1705509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1706c996bb47SBart Van Assche {
1707509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1708c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1709dcb4cb85SBart Van Assche 	struct ib_sge list;
1710c996bb47SBart Van Assche 
1711c996bb47SBart Van Assche 	list.addr   = iu->dma;
1712c996bb47SBart Van Assche 	list.length = iu->size;
17139af76271SDavid Dillow 	list.lkey   = target->lkey;
1714c996bb47SBart Van Assche 
1715c996bb47SBart Van Assche 	wr.next     = NULL;
1716dcb4cb85SBart Van Assche 	wr.wr_id    = (uintptr_t) iu;
1717c996bb47SBart Van Assche 	wr.sg_list  = &list;
1718c996bb47SBart Van Assche 	wr.num_sge  = 1;
1719c996bb47SBart Van Assche 
1720509c07bcSBart Van Assche 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1721c996bb47SBart Van Assche }
1722c996bb47SBart Van Assche 
1723509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1724aef9ec39SRoland Dreier {
1725509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1726aef9ec39SRoland Dreier 	struct srp_request *req;
1727aef9ec39SRoland Dreier 	struct scsi_cmnd *scmnd;
1728aef9ec39SRoland Dreier 	unsigned long flags;
1729aef9ec39SRoland Dreier 
1730aef9ec39SRoland Dreier 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1731509c07bcSBart Van Assche 		spin_lock_irqsave(&ch->lock, flags);
1732509c07bcSBart Van Assche 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733509c07bcSBart Van Assche 		spin_unlock_irqrestore(&ch->lock, flags);
173494a9174cSBart Van Assche 
1735509c07bcSBart Van Assche 		ch->tsk_mgmt_status = -1;
1736f8b6e31eSDavid Dillow 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1737509c07bcSBart Van Assche 			ch->tsk_mgmt_status = rsp->data[3];
1738509c07bcSBart Van Assche 		complete(&ch->tsk_mgmt_done);
1739aef9ec39SRoland Dreier 	} else {
174077f2c1a4SBart Van Assche 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
174177f2c1a4SBart Van Assche 		if (scmnd) {
174277f2c1a4SBart Van Assche 			req = (void *)scmnd->host_scribble;
174377f2c1a4SBart Van Assche 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
174477f2c1a4SBart Van Assche 		}
174522032991SBart Van Assche 		if (!scmnd) {
17467aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
1747d92c0da7SBart Van Assche 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748d92c0da7SBart Van Assche 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
174922032991SBart Van Assche 
1750509c07bcSBart Van Assche 			spin_lock_irqsave(&ch->lock, flags);
1751509c07bcSBart Van Assche 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752509c07bcSBart Van Assche 			spin_unlock_irqrestore(&ch->lock, flags);
175322032991SBart Van Assche 
175422032991SBart Van Assche 			return;
175522032991SBart Van Assche 		}
1756aef9ec39SRoland Dreier 		scmnd->result = rsp->status;
1757aef9ec39SRoland Dreier 
1758aef9ec39SRoland Dreier 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759aef9ec39SRoland Dreier 			memcpy(scmnd->sense_buffer, rsp->data +
1760aef9ec39SRoland Dreier 			       be32_to_cpu(rsp->resp_data_len),
1761aef9ec39SRoland Dreier 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1762aef9ec39SRoland Dreier 				     SCSI_SENSE_BUFFERSIZE));
1763aef9ec39SRoland Dreier 		}
1764aef9ec39SRoland Dreier 
1765e714531aSBart Van Assche 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1766bb350d1dSFUJITA Tomonori 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1767e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770e714531aSBart Van Assche 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1773aef9ec39SRoland Dreier 
1774509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd,
177522032991SBart Van Assche 			     be32_to_cpu(rsp->req_lim_delta));
177622032991SBart Van Assche 
1777f8b6e31eSDavid Dillow 		scmnd->host_scribble = NULL;
1778aef9ec39SRoland Dreier 		scmnd->scsi_done(scmnd);
1779aef9ec39SRoland Dreier 	}
1780aef9ec39SRoland Dreier }
1781aef9ec39SRoland Dreier 
1782509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1783bb12588aSDavid Dillow 			       void *rsp, int len)
1784bb12588aSDavid Dillow {
1785509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
178676c75b25SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1787bb12588aSDavid Dillow 	unsigned long flags;
1788bb12588aSDavid Dillow 	struct srp_iu *iu;
178976c75b25SBart Van Assche 	int err;
1790bb12588aSDavid Dillow 
1791509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1792509c07bcSBart Van Assche 	ch->req_lim += req_delta;
1793509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1794509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
179576c75b25SBart Van Assche 
1796bb12588aSDavid Dillow 	if (!iu) {
1797bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1798bb12588aSDavid Dillow 			     "no IU available to send response\n");
179976c75b25SBart Van Assche 		return 1;
1800bb12588aSDavid Dillow 	}
1801bb12588aSDavid Dillow 
1802bb12588aSDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1803bb12588aSDavid Dillow 	memcpy(iu->buf, rsp, len);
1804bb12588aSDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1805bb12588aSDavid Dillow 
1806509c07bcSBart Van Assche 	err = srp_post_send(ch, iu, len);
180776c75b25SBart Van Assche 	if (err) {
1808bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1809bb12588aSDavid Dillow 			     "unable to post response: %d\n", err);
1810509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
181176c75b25SBart Van Assche 	}
1812bb12588aSDavid Dillow 
1813bb12588aSDavid Dillow 	return err;
1814bb12588aSDavid Dillow }
1815bb12588aSDavid Dillow 
1816509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch,
1817bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1818bb12588aSDavid Dillow {
1819bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1820bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1821bb12588aSDavid Dillow 		.tag = req->tag,
1822bb12588aSDavid Dillow 	};
1823bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1824bb12588aSDavid Dillow 
1825509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1826509c07bcSBart Van Assche 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1827bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1828bb12588aSDavid Dillow }
1829bb12588aSDavid Dillow 
1830509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch,
1831bb12588aSDavid Dillow 				struct srp_aer_req *req)
1832bb12588aSDavid Dillow {
1833509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1834bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1835bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1836bb12588aSDavid Dillow 		.tag = req->tag,
1837bb12588aSDavid Dillow 	};
1838bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1839bb12588aSDavid Dillow 
1840bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1841985aa495SBart Van Assche 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1842bb12588aSDavid Dillow 
1843509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1844bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1845bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1846bb12588aSDavid Dillow }
1847bb12588aSDavid Dillow 
1848509c07bcSBart Van Assche static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1849aef9ec39SRoland Dreier {
1850509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1851dcb4cb85SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1852737b94ebSRoland Dreier 	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1853c996bb47SBart Van Assche 	int res;
1854aef9ec39SRoland Dreier 	u8 opcode;
1855aef9ec39SRoland Dreier 
1856509c07bcSBart Van Assche 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
185785507bccSRalph Campbell 				   DMA_FROM_DEVICE);
1858aef9ec39SRoland Dreier 
1859aef9ec39SRoland Dreier 	opcode = *(u8 *) iu->buf;
1860aef9ec39SRoland Dreier 
1861aef9ec39SRoland Dreier 	if (0) {
18627aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
18637aa54bd7SDavid Dillow 			     PFX "recv completion, opcode 0x%02x\n", opcode);
18647a700811SBart Van Assche 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
18657a700811SBart Van Assche 			       iu->buf, wc->byte_len, true);
1866aef9ec39SRoland Dreier 	}
1867aef9ec39SRoland Dreier 
1868aef9ec39SRoland Dreier 	switch (opcode) {
1869aef9ec39SRoland Dreier 	case SRP_RSP:
1870509c07bcSBart Van Assche 		srp_process_rsp(ch, iu->buf);
1871aef9ec39SRoland Dreier 		break;
1872aef9ec39SRoland Dreier 
1873bb12588aSDavid Dillow 	case SRP_CRED_REQ:
1874509c07bcSBart Van Assche 		srp_process_cred_req(ch, iu->buf);
1875bb12588aSDavid Dillow 		break;
1876bb12588aSDavid Dillow 
1877bb12588aSDavid Dillow 	case SRP_AER_REQ:
1878509c07bcSBart Van Assche 		srp_process_aer_req(ch, iu->buf);
1879bb12588aSDavid Dillow 		break;
1880bb12588aSDavid Dillow 
1881aef9ec39SRoland Dreier 	case SRP_T_LOGOUT:
1882aef9ec39SRoland Dreier 		/* XXX Handle target logout */
18837aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
18847aa54bd7SDavid Dillow 			     PFX "Got target logout request\n");
1885aef9ec39SRoland Dreier 		break;
1886aef9ec39SRoland Dreier 
1887aef9ec39SRoland Dreier 	default:
18887aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
18897aa54bd7SDavid Dillow 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1890aef9ec39SRoland Dreier 		break;
1891aef9ec39SRoland Dreier 	}
1892aef9ec39SRoland Dreier 
1893509c07bcSBart Van Assche 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
189485507bccSRalph Campbell 				      DMA_FROM_DEVICE);
1895c996bb47SBart Van Assche 
1896509c07bcSBart Van Assche 	res = srp_post_recv(ch, iu);
1897c996bb47SBart Van Assche 	if (res != 0)
1898c996bb47SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
1899c996bb47SBart Van Assche 			     PFX "Recv failed with error code %d\n", res);
1900aef9ec39SRoland Dreier }
1901aef9ec39SRoland Dreier 
1902c1120f89SBart Van Assche /**
1903c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
1904af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
1905c1120f89SBart Van Assche  *
1906c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
1907c1120f89SBart Van Assche  * hence the target->rport test.
1908c1120f89SBart Van Assche  */
1909c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
1910c1120f89SBart Van Assche {
1911c1120f89SBart Van Assche 	struct srp_target_port *target;
1912c1120f89SBart Van Assche 
1913c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
1914c1120f89SBart Van Assche 	if (target->rport)
1915c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
1916c1120f89SBart Van Assche }
1917c1120f89SBart Van Assche 
19185cfb1782SBart Van Assche static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
19197dad6b2eSBart Van Assche 			      bool send_err, struct srp_rdma_ch *ch)
1920948d1e88SBart Van Assche {
19217dad6b2eSBart Van Assche 	struct srp_target_port *target = ch->target;
19227dad6b2eSBart Van Assche 
19237dad6b2eSBart Van Assche 	if (wr_id == SRP_LAST_WR_ID) {
19247dad6b2eSBart Van Assche 		complete(&ch->done);
19257dad6b2eSBart Van Assche 		return;
19267dad6b2eSBart Van Assche 	}
19277dad6b2eSBart Van Assche 
1928c014c8cdSBart Van Assche 	if (ch->connected && !target->qp_in_error) {
19295cfb1782SBart Van Assche 		if (wr_id & LOCAL_INV_WR_ID_MASK) {
19305cfb1782SBart Van Assche 			shost_printk(KERN_ERR, target->scsi_host, PFX
193157363d98SSagi Grimberg 				     "LOCAL_INV failed with status %s (%d)\n",
193257363d98SSagi Grimberg 				     ib_wc_status_msg(wc_status), wc_status);
19335cfb1782SBart Van Assche 		} else if (wr_id & FAST_REG_WR_ID_MASK) {
19345cfb1782SBart Van Assche 			shost_printk(KERN_ERR, target->scsi_host, PFX
193557363d98SSagi Grimberg 				     "FAST_REG_MR failed status %s (%d)\n",
193657363d98SSagi Grimberg 				     ib_wc_status_msg(wc_status), wc_status);
19375cfb1782SBart Van Assche 		} else {
19385cfb1782SBart Van Assche 			shost_printk(KERN_ERR, target->scsi_host,
193957363d98SSagi Grimberg 				     PFX "failed %s status %s (%d) for iu %p\n",
19405cfb1782SBart Van Assche 				     send_err ? "send" : "receive",
194157363d98SSagi Grimberg 				     ib_wc_status_msg(wc_status), wc_status,
194257363d98SSagi Grimberg 				     (void *)(uintptr_t)wr_id);
19435cfb1782SBart Van Assche 		}
1944c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
19454f0af697SBart Van Assche 	}
1946948d1e88SBart Van Assche 	target->qp_in_error = true;
1947948d1e88SBart Van Assche }
1948948d1e88SBart Van Assche 
1949509c07bcSBart Van Assche static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1950aef9ec39SRoland Dreier {
1951509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
1952aef9ec39SRoland Dreier 	struct ib_wc wc;
1953aef9ec39SRoland Dreier 
1954aef9ec39SRoland Dreier 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1955aef9ec39SRoland Dreier 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1956948d1e88SBart Van Assche 		if (likely(wc.status == IB_WC_SUCCESS)) {
1957509c07bcSBart Van Assche 			srp_handle_recv(ch, &wc);
1958948d1e88SBart Van Assche 		} else {
19597dad6b2eSBart Van Assche 			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1960aef9ec39SRoland Dreier 		}
19619c03dc9fSBart Van Assche 	}
19629c03dc9fSBart Van Assche }
19639c03dc9fSBart Van Assche 
1964509c07bcSBart Van Assche static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
19659c03dc9fSBart Van Assche {
1966509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
19679c03dc9fSBart Van Assche 	struct ib_wc wc;
1968dcb4cb85SBart Van Assche 	struct srp_iu *iu;
19699c03dc9fSBart Van Assche 
19709c03dc9fSBart Van Assche 	while (ib_poll_cq(cq, 1, &wc) > 0) {
1971948d1e88SBart Van Assche 		if (likely(wc.status == IB_WC_SUCCESS)) {
1972737b94ebSRoland Dreier 			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1973509c07bcSBart Van Assche 			list_add(&iu->list, &ch->free_tx);
1974948d1e88SBart Van Assche 		} else {
19757dad6b2eSBart Van Assche 			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1976948d1e88SBart Van Assche 		}
1977aef9ec39SRoland Dreier 	}
1978aef9ec39SRoland Dreier }
1979aef9ec39SRoland Dreier 
198076c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1981aef9ec39SRoland Dreier {
198276c75b25SBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
1983a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
1984509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
1985aef9ec39SRoland Dreier 	struct srp_request *req;
1986aef9ec39SRoland Dreier 	struct srp_iu *iu;
1987aef9ec39SRoland Dreier 	struct srp_cmd *cmd;
198885507bccSRalph Campbell 	struct ib_device *dev;
198976c75b25SBart Van Assche 	unsigned long flags;
199077f2c1a4SBart Van Assche 	u32 tag;
199177f2c1a4SBart Van Assche 	u16 idx;
1992d1b4289eSBart Van Assche 	int len, ret;
1993a95cadb9SBart Van Assche 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1994a95cadb9SBart Van Assche 
1995a95cadb9SBart Van Assche 	/*
1996a95cadb9SBart Van Assche 	 * The SCSI EH thread is the only context from which srp_queuecommand()
1997a95cadb9SBart Van Assche 	 * can get invoked for blocked devices (SDEV_BLOCK /
1998a95cadb9SBart Van Assche 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1999a95cadb9SBart Van Assche 	 * locking the rport mutex if invoked from inside the SCSI EH.
2000a95cadb9SBart Van Assche 	 */
2001a95cadb9SBart Van Assche 	if (in_scsi_eh)
2002a95cadb9SBart Van Assche 		mutex_lock(&rport->mutex);
2003aef9ec39SRoland Dreier 
2004d1b4289eSBart Van Assche 	scmnd->result = srp_chkready(target->rport);
2005d1b4289eSBart Van Assche 	if (unlikely(scmnd->result))
2006d1b4289eSBart Van Assche 		goto err;
20072ce19e72SBart Van Assche 
200877f2c1a4SBart Van Assche 	WARN_ON_ONCE(scmnd->request->tag < 0);
200977f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2010d92c0da7SBart Van Assche 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
201177f2c1a4SBart Van Assche 	idx = blk_mq_unique_tag_to_tag(tag);
201277f2c1a4SBart Van Assche 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
201377f2c1a4SBart Van Assche 		  dev_name(&shost->shost_gendev), tag, idx,
201477f2c1a4SBart Van Assche 		  target->req_ring_size);
2015509c07bcSBart Van Assche 
2016509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
2017509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2018509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
2019aef9ec39SRoland Dreier 
202077f2c1a4SBart Van Assche 	if (!iu)
202177f2c1a4SBart Van Assche 		goto err;
202277f2c1a4SBart Van Assche 
202377f2c1a4SBart Van Assche 	req = &ch->req_ring[idx];
202405321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev->dev;
202549248644SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
202685507bccSRalph Campbell 				   DMA_TO_DEVICE);
2027aef9ec39SRoland Dreier 
2028f8b6e31eSDavid Dillow 	scmnd->host_scribble = (void *) req;
2029aef9ec39SRoland Dreier 
2030aef9ec39SRoland Dreier 	cmd = iu->buf;
2031aef9ec39SRoland Dreier 	memset(cmd, 0, sizeof *cmd);
2032aef9ec39SRoland Dreier 
2033aef9ec39SRoland Dreier 	cmd->opcode = SRP_CMD;
2034985aa495SBart Van Assche 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
203577f2c1a4SBart Van Assche 	cmd->tag    = tag;
2036aef9ec39SRoland Dreier 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2037aef9ec39SRoland Dreier 
2038aef9ec39SRoland Dreier 	req->scmnd    = scmnd;
2039aef9ec39SRoland Dreier 	req->cmd      = iu;
2040aef9ec39SRoland Dreier 
2041509c07bcSBart Van Assche 	len = srp_map_data(scmnd, ch, req);
2042aef9ec39SRoland Dreier 	if (len < 0) {
20437aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
2044d1b4289eSBart Van Assche 			     PFX "Failed to map data (%d)\n", len);
2045d1b4289eSBart Van Assche 		/*
2046d1b4289eSBart Van Assche 		 * If we ran out of memory descriptors (-ENOMEM) because an
2047d1b4289eSBart Van Assche 		 * application is queuing many requests with more than
204852ede08fSBart Van Assche 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2049d1b4289eSBart Van Assche 		 * to reduce queue depth temporarily.
2050d1b4289eSBart Van Assche 		 */
2051d1b4289eSBart Van Assche 		scmnd->result = len == -ENOMEM ?
2052d1b4289eSBart Van Assche 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
205376c75b25SBart Van Assche 		goto err_iu;
2054aef9ec39SRoland Dreier 	}
2055aef9ec39SRoland Dreier 
205649248644SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
205785507bccSRalph Campbell 				      DMA_TO_DEVICE);
2058aef9ec39SRoland Dreier 
2059509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, len)) {
20607aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2061aef9ec39SRoland Dreier 		goto err_unmap;
2062aef9ec39SRoland Dreier 	}
2063aef9ec39SRoland Dreier 
2064d1b4289eSBart Van Assche 	ret = 0;
2065d1b4289eSBart Van Assche 
2066a95cadb9SBart Van Assche unlock_rport:
2067a95cadb9SBart Van Assche 	if (in_scsi_eh)
2068a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2069a95cadb9SBart Van Assche 
2070d1b4289eSBart Van Assche 	return ret;
2071aef9ec39SRoland Dreier 
2072aef9ec39SRoland Dreier err_unmap:
2073509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
2074aef9ec39SRoland Dreier 
207576c75b25SBart Van Assche err_iu:
2076509c07bcSBart Van Assche 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
207776c75b25SBart Van Assche 
2078024ca901SBart Van Assche 	/*
2079024ca901SBart Van Assche 	 * Avoid that the loops that iterate over the request ring can
2080024ca901SBart Van Assche 	 * encounter a dangling SCSI command pointer.
2081024ca901SBart Van Assche 	 */
2082024ca901SBart Van Assche 	req->scmnd = NULL;
2083024ca901SBart Van Assche 
2084d1b4289eSBart Van Assche err:
2085d1b4289eSBart Van Assche 	if (scmnd->result) {
2086d1b4289eSBart Van Assche 		scmnd->scsi_done(scmnd);
2087d1b4289eSBart Van Assche 		ret = 0;
2088d1b4289eSBart Van Assche 	} else {
2089d1b4289eSBart Van Assche 		ret = SCSI_MLQUEUE_HOST_BUSY;
2090d1b4289eSBart Van Assche 	}
2091a95cadb9SBart Van Assche 
2092d1b4289eSBart Van Assche 	goto unlock_rport;
2093aef9ec39SRoland Dreier }
2094aef9ec39SRoland Dreier 
20954d73f95fSBart Van Assche /*
20964d73f95fSBart Van Assche  * Note: the resources allocated in this function are freed in
2097509c07bcSBart Van Assche  * srp_free_ch_ib().
20984d73f95fSBart Van Assche  */
2099509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2100aef9ec39SRoland Dreier {
2101509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2102aef9ec39SRoland Dreier 	int i;
2103aef9ec39SRoland Dreier 
2104509c07bcSBart Van Assche 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
21054d73f95fSBart Van Assche 			      GFP_KERNEL);
2106509c07bcSBart Van Assche 	if (!ch->rx_ring)
21074d73f95fSBart Van Assche 		goto err_no_ring;
2108509c07bcSBart Van Assche 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
21094d73f95fSBart Van Assche 			      GFP_KERNEL);
2110509c07bcSBart Van Assche 	if (!ch->tx_ring)
21114d73f95fSBart Van Assche 		goto err_no_ring;
21124d73f95fSBart Van Assche 
21134d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2114509c07bcSBart Van Assche 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2115509c07bcSBart Van Assche 					      ch->max_ti_iu_len,
2116aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_FROM_DEVICE);
2117509c07bcSBart Van Assche 		if (!ch->rx_ring[i])
2118aef9ec39SRoland Dreier 			goto err;
2119aef9ec39SRoland Dreier 	}
2120aef9ec39SRoland Dreier 
21214d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2122509c07bcSBart Van Assche 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
212349248644SDavid Dillow 					      target->max_iu_len,
2124aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_TO_DEVICE);
2125509c07bcSBart Van Assche 		if (!ch->tx_ring[i])
2126aef9ec39SRoland Dreier 			goto err;
2127dcb4cb85SBart Van Assche 
2128509c07bcSBart Van Assche 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2129aef9ec39SRoland Dreier 	}
2130aef9ec39SRoland Dreier 
2131aef9ec39SRoland Dreier 	return 0;
2132aef9ec39SRoland Dreier 
2133aef9ec39SRoland Dreier err:
21344d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2135509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2136509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2137aef9ec39SRoland Dreier 	}
2138aef9ec39SRoland Dreier 
21394d73f95fSBart Van Assche 
21404d73f95fSBart Van Assche err_no_ring:
2141509c07bcSBart Van Assche 	kfree(ch->tx_ring);
2142509c07bcSBart Van Assche 	ch->tx_ring = NULL;
2143509c07bcSBart Van Assche 	kfree(ch->rx_ring);
2144509c07bcSBart Van Assche 	ch->rx_ring = NULL;
2145aef9ec39SRoland Dreier 
2146aef9ec39SRoland Dreier 	return -ENOMEM;
2147aef9ec39SRoland Dreier }
2148aef9ec39SRoland Dreier 
2149c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2150c9b03c1aSBart Van Assche {
2151c9b03c1aSBart Van Assche 	uint64_t T_tr_ns, max_compl_time_ms;
2152c9b03c1aSBart Van Assche 	uint32_t rq_tmo_jiffies;
2153c9b03c1aSBart Van Assche 
2154c9b03c1aSBart Van Assche 	/*
2155c9b03c1aSBart Van Assche 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2156c9b03c1aSBart Van Assche 	 * table 91), both the QP timeout and the retry count have to be set
2157c9b03c1aSBart Van Assche 	 * for RC QP's during the RTR to RTS transition.
2158c9b03c1aSBart Van Assche 	 */
2159c9b03c1aSBart Van Assche 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2160c9b03c1aSBart Van Assche 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2161c9b03c1aSBart Van Assche 
2162c9b03c1aSBart Van Assche 	/*
2163c9b03c1aSBart Van Assche 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2164c9b03c1aSBart Van Assche 	 * it can take before an error completion is generated. See also
2165c9b03c1aSBart Van Assche 	 * C9-140..142 in the IBTA spec for more information about how to
2166c9b03c1aSBart Van Assche 	 * convert the QP Local ACK Timeout value to nanoseconds.
2167c9b03c1aSBart Van Assche 	 */
2168c9b03c1aSBart Van Assche 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2169c9b03c1aSBart Van Assche 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2170c9b03c1aSBart Van Assche 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2171c9b03c1aSBart Van Assche 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2172c9b03c1aSBart Van Assche 
2173c9b03c1aSBart Van Assche 	return rq_tmo_jiffies;
2174c9b03c1aSBart Van Assche }
2175c9b03c1aSBart Van Assche 
2176961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2177961e0be8SDavid Dillow 			       struct srp_login_rsp *lrsp,
2178509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2179961e0be8SDavid Dillow {
2180509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2181961e0be8SDavid Dillow 	struct ib_qp_attr *qp_attr = NULL;
2182961e0be8SDavid Dillow 	int attr_mask = 0;
2183961e0be8SDavid Dillow 	int ret;
2184961e0be8SDavid Dillow 	int i;
2185961e0be8SDavid Dillow 
2186961e0be8SDavid Dillow 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2187509c07bcSBart Van Assche 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2188509c07bcSBart Van Assche 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2189961e0be8SDavid Dillow 
2190961e0be8SDavid Dillow 		/*
2191961e0be8SDavid Dillow 		 * Reserve credits for task management so we don't
2192961e0be8SDavid Dillow 		 * bounce requests back to the SCSI mid-layer.
2193961e0be8SDavid Dillow 		 */
2194961e0be8SDavid Dillow 		target->scsi_host->can_queue
2195509c07bcSBart Van Assche 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2196961e0be8SDavid Dillow 			      target->scsi_host->can_queue);
21974d73f95fSBart Van Assche 		target->scsi_host->cmd_per_lun
21984d73f95fSBart Van Assche 			= min_t(int, target->scsi_host->can_queue,
21994d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun);
2200961e0be8SDavid Dillow 	} else {
2201961e0be8SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
2202961e0be8SDavid Dillow 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2203961e0be8SDavid Dillow 		ret = -ECONNRESET;
2204961e0be8SDavid Dillow 		goto error;
2205961e0be8SDavid Dillow 	}
2206961e0be8SDavid Dillow 
2207509c07bcSBart Van Assche 	if (!ch->rx_ring) {
2208509c07bcSBart Van Assche 		ret = srp_alloc_iu_bufs(ch);
2209961e0be8SDavid Dillow 		if (ret)
2210961e0be8SDavid Dillow 			goto error;
2211961e0be8SDavid Dillow 	}
2212961e0be8SDavid Dillow 
2213961e0be8SDavid Dillow 	ret = -ENOMEM;
2214961e0be8SDavid Dillow 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2215961e0be8SDavid Dillow 	if (!qp_attr)
2216961e0be8SDavid Dillow 		goto error;
2217961e0be8SDavid Dillow 
2218961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTR;
2219961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2220961e0be8SDavid Dillow 	if (ret)
2221961e0be8SDavid Dillow 		goto error_free;
2222961e0be8SDavid Dillow 
2223509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2224961e0be8SDavid Dillow 	if (ret)
2225961e0be8SDavid Dillow 		goto error_free;
2226961e0be8SDavid Dillow 
22274d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; i++) {
2228509c07bcSBart Van Assche 		struct srp_iu *iu = ch->rx_ring[i];
2229509c07bcSBart Van Assche 
2230509c07bcSBart Van Assche 		ret = srp_post_recv(ch, iu);
2231961e0be8SDavid Dillow 		if (ret)
2232961e0be8SDavid Dillow 			goto error_free;
2233961e0be8SDavid Dillow 	}
2234961e0be8SDavid Dillow 
2235961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTS;
2236961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2237961e0be8SDavid Dillow 	if (ret)
2238961e0be8SDavid Dillow 		goto error_free;
2239961e0be8SDavid Dillow 
2240c9b03c1aSBart Van Assche 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2241c9b03c1aSBart Van Assche 
2242509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2243961e0be8SDavid Dillow 	if (ret)
2244961e0be8SDavid Dillow 		goto error_free;
2245961e0be8SDavid Dillow 
2246961e0be8SDavid Dillow 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2247961e0be8SDavid Dillow 
2248961e0be8SDavid Dillow error_free:
2249961e0be8SDavid Dillow 	kfree(qp_attr);
2250961e0be8SDavid Dillow 
2251961e0be8SDavid Dillow error:
2252509c07bcSBart Van Assche 	ch->status = ret;
2253961e0be8SDavid Dillow }
2254961e0be8SDavid Dillow 
2255aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2256aef9ec39SRoland Dreier 			       struct ib_cm_event *event,
2257509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2258aef9ec39SRoland Dreier {
2259509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
22607aa54bd7SDavid Dillow 	struct Scsi_Host *shost = target->scsi_host;
2261aef9ec39SRoland Dreier 	struct ib_class_port_info *cpi;
2262aef9ec39SRoland Dreier 	int opcode;
2263aef9ec39SRoland Dreier 
2264aef9ec39SRoland Dreier 	switch (event->param.rej_rcvd.reason) {
2265aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_CM_REDIRECT:
2266aef9ec39SRoland Dreier 		cpi = event->param.rej_rcvd.ari;
2267509c07bcSBart Van Assche 		ch->path.dlid = cpi->redirect_lid;
2268509c07bcSBart Van Assche 		ch->path.pkey = cpi->redirect_pkey;
2269aef9ec39SRoland Dreier 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2270509c07bcSBart Van Assche 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2271aef9ec39SRoland Dreier 
2272509c07bcSBart Van Assche 		ch->status = ch->path.dlid ?
2273aef9ec39SRoland Dreier 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2274aef9ec39SRoland Dreier 		break;
2275aef9ec39SRoland Dreier 
2276aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_REDIRECT:
22775d7cbfd6SRoland Dreier 		if (srp_target_is_topspin(target)) {
2278aef9ec39SRoland Dreier 			/*
2279aef9ec39SRoland Dreier 			 * Topspin/Cisco SRP gateways incorrectly send
2280aef9ec39SRoland Dreier 			 * reject reason code 25 when they mean 24
2281aef9ec39SRoland Dreier 			 * (port redirect).
2282aef9ec39SRoland Dreier 			 */
2283509c07bcSBart Van Assche 			memcpy(ch->path.dgid.raw,
2284aef9ec39SRoland Dreier 			       event->param.rej_rcvd.ari, 16);
2285aef9ec39SRoland Dreier 
22867aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, shost,
22877aa54bd7SDavid Dillow 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2288509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2289509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2290aef9ec39SRoland Dreier 
2291509c07bcSBart Van Assche 			ch->status = SRP_PORT_REDIRECT;
2292aef9ec39SRoland Dreier 		} else {
22937aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
22947aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2295509c07bcSBart Van Assche 			ch->status = -ECONNRESET;
2296aef9ec39SRoland Dreier 		}
2297aef9ec39SRoland Dreier 		break;
2298aef9ec39SRoland Dreier 
2299aef9ec39SRoland Dreier 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
23007aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost,
23017aa54bd7SDavid Dillow 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2302509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2303aef9ec39SRoland Dreier 		break;
2304aef9ec39SRoland Dreier 
2305aef9ec39SRoland Dreier 	case IB_CM_REJ_CONSUMER_DEFINED:
2306aef9ec39SRoland Dreier 		opcode = *(u8 *) event->private_data;
2307aef9ec39SRoland Dreier 		if (opcode == SRP_LOGIN_REJ) {
2308aef9ec39SRoland Dreier 			struct srp_login_rej *rej = event->private_data;
2309aef9ec39SRoland Dreier 			u32 reason = be32_to_cpu(rej->reason);
2310aef9ec39SRoland Dreier 
2311aef9ec39SRoland Dreier 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
23127aa54bd7SDavid Dillow 				shost_printk(KERN_WARNING, shost,
23137aa54bd7SDavid Dillow 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2314aef9ec39SRoland Dreier 			else
2315e7ffde01SBart Van Assche 				shost_printk(KERN_WARNING, shost, PFX
2316e7ffde01SBart Van Assche 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2317747fe000SBart Van Assche 					     target->sgid.raw,
2318747fe000SBart Van Assche 					     target->orig_dgid.raw, reason);
2319aef9ec39SRoland Dreier 		} else
23207aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23217aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2322aef9ec39SRoland Dreier 				     " opcode 0x%02x\n", opcode);
2323509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2324aef9ec39SRoland Dreier 		break;
2325aef9ec39SRoland Dreier 
23269fe4bcf4SDavid Dillow 	case IB_CM_REJ_STALE_CONN:
23279fe4bcf4SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2328509c07bcSBart Van Assche 		ch->status = SRP_STALE_CONN;
23299fe4bcf4SDavid Dillow 		break;
23309fe4bcf4SDavid Dillow 
2331aef9ec39SRoland Dreier 	default:
23327aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2333aef9ec39SRoland Dreier 			     event->param.rej_rcvd.reason);
2334509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2335aef9ec39SRoland Dreier 	}
2336aef9ec39SRoland Dreier }
2337aef9ec39SRoland Dreier 
2338aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2339aef9ec39SRoland Dreier {
2340509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = cm_id->context;
2341509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2342aef9ec39SRoland Dreier 	int comp = 0;
2343aef9ec39SRoland Dreier 
2344aef9ec39SRoland Dreier 	switch (event->event) {
2345aef9ec39SRoland Dreier 	case IB_CM_REQ_ERROR:
23467aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
23477aa54bd7SDavid Dillow 			     PFX "Sending CM REQ failed\n");
2348aef9ec39SRoland Dreier 		comp = 1;
2349509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2350aef9ec39SRoland Dreier 		break;
2351aef9ec39SRoland Dreier 
2352aef9ec39SRoland Dreier 	case IB_CM_REP_RECEIVED:
2353aef9ec39SRoland Dreier 		comp = 1;
2354509c07bcSBart Van Assche 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2355aef9ec39SRoland Dreier 		break;
2356aef9ec39SRoland Dreier 
2357aef9ec39SRoland Dreier 	case IB_CM_REJ_RECEIVED:
23587aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2359aef9ec39SRoland Dreier 		comp = 1;
2360aef9ec39SRoland Dreier 
2361509c07bcSBart Van Assche 		srp_cm_rej_handler(cm_id, event, ch);
2362aef9ec39SRoland Dreier 		break;
2363aef9ec39SRoland Dreier 
2364b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_RECEIVED:
23657aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
23667aa54bd7SDavid Dillow 			     PFX "DREQ received - connection closed\n");
2367c014c8cdSBart Van Assche 		ch->connected = false;
2368b7ac4ab4SIshai Rabinovitz 		if (ib_send_cm_drep(cm_id, NULL, 0))
23697aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
23707aa54bd7SDavid Dillow 				     PFX "Sending CM DREP failed\n");
2371c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
2372aef9ec39SRoland Dreier 		break;
2373aef9ec39SRoland Dreier 
2374aef9ec39SRoland Dreier 	case IB_CM_TIMEWAIT_EXIT:
23757aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
23767aa54bd7SDavid Dillow 			     PFX "connection closed\n");
2377ac72d766SBart Van Assche 		comp = 1;
2378aef9ec39SRoland Dreier 
2379509c07bcSBart Van Assche 		ch->status = 0;
2380aef9ec39SRoland Dreier 		break;
2381aef9ec39SRoland Dreier 
2382b7ac4ab4SIshai Rabinovitz 	case IB_CM_MRA_RECEIVED:
2383b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_ERROR:
2384b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREP_RECEIVED:
2385b7ac4ab4SIshai Rabinovitz 		break;
2386b7ac4ab4SIshai Rabinovitz 
2387aef9ec39SRoland Dreier 	default:
23887aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
23897aa54bd7SDavid Dillow 			     PFX "Unhandled CM event %d\n", event->event);
2390aef9ec39SRoland Dreier 		break;
2391aef9ec39SRoland Dreier 	}
2392aef9ec39SRoland Dreier 
2393aef9ec39SRoland Dreier 	if (comp)
2394509c07bcSBart Van Assche 		complete(&ch->done);
2395aef9ec39SRoland Dreier 
2396aef9ec39SRoland Dreier 	return 0;
2397aef9ec39SRoland Dreier }
2398aef9ec39SRoland Dreier 
239971444b97SJack Wang /**
240071444b97SJack Wang  * srp_change_queue_depth - setting device queue depth
240171444b97SJack Wang  * @sdev: scsi device struct
240271444b97SJack Wang  * @qdepth: requested queue depth
240371444b97SJack Wang  *
240471444b97SJack Wang  * Returns queue depth.
240571444b97SJack Wang  */
240671444b97SJack Wang static int
2407db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
240871444b97SJack Wang {
240971444b97SJack Wang 	if (!sdev->tagged_supported)
24101e6f2416SChristoph Hellwig 		qdepth = 1;
2411db5ed4dfSChristoph Hellwig 	return scsi_change_queue_depth(sdev, qdepth);
241271444b97SJack Wang }
241371444b97SJack Wang 
2414985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2415985aa495SBart Van Assche 			     u8 func)
2416aef9ec39SRoland Dreier {
2417509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2418a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
241919081f31SDavid Dillow 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2420aef9ec39SRoland Dreier 	struct srp_iu *iu;
2421aef9ec39SRoland Dreier 	struct srp_tsk_mgmt *tsk_mgmt;
2422aef9ec39SRoland Dreier 
2423c014c8cdSBart Van Assche 	if (!ch->connected || target->qp_in_error)
24243780d1f0SBart Van Assche 		return -1;
24253780d1f0SBart Van Assche 
2426509c07bcSBart Van Assche 	init_completion(&ch->tsk_mgmt_done);
2427aef9ec39SRoland Dreier 
2428a95cadb9SBart Van Assche 	/*
2429509c07bcSBart Van Assche 	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2430a95cadb9SBart Van Assche 	 * invoked while a task management function is being sent.
2431a95cadb9SBart Van Assche 	 */
2432a95cadb9SBart Van Assche 	mutex_lock(&rport->mutex);
2433509c07bcSBart Van Assche 	spin_lock_irq(&ch->lock);
2434509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2435509c07bcSBart Van Assche 	spin_unlock_irq(&ch->lock);
243676c75b25SBart Van Assche 
2437a95cadb9SBart Van Assche 	if (!iu) {
2438a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2439a95cadb9SBart Van Assche 
244076c75b25SBart Van Assche 		return -1;
2441a95cadb9SBart Van Assche 	}
2442aef9ec39SRoland Dreier 
244319081f31SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
244419081f31SDavid Dillow 				   DMA_TO_DEVICE);
2445aef9ec39SRoland Dreier 	tsk_mgmt = iu->buf;
2446aef9ec39SRoland Dreier 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2447aef9ec39SRoland Dreier 
2448aef9ec39SRoland Dreier 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2449985aa495SBart Van Assche 	int_to_scsilun(lun, &tsk_mgmt->lun);
2450f8b6e31eSDavid Dillow 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2451aef9ec39SRoland Dreier 	tsk_mgmt->tsk_mgmt_func = func;
2452f8b6e31eSDavid Dillow 	tsk_mgmt->task_tag	= req_tag;
2453aef9ec39SRoland Dreier 
245419081f31SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
245519081f31SDavid Dillow 				      DMA_TO_DEVICE);
2456509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2457509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2458a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2459a95cadb9SBart Van Assche 
246076c75b25SBart Van Assche 		return -1;
246176c75b25SBart Van Assche 	}
2462a95cadb9SBart Van Assche 	mutex_unlock(&rport->mutex);
2463d945e1dfSRoland Dreier 
2464509c07bcSBart Van Assche 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2465aef9ec39SRoland Dreier 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2466d945e1dfSRoland Dreier 		return -1;
2467aef9ec39SRoland Dreier 
2468d945e1dfSRoland Dreier 	return 0;
2469d945e1dfSRoland Dreier }
2470d945e1dfSRoland Dreier 
2471aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd)
2472aef9ec39SRoland Dreier {
2473d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2474f8b6e31eSDavid Dillow 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
247577f2c1a4SBart Van Assche 	u32 tag;
2476d92c0da7SBart Van Assche 	u16 ch_idx;
2477509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2478086f44f5SBart Van Assche 	int ret;
2479d945e1dfSRoland Dreier 
24807aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2481aef9ec39SRoland Dreier 
2482d92c0da7SBart Van Assche 	if (!req)
248399b6697aSBart Van Assche 		return SUCCESS;
248477f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2485d92c0da7SBart Van Assche 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2486d92c0da7SBart Van Assche 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2487d92c0da7SBart Van Assche 		return SUCCESS;
2488d92c0da7SBart Van Assche 	ch = &target->ch[ch_idx];
2489d92c0da7SBart Van Assche 	if (!srp_claim_req(ch, req, NULL, scmnd))
2490d92c0da7SBart Van Assche 		return SUCCESS;
2491d92c0da7SBart Van Assche 	shost_printk(KERN_ERR, target->scsi_host,
2492d92c0da7SBart Van Assche 		     "Sending SRP abort for tag %#x\n", tag);
249377f2c1a4SBart Van Assche 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
249480d5e8a2SBart Van Assche 			      SRP_TSK_ABORT_TASK) == 0)
2495086f44f5SBart Van Assche 		ret = SUCCESS;
2496ed9b2264SBart Van Assche 	else if (target->rport->state == SRP_RPORT_LOST)
249799e1c139SBart Van Assche 		ret = FAST_IO_FAIL;
2498086f44f5SBart Van Assche 	else
2499086f44f5SBart Van Assche 		ret = FAILED;
2500509c07bcSBart Van Assche 	srp_free_req(ch, req, scmnd, 0);
2501d945e1dfSRoland Dreier 	scmnd->result = DID_ABORT << 16;
2502d8536670SBart Van Assche 	scmnd->scsi_done(scmnd);
2503d945e1dfSRoland Dreier 
2504086f44f5SBart Van Assche 	return ret;
2505aef9ec39SRoland Dreier }
2506aef9ec39SRoland Dreier 
2507aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2508aef9ec39SRoland Dreier {
2509d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2510d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2511536ae14eSBart Van Assche 	int i;
2512d945e1dfSRoland Dreier 
25137aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2514aef9ec39SRoland Dreier 
2515d92c0da7SBart Van Assche 	ch = &target->ch[0];
2516509c07bcSBart Van Assche 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2517f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2518d945e1dfSRoland Dreier 		return FAILED;
2519509c07bcSBart Van Assche 	if (ch->tsk_mgmt_status)
2520d945e1dfSRoland Dreier 		return FAILED;
2521d945e1dfSRoland Dreier 
2522d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2523d92c0da7SBart Van Assche 		ch = &target->ch[i];
25244d73f95fSBart Van Assche 		for (i = 0; i < target->req_ring_size; ++i) {
2525509c07bcSBart Van Assche 			struct srp_request *req = &ch->req_ring[i];
2526509c07bcSBart Van Assche 
2527509c07bcSBart Van Assche 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2528536ae14eSBart Van Assche 		}
2529d92c0da7SBart Van Assche 	}
2530d945e1dfSRoland Dreier 
2531d945e1dfSRoland Dreier 	return SUCCESS;
2532aef9ec39SRoland Dreier }
2533aef9ec39SRoland Dreier 
2534aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2535aef9ec39SRoland Dreier {
2536aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2537aef9ec39SRoland Dreier 
25387aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2539aef9ec39SRoland Dreier 
2540ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2541aef9ec39SRoland Dreier }
2542aef9ec39SRoland Dreier 
2543c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2544c9b03c1aSBart Van Assche {
2545c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2546c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2547c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2548c9b03c1aSBart Van Assche 	unsigned long timeout;
2549c9b03c1aSBart Van Assche 
2550c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2551c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2552c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2553c9b03c1aSBart Van Assche 	}
2554c9b03c1aSBart Van Assche 
2555c9b03c1aSBart Van Assche 	return 0;
2556c9b03c1aSBart Van Assche }
2557c9b03c1aSBart Van Assche 
2558ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2559ee959b00STony Jones 			   char *buf)
25606ecb0c84SRoland Dreier {
2561ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25626ecb0c84SRoland Dreier 
256345c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
25646ecb0c84SRoland Dreier }
25656ecb0c84SRoland Dreier 
2566ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2567ee959b00STony Jones 			     char *buf)
25686ecb0c84SRoland Dreier {
2569ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25706ecb0c84SRoland Dreier 
257145c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
25726ecb0c84SRoland Dreier }
25736ecb0c84SRoland Dreier 
2574ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2575ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
25766ecb0c84SRoland Dreier {
2577ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25786ecb0c84SRoland Dreier 
257945c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
25806ecb0c84SRoland Dreier }
25816ecb0c84SRoland Dreier 
2582ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2583ee959b00STony Jones 			 char *buf)
25846ecb0c84SRoland Dreier {
2585ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25866ecb0c84SRoland Dreier 
2587747fe000SBart Van Assche 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
25886ecb0c84SRoland Dreier }
25896ecb0c84SRoland Dreier 
2590848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2591848b3082SBart Van Assche 			 char *buf)
2592848b3082SBart Van Assche {
2593848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2594848b3082SBart Van Assche 
2595747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2596848b3082SBart Van Assche }
2597848b3082SBart Van Assche 
2598ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2599ee959b00STony Jones 			 char *buf)
26006ecb0c84SRoland Dreier {
2601ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2602d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch = &target->ch[0];
26036ecb0c84SRoland Dreier 
2604509c07bcSBart Van Assche 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
26056ecb0c84SRoland Dreier }
26066ecb0c84SRoland Dreier 
2607ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2608ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
26093633b3d0SIshai Rabinovitz {
2610ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26113633b3d0SIshai Rabinovitz 
2612747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
26133633b3d0SIshai Rabinovitz }
26143633b3d0SIshai Rabinovitz 
261589de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
261689de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
261789de7486SBart Van Assche {
261889de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2619d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2620d92c0da7SBart Van Assche 	int i, req_lim = INT_MAX;
262189de7486SBart Van Assche 
2622d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2623d92c0da7SBart Van Assche 		ch = &target->ch[i];
2624d92c0da7SBart Van Assche 		req_lim = min(req_lim, ch->req_lim);
2625d92c0da7SBart Van Assche 	}
2626d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", req_lim);
262789de7486SBart Van Assche }
262889de7486SBart Van Assche 
2629ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2630ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
26316bfa24faSRoland Dreier {
2632ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26336bfa24faSRoland Dreier 
26346bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
26356bfa24faSRoland Dreier }
26366bfa24faSRoland Dreier 
2637ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2638ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2639ded7f1a1SIshai Rabinovitz {
2640ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2641ded7f1a1SIshai Rabinovitz 
2642ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2643ded7f1a1SIshai Rabinovitz }
2644ded7f1a1SIshai Rabinovitz 
2645ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2646ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2647ded7f1a1SIshai Rabinovitz {
2648ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2649ded7f1a1SIshai Rabinovitz 
265005321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2651ded7f1a1SIshai Rabinovitz }
2652ded7f1a1SIshai Rabinovitz 
2653d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2654d92c0da7SBart Van Assche 			     char *buf)
2655d92c0da7SBart Van Assche {
2656d92c0da7SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657d92c0da7SBart Van Assche 
2658d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", target->ch_count);
2659d92c0da7SBart Van Assche }
2660d92c0da7SBart Van Assche 
26614b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
26624b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
26634b5e5f41SBart Van Assche {
26644b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26654b5e5f41SBart Van Assche 
26664b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
26674b5e5f41SBart Van Assche }
26684b5e5f41SBart Van Assche 
26697bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
26707bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
26717bb312e4SVu Pham {
26727bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26737bb312e4SVu Pham 
26747bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
26757bb312e4SVu Pham }
26767bb312e4SVu Pham 
267749248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
267849248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
267949248644SDavid Dillow {
268049248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
268149248644SDavid Dillow 
268249248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
268349248644SDavid Dillow }
268449248644SDavid Dillow 
2685c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2686c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2687c07d424dSDavid Dillow {
2688c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689c07d424dSDavid Dillow 
2690c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2691c07d424dSDavid Dillow }
2692c07d424dSDavid Dillow 
2693ee959b00STony Jones static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2694ee959b00STony Jones static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2695ee959b00STony Jones static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2696ee959b00STony Jones static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2697848b3082SBart Van Assche static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2698ee959b00STony Jones static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2699ee959b00STony Jones static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
270089de7486SBart Van Assche static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2701ee959b00STony Jones static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2702ee959b00STony Jones static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2703ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2704d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
27054b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
27067bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
270749248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2708c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
27096ecb0c84SRoland Dreier 
2710ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = {
2711ee959b00STony Jones 	&dev_attr_id_ext,
2712ee959b00STony Jones 	&dev_attr_ioc_guid,
2713ee959b00STony Jones 	&dev_attr_service_id,
2714ee959b00STony Jones 	&dev_attr_pkey,
2715848b3082SBart Van Assche 	&dev_attr_sgid,
2716ee959b00STony Jones 	&dev_attr_dgid,
2717ee959b00STony Jones 	&dev_attr_orig_dgid,
271889de7486SBart Van Assche 	&dev_attr_req_lim,
2719ee959b00STony Jones 	&dev_attr_zero_req_lim,
2720ee959b00STony Jones 	&dev_attr_local_ib_port,
2721ee959b00STony Jones 	&dev_attr_local_ib_device,
2722d92c0da7SBart Van Assche 	&dev_attr_ch_count,
27234b5e5f41SBart Van Assche 	&dev_attr_comp_vector,
27247bb312e4SVu Pham 	&dev_attr_tl_retry_count,
272549248644SDavid Dillow 	&dev_attr_cmd_sg_entries,
2726c07d424dSDavid Dillow 	&dev_attr_allow_ext_sg,
27276ecb0c84SRoland Dreier 	NULL
27286ecb0c84SRoland Dreier };
27296ecb0c84SRoland Dreier 
2730aef9ec39SRoland Dreier static struct scsi_host_template srp_template = {
2731aef9ec39SRoland Dreier 	.module				= THIS_MODULE,
2732b7f008fdSRoland Dreier 	.name				= "InfiniBand SRP initiator",
2733b7f008fdSRoland Dreier 	.proc_name			= DRV_NAME,
2734c9b03c1aSBart Van Assche 	.slave_configure		= srp_slave_configure,
2735aef9ec39SRoland Dreier 	.info				= srp_target_info,
2736aef9ec39SRoland Dreier 	.queuecommand			= srp_queuecommand,
273771444b97SJack Wang 	.change_queue_depth             = srp_change_queue_depth,
2738aef9ec39SRoland Dreier 	.eh_abort_handler		= srp_abort,
2739aef9ec39SRoland Dreier 	.eh_device_reset_handler	= srp_reset_device,
2740aef9ec39SRoland Dreier 	.eh_host_reset_handler		= srp_reset_host,
27412742c1daSBart Van Assche 	.skip_settle_delay		= true,
274249248644SDavid Dillow 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
27434d73f95fSBart Van Assche 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2744aef9ec39SRoland Dreier 	.this_id			= -1,
27454d73f95fSBart Van Assche 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
27466ecb0c84SRoland Dreier 	.use_clustering			= ENABLE_CLUSTERING,
274777f2c1a4SBart Van Assche 	.shost_attrs			= srp_host_attrs,
274877f2c1a4SBart Van Assche 	.use_blk_tags			= 1,
2749c40ecc12SChristoph Hellwig 	.track_queue_depth		= 1,
2750aef9ec39SRoland Dreier };
2751aef9ec39SRoland Dreier 
275234aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host)
275334aa654eSBart Van Assche {
275434aa654eSBart Van Assche 	struct scsi_device *sdev;
275534aa654eSBart Van Assche 	int c = 0;
275634aa654eSBart Van Assche 
275734aa654eSBart Van Assche 	shost_for_each_device(sdev, host)
275834aa654eSBart Van Assche 		c++;
275934aa654eSBart Van Assche 
276034aa654eSBart Van Assche 	return c;
276134aa654eSBart Van Assche }
276234aa654eSBart Van Assche 
2763aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2764aef9ec39SRoland Dreier {
27653236822bSFUJITA Tomonori 	struct srp_rport_identifiers ids;
27663236822bSFUJITA Tomonori 	struct srp_rport *rport;
27673236822bSFUJITA Tomonori 
276834aa654eSBart Van Assche 	target->state = SRP_TARGET_SCANNING;
2769aef9ec39SRoland Dreier 	sprintf(target->target_name, "SRP.T10:%016llX",
277045c37cadSBart Van Assche 		be64_to_cpu(target->id_ext));
2771aef9ec39SRoland Dreier 
277205321937SGreg Kroah-Hartman 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2773aef9ec39SRoland Dreier 		return -ENODEV;
2774aef9ec39SRoland Dreier 
27753236822bSFUJITA Tomonori 	memcpy(ids.port_id, &target->id_ext, 8);
27763236822bSFUJITA Tomonori 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2777aebd5e47SFUJITA Tomonori 	ids.roles = SRP_RPORT_ROLE_TARGET;
27783236822bSFUJITA Tomonori 	rport = srp_rport_add(target->scsi_host, &ids);
27793236822bSFUJITA Tomonori 	if (IS_ERR(rport)) {
27803236822bSFUJITA Tomonori 		scsi_remove_host(target->scsi_host);
27813236822bSFUJITA Tomonori 		return PTR_ERR(rport);
27823236822bSFUJITA Tomonori 	}
27833236822bSFUJITA Tomonori 
2784dc1bdbd9SBart Van Assche 	rport->lld_data = target;
27859dd69a60SBart Van Assche 	target->rport = rport;
2786dc1bdbd9SBart Van Assche 
2787b3589fd4SMatthew Wilcox 	spin_lock(&host->target_lock);
2788aef9ec39SRoland Dreier 	list_add_tail(&target->list, &host->target_list);
2789b3589fd4SMatthew Wilcox 	spin_unlock(&host->target_lock);
2790aef9ec39SRoland Dreier 
2791aef9ec39SRoland Dreier 	scsi_scan_target(&target->scsi_host->shost_gendev,
27921962a4a1SMatthew Wilcox 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2793aef9ec39SRoland Dreier 
2794c014c8cdSBart Van Assche 	if (srp_connected_ch(target) < target->ch_count ||
2795c014c8cdSBart Van Assche 	    target->qp_in_error) {
279634aa654eSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
279734aa654eSBart Van Assche 			     PFX "SCSI scan failed - removing SCSI host\n");
279834aa654eSBart Van Assche 		srp_queue_remove_work(target);
279934aa654eSBart Van Assche 		goto out;
280034aa654eSBart Van Assche 	}
280134aa654eSBart Van Assche 
280234aa654eSBart Van Assche 	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
280334aa654eSBart Van Assche 		 dev_name(&target->scsi_host->shost_gendev),
280434aa654eSBart Van Assche 		 srp_sdev_count(target->scsi_host));
280534aa654eSBart Van Assche 
280634aa654eSBart Van Assche 	spin_lock_irq(&target->lock);
280734aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
280834aa654eSBart Van Assche 		target->state = SRP_TARGET_LIVE;
280934aa654eSBart Van Assche 	spin_unlock_irq(&target->lock);
281034aa654eSBart Van Assche 
281134aa654eSBart Van Assche out:
2812aef9ec39SRoland Dreier 	return 0;
2813aef9ec39SRoland Dreier }
2814aef9ec39SRoland Dreier 
2815ee959b00STony Jones static void srp_release_dev(struct device *dev)
2816aef9ec39SRoland Dreier {
2817aef9ec39SRoland Dreier 	struct srp_host *host =
2818ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2819aef9ec39SRoland Dreier 
2820aef9ec39SRoland Dreier 	complete(&host->released);
2821aef9ec39SRoland Dreier }
2822aef9ec39SRoland Dreier 
2823aef9ec39SRoland Dreier static struct class srp_class = {
2824aef9ec39SRoland Dreier 	.name    = "infiniband_srp",
2825ee959b00STony Jones 	.dev_release = srp_release_dev
2826aef9ec39SRoland Dreier };
2827aef9ec39SRoland Dreier 
282896fc248aSBart Van Assche /**
282996fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2830af24663bSBart Van Assche  * @host:   SRP host.
2831af24663bSBart Van Assche  * @target: SRP target port.
283296fc248aSBart Van Assche  */
283396fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
283496fc248aSBart Van Assche 			    struct srp_target_port *target)
283596fc248aSBart Van Assche {
283696fc248aSBart Van Assche 	struct srp_target_port *t;
283796fc248aSBart Van Assche 	bool ret = false;
283896fc248aSBart Van Assche 
283996fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
284096fc248aSBart Van Assche 		goto out;
284196fc248aSBart Van Assche 
284296fc248aSBart Van Assche 	ret = true;
284396fc248aSBart Van Assche 
284496fc248aSBart Van Assche 	spin_lock(&host->target_lock);
284596fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
284696fc248aSBart Van Assche 		if (t != target &&
284796fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
284896fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
284996fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
285096fc248aSBart Van Assche 			ret = false;
285196fc248aSBart Van Assche 			break;
285296fc248aSBart Van Assche 		}
285396fc248aSBart Van Assche 	}
285496fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
285596fc248aSBart Van Assche 
285696fc248aSBart Van Assche out:
285796fc248aSBart Van Assche 	return ret;
285896fc248aSBart Van Assche }
285996fc248aSBart Van Assche 
2860aef9ec39SRoland Dreier /*
2861aef9ec39SRoland Dreier  * Target ports are added by writing
2862aef9ec39SRoland Dreier  *
2863aef9ec39SRoland Dreier  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2864aef9ec39SRoland Dreier  *     pkey=<P_Key>,service_id=<service ID>
2865aef9ec39SRoland Dreier  *
2866aef9ec39SRoland Dreier  * to the add_target sysfs attribute.
2867aef9ec39SRoland Dreier  */
2868aef9ec39SRoland Dreier enum {
2869aef9ec39SRoland Dreier 	SRP_OPT_ERR		= 0,
2870aef9ec39SRoland Dreier 	SRP_OPT_ID_EXT		= 1 << 0,
2871aef9ec39SRoland Dreier 	SRP_OPT_IOC_GUID	= 1 << 1,
2872aef9ec39SRoland Dreier 	SRP_OPT_DGID		= 1 << 2,
2873aef9ec39SRoland Dreier 	SRP_OPT_PKEY		= 1 << 3,
2874aef9ec39SRoland Dreier 	SRP_OPT_SERVICE_ID	= 1 << 4,
2875aef9ec39SRoland Dreier 	SRP_OPT_MAX_SECT	= 1 << 5,
287652fb2b50SVu Pham 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
28770c0450dbSRamachandra K 	SRP_OPT_IO_CLASS	= 1 << 7,
287801cb9bcbSIshai Rabinovitz 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
287949248644SDavid Dillow 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2880c07d424dSDavid Dillow 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2881c07d424dSDavid Dillow 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
28824b5e5f41SBart Van Assche 	SRP_OPT_COMP_VECTOR	= 1 << 12,
28837bb312e4SVu Pham 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
28844d73f95fSBart Van Assche 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2885aef9ec39SRoland Dreier 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2886aef9ec39SRoland Dreier 				   SRP_OPT_IOC_GUID	|
2887aef9ec39SRoland Dreier 				   SRP_OPT_DGID		|
2888aef9ec39SRoland Dreier 				   SRP_OPT_PKEY		|
2889aef9ec39SRoland Dreier 				   SRP_OPT_SERVICE_ID),
2890aef9ec39SRoland Dreier };
2891aef9ec39SRoland Dreier 
2892a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = {
2893aef9ec39SRoland Dreier 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2894aef9ec39SRoland Dreier 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2895aef9ec39SRoland Dreier 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2896aef9ec39SRoland Dreier 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2897aef9ec39SRoland Dreier 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2898aef9ec39SRoland Dreier 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
289952fb2b50SVu Pham 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
29000c0450dbSRamachandra K 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
290101cb9bcbSIshai Rabinovitz 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
290249248644SDavid Dillow 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2903c07d424dSDavid Dillow 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2904c07d424dSDavid Dillow 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
29054b5e5f41SBart Van Assche 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
29067bb312e4SVu Pham 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
29074d73f95fSBart Van Assche 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2908aef9ec39SRoland Dreier 	{ SRP_OPT_ERR,			NULL 			}
2909aef9ec39SRoland Dreier };
2910aef9ec39SRoland Dreier 
2911aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target)
2912aef9ec39SRoland Dreier {
2913aef9ec39SRoland Dreier 	char *options, *sep_opt;
2914aef9ec39SRoland Dreier 	char *p;
2915aef9ec39SRoland Dreier 	char dgid[3];
2916aef9ec39SRoland Dreier 	substring_t args[MAX_OPT_ARGS];
2917aef9ec39SRoland Dreier 	int opt_mask = 0;
2918aef9ec39SRoland Dreier 	int token;
2919aef9ec39SRoland Dreier 	int ret = -EINVAL;
2920aef9ec39SRoland Dreier 	int i;
2921aef9ec39SRoland Dreier 
2922aef9ec39SRoland Dreier 	options = kstrdup(buf, GFP_KERNEL);
2923aef9ec39SRoland Dreier 	if (!options)
2924aef9ec39SRoland Dreier 		return -ENOMEM;
2925aef9ec39SRoland Dreier 
2926aef9ec39SRoland Dreier 	sep_opt = options;
29277dcf9c19SSagi Grimberg 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2928aef9ec39SRoland Dreier 		if (!*p)
2929aef9ec39SRoland Dreier 			continue;
2930aef9ec39SRoland Dreier 
2931aef9ec39SRoland Dreier 		token = match_token(p, srp_opt_tokens, args);
2932aef9ec39SRoland Dreier 		opt_mask |= token;
2933aef9ec39SRoland Dreier 
2934aef9ec39SRoland Dreier 		switch (token) {
2935aef9ec39SRoland Dreier 		case SRP_OPT_ID_EXT:
2936aef9ec39SRoland Dreier 			p = match_strdup(args);
2937a20f3a6dSIshai Rabinovitz 			if (!p) {
2938a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2939a20f3a6dSIshai Rabinovitz 				goto out;
2940a20f3a6dSIshai Rabinovitz 			}
2941aef9ec39SRoland Dreier 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2942aef9ec39SRoland Dreier 			kfree(p);
2943aef9ec39SRoland Dreier 			break;
2944aef9ec39SRoland Dreier 
2945aef9ec39SRoland Dreier 		case SRP_OPT_IOC_GUID:
2946aef9ec39SRoland Dreier 			p = match_strdup(args);
2947a20f3a6dSIshai Rabinovitz 			if (!p) {
2948a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2949a20f3a6dSIshai Rabinovitz 				goto out;
2950a20f3a6dSIshai Rabinovitz 			}
2951aef9ec39SRoland Dreier 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2952aef9ec39SRoland Dreier 			kfree(p);
2953aef9ec39SRoland Dreier 			break;
2954aef9ec39SRoland Dreier 
2955aef9ec39SRoland Dreier 		case SRP_OPT_DGID:
2956aef9ec39SRoland Dreier 			p = match_strdup(args);
2957a20f3a6dSIshai Rabinovitz 			if (!p) {
2958a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2959a20f3a6dSIshai Rabinovitz 				goto out;
2960a20f3a6dSIshai Rabinovitz 			}
2961aef9ec39SRoland Dreier 			if (strlen(p) != 32) {
2962e0bda7d8SBart Van Assche 				pr_warn("bad dest GID parameter '%s'\n", p);
2963ce1823f0SRoland Dreier 				kfree(p);
2964aef9ec39SRoland Dreier 				goto out;
2965aef9ec39SRoland Dreier 			}
2966aef9ec39SRoland Dreier 
2967aef9ec39SRoland Dreier 			for (i = 0; i < 16; ++i) {
2968747fe000SBart Van Assche 				strlcpy(dgid, p + i * 2, sizeof(dgid));
2969747fe000SBart Van Assche 				if (sscanf(dgid, "%hhx",
2970747fe000SBart Van Assche 					   &target->orig_dgid.raw[i]) < 1) {
2971747fe000SBart Van Assche 					ret = -EINVAL;
2972747fe000SBart Van Assche 					kfree(p);
2973747fe000SBart Van Assche 					goto out;
2974747fe000SBart Van Assche 				}
2975aef9ec39SRoland Dreier 			}
2976bf17c1c7SRoland Dreier 			kfree(p);
2977aef9ec39SRoland Dreier 			break;
2978aef9ec39SRoland Dreier 
2979aef9ec39SRoland Dreier 		case SRP_OPT_PKEY:
2980aef9ec39SRoland Dreier 			if (match_hex(args, &token)) {
2981e0bda7d8SBart Van Assche 				pr_warn("bad P_Key parameter '%s'\n", p);
2982aef9ec39SRoland Dreier 				goto out;
2983aef9ec39SRoland Dreier 			}
2984747fe000SBart Van Assche 			target->pkey = cpu_to_be16(token);
2985aef9ec39SRoland Dreier 			break;
2986aef9ec39SRoland Dreier 
2987aef9ec39SRoland Dreier 		case SRP_OPT_SERVICE_ID:
2988aef9ec39SRoland Dreier 			p = match_strdup(args);
2989a20f3a6dSIshai Rabinovitz 			if (!p) {
2990a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2991a20f3a6dSIshai Rabinovitz 				goto out;
2992a20f3a6dSIshai Rabinovitz 			}
2993aef9ec39SRoland Dreier 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2994aef9ec39SRoland Dreier 			kfree(p);
2995aef9ec39SRoland Dreier 			break;
2996aef9ec39SRoland Dreier 
2997aef9ec39SRoland Dreier 		case SRP_OPT_MAX_SECT:
2998aef9ec39SRoland Dreier 			if (match_int(args, &token)) {
2999e0bda7d8SBart Van Assche 				pr_warn("bad max sect parameter '%s'\n", p);
3000aef9ec39SRoland Dreier 				goto out;
3001aef9ec39SRoland Dreier 			}
3002aef9ec39SRoland Dreier 			target->scsi_host->max_sectors = token;
3003aef9ec39SRoland Dreier 			break;
3004aef9ec39SRoland Dreier 
30054d73f95fSBart Van Assche 		case SRP_OPT_QUEUE_SIZE:
30064d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
30074d73f95fSBart Van Assche 				pr_warn("bad queue_size parameter '%s'\n", p);
30084d73f95fSBart Van Assche 				goto out;
30094d73f95fSBart Van Assche 			}
30104d73f95fSBart Van Assche 			target->scsi_host->can_queue = token;
30114d73f95fSBart Van Assche 			target->queue_size = token + SRP_RSP_SQ_SIZE +
30124d73f95fSBart Van Assche 					     SRP_TSK_MGMT_SQ_SIZE;
30134d73f95fSBart Van Assche 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
30144d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun = token;
30154d73f95fSBart Van Assche 			break;
30164d73f95fSBart Van Assche 
301752fb2b50SVu Pham 		case SRP_OPT_MAX_CMD_PER_LUN:
30184d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
3019e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3020e0bda7d8SBart Van Assche 					p);
302152fb2b50SVu Pham 				goto out;
302252fb2b50SVu Pham 			}
30234d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun = token;
302452fb2b50SVu Pham 			break;
302552fb2b50SVu Pham 
30260c0450dbSRamachandra K 		case SRP_OPT_IO_CLASS:
30270c0450dbSRamachandra K 			if (match_hex(args, &token)) {
3028e0bda7d8SBart Van Assche 				pr_warn("bad IO class parameter '%s'\n", p);
30290c0450dbSRamachandra K 				goto out;
30300c0450dbSRamachandra K 			}
30310c0450dbSRamachandra K 			if (token != SRP_REV10_IB_IO_CLASS &&
30320c0450dbSRamachandra K 			    token != SRP_REV16A_IB_IO_CLASS) {
3033e0bda7d8SBart Van Assche 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3034e0bda7d8SBart Van Assche 					token, SRP_REV10_IB_IO_CLASS,
3035e0bda7d8SBart Van Assche 					SRP_REV16A_IB_IO_CLASS);
30360c0450dbSRamachandra K 				goto out;
30370c0450dbSRamachandra K 			}
30380c0450dbSRamachandra K 			target->io_class = token;
30390c0450dbSRamachandra K 			break;
30400c0450dbSRamachandra K 
304101cb9bcbSIshai Rabinovitz 		case SRP_OPT_INITIATOR_EXT:
304201cb9bcbSIshai Rabinovitz 			p = match_strdup(args);
3043a20f3a6dSIshai Rabinovitz 			if (!p) {
3044a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3045a20f3a6dSIshai Rabinovitz 				goto out;
3046a20f3a6dSIshai Rabinovitz 			}
304701cb9bcbSIshai Rabinovitz 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
304801cb9bcbSIshai Rabinovitz 			kfree(p);
304901cb9bcbSIshai Rabinovitz 			break;
305001cb9bcbSIshai Rabinovitz 
305149248644SDavid Dillow 		case SRP_OPT_CMD_SG_ENTRIES:
305249248644SDavid Dillow 			if (match_int(args, &token) || token < 1 || token > 255) {
3053e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3054e0bda7d8SBart Van Assche 					p);
305549248644SDavid Dillow 				goto out;
305649248644SDavid Dillow 			}
305749248644SDavid Dillow 			target->cmd_sg_cnt = token;
305849248644SDavid Dillow 			break;
305949248644SDavid Dillow 
3060c07d424dSDavid Dillow 		case SRP_OPT_ALLOW_EXT_SG:
3061c07d424dSDavid Dillow 			if (match_int(args, &token)) {
3062e0bda7d8SBart Van Assche 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3063c07d424dSDavid Dillow 				goto out;
3064c07d424dSDavid Dillow 			}
3065c07d424dSDavid Dillow 			target->allow_ext_sg = !!token;
3066c07d424dSDavid Dillow 			break;
3067c07d424dSDavid Dillow 
3068c07d424dSDavid Dillow 		case SRP_OPT_SG_TABLESIZE:
3069c07d424dSDavid Dillow 			if (match_int(args, &token) || token < 1 ||
3070c07d424dSDavid Dillow 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3071e0bda7d8SBart Van Assche 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3072e0bda7d8SBart Van Assche 					p);
3073c07d424dSDavid Dillow 				goto out;
3074c07d424dSDavid Dillow 			}
3075c07d424dSDavid Dillow 			target->sg_tablesize = token;
3076c07d424dSDavid Dillow 			break;
3077c07d424dSDavid Dillow 
30784b5e5f41SBart Van Assche 		case SRP_OPT_COMP_VECTOR:
30794b5e5f41SBart Van Assche 			if (match_int(args, &token) || token < 0) {
30804b5e5f41SBart Van Assche 				pr_warn("bad comp_vector parameter '%s'\n", p);
30814b5e5f41SBart Van Assche 				goto out;
30824b5e5f41SBart Van Assche 			}
30834b5e5f41SBart Van Assche 			target->comp_vector = token;
30844b5e5f41SBart Van Assche 			break;
30854b5e5f41SBart Van Assche 
30867bb312e4SVu Pham 		case SRP_OPT_TL_RETRY_COUNT:
30877bb312e4SVu Pham 			if (match_int(args, &token) || token < 2 || token > 7) {
30887bb312e4SVu Pham 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
30897bb312e4SVu Pham 					p);
30907bb312e4SVu Pham 				goto out;
30917bb312e4SVu Pham 			}
30927bb312e4SVu Pham 			target->tl_retry_count = token;
30937bb312e4SVu Pham 			break;
30947bb312e4SVu Pham 
3095aef9ec39SRoland Dreier 		default:
3096e0bda7d8SBart Van Assche 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3097e0bda7d8SBart Van Assche 				p);
3098aef9ec39SRoland Dreier 			goto out;
3099aef9ec39SRoland Dreier 		}
3100aef9ec39SRoland Dreier 	}
3101aef9ec39SRoland Dreier 
3102aef9ec39SRoland Dreier 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3103aef9ec39SRoland Dreier 		ret = 0;
3104aef9ec39SRoland Dreier 	else
3105aef9ec39SRoland Dreier 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3106aef9ec39SRoland Dreier 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3107aef9ec39SRoland Dreier 			    !(srp_opt_tokens[i].token & opt_mask))
3108e0bda7d8SBart Van Assche 				pr_warn("target creation request is missing parameter '%s'\n",
3109aef9ec39SRoland Dreier 					srp_opt_tokens[i].pattern);
3110aef9ec39SRoland Dreier 
31114d73f95fSBart Van Assche 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
31124d73f95fSBart Van Assche 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
31134d73f95fSBart Van Assche 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
31144d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun,
31154d73f95fSBart Van Assche 			target->scsi_host->can_queue);
31164d73f95fSBart Van Assche 
3117aef9ec39SRoland Dreier out:
3118aef9ec39SRoland Dreier 	kfree(options);
3119aef9ec39SRoland Dreier 	return ret;
3120aef9ec39SRoland Dreier }
3121aef9ec39SRoland Dreier 
3122ee959b00STony Jones static ssize_t srp_create_target(struct device *dev,
3123ee959b00STony Jones 				 struct device_attribute *attr,
3124aef9ec39SRoland Dreier 				 const char *buf, size_t count)
3125aef9ec39SRoland Dreier {
3126aef9ec39SRoland Dreier 	struct srp_host *host =
3127ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
3128aef9ec39SRoland Dreier 	struct Scsi_Host *target_host;
3129aef9ec39SRoland Dreier 	struct srp_target_port *target;
3130509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
3131d1b4289eSBart Van Assche 	struct srp_device *srp_dev = host->srp_dev;
3132d1b4289eSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
3133d92c0da7SBart Van Assche 	int ret, node_idx, node, cpu, i;
3134d92c0da7SBart Van Assche 	bool multich = false;
3135aef9ec39SRoland Dreier 
3136aef9ec39SRoland Dreier 	target_host = scsi_host_alloc(&srp_template,
3137aef9ec39SRoland Dreier 				      sizeof (struct srp_target_port));
3138aef9ec39SRoland Dreier 	if (!target_host)
3139aef9ec39SRoland Dreier 		return -ENOMEM;
3140aef9ec39SRoland Dreier 
31413236822bSFUJITA Tomonori 	target_host->transportt  = ib_srp_transport_template;
3142fd1b6c4aSBart Van Assche 	target_host->max_channel = 0;
3143fd1b6c4aSBart Van Assche 	target_host->max_id      = 1;
3144985aa495SBart Van Assche 	target_host->max_lun     = -1LL;
31453c8edf0eSArne Redlich 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
31465f068992SRoland Dreier 
3147aef9ec39SRoland Dreier 	target = host_to_target(target_host);
3148aef9ec39SRoland Dreier 
31490c0450dbSRamachandra K 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3150aef9ec39SRoland Dreier 	target->scsi_host	= target_host;
3151aef9ec39SRoland Dreier 	target->srp_host	= host;
31529af76271SDavid Dillow 	target->lkey		= host->srp_dev->mr->lkey;
31539af76271SDavid Dillow 	target->rkey		= host->srp_dev->mr->rkey;
315449248644SDavid Dillow 	target->cmd_sg_cnt	= cmd_sg_entries;
3155c07d424dSDavid Dillow 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3156c07d424dSDavid Dillow 	target->allow_ext_sg	= allow_ext_sg;
31577bb312e4SVu Pham 	target->tl_retry_count	= 7;
31584d73f95fSBart Van Assche 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3159aef9ec39SRoland Dreier 
316034aa654eSBart Van Assche 	/*
316134aa654eSBart Van Assche 	 * Avoid that the SCSI host can be removed by srp_remove_target()
316234aa654eSBart Van Assche 	 * before this function returns.
316334aa654eSBart Van Assche 	 */
316434aa654eSBart Van Assche 	scsi_host_get(target->scsi_host);
316534aa654eSBart Van Assche 
31662d7091bcSBart Van Assche 	mutex_lock(&host->add_target_mutex);
31672d7091bcSBart Van Assche 
3168aef9ec39SRoland Dreier 	ret = srp_parse_options(buf, target);
3169aef9ec39SRoland Dreier 	if (ret)
3170fb49c8bbSBart Van Assche 		goto out;
3171aef9ec39SRoland Dreier 
317277f2c1a4SBart Van Assche 	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
317377f2c1a4SBart Van Assche 	if (ret)
3174fb49c8bbSBart Van Assche 		goto out;
317577f2c1a4SBart Van Assche 
31764d73f95fSBart Van Assche 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
31774d73f95fSBart Van Assche 
317896fc248aSBart Van Assche 	if (!srp_conn_unique(target->srp_host, target)) {
317996fc248aSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
318096fc248aSBart Van Assche 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
318196fc248aSBart Van Assche 			     be64_to_cpu(target->id_ext),
318296fc248aSBart Van Assche 			     be64_to_cpu(target->ioc_guid),
318396fc248aSBart Van Assche 			     be64_to_cpu(target->initiator_ext));
318496fc248aSBart Van Assche 		ret = -EEXIST;
3185fb49c8bbSBart Van Assche 		goto out;
318696fc248aSBart Van Assche 	}
318796fc248aSBart Van Assche 
31885cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3189c07d424dSDavid Dillow 	    target->cmd_sg_cnt < target->sg_tablesize) {
31905cfb1782SBart Van Assche 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3191c07d424dSDavid Dillow 		target->sg_tablesize = target->cmd_sg_cnt;
3192c07d424dSDavid Dillow 	}
3193c07d424dSDavid Dillow 
3194c07d424dSDavid Dillow 	target_host->sg_tablesize = target->sg_tablesize;
3195c07d424dSDavid Dillow 	target->indirect_size = target->sg_tablesize *
3196c07d424dSDavid Dillow 				sizeof (struct srp_direct_buf);
319749248644SDavid Dillow 	target->max_iu_len = sizeof (struct srp_cmd) +
319849248644SDavid Dillow 			     sizeof (struct srp_indirect_buf) +
319949248644SDavid Dillow 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
320049248644SDavid Dillow 
3201c1120f89SBart Van Assche 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3202ef6c49d8SBart Van Assche 	INIT_WORK(&target->remove_work, srp_remove_work);
32038f26c9ffSDavid Dillow 	spin_lock_init(&target->lock);
3204747fe000SBart Van Assche 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
32052088ca66SSagi Grimberg 	if (ret)
3206fb49c8bbSBart Van Assche 		goto out;
3207d92c0da7SBart Van Assche 
3208d92c0da7SBart Van Assche 	ret = -ENOMEM;
3209d92c0da7SBart Van Assche 	target->ch_count = max_t(unsigned, num_online_nodes(),
3210d92c0da7SBart Van Assche 				 min(ch_count ? :
3211d92c0da7SBart Van Assche 				     min(4 * num_online_nodes(),
3212d92c0da7SBart Van Assche 					 ibdev->num_comp_vectors),
3213d92c0da7SBart Van Assche 				     num_online_cpus()));
3214d92c0da7SBart Van Assche 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3215d92c0da7SBart Van Assche 			     GFP_KERNEL);
3216d92c0da7SBart Van Assche 	if (!target->ch)
3217fb49c8bbSBart Van Assche 		goto out;
3218d92c0da7SBart Van Assche 
3219d92c0da7SBart Van Assche 	node_idx = 0;
3220d92c0da7SBart Van Assche 	for_each_online_node(node) {
3221d92c0da7SBart Van Assche 		const int ch_start = (node_idx * target->ch_count /
3222d92c0da7SBart Van Assche 				      num_online_nodes());
3223d92c0da7SBart Van Assche 		const int ch_end = ((node_idx + 1) * target->ch_count /
3224d92c0da7SBart Van Assche 				    num_online_nodes());
3225d92c0da7SBart Van Assche 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3226d92c0da7SBart Van Assche 				      num_online_nodes() + target->comp_vector)
3227d92c0da7SBart Van Assche 				     % ibdev->num_comp_vectors;
3228d92c0da7SBart Van Assche 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3229d92c0da7SBart Van Assche 				    num_online_nodes() + target->comp_vector)
3230d92c0da7SBart Van Assche 				   % ibdev->num_comp_vectors;
3231d92c0da7SBart Van Assche 		int cpu_idx = 0;
3232d92c0da7SBart Van Assche 
3233d92c0da7SBart Van Assche 		for_each_online_cpu(cpu) {
3234d92c0da7SBart Van Assche 			if (cpu_to_node(cpu) != node)
3235d92c0da7SBart Van Assche 				continue;
3236d92c0da7SBart Van Assche 			if (ch_start + cpu_idx >= ch_end)
3237d92c0da7SBart Van Assche 				continue;
3238d92c0da7SBart Van Assche 			ch = &target->ch[ch_start + cpu_idx];
3239d92c0da7SBart Van Assche 			ch->target = target;
3240d92c0da7SBart Van Assche 			ch->comp_vector = cv_start == cv_end ? cv_start :
3241d92c0da7SBart Van Assche 				cv_start + cpu_idx % (cv_end - cv_start);
3242d92c0da7SBart Van Assche 			spin_lock_init(&ch->lock);
3243d92c0da7SBart Van Assche 			INIT_LIST_HEAD(&ch->free_tx);
3244d92c0da7SBart Van Assche 			ret = srp_new_cm_id(ch);
3245d92c0da7SBart Van Assche 			if (ret)
3246d92c0da7SBart Van Assche 				goto err_disconnect;
3247aef9ec39SRoland Dreier 
3248509c07bcSBart Van Assche 			ret = srp_create_ch_ib(ch);
3249aef9ec39SRoland Dreier 			if (ret)
3250d92c0da7SBart Van Assche 				goto err_disconnect;
3251aef9ec39SRoland Dreier 
3252d92c0da7SBart Van Assche 			ret = srp_alloc_req_data(ch);
32539fe4bcf4SDavid Dillow 			if (ret)
3254d92c0da7SBart Van Assche 				goto err_disconnect;
3255aef9ec39SRoland Dreier 
3256d92c0da7SBart Van Assche 			ret = srp_connect_ch(ch, multich);
3257aef9ec39SRoland Dreier 			if (ret) {
32587aa54bd7SDavid Dillow 				shost_printk(KERN_ERR, target->scsi_host,
3259d92c0da7SBart Van Assche 					     PFX "Connection %d/%d failed\n",
3260d92c0da7SBart Van Assche 					     ch_start + cpu_idx,
3261d92c0da7SBart Van Assche 					     target->ch_count);
3262d92c0da7SBart Van Assche 				if (node_idx == 0 && cpu_idx == 0) {
3263d92c0da7SBart Van Assche 					goto err_disconnect;
3264d92c0da7SBart Van Assche 				} else {
3265d92c0da7SBart Van Assche 					srp_free_ch_ib(target, ch);
3266d92c0da7SBart Van Assche 					srp_free_req_data(target, ch);
3267d92c0da7SBart Van Assche 					target->ch_count = ch - target->ch;
3268d92c0da7SBart Van Assche 					break;
3269aef9ec39SRoland Dreier 				}
3270d92c0da7SBart Van Assche 			}
3271d92c0da7SBart Van Assche 
3272d92c0da7SBart Van Assche 			multich = true;
3273d92c0da7SBart Van Assche 			cpu_idx++;
3274d92c0da7SBart Van Assche 		}
3275d92c0da7SBart Van Assche 		node_idx++;
3276d92c0da7SBart Van Assche 	}
3277d92c0da7SBart Van Assche 
3278d92c0da7SBart Van Assche 	target->scsi_host->nr_hw_queues = target->ch_count;
3279aef9ec39SRoland Dreier 
3280aef9ec39SRoland Dreier 	ret = srp_add_target(host, target);
3281aef9ec39SRoland Dreier 	if (ret)
3282aef9ec39SRoland Dreier 		goto err_disconnect;
3283aef9ec39SRoland Dreier 
328434aa654eSBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
3285e7ffde01SBart Van Assche 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3286e7ffde01SBart Van Assche 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3287e7ffde01SBart Van Assche 			     be64_to_cpu(target->id_ext),
3288e7ffde01SBart Van Assche 			     be64_to_cpu(target->ioc_guid),
3289747fe000SBart Van Assche 			     be16_to_cpu(target->pkey),
3290e7ffde01SBart Van Assche 			     be64_to_cpu(target->service_id),
3291747fe000SBart Van Assche 			     target->sgid.raw, target->orig_dgid.raw);
329234aa654eSBart Van Assche 	}
3293e7ffde01SBart Van Assche 
32942d7091bcSBart Van Assche 	ret = count;
32952d7091bcSBart Van Assche 
32962d7091bcSBart Van Assche out:
32972d7091bcSBart Van Assche 	mutex_unlock(&host->add_target_mutex);
329834aa654eSBart Van Assche 
329934aa654eSBart Van Assche 	scsi_host_put(target->scsi_host);
330034aa654eSBart Van Assche 
33012d7091bcSBart Van Assche 	return ret;
3302aef9ec39SRoland Dreier 
3303aef9ec39SRoland Dreier err_disconnect:
3304aef9ec39SRoland Dreier 	srp_disconnect_target(target);
3305aef9ec39SRoland Dreier 
3306d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
3307d92c0da7SBart Van Assche 		ch = &target->ch[i];
3308509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
3309509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
3310d92c0da7SBart Van Assche 	}
3311d92c0da7SBart Van Assche 
3312d92c0da7SBart Van Assche 	kfree(target->ch);
33132d7091bcSBart Van Assche 	goto out;
3314aef9ec39SRoland Dreier }
3315aef9ec39SRoland Dreier 
3316ee959b00STony Jones static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3317aef9ec39SRoland Dreier 
3318ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3319ee959b00STony Jones 			  char *buf)
3320aef9ec39SRoland Dreier {
3321ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3322aef9ec39SRoland Dreier 
332305321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3324aef9ec39SRoland Dreier }
3325aef9ec39SRoland Dreier 
3326ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3327aef9ec39SRoland Dreier 
3328ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3329ee959b00STony Jones 			 char *buf)
3330aef9ec39SRoland Dreier {
3331ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3332aef9ec39SRoland Dreier 
3333aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3334aef9ec39SRoland Dreier }
3335aef9ec39SRoland Dreier 
3336ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3337aef9ec39SRoland Dreier 
3338f5358a17SRoland Dreier static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3339aef9ec39SRoland Dreier {
3340aef9ec39SRoland Dreier 	struct srp_host *host;
3341aef9ec39SRoland Dreier 
3342aef9ec39SRoland Dreier 	host = kzalloc(sizeof *host, GFP_KERNEL);
3343aef9ec39SRoland Dreier 	if (!host)
3344aef9ec39SRoland Dreier 		return NULL;
3345aef9ec39SRoland Dreier 
3346aef9ec39SRoland Dreier 	INIT_LIST_HEAD(&host->target_list);
3347b3589fd4SMatthew Wilcox 	spin_lock_init(&host->target_lock);
3348aef9ec39SRoland Dreier 	init_completion(&host->released);
33492d7091bcSBart Van Assche 	mutex_init(&host->add_target_mutex);
335005321937SGreg Kroah-Hartman 	host->srp_dev = device;
3351aef9ec39SRoland Dreier 	host->port = port;
3352aef9ec39SRoland Dreier 
3353ee959b00STony Jones 	host->dev.class = &srp_class;
3354ee959b00STony Jones 	host->dev.parent = device->dev->dma_device;
3355d927e38cSKay Sievers 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3356aef9ec39SRoland Dreier 
3357ee959b00STony Jones 	if (device_register(&host->dev))
3358f5358a17SRoland Dreier 		goto free_host;
3359ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_add_target))
3360aef9ec39SRoland Dreier 		goto err_class;
3361ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3362aef9ec39SRoland Dreier 		goto err_class;
3363ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_port))
3364aef9ec39SRoland Dreier 		goto err_class;
3365aef9ec39SRoland Dreier 
3366aef9ec39SRoland Dreier 	return host;
3367aef9ec39SRoland Dreier 
3368aef9ec39SRoland Dreier err_class:
3369ee959b00STony Jones 	device_unregister(&host->dev);
3370aef9ec39SRoland Dreier 
3371f5358a17SRoland Dreier free_host:
3372aef9ec39SRoland Dreier 	kfree(host);
3373aef9ec39SRoland Dreier 
3374aef9ec39SRoland Dreier 	return NULL;
3375aef9ec39SRoland Dreier }
3376aef9ec39SRoland Dreier 
3377aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device)
3378aef9ec39SRoland Dreier {
3379f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3380f5358a17SRoland Dreier 	struct ib_device_attr *dev_attr;
3381aef9ec39SRoland Dreier 	struct srp_host *host;
3382*4139032bSHal Rosenstock 	int mr_page_shift, p;
338352ede08fSBart Van Assche 	u64 max_pages_per_mr;
3384aef9ec39SRoland Dreier 
3385f5358a17SRoland Dreier 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3386f5358a17SRoland Dreier 	if (!dev_attr)
3387cf311cd4SSean Hefty 		return;
3388aef9ec39SRoland Dreier 
3389f5358a17SRoland Dreier 	if (ib_query_device(device, dev_attr)) {
3390e0bda7d8SBart Van Assche 		pr_warn("Query device failed for %s\n", device->name);
3391f5358a17SRoland Dreier 		goto free_attr;
3392f5358a17SRoland Dreier 	}
3393f5358a17SRoland Dreier 
3394f5358a17SRoland Dreier 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3395f5358a17SRoland Dreier 	if (!srp_dev)
3396f5358a17SRoland Dreier 		goto free_attr;
3397f5358a17SRoland Dreier 
3398d1b4289eSBart Van Assche 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3399d1b4289eSBart Van Assche 			    device->map_phys_fmr && device->unmap_fmr);
34005cfb1782SBart Van Assche 	srp_dev->has_fr = (dev_attr->device_cap_flags &
34015cfb1782SBart Van Assche 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
34025cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
34035cfb1782SBart Van Assche 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
34045cfb1782SBart Van Assche 
34055cfb1782SBart Van Assche 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
34065cfb1782SBart Van Assche 				 (!srp_dev->has_fmr || prefer_fr));
3407d1b4289eSBart Van Assche 
3408f5358a17SRoland Dreier 	/*
3409f5358a17SRoland Dreier 	 * Use the smallest page size supported by the HCA, down to a
34108f26c9ffSDavid Dillow 	 * minimum of 4096 bytes. We're unlikely to build large sglists
34118f26c9ffSDavid Dillow 	 * out of smaller entries.
3412f5358a17SRoland Dreier 	 */
341352ede08fSBart Van Assche 	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
341452ede08fSBart Van Assche 	srp_dev->mr_page_size	= 1 << mr_page_shift;
341552ede08fSBart Van Assche 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
341652ede08fSBart Van Assche 	max_pages_per_mr	= dev_attr->max_mr_size;
341752ede08fSBart Van Assche 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
341852ede08fSBart Van Assche 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
341952ede08fSBart Van Assche 					  max_pages_per_mr);
34205cfb1782SBart Van Assche 	if (srp_dev->use_fast_reg) {
34215cfb1782SBart Van Assche 		srp_dev->max_pages_per_mr =
34225cfb1782SBart Van Assche 			min_t(u32, srp_dev->max_pages_per_mr,
34235cfb1782SBart Van Assche 			      dev_attr->max_fast_reg_page_list_len);
34245cfb1782SBart Van Assche 	}
342552ede08fSBart Van Assche 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
342652ede08fSBart Van Assche 				   srp_dev->max_pages_per_mr;
34275cfb1782SBart Van Assche 	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
342852ede08fSBart Van Assche 		 device->name, mr_page_shift, dev_attr->max_mr_size,
34295cfb1782SBart Van Assche 		 dev_attr->max_fast_reg_page_list_len,
343052ede08fSBart Van Assche 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3431f5358a17SRoland Dreier 
3432f5358a17SRoland Dreier 	INIT_LIST_HEAD(&srp_dev->dev_list);
3433f5358a17SRoland Dreier 
3434f5358a17SRoland Dreier 	srp_dev->dev = device;
3435f5358a17SRoland Dreier 	srp_dev->pd  = ib_alloc_pd(device);
3436f5358a17SRoland Dreier 	if (IS_ERR(srp_dev->pd))
3437f5358a17SRoland Dreier 		goto free_dev;
3438f5358a17SRoland Dreier 
3439f5358a17SRoland Dreier 	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3440f5358a17SRoland Dreier 				    IB_ACCESS_LOCAL_WRITE |
3441f5358a17SRoland Dreier 				    IB_ACCESS_REMOTE_READ |
3442f5358a17SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
3443f5358a17SRoland Dreier 	if (IS_ERR(srp_dev->mr))
3444f5358a17SRoland Dreier 		goto err_pd;
3445f5358a17SRoland Dreier 
3446*4139032bSHal Rosenstock 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3447f5358a17SRoland Dreier 		host = srp_add_port(srp_dev, p);
3448aef9ec39SRoland Dreier 		if (host)
3449f5358a17SRoland Dreier 			list_add_tail(&host->list, &srp_dev->dev_list);
3450aef9ec39SRoland Dreier 	}
3451aef9ec39SRoland Dreier 
3452f5358a17SRoland Dreier 	ib_set_client_data(device, &srp_client, srp_dev);
3453f5358a17SRoland Dreier 
3454f5358a17SRoland Dreier 	goto free_attr;
3455f5358a17SRoland Dreier 
3456f5358a17SRoland Dreier err_pd:
3457f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3458f5358a17SRoland Dreier 
3459f5358a17SRoland Dreier free_dev:
3460f5358a17SRoland Dreier 	kfree(srp_dev);
3461f5358a17SRoland Dreier 
3462f5358a17SRoland Dreier free_attr:
3463f5358a17SRoland Dreier 	kfree(dev_attr);
3464aef9ec39SRoland Dreier }
3465aef9ec39SRoland Dreier 
3466aef9ec39SRoland Dreier static void srp_remove_one(struct ib_device *device)
3467aef9ec39SRoland Dreier {
3468f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3469aef9ec39SRoland Dreier 	struct srp_host *host, *tmp_host;
3470ef6c49d8SBart Van Assche 	struct srp_target_port *target;
3471aef9ec39SRoland Dreier 
3472f5358a17SRoland Dreier 	srp_dev = ib_get_client_data(device, &srp_client);
34731fe0cb84SDotan Barak 	if (!srp_dev)
34741fe0cb84SDotan Barak 		return;
3475aef9ec39SRoland Dreier 
3476f5358a17SRoland Dreier 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3477ee959b00STony Jones 		device_unregister(&host->dev);
3478aef9ec39SRoland Dreier 		/*
3479aef9ec39SRoland Dreier 		 * Wait for the sysfs entry to go away, so that no new
3480aef9ec39SRoland Dreier 		 * target ports can be created.
3481aef9ec39SRoland Dreier 		 */
3482aef9ec39SRoland Dreier 		wait_for_completion(&host->released);
3483aef9ec39SRoland Dreier 
3484aef9ec39SRoland Dreier 		/*
3485ef6c49d8SBart Van Assche 		 * Remove all target ports.
3486aef9ec39SRoland Dreier 		 */
3487b3589fd4SMatthew Wilcox 		spin_lock(&host->target_lock);
3488ef6c49d8SBart Van Assche 		list_for_each_entry(target, &host->target_list, list)
3489ef6c49d8SBart Van Assche 			srp_queue_remove_work(target);
3490b3589fd4SMatthew Wilcox 		spin_unlock(&host->target_lock);
3491aef9ec39SRoland Dreier 
3492aef9ec39SRoland Dreier 		/*
3493bcc05910SBart Van Assche 		 * Wait for tl_err and target port removal tasks.
3494aef9ec39SRoland Dreier 		 */
3495ef6c49d8SBart Van Assche 		flush_workqueue(system_long_wq);
3496bcc05910SBart Van Assche 		flush_workqueue(srp_remove_wq);
3497aef9ec39SRoland Dreier 
3498aef9ec39SRoland Dreier 		kfree(host);
3499aef9ec39SRoland Dreier 	}
3500aef9ec39SRoland Dreier 
3501f5358a17SRoland Dreier 	ib_dereg_mr(srp_dev->mr);
3502f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3503f5358a17SRoland Dreier 
3504f5358a17SRoland Dreier 	kfree(srp_dev);
3505aef9ec39SRoland Dreier }
3506aef9ec39SRoland Dreier 
35073236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = {
3508ed9b2264SBart Van Assche 	.has_rport_state	 = true,
3509ed9b2264SBart Van Assche 	.reset_timer_if_blocked	 = true,
3510a95cadb9SBart Van Assche 	.reconnect_delay	 = &srp_reconnect_delay,
3511ed9b2264SBart Van Assche 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3512ed9b2264SBart Van Assche 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3513ed9b2264SBart Van Assche 	.reconnect		 = srp_rport_reconnect,
3514dc1bdbd9SBart Van Assche 	.rport_delete		 = srp_rport_delete,
3515ed9b2264SBart Van Assche 	.terminate_rport_io	 = srp_terminate_io,
35163236822bSFUJITA Tomonori };
35173236822bSFUJITA Tomonori 
3518aef9ec39SRoland Dreier static int __init srp_init_module(void)
3519aef9ec39SRoland Dreier {
3520aef9ec39SRoland Dreier 	int ret;
3521aef9ec39SRoland Dreier 
3522dcb4cb85SBart Van Assche 	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3523dd5e6e38SBart Van Assche 
352449248644SDavid Dillow 	if (srp_sg_tablesize) {
3525e0bda7d8SBart Van Assche 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
352649248644SDavid Dillow 		if (!cmd_sg_entries)
352749248644SDavid Dillow 			cmd_sg_entries = srp_sg_tablesize;
352849248644SDavid Dillow 	}
352949248644SDavid Dillow 
353049248644SDavid Dillow 	if (!cmd_sg_entries)
353149248644SDavid Dillow 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
353249248644SDavid Dillow 
353349248644SDavid Dillow 	if (cmd_sg_entries > 255) {
3534e0bda7d8SBart Van Assche 		pr_warn("Clamping cmd_sg_entries to 255\n");
353549248644SDavid Dillow 		cmd_sg_entries = 255;
35361e89a194SDavid Dillow 	}
35371e89a194SDavid Dillow 
3538c07d424dSDavid Dillow 	if (!indirect_sg_entries)
3539c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3540c07d424dSDavid Dillow 	else if (indirect_sg_entries < cmd_sg_entries) {
3541e0bda7d8SBart Van Assche 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3542e0bda7d8SBart Van Assche 			cmd_sg_entries);
3543c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3544c07d424dSDavid Dillow 	}
3545c07d424dSDavid Dillow 
3546bcc05910SBart Van Assche 	srp_remove_wq = create_workqueue("srp_remove");
3547da05be29SWei Yongjun 	if (!srp_remove_wq) {
3548da05be29SWei Yongjun 		ret = -ENOMEM;
3549bcc05910SBart Van Assche 		goto out;
3550bcc05910SBart Van Assche 	}
3551bcc05910SBart Van Assche 
3552bcc05910SBart Van Assche 	ret = -ENOMEM;
35533236822bSFUJITA Tomonori 	ib_srp_transport_template =
35543236822bSFUJITA Tomonori 		srp_attach_transport(&ib_srp_transport_functions);
35553236822bSFUJITA Tomonori 	if (!ib_srp_transport_template)
3556bcc05910SBart Van Assche 		goto destroy_wq;
35573236822bSFUJITA Tomonori 
3558aef9ec39SRoland Dreier 	ret = class_register(&srp_class);
3559aef9ec39SRoland Dreier 	if (ret) {
3560e0bda7d8SBart Van Assche 		pr_err("couldn't register class infiniband_srp\n");
3561bcc05910SBart Van Assche 		goto release_tr;
3562aef9ec39SRoland Dreier 	}
3563aef9ec39SRoland Dreier 
3564c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&srp_sa_client);
3565c1a0b23bSMichael S. Tsirkin 
3566aef9ec39SRoland Dreier 	ret = ib_register_client(&srp_client);
3567aef9ec39SRoland Dreier 	if (ret) {
3568e0bda7d8SBart Van Assche 		pr_err("couldn't register IB client\n");
3569bcc05910SBart Van Assche 		goto unreg_sa;
3570aef9ec39SRoland Dreier 	}
3571aef9ec39SRoland Dreier 
3572bcc05910SBart Van Assche out:
3573bcc05910SBart Van Assche 	return ret;
3574bcc05910SBart Van Assche 
3575bcc05910SBart Van Assche unreg_sa:
3576bcc05910SBart Van Assche 	ib_sa_unregister_client(&srp_sa_client);
3577bcc05910SBart Van Assche 	class_unregister(&srp_class);
3578bcc05910SBart Van Assche 
3579bcc05910SBart Van Assche release_tr:
3580bcc05910SBart Van Assche 	srp_release_transport(ib_srp_transport_template);
3581bcc05910SBart Van Assche 
3582bcc05910SBart Van Assche destroy_wq:
3583bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3584bcc05910SBart Van Assche 	goto out;
3585aef9ec39SRoland Dreier }
3586aef9ec39SRoland Dreier 
3587aef9ec39SRoland Dreier static void __exit srp_cleanup_module(void)
3588aef9ec39SRoland Dreier {
3589aef9ec39SRoland Dreier 	ib_unregister_client(&srp_client);
3590c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&srp_sa_client);
3591aef9ec39SRoland Dreier 	class_unregister(&srp_class);
35923236822bSFUJITA Tomonori 	srp_release_transport(ib_srp_transport_template);
3593bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3594aef9ec39SRoland Dreier }
3595aef9ec39SRoland Dreier 
3596aef9ec39SRoland Dreier module_init(srp_init_module);
3597aef9ec39SRoland Dreier module_exit(srp_cleanup_module);
3598