xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision ff2ba9936591a1364ae21adf18366dca7608395a)
1aef9ec39SRoland Dreier /*
2aef9ec39SRoland Dreier  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3aef9ec39SRoland Dreier  *
4aef9ec39SRoland Dreier  * This software is available to you under a choice of one of two
5aef9ec39SRoland Dreier  * licenses.  You may choose to be licensed under the terms of the GNU
6aef9ec39SRoland Dreier  * General Public License (GPL) Version 2, available from the file
7aef9ec39SRoland Dreier  * COPYING in the main directory of this source tree, or the
8aef9ec39SRoland Dreier  * OpenIB.org BSD license below:
9aef9ec39SRoland Dreier  *
10aef9ec39SRoland Dreier  *     Redistribution and use in source and binary forms, with or
11aef9ec39SRoland Dreier  *     without modification, are permitted provided that the following
12aef9ec39SRoland Dreier  *     conditions are met:
13aef9ec39SRoland Dreier  *
14aef9ec39SRoland Dreier  *      - Redistributions of source code must retain the above
15aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
16aef9ec39SRoland Dreier  *        disclaimer.
17aef9ec39SRoland Dreier  *
18aef9ec39SRoland Dreier  *      - Redistributions in binary form must reproduce the above
19aef9ec39SRoland Dreier  *        copyright notice, this list of conditions and the following
20aef9ec39SRoland Dreier  *        disclaimer in the documentation and/or other materials
21aef9ec39SRoland Dreier  *        provided with the distribution.
22aef9ec39SRoland Dreier  *
23aef9ec39SRoland Dreier  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24aef9ec39SRoland Dreier  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25aef9ec39SRoland Dreier  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26aef9ec39SRoland Dreier  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27aef9ec39SRoland Dreier  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28aef9ec39SRoland Dreier  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29aef9ec39SRoland Dreier  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30aef9ec39SRoland Dreier  * SOFTWARE.
31aef9ec39SRoland Dreier  */
32aef9ec39SRoland Dreier 
33d236cd0eSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34e0bda7d8SBart Van Assche 
35aef9ec39SRoland Dreier #include <linux/module.h>
36aef9ec39SRoland Dreier #include <linux/init.h>
37aef9ec39SRoland Dreier #include <linux/slab.h>
38aef9ec39SRoland Dreier #include <linux/err.h>
39aef9ec39SRoland Dreier #include <linux/string.h>
40aef9ec39SRoland Dreier #include <linux/parser.h>
41aef9ec39SRoland Dreier #include <linux/random.h>
42de25968cSTim Schmielau #include <linux/jiffies.h>
4356b5390cSBart Van Assche #include <rdma/ib_cache.h>
44aef9ec39SRoland Dreier 
4560063497SArun Sharma #include <linux/atomic.h>
46aef9ec39SRoland Dreier 
47aef9ec39SRoland Dreier #include <scsi/scsi.h>
48aef9ec39SRoland Dreier #include <scsi/scsi_device.h>
49aef9ec39SRoland Dreier #include <scsi/scsi_dbg.h>
5071444b97SJack Wang #include <scsi/scsi_tcq.h>
51aef9ec39SRoland Dreier #include <scsi/srp.h>
523236822bSFUJITA Tomonori #include <scsi/scsi_transport_srp.h>
53aef9ec39SRoland Dreier 
54aef9ec39SRoland Dreier #include "ib_srp.h"
55aef9ec39SRoland Dreier 
56aef9ec39SRoland Dreier #define DRV_NAME	"ib_srp"
57aef9ec39SRoland Dreier #define PFX		DRV_NAME ": "
58713ef24eSBart Van Assche #define DRV_VERSION	"2.0"
59713ef24eSBart Van Assche #define DRV_RELDATE	"July 26, 2015"
60aef9ec39SRoland Dreier 
61aef9ec39SRoland Dreier MODULE_AUTHOR("Roland Dreier");
6233ab3e5bSBart Van Assche MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63aef9ec39SRoland Dreier MODULE_LICENSE("Dual BSD/GPL");
6433ab3e5bSBart Van Assche MODULE_VERSION(DRV_VERSION);
6533ab3e5bSBart Van Assche MODULE_INFO(release_date, DRV_RELDATE);
66aef9ec39SRoland Dreier 
6749248644SDavid Dillow static unsigned int srp_sg_tablesize;
6849248644SDavid Dillow static unsigned int cmd_sg_entries;
69c07d424dSDavid Dillow static unsigned int indirect_sg_entries;
70c07d424dSDavid Dillow static bool allow_ext_sg;
7103f6fb93SBart Van Assche static bool prefer_fr = true;
7203f6fb93SBart Van Assche static bool register_always = true;
73aef9ec39SRoland Dreier static int topspin_workarounds = 1;
74aef9ec39SRoland Dreier 
7549248644SDavid Dillow module_param(srp_sg_tablesize, uint, 0444);
7649248644SDavid Dillow MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
7749248644SDavid Dillow 
7849248644SDavid Dillow module_param(cmd_sg_entries, uint, 0444);
7949248644SDavid Dillow MODULE_PARM_DESC(cmd_sg_entries,
8049248644SDavid Dillow 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
8149248644SDavid Dillow 
82c07d424dSDavid Dillow module_param(indirect_sg_entries, uint, 0444);
83c07d424dSDavid Dillow MODULE_PARM_DESC(indirect_sg_entries,
84c07d424dSDavid Dillow 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
85c07d424dSDavid Dillow 
86c07d424dSDavid Dillow module_param(allow_ext_sg, bool, 0444);
87c07d424dSDavid Dillow MODULE_PARM_DESC(allow_ext_sg,
88c07d424dSDavid Dillow 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
89c07d424dSDavid Dillow 
90aef9ec39SRoland Dreier module_param(topspin_workarounds, int, 0444);
91aef9ec39SRoland Dreier MODULE_PARM_DESC(topspin_workarounds,
92aef9ec39SRoland Dreier 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
93aef9ec39SRoland Dreier 
945cfb1782SBart Van Assche module_param(prefer_fr, bool, 0444);
955cfb1782SBart Van Assche MODULE_PARM_DESC(prefer_fr,
965cfb1782SBart Van Assche "Whether to use fast registration if both FMR and fast registration are supported");
975cfb1782SBart Van Assche 
98b1b8854dSBart Van Assche module_param(register_always, bool, 0444);
99b1b8854dSBart Van Assche MODULE_PARM_DESC(register_always,
100b1b8854dSBart Van Assche 		 "Use memory registration even for contiguous memory regions");
101b1b8854dSBart Van Assche 
1029c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops;
103ed9b2264SBart Van Assche 
104a95cadb9SBart Van Assche static int srp_reconnect_delay = 10;
105a95cadb9SBart Van Assche module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
106a95cadb9SBart Van Assche 		S_IRUGO | S_IWUSR);
107a95cadb9SBart Van Assche MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
108a95cadb9SBart Van Assche 
109ed9b2264SBart Van Assche static int srp_fast_io_fail_tmo = 15;
110ed9b2264SBart Van Assche module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
111ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
112ed9b2264SBart Van Assche MODULE_PARM_DESC(fast_io_fail_tmo,
113ed9b2264SBart Van Assche 		 "Number of seconds between the observation of a transport"
114ed9b2264SBart Van Assche 		 " layer error and failing all I/O. \"off\" means that this"
115ed9b2264SBart Van Assche 		 " functionality is disabled.");
116ed9b2264SBart Van Assche 
117a95cadb9SBart Van Assche static int srp_dev_loss_tmo = 600;
118ed9b2264SBart Van Assche module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
119ed9b2264SBart Van Assche 		S_IRUGO | S_IWUSR);
120ed9b2264SBart Van Assche MODULE_PARM_DESC(dev_loss_tmo,
121ed9b2264SBart Van Assche 		 "Maximum number of seconds that the SRP transport should"
122ed9b2264SBart Van Assche 		 " insulate the SCSI host against transport layer errors. After"
123ed9b2264SBart Van Assche 		 " this time has elapsed the SCSI host is removed. Should be"
124ed9b2264SBart Van Assche 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125ed9b2264SBart Van Assche 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
126ed9b2264SBart Van Assche 		 " this functionality is disabled.");
127ed9b2264SBart Van Assche 
128d92c0da7SBart Van Assche static unsigned ch_count;
129d92c0da7SBart Van Assche module_param(ch_count, uint, 0444);
130d92c0da7SBart Van Assche MODULE_PARM_DESC(ch_count,
131d92c0da7SBart Van Assche 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
132d92c0da7SBart Van Assche 
133aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device);
1347c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data);
1351dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
1361dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
1371dc7b1f1SChristoph Hellwig 		const char *opname);
138aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139aef9ec39SRoland Dreier 
1403236822bSFUJITA Tomonori static struct scsi_transport_template *ib_srp_transport_template;
141bcc05910SBart Van Assche static struct workqueue_struct *srp_remove_wq;
1423236822bSFUJITA Tomonori 
143aef9ec39SRoland Dreier static struct ib_client srp_client = {
144aef9ec39SRoland Dreier 	.name   = "srp",
145aef9ec39SRoland Dreier 	.add    = srp_add_one,
146aef9ec39SRoland Dreier 	.remove = srp_remove_one
147aef9ec39SRoland Dreier };
148aef9ec39SRoland Dreier 
149c1a0b23bSMichael S. Tsirkin static struct ib_sa_client srp_sa_client;
150c1a0b23bSMichael S. Tsirkin 
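/*
 * srp_tmo_get() and srp_tmo_set() implement the kernel_param_ops used for the
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo module parameters. A
 * negative timeout is reported as "off"; srp_tmo_set() validates a new value
 * against the other two timeouts before storing it.
 */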
151ed9b2264SBart Van Assche static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152ed9b2264SBart Van Assche {
153ed9b2264SBart Van Assche 	int tmo = *(int *)kp->arg;
154ed9b2264SBart Van Assche 
155ed9b2264SBart Van Assche 	if (tmo >= 0)
156ed9b2264SBart Van Assche 		return sprintf(buffer, "%d", tmo);
157ed9b2264SBart Van Assche 	else
158ed9b2264SBart Van Assche 		return sprintf(buffer, "off");
159ed9b2264SBart Van Assche }
160ed9b2264SBart Van Assche 
161ed9b2264SBart Van Assche static int srp_tmo_set(const char *val, const struct kernel_param *kp)
162ed9b2264SBart Van Assche {
163ed9b2264SBart Van Assche 	int tmo, res;
164ed9b2264SBart Van Assche 
1653fdf70acSSagi Grimberg 	res = srp_parse_tmo(&tmo, val);
166ed9b2264SBart Van Assche 	if (res)
167ed9b2264SBart Van Assche 		goto out;
1683fdf70acSSagi Grimberg 
169a95cadb9SBart Van Assche 	if (kp->arg == &srp_reconnect_delay)
170a95cadb9SBart Van Assche 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
171a95cadb9SBart Van Assche 				    srp_dev_loss_tmo);
172a95cadb9SBart Van Assche 	else if (kp->arg == &srp_fast_io_fail_tmo)
173a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
174ed9b2264SBart Van Assche 	else
175a95cadb9SBart Van Assche 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
176a95cadb9SBart Van Assche 				    tmo);
177ed9b2264SBart Van Assche 	if (res)
178ed9b2264SBart Van Assche 		goto out;
179ed9b2264SBart Van Assche 	*(int *)kp->arg = tmo;
180ed9b2264SBart Van Assche 
181ed9b2264SBart Van Assche out:
182ed9b2264SBart Van Assche 	return res;
183ed9b2264SBart Van Assche }
184ed9b2264SBart Van Assche 
1859c27847dSLuis R. Rodriguez static const struct kernel_param_ops srp_tmo_ops = {
186ed9b2264SBart Van Assche 	.get = srp_tmo_get,
187ed9b2264SBart Van Assche 	.set = srp_tmo_set,
188ed9b2264SBart Van Assche };
189ed9b2264SBart Van Assche 
190aef9ec39SRoland Dreier static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
191aef9ec39SRoland Dreier {
192aef9ec39SRoland Dreier 	return (struct srp_target_port *) host->hostdata;
193aef9ec39SRoland Dreier }
194aef9ec39SRoland Dreier 
195aef9ec39SRoland Dreier static const char *srp_target_info(struct Scsi_Host *host)
196aef9ec39SRoland Dreier {
197aef9ec39SRoland Dreier 	return host_to_target(host)->target_name;
198aef9ec39SRoland Dreier }
199aef9ec39SRoland Dreier 
2005d7cbfd6SRoland Dreier static int srp_target_is_topspin(struct srp_target_port *target)
2015d7cbfd6SRoland Dreier {
2025d7cbfd6SRoland Dreier 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
2033d1ff48dSRaghava Kondapalli 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
2045d7cbfd6SRoland Dreier 
2055d7cbfd6SRoland Dreier 	return topspin_workarounds &&
2063d1ff48dSRaghava Kondapalli 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
2073d1ff48dSRaghava Kondapalli 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
2085d7cbfd6SRoland Dreier }
2095d7cbfd6SRoland Dreier 
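/**
 * srp_alloc_iu() - allocate an information unit and DMA-map its buffer
 * @host:      SRP host that will own the IU.
 * @size:      Size of the IU buffer in bytes.
 * @gfp_mask:  Allocation flags.
 * @direction: DMA data direction for the mapping.
 *
 * Returns a pointer to the new IU, or NULL on allocation or mapping failure.
 */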
210aef9ec39SRoland Dreier static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
211aef9ec39SRoland Dreier 				   gfp_t gfp_mask,
212aef9ec39SRoland Dreier 				   enum dma_data_direction direction)
213aef9ec39SRoland Dreier {
214aef9ec39SRoland Dreier 	struct srp_iu *iu;
215aef9ec39SRoland Dreier 
216aef9ec39SRoland Dreier 	iu = kmalloc(sizeof *iu, gfp_mask);
217aef9ec39SRoland Dreier 	if (!iu)
218aef9ec39SRoland Dreier 		goto out;
219aef9ec39SRoland Dreier 
220aef9ec39SRoland Dreier 	iu->buf = kzalloc(size, gfp_mask);
221aef9ec39SRoland Dreier 	if (!iu->buf)
222aef9ec39SRoland Dreier 		goto out_free_iu;
223aef9ec39SRoland Dreier 
22405321937SGreg Kroah-Hartman 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
22505321937SGreg Kroah-Hartman 				    direction);
22605321937SGreg Kroah-Hartman 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
227aef9ec39SRoland Dreier 		goto out_free_buf;
228aef9ec39SRoland Dreier 
229aef9ec39SRoland Dreier 	iu->size      = size;
230aef9ec39SRoland Dreier 	iu->direction = direction;
231aef9ec39SRoland Dreier 
232aef9ec39SRoland Dreier 	return iu;
233aef9ec39SRoland Dreier 
234aef9ec39SRoland Dreier out_free_buf:
235aef9ec39SRoland Dreier 	kfree(iu->buf);
236aef9ec39SRoland Dreier out_free_iu:
237aef9ec39SRoland Dreier 	kfree(iu);
238aef9ec39SRoland Dreier out:
239aef9ec39SRoland Dreier 	return NULL;
240aef9ec39SRoland Dreier }
241aef9ec39SRoland Dreier 
242aef9ec39SRoland Dreier static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
243aef9ec39SRoland Dreier {
244aef9ec39SRoland Dreier 	if (!iu)
245aef9ec39SRoland Dreier 		return;
246aef9ec39SRoland Dreier 
24705321937SGreg Kroah-Hartman 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
24805321937SGreg Kroah-Hartman 			    iu->direction);
249aef9ec39SRoland Dreier 	kfree(iu->buf);
250aef9ec39SRoland Dreier 	kfree(iu);
251aef9ec39SRoland Dreier }
252aef9ec39SRoland Dreier 
253aef9ec39SRoland Dreier static void srp_qp_event(struct ib_event *event, void *context)
254aef9ec39SRoland Dreier {
25557363d98SSagi Grimberg 	pr_debug("QP event %s (%d)\n",
25657363d98SSagi Grimberg 		 ib_event_msg(event->event), event->event);
257aef9ec39SRoland Dreier }
258aef9ec39SRoland Dreier 
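/**
 * srp_init_qp() - transition a queue pair into the INIT state
 * @target: SRP target port.
 * @qp:     Queue pair to initialize.
 *
 * Looks up the P_Key index for the target port and modifies the QP state,
 * P_Key index, access flags and port number accordingly.
 */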
259aef9ec39SRoland Dreier static int srp_init_qp(struct srp_target_port *target,
260aef9ec39SRoland Dreier 		       struct ib_qp *qp)
261aef9ec39SRoland Dreier {
262aef9ec39SRoland Dreier 	struct ib_qp_attr *attr;
263aef9ec39SRoland Dreier 	int ret;
264aef9ec39SRoland Dreier 
265aef9ec39SRoland Dreier 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
266aef9ec39SRoland Dreier 	if (!attr)
267aef9ec39SRoland Dreier 		return -ENOMEM;
268aef9ec39SRoland Dreier 
26956b5390cSBart Van Assche 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
270aef9ec39SRoland Dreier 				  target->srp_host->port,
271747fe000SBart Van Assche 				  be16_to_cpu(target->pkey),
272aef9ec39SRoland Dreier 				  &attr->pkey_index);
273aef9ec39SRoland Dreier 	if (ret)
274aef9ec39SRoland Dreier 		goto out;
275aef9ec39SRoland Dreier 
276aef9ec39SRoland Dreier 	attr->qp_state        = IB_QPS_INIT;
277aef9ec39SRoland Dreier 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
278aef9ec39SRoland Dreier 				    IB_ACCESS_REMOTE_WRITE);
279aef9ec39SRoland Dreier 	attr->port_num        = target->srp_host->port;
280aef9ec39SRoland Dreier 
281aef9ec39SRoland Dreier 	ret = ib_modify_qp(qp, attr,
282aef9ec39SRoland Dreier 			   IB_QP_STATE		|
283aef9ec39SRoland Dreier 			   IB_QP_PKEY_INDEX	|
284aef9ec39SRoland Dreier 			   IB_QP_ACCESS_FLAGS	|
285aef9ec39SRoland Dreier 			   IB_QP_PORT);
286aef9ec39SRoland Dreier 
287aef9ec39SRoland Dreier out:
288aef9ec39SRoland Dreier 	kfree(attr);
289aef9ec39SRoland Dreier 	return ret;
290aef9ec39SRoland Dreier }
291aef9ec39SRoland Dreier 
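/**
 * srp_new_cm_id() - allocate a new CM ID for an RDMA channel
 * @ch: SRP RDMA channel.
 *
 * Replaces any existing CM ID and reinitializes the path record fields
 * (SGID, DGID, P_Key and service ID) from the target port.
 */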
292509c07bcSBart Van Assche static int srp_new_cm_id(struct srp_rdma_ch *ch)
2939fe4bcf4SDavid Dillow {
294509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2959fe4bcf4SDavid Dillow 	struct ib_cm_id *new_cm_id;
2969fe4bcf4SDavid Dillow 
29705321937SGreg Kroah-Hartman 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
298509c07bcSBart Van Assche 				    srp_cm_handler, ch);
2999fe4bcf4SDavid Dillow 	if (IS_ERR(new_cm_id))
3009fe4bcf4SDavid Dillow 		return PTR_ERR(new_cm_id);
3019fe4bcf4SDavid Dillow 
302509c07bcSBart Van Assche 	if (ch->cm_id)
303509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
304509c07bcSBart Van Assche 	ch->cm_id = new_cm_id;
305509c07bcSBart Van Assche 	ch->path.sgid = target->sgid;
306509c07bcSBart Van Assche 	ch->path.dgid = target->orig_dgid;
307509c07bcSBart Van Assche 	ch->path.pkey = target->pkey;
308509c07bcSBart Van Assche 	ch->path.service_id = target->service_id;
3099fe4bcf4SDavid Dillow 
3109fe4bcf4SDavid Dillow 	return 0;
3119fe4bcf4SDavid Dillow }
3129fe4bcf4SDavid Dillow 
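/**
 * srp_alloc_fmr_pool() - allocate an FMR pool for a target port
 * @target: SRP target port.
 *
 * The pool size is taken from @target; the maximum number of pages per FMR
 * and the page size are taken from the associated SRP device.
 */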
313d1b4289eSBart Van Assche static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
314d1b4289eSBart Van Assche {
315d1b4289eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
316d1b4289eSBart Van Assche 	struct ib_fmr_pool_param fmr_param;
317d1b4289eSBart Van Assche 
318d1b4289eSBart Van Assche 	memset(&fmr_param, 0, sizeof(fmr_param));
319fa9863f8SBart Van Assche 	fmr_param.pool_size	    = target->mr_pool_size;
320d1b4289eSBart Van Assche 	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
321d1b4289eSBart Van Assche 	fmr_param.cache		    = 1;
32252ede08fSBart Van Assche 	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
32352ede08fSBart Van Assche 	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
324d1b4289eSBart Van Assche 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
325d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_WRITE |
326d1b4289eSBart Van Assche 				       IB_ACCESS_REMOTE_READ);
327d1b4289eSBart Van Assche 
328d1b4289eSBart Van Assche 	return ib_create_fmr_pool(dev->pd, &fmr_param);
329d1b4289eSBart Van Assche }
330d1b4289eSBart Van Assche 
3315cfb1782SBart Van Assche /**
3325cfb1782SBart Van Assche  * srp_destroy_fr_pool() - free the resources owned by a pool
3335cfb1782SBart Van Assche  * @pool: Fast registration pool to be destroyed.
3345cfb1782SBart Van Assche  */
3355cfb1782SBart Van Assche static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
3365cfb1782SBart Van Assche {
3375cfb1782SBart Van Assche 	int i;
3385cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3395cfb1782SBart Van Assche 
3405cfb1782SBart Van Assche 	if (!pool)
3415cfb1782SBart Van Assche 		return;
3425cfb1782SBart Van Assche 
3435cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
3445cfb1782SBart Van Assche 		if (d->mr)
3455cfb1782SBart Van Assche 			ib_dereg_mr(d->mr);
3465cfb1782SBart Van Assche 	}
3475cfb1782SBart Van Assche 	kfree(pool);
3485cfb1782SBart Van Assche }
3495cfb1782SBart Van Assche 
3505cfb1782SBart Van Assche /**
3515cfb1782SBart Van Assche  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
3525cfb1782SBart Van Assche  * @device:            IB device to allocate fast registration descriptors for.
3535cfb1782SBart Van Assche  * @pd:                Protection domain associated with the FR descriptors.
3545cfb1782SBart Van Assche  * @pool_size:         Number of descriptors to allocate.
3555cfb1782SBart Van Assche  * @max_page_list_len: Maximum fast registration work request page list length.
3565cfb1782SBart Van Assche  */
3575cfb1782SBart Van Assche static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
3585cfb1782SBart Van Assche 					      struct ib_pd *pd, int pool_size,
3595cfb1782SBart Van Assche 					      int max_page_list_len)
3605cfb1782SBart Van Assche {
3615cfb1782SBart Van Assche 	struct srp_fr_pool *pool;
3625cfb1782SBart Van Assche 	struct srp_fr_desc *d;
3635cfb1782SBart Van Assche 	struct ib_mr *mr;
3645cfb1782SBart Van Assche 	int i, ret = -EINVAL;
3655cfb1782SBart Van Assche 
3665cfb1782SBart Van Assche 	if (pool_size <= 0)
3675cfb1782SBart Van Assche 		goto err;
3685cfb1782SBart Van Assche 	ret = -ENOMEM;
3695cfb1782SBart Van Assche 	pool = kzalloc(sizeof(struct srp_fr_pool) +
3705cfb1782SBart Van Assche 		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
3715cfb1782SBart Van Assche 	if (!pool)
3725cfb1782SBart Van Assche 		goto err;
3735cfb1782SBart Van Assche 	pool->size = pool_size;
3745cfb1782SBart Van Assche 	pool->max_page_list_len = max_page_list_len;
3755cfb1782SBart Van Assche 	spin_lock_init(&pool->lock);
3765cfb1782SBart Van Assche 	INIT_LIST_HEAD(&pool->free_list);
3775cfb1782SBart Van Assche 
3785cfb1782SBart Van Assche 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
379563b67c5SSagi Grimberg 		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
380563b67c5SSagi Grimberg 				 max_page_list_len);
3815cfb1782SBart Van Assche 		if (IS_ERR(mr)) {
3825cfb1782SBart Van Assche 			ret = PTR_ERR(mr);
3835cfb1782SBart Van Assche 			goto destroy_pool;
3845cfb1782SBart Van Assche 		}
3855cfb1782SBart Van Assche 		d->mr = mr;
3865cfb1782SBart Van Assche 		list_add_tail(&d->entry, &pool->free_list);
3875cfb1782SBart Van Assche 	}
3885cfb1782SBart Van Assche 
3895cfb1782SBart Van Assche out:
3905cfb1782SBart Van Assche 	return pool;
3915cfb1782SBart Van Assche 
3925cfb1782SBart Van Assche destroy_pool:
3935cfb1782SBart Van Assche 	srp_destroy_fr_pool(pool);
3945cfb1782SBart Van Assche 
3955cfb1782SBart Van Assche err:
3965cfb1782SBart Van Assche 	pool = ERR_PTR(ret);
3975cfb1782SBart Van Assche 	goto out;
3985cfb1782SBart Van Assche }
3995cfb1782SBart Van Assche 
4005cfb1782SBart Van Assche /**
4015cfb1782SBart Van Assche  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
4025cfb1782SBart Van Assche  * @pool: Pool to obtain descriptor from.
4035cfb1782SBart Van Assche  */
4045cfb1782SBart Van Assche static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
4055cfb1782SBart Van Assche {
4065cfb1782SBart Van Assche 	struct srp_fr_desc *d = NULL;
4075cfb1782SBart Van Assche 	unsigned long flags;
4085cfb1782SBart Van Assche 
4095cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4105cfb1782SBart Van Assche 	if (!list_empty(&pool->free_list)) {
4115cfb1782SBart Van Assche 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
4125cfb1782SBart Van Assche 		list_del(&d->entry);
4135cfb1782SBart Van Assche 	}
4145cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4155cfb1782SBart Van Assche 
4165cfb1782SBart Van Assche 	return d;
4175cfb1782SBart Van Assche }
4185cfb1782SBart Van Assche 
4195cfb1782SBart Van Assche /**
4205cfb1782SBart Van Assche  * srp_fr_pool_put() - put an FR descriptor back in the free list
4215cfb1782SBart Van Assche  * @pool: Pool the descriptor was allocated from.
4225cfb1782SBart Van Assche  * @desc: Pointer to an array of fast registration descriptor pointers.
4235cfb1782SBart Van Assche  * @n:    Number of descriptors to put back.
4245cfb1782SBart Van Assche  *
4255cfb1782SBart Van Assche  * Note: The caller must already have queued an invalidation request for
4265cfb1782SBart Van Assche  * desc->mr->rkey before calling this function.
4275cfb1782SBart Van Assche  */
4285cfb1782SBart Van Assche static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
4295cfb1782SBart Van Assche 			    int n)
4305cfb1782SBart Van Assche {
4315cfb1782SBart Van Assche 	unsigned long flags;
4325cfb1782SBart Van Assche 	int i;
4335cfb1782SBart Van Assche 
4345cfb1782SBart Van Assche 	spin_lock_irqsave(&pool->lock, flags);
4355cfb1782SBart Van Assche 	for (i = 0; i < n; i++)
4365cfb1782SBart Van Assche 		list_add(&desc[i]->entry, &pool->free_list);
4375cfb1782SBart Van Assche 	spin_unlock_irqrestore(&pool->lock, flags);
4385cfb1782SBart Van Assche }
4395cfb1782SBart Van Assche 
4405cfb1782SBart Van Assche static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
4415cfb1782SBart Van Assche {
4425cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
4435cfb1782SBart Van Assche 
444fa9863f8SBart Van Assche 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
4455cfb1782SBart Van Assche 				  dev->max_pages_per_mr);
4465cfb1782SBart Van Assche }
4475cfb1782SBart Van Assche 
4487dad6b2eSBart Van Assche /**
4497dad6b2eSBart Van Assche  * srp_destroy_qp() - destroy an RDMA queue pair
4507dad6b2eSBart Van Assche  * @ch: SRP RDMA channel.
4517dad6b2eSBart Van Assche  *
452561392d4SSteve Wise  * Drain the qp before destroying it.  This prevents the receive
453561392d4SSteve Wise  * completion handler from accessing the queue pair while it is
4547dad6b2eSBart Van Assche  * being destroyed.
4557dad6b2eSBart Van Assche  */
4567dad6b2eSBart Van Assche static void srp_destroy_qp(struct srp_rdma_ch *ch)
4577dad6b2eSBart Van Assche {
458561392d4SSteve Wise 	ib_drain_rq(ch->qp);
4597dad6b2eSBart Van Assche 	ib_destroy_qp(ch->qp);
4607dad6b2eSBart Van Assche }
4617dad6b2eSBart Van Assche 
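/**
 * srp_create_ch_ib() - create the CQs, QP and memory registration pool
 * @ch: SRP RDMA channel.
 *
 * Any completion queues, queue pair and FR or FMR pool already associated
 * with @ch are destroyed and replaced by the newly allocated resources, so
 * this function can also be used when reconnecting.
 */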
462509c07bcSBart Van Assche static int srp_create_ch_ib(struct srp_rdma_ch *ch)
463aef9ec39SRoland Dreier {
464509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
46562154b2eSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
466aef9ec39SRoland Dreier 	struct ib_qp_init_attr *init_attr;
46773aa89edSIshai Rabinovitz 	struct ib_cq *recv_cq, *send_cq;
46873aa89edSIshai Rabinovitz 	struct ib_qp *qp;
469d1b4289eSBart Van Assche 	struct ib_fmr_pool *fmr_pool = NULL;
4705cfb1782SBart Van Assche 	struct srp_fr_pool *fr_pool = NULL;
47109c0c0beSSagi Grimberg 	const int m = dev->use_fast_reg ? 3 : 1;
472aef9ec39SRoland Dreier 	int ret;
473aef9ec39SRoland Dreier 
474aef9ec39SRoland Dreier 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
475aef9ec39SRoland Dreier 	if (!init_attr)
476aef9ec39SRoland Dreier 		return -ENOMEM;
477aef9ec39SRoland Dreier 
478561392d4SSteve Wise 	/* queue_size + 1 for ib_drain_rq() */
4791dc7b1f1SChristoph Hellwig 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
4801dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_SOFTIRQ);
48173aa89edSIshai Rabinovitz 	if (IS_ERR(recv_cq)) {
48273aa89edSIshai Rabinovitz 		ret = PTR_ERR(recv_cq);
483da9d2f07SRoland Dreier 		goto err;
484aef9ec39SRoland Dreier 	}
485aef9ec39SRoland Dreier 
4861dc7b1f1SChristoph Hellwig 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
4871dc7b1f1SChristoph Hellwig 				ch->comp_vector, IB_POLL_DIRECT);
48873aa89edSIshai Rabinovitz 	if (IS_ERR(send_cq)) {
48973aa89edSIshai Rabinovitz 		ret = PTR_ERR(send_cq);
490da9d2f07SRoland Dreier 		goto err_recv_cq;
4919c03dc9fSBart Van Assche 	}
4929c03dc9fSBart Van Assche 
493aef9ec39SRoland Dreier 	init_attr->event_handler       = srp_qp_event;
4945cfb1782SBart Van Assche 	init_attr->cap.max_send_wr     = m * target->queue_size;
4957dad6b2eSBart Van Assche 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
496aef9ec39SRoland Dreier 	init_attr->cap.max_recv_sge    = 1;
497aef9ec39SRoland Dreier 	init_attr->cap.max_send_sge    = 1;
4985cfb1782SBart Van Assche 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
499aef9ec39SRoland Dreier 	init_attr->qp_type             = IB_QPT_RC;
50073aa89edSIshai Rabinovitz 	init_attr->send_cq             = send_cq;
50173aa89edSIshai Rabinovitz 	init_attr->recv_cq             = recv_cq;
502aef9ec39SRoland Dreier 
50362154b2eSBart Van Assche 	qp = ib_create_qp(dev->pd, init_attr);
50473aa89edSIshai Rabinovitz 	if (IS_ERR(qp)) {
50573aa89edSIshai Rabinovitz 		ret = PTR_ERR(qp);
506da9d2f07SRoland Dreier 		goto err_send_cq;
507aef9ec39SRoland Dreier 	}
508aef9ec39SRoland Dreier 
50973aa89edSIshai Rabinovitz 	ret = srp_init_qp(target, qp);
510da9d2f07SRoland Dreier 	if (ret)
511da9d2f07SRoland Dreier 		goto err_qp;
512aef9ec39SRoland Dreier 
513002f1567SBart Van Assche 	if (dev->use_fast_reg) {
5145cfb1782SBart Van Assche 		fr_pool = srp_alloc_fr_pool(target);
5155cfb1782SBart Van Assche 		if (IS_ERR(fr_pool)) {
5165cfb1782SBart Van Assche 			ret = PTR_ERR(fr_pool);
5175cfb1782SBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
5185cfb1782SBart Van Assche 				     "FR pool allocation failed (%d)\n", ret);
5195cfb1782SBart Van Assche 			goto err_qp;
5205cfb1782SBart Van Assche 		}
521002f1567SBart Van Assche 	} else if (dev->use_fmr) {
522d1b4289eSBart Van Assche 		fmr_pool = srp_alloc_fmr_pool(target);
523d1b4289eSBart Van Assche 		if (IS_ERR(fmr_pool)) {
524d1b4289eSBart Van Assche 			ret = PTR_ERR(fmr_pool);
525d1b4289eSBart Van Assche 			shost_printk(KERN_WARNING, target->scsi_host, PFX
526d1b4289eSBart Van Assche 				     "FMR pool allocation failed (%d)\n", ret);
527d1b4289eSBart Van Assche 			goto err_qp;
528d1b4289eSBart Van Assche 		}
529d1b4289eSBart Van Assche 	}
530d1b4289eSBart Van Assche 
531509c07bcSBart Van Assche 	if (ch->qp)
5327dad6b2eSBart Van Assche 		srp_destroy_qp(ch);
533509c07bcSBart Van Assche 	if (ch->recv_cq)
5341dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->recv_cq);
535509c07bcSBart Van Assche 	if (ch->send_cq)
5361dc7b1f1SChristoph Hellwig 		ib_free_cq(ch->send_cq);
53773aa89edSIshai Rabinovitz 
538509c07bcSBart Van Assche 	ch->qp = qp;
539509c07bcSBart Van Assche 	ch->recv_cq = recv_cq;
540509c07bcSBart Van Assche 	ch->send_cq = send_cq;
54173aa89edSIshai Rabinovitz 
5427fbc67dfSSagi Grimberg 	if (dev->use_fast_reg) {
5437fbc67dfSSagi Grimberg 		if (ch->fr_pool)
5447fbc67dfSSagi Grimberg 			srp_destroy_fr_pool(ch->fr_pool);
5457fbc67dfSSagi Grimberg 		ch->fr_pool = fr_pool;
5467fbc67dfSSagi Grimberg 	} else if (dev->use_fmr) {
5477fbc67dfSSagi Grimberg 		if (ch->fmr_pool)
5487fbc67dfSSagi Grimberg 			ib_destroy_fmr_pool(ch->fmr_pool);
5497fbc67dfSSagi Grimberg 		ch->fmr_pool = fmr_pool;
5507fbc67dfSSagi Grimberg 	}
5517fbc67dfSSagi Grimberg 
552da9d2f07SRoland Dreier 	kfree(init_attr);
553da9d2f07SRoland Dreier 	return 0;
554da9d2f07SRoland Dreier 
555da9d2f07SRoland Dreier err_qp:
5561dc7b1f1SChristoph Hellwig 	srp_destroy_qp(ch);
557da9d2f07SRoland Dreier 
558da9d2f07SRoland Dreier err_send_cq:
5591dc7b1f1SChristoph Hellwig 	ib_free_cq(send_cq);
560da9d2f07SRoland Dreier 
561da9d2f07SRoland Dreier err_recv_cq:
5621dc7b1f1SChristoph Hellwig 	ib_free_cq(recv_cq);
563da9d2f07SRoland Dreier 
564da9d2f07SRoland Dreier err:
565aef9ec39SRoland Dreier 	kfree(init_attr);
566aef9ec39SRoland Dreier 	return ret;
567aef9ec39SRoland Dreier }
568aef9ec39SRoland Dreier 
5694d73f95fSBart Van Assche /*
5704d73f95fSBart Van Assche  * Note: this function may be called without srp_alloc_iu_bufs() having been
571509c07bcSBart Van Assche  * invoked. Hence the ch->[rt]x_ring checks.
5724d73f95fSBart Van Assche  */
573509c07bcSBart Van Assche static void srp_free_ch_ib(struct srp_target_port *target,
574509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch)
575aef9ec39SRoland Dreier {
5765cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
577aef9ec39SRoland Dreier 	int i;
578aef9ec39SRoland Dreier 
579d92c0da7SBart Van Assche 	if (!ch->target)
580d92c0da7SBart Van Assche 		return;
581d92c0da7SBart Van Assche 
582509c07bcSBart Van Assche 	if (ch->cm_id) {
583509c07bcSBart Van Assche 		ib_destroy_cm_id(ch->cm_id);
584509c07bcSBart Van Assche 		ch->cm_id = NULL;
585394c595eSBart Van Assche 	}
586394c595eSBart Van Assche 
587d92c0da7SBart Van Assche 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
588d92c0da7SBart Van Assche 	if (!ch->qp)
589d92c0da7SBart Van Assche 		return;
590d92c0da7SBart Van Assche 
5915cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
592509c07bcSBart Van Assche 		if (ch->fr_pool)
593509c07bcSBart Van Assche 			srp_destroy_fr_pool(ch->fr_pool);
594002f1567SBart Van Assche 	} else if (dev->use_fmr) {
595509c07bcSBart Van Assche 		if (ch->fmr_pool)
596509c07bcSBart Van Assche 			ib_destroy_fmr_pool(ch->fmr_pool);
5975cfb1782SBart Van Assche 	}
5981dc7b1f1SChristoph Hellwig 
5997dad6b2eSBart Van Assche 	srp_destroy_qp(ch);
6001dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->send_cq);
6011dc7b1f1SChristoph Hellwig 	ib_free_cq(ch->recv_cq);
602aef9ec39SRoland Dreier 
603d92c0da7SBart Van Assche 	/*
604d92c0da7SBart Van Assche 	 * Prevent the SCSI error handler from using this channel after
605d92c0da7SBart Van Assche 	 * it has been freed: the SCSI error handler may keep trying to
606d92c0da7SBart Van Assche 	 * perform recovery actions after scsi_remove_host() has
607d92c0da7SBart Van Assche 	 * returned.
608d92c0da7SBart Van Assche 	 */
609d92c0da7SBart Van Assche 	ch->target = NULL;
610d92c0da7SBart Van Assche 
611509c07bcSBart Van Assche 	ch->qp = NULL;
612509c07bcSBart Van Assche 	ch->send_cq = ch->recv_cq = NULL;
61373aa89edSIshai Rabinovitz 
614509c07bcSBart Van Assche 	if (ch->rx_ring) {
6154d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
616509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
617509c07bcSBart Van Assche 		kfree(ch->rx_ring);
618509c07bcSBart Van Assche 		ch->rx_ring = NULL;
6194d73f95fSBart Van Assche 	}
620509c07bcSBart Van Assche 	if (ch->tx_ring) {
6214d73f95fSBart Van Assche 		for (i = 0; i < target->queue_size; ++i)
622509c07bcSBart Van Assche 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
623509c07bcSBart Van Assche 		kfree(ch->tx_ring);
624509c07bcSBart Van Assche 		ch->tx_ring = NULL;
6254d73f95fSBart Van Assche 	}
626aef9ec39SRoland Dreier }
627aef9ec39SRoland Dreier 
628aef9ec39SRoland Dreier static void srp_path_rec_completion(int status,
629aef9ec39SRoland Dreier 				    struct ib_sa_path_rec *pathrec,
630509c07bcSBart Van Assche 				    void *ch_ptr)
631aef9ec39SRoland Dreier {
632509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = ch_ptr;
633509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
634aef9ec39SRoland Dreier 
635509c07bcSBart Van Assche 	ch->status = status;
636aef9ec39SRoland Dreier 	if (status)
6377aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
6387aa54bd7SDavid Dillow 			     PFX "Got failed path rec status %d\n", status);
639aef9ec39SRoland Dreier 	else
640509c07bcSBart Van Assche 		ch->path = *pathrec;
641509c07bcSBart Van Assche 	complete(&ch->done);
642aef9ec39SRoland Dreier }
643aef9ec39SRoland Dreier 
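/**
 * srp_lookup_path() - query the SA for a path record
 * @ch: SRP RDMA channel.
 *
 * Issues an ib_sa_path_rec_get() query and waits for
 * srp_path_rec_completion() to fill in ch->path. Returns zero on success or
 * a negative value on failure.
 */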
644509c07bcSBart Van Assche static int srp_lookup_path(struct srp_rdma_ch *ch)
645aef9ec39SRoland Dreier {
646509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
647a702adceSBart Van Assche 	int ret;
648a702adceSBart Van Assche 
649509c07bcSBart Van Assche 	ch->path.numb_path = 1;
650aef9ec39SRoland Dreier 
651509c07bcSBart Van Assche 	init_completion(&ch->done);
652aef9ec39SRoland Dreier 
653509c07bcSBart Van Assche 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
65405321937SGreg Kroah-Hartman 					       target->srp_host->srp_dev->dev,
655aef9ec39SRoland Dreier 					       target->srp_host->port,
656509c07bcSBart Van Assche 					       &ch->path,
657247e020eSSean Hefty 					       IB_SA_PATH_REC_SERVICE_ID |
658aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_DGID	 |
659aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_SGID	 |
660aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_NUMB_PATH	 |
661aef9ec39SRoland Dreier 					       IB_SA_PATH_REC_PKEY,
662aef9ec39SRoland Dreier 					       SRP_PATH_REC_TIMEOUT_MS,
663aef9ec39SRoland Dreier 					       GFP_KERNEL,
664aef9ec39SRoland Dreier 					       srp_path_rec_completion,
665509c07bcSBart Van Assche 					       ch, &ch->path_query);
666509c07bcSBart Van Assche 	if (ch->path_query_id < 0)
667509c07bcSBart Van Assche 		return ch->path_query_id;
668aef9ec39SRoland Dreier 
669509c07bcSBart Van Assche 	ret = wait_for_completion_interruptible(&ch->done);
670a702adceSBart Van Assche 	if (ret < 0)
671a702adceSBart Van Assche 		return ret;
672aef9ec39SRoland Dreier 
673509c07bcSBart Van Assche 	if (ch->status < 0)
6747aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
6757aa54bd7SDavid Dillow 			     PFX "Path record query failed\n");
676aef9ec39SRoland Dreier 
677509c07bcSBart Van Assche 	return ch->status;
678aef9ec39SRoland Dreier }
679aef9ec39SRoland Dreier 
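/**
 * srp_send_req() - send an SRP_LOGIN_REQ through the IB CM
 * @ch:      SRP RDMA channel.
 * @multich: Whether to set SRP_MULTICHAN_MULTI in the login request.
 *
 * Builds the CM REQ parameters and the SRP login request, applying the
 * Topspin/Cisco initiator port ID workaround when necessary.
 */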
680d92c0da7SBart Van Assche static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
681aef9ec39SRoland Dreier {
682509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
683aef9ec39SRoland Dreier 	struct {
684aef9ec39SRoland Dreier 		struct ib_cm_req_param param;
685aef9ec39SRoland Dreier 		struct srp_login_req   priv;
686aef9ec39SRoland Dreier 	} *req = NULL;
687aef9ec39SRoland Dreier 	int status;
688aef9ec39SRoland Dreier 
689aef9ec39SRoland Dreier 	req = kzalloc(sizeof *req, GFP_KERNEL);
690aef9ec39SRoland Dreier 	if (!req)
691aef9ec39SRoland Dreier 		return -ENOMEM;
692aef9ec39SRoland Dreier 
693509c07bcSBart Van Assche 	req->param.primary_path		      = &ch->path;
694aef9ec39SRoland Dreier 	req->param.alternate_path 	      = NULL;
695aef9ec39SRoland Dreier 	req->param.service_id 		      = target->service_id;
696509c07bcSBart Van Assche 	req->param.qp_num		      = ch->qp->qp_num;
697509c07bcSBart Van Assche 	req->param.qp_type		      = ch->qp->qp_type;
698aef9ec39SRoland Dreier 	req->param.private_data 	      = &req->priv;
699aef9ec39SRoland Dreier 	req->param.private_data_len 	      = sizeof req->priv;
700aef9ec39SRoland Dreier 	req->param.flow_control 	      = 1;
701aef9ec39SRoland Dreier 
702aef9ec39SRoland Dreier 	get_random_bytes(&req->param.starting_psn, 4);
703aef9ec39SRoland Dreier 	req->param.starting_psn 	     &= 0xffffff;
704aef9ec39SRoland Dreier 
705aef9ec39SRoland Dreier 	/*
706aef9ec39SRoland Dreier 	 * Pick some arbitrary defaults here; we could make these
707aef9ec39SRoland Dreier 	 * module parameters if anyone cared about setting them.
708aef9ec39SRoland Dreier 	 */
709aef9ec39SRoland Dreier 	req->param.responder_resources	      = 4;
710aef9ec39SRoland Dreier 	req->param.remote_cm_response_timeout = 20;
711aef9ec39SRoland Dreier 	req->param.local_cm_response_timeout  = 20;
7127bb312e4SVu Pham 	req->param.retry_count                = target->tl_retry_count;
713aef9ec39SRoland Dreier 	req->param.rnr_retry_count 	      = 7;
714aef9ec39SRoland Dreier 	req->param.max_cm_retries 	      = 15;
715aef9ec39SRoland Dreier 
716aef9ec39SRoland Dreier 	req->priv.opcode     	= SRP_LOGIN_REQ;
717aef9ec39SRoland Dreier 	req->priv.tag        	= 0;
71849248644SDavid Dillow 	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
719aef9ec39SRoland Dreier 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
720aef9ec39SRoland Dreier 					      SRP_BUF_FORMAT_INDIRECT);
721d92c0da7SBart Van Assche 	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
722d92c0da7SBart Van Assche 				   SRP_MULTICHAN_SINGLE);
7230c0450dbSRamachandra K 	/*
7240c0450dbSRamachandra K 	 * In the published SRP specification (draft rev. 16a), the
7250c0450dbSRamachandra K 	 * port identifier format is 8 bytes of ID extension followed
7260c0450dbSRamachandra K 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
7270c0450dbSRamachandra K 	 * opposite order, so that the GUID comes first.
7280c0450dbSRamachandra K 	 *
7290c0450dbSRamachandra K 	 * Targets conforming to these obsolete drafts can be
7300c0450dbSRamachandra K 	 * recognized by the I/O Class they report.
7310c0450dbSRamachandra K 	 */
7320c0450dbSRamachandra K 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
7330c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
734747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7350c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id + 8,
73601cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
7370c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
7380c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
7390c0450dbSRamachandra K 	} else {
7400c0450dbSRamachandra K 		memcpy(req->priv.initiator_port_id,
74101cb9bcbSIshai Rabinovitz 		       &target->initiator_ext, 8);
74201cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
743747fe000SBart Van Assche 		       &target->sgid.global.interface_id, 8);
7440c0450dbSRamachandra K 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
7450c0450dbSRamachandra K 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
7460c0450dbSRamachandra K 	}
7470c0450dbSRamachandra K 
748aef9ec39SRoland Dreier 	/*
749aef9ec39SRoland Dreier 	 * Topspin/Cisco SRP targets will reject our login unless we
75001cb9bcbSIshai Rabinovitz 	 * zero out the first 8 bytes of our initiator port ID and set
75101cb9bcbSIshai Rabinovitz 	 * the second 8 bytes to the local node GUID.
752aef9ec39SRoland Dreier 	 */
7535d7cbfd6SRoland Dreier 	if (srp_target_is_topspin(target)) {
7547aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
7557aa54bd7SDavid Dillow 			     PFX "Topspin/Cisco initiator port ID workaround "
756aef9ec39SRoland Dreier 			     "activated for target GUID %016llx\n",
75745c37cadSBart Van Assche 			     be64_to_cpu(target->ioc_guid));
758aef9ec39SRoland Dreier 		memset(req->priv.initiator_port_id, 0, 8);
75901cb9bcbSIshai Rabinovitz 		memcpy(req->priv.initiator_port_id + 8,
76005321937SGreg Kroah-Hartman 		       &target->srp_host->srp_dev->dev->node_guid, 8);
761aef9ec39SRoland Dreier 	}
762aef9ec39SRoland Dreier 
763509c07bcSBart Van Assche 	status = ib_send_cm_req(ch->cm_id, &req->param);
764aef9ec39SRoland Dreier 
765aef9ec39SRoland Dreier 	kfree(req);
766aef9ec39SRoland Dreier 
767aef9ec39SRoland Dreier 	return status;
768aef9ec39SRoland Dreier }
769aef9ec39SRoland Dreier 
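/**
 * srp_queue_remove_work() - schedule removal of a target port
 * @target: SRP target port.
 *
 * Returns true if the state changed to SRP_TARGET_REMOVED and the removal
 * work was queued, or false if removal had already been scheduled.
 */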
770ef6c49d8SBart Van Assche static bool srp_queue_remove_work(struct srp_target_port *target)
771ef6c49d8SBart Van Assche {
772ef6c49d8SBart Van Assche 	bool changed = false;
773ef6c49d8SBart Van Assche 
774ef6c49d8SBart Van Assche 	spin_lock_irq(&target->lock);
775ef6c49d8SBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
776ef6c49d8SBart Van Assche 		target->state = SRP_TARGET_REMOVED;
777ef6c49d8SBart Van Assche 		changed = true;
778ef6c49d8SBart Van Assche 	}
779ef6c49d8SBart Van Assche 	spin_unlock_irq(&target->lock);
780ef6c49d8SBart Van Assche 
781ef6c49d8SBart Van Assche 	if (changed)
782bcc05910SBart Van Assche 		queue_work(srp_remove_wq, &target->remove_work);
783ef6c49d8SBart Van Assche 
784ef6c49d8SBart Van Assche 	return changed;
785ef6c49d8SBart Van Assche }
786ef6c49d8SBart Van Assche 
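/**
 * srp_disconnect_target() - mark all channels disconnected and send CM DREQs
 * @target: SRP target port.
 */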
787aef9ec39SRoland Dreier static void srp_disconnect_target(struct srp_target_port *target)
788aef9ec39SRoland Dreier {
789d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
790d92c0da7SBart Van Assche 	int i;
791509c07bcSBart Van Assche 
792aef9ec39SRoland Dreier 	/* XXX should send SRP_I_LOGOUT request */
793aef9ec39SRoland Dreier 
794d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
795d92c0da7SBart Van Assche 		ch = &target->ch[i];
796c014c8cdSBart Van Assche 		ch->connected = false;
797d92c0da7SBart Van Assche 		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
7987aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, target->scsi_host,
7997aa54bd7SDavid Dillow 				     PFX "Sending CM DREQ failed\n");
800aef9ec39SRoland Dreier 		}
801294c875aSBart Van Assche 	}
802294c875aSBart Van Assche }
803aef9ec39SRoland Dreier 
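/**
 * srp_free_req_data() - free the per-request resources of a channel
 * @target: SRP target port.
 * @ch:     SRP RDMA channel.
 *
 * Frees the FR or FMR descriptor lists, unmaps and frees the indirect
 * descriptor buffers and releases the request ring itself.
 */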
804509c07bcSBart Van Assche static void srp_free_req_data(struct srp_target_port *target,
805509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
8068f26c9ffSDavid Dillow {
8075cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
8085cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
8098f26c9ffSDavid Dillow 	struct srp_request *req;
8108f26c9ffSDavid Dillow 	int i;
8118f26c9ffSDavid Dillow 
81247513cf4SBart Van Assche 	if (!ch->req_ring)
8134d73f95fSBart Van Assche 		return;
8144d73f95fSBart Van Assche 
8154d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
816509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8179a21be53SSagi Grimberg 		if (dev->use_fast_reg) {
8185cfb1782SBart Van Assche 			kfree(req->fr_list);
8199a21be53SSagi Grimberg 		} else {
8208f26c9ffSDavid Dillow 			kfree(req->fmr_list);
8218f26c9ffSDavid Dillow 			kfree(req->map_page);
8229a21be53SSagi Grimberg 		}
823c07d424dSDavid Dillow 		if (req->indirect_dma_addr) {
824c07d424dSDavid Dillow 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
825c07d424dSDavid Dillow 					    target->indirect_size,
826c07d424dSDavid Dillow 					    DMA_TO_DEVICE);
827c07d424dSDavid Dillow 		}
828c07d424dSDavid Dillow 		kfree(req->indirect_desc);
8298f26c9ffSDavid Dillow 	}
8304d73f95fSBart Van Assche 
831509c07bcSBart Van Assche 	kfree(ch->req_ring);
832509c07bcSBart Van Assche 	ch->req_ring = NULL;
8338f26c9ffSDavid Dillow }
8348f26c9ffSDavid Dillow 
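/**
 * srp_alloc_req_data() - allocate the per-request resources of a channel
 * @ch: SRP RDMA channel.
 *
 * Allocates the request ring and, for each request, the FR or FMR descriptor
 * list and a DMA-mapped indirect descriptor buffer.
 */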
835509c07bcSBart Van Assche static int srp_alloc_req_data(struct srp_rdma_ch *ch)
836b81d00bdSBart Van Assche {
837509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
838b81d00bdSBart Van Assche 	struct srp_device *srp_dev = target->srp_host->srp_dev;
839b81d00bdSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
840b81d00bdSBart Van Assche 	struct srp_request *req;
8415cfb1782SBart Van Assche 	void *mr_list;
842b81d00bdSBart Van Assche 	dma_addr_t dma_addr;
843b81d00bdSBart Van Assche 	int i, ret = -ENOMEM;
844b81d00bdSBart Van Assche 
845509c07bcSBart Van Assche 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
846509c07bcSBart Van Assche 			       GFP_KERNEL);
847509c07bcSBart Van Assche 	if (!ch->req_ring)
8484d73f95fSBart Van Assche 		goto out;
8494d73f95fSBart Van Assche 
8504d73f95fSBart Van Assche 	for (i = 0; i < target->req_ring_size; ++i) {
851509c07bcSBart Van Assche 		req = &ch->req_ring[i];
8525cfb1782SBart Van Assche 		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
853b81d00bdSBart Van Assche 				  GFP_KERNEL);
8545cfb1782SBart Van Assche 		if (!mr_list)
8555cfb1782SBart Van Assche 			goto out;
8569a21be53SSagi Grimberg 		if (srp_dev->use_fast_reg) {
8575cfb1782SBart Van Assche 			req->fr_list = mr_list;
8589a21be53SSagi Grimberg 		} else {
8595cfb1782SBart Van Assche 			req->fmr_list = mr_list;
86052ede08fSBart Van Assche 			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
861d1b4289eSBart Van Assche 						sizeof(void *), GFP_KERNEL);
8625cfb1782SBart Van Assche 			if (!req->map_page)
8635cfb1782SBart Van Assche 				goto out;
8649a21be53SSagi Grimberg 		}
865b81d00bdSBart Van Assche 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
8665cfb1782SBart Van Assche 		if (!req->indirect_desc)
867b81d00bdSBart Van Assche 			goto out;
868b81d00bdSBart Van Assche 
869b81d00bdSBart Van Assche 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
870b81d00bdSBart Van Assche 					     target->indirect_size,
871b81d00bdSBart Van Assche 					     DMA_TO_DEVICE);
872b81d00bdSBart Van Assche 		if (ib_dma_mapping_error(ibdev, dma_addr))
873b81d00bdSBart Van Assche 			goto out;
874b81d00bdSBart Van Assche 
875b81d00bdSBart Van Assche 		req->indirect_dma_addr = dma_addr;
876b81d00bdSBart Van Assche 	}
877b81d00bdSBart Van Assche 	ret = 0;
878b81d00bdSBart Van Assche 
879b81d00bdSBart Van Assche out:
880b81d00bdSBart Van Assche 	return ret;
881b81d00bdSBart Van Assche }
882b81d00bdSBart Van Assche 
883683b159aSBart Van Assche /**
884683b159aSBart Van Assche  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
885683b159aSBart Van Assche  * @shost: SCSI host whose attributes to remove from sysfs.
886683b159aSBart Van Assche  *
887683b159aSBart Van Assche  * Note: Any attributes defined in the host template that did not exist
888683b159aSBart Van Assche  * before this function was invoked will be ignored.
889683b159aSBart Van Assche  */
890683b159aSBart Van Assche static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
891683b159aSBart Van Assche {
892683b159aSBart Van Assche 	struct device_attribute **attr;
893683b159aSBart Van Assche 
894683b159aSBart Van Assche 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
895683b159aSBart Van Assche 		device_remove_file(&shost->shost_dev, *attr);
896683b159aSBart Van Assche }
897683b159aSBart Van Assche 
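/**
 * srp_remove_target() - remove a target port and free all of its resources
 * @target: SRP target port in state SRP_TARGET_REMOVED.
 */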
898ee12d6a8SBart Van Assche static void srp_remove_target(struct srp_target_port *target)
899ee12d6a8SBart Van Assche {
900d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
901d92c0da7SBart Van Assche 	int i;
902509c07bcSBart Van Assche 
903ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
904ef6c49d8SBart Van Assche 
905ee12d6a8SBart Van Assche 	srp_del_scsi_host_attr(target->scsi_host);
9069dd69a60SBart Van Assche 	srp_rport_get(target->rport);
907ee12d6a8SBart Van Assche 	srp_remove_host(target->scsi_host);
908ee12d6a8SBart Van Assche 	scsi_remove_host(target->scsi_host);
90993079162SBart Van Assche 	srp_stop_rport_timers(target->rport);
910ef6c49d8SBart Van Assche 	srp_disconnect_target(target);
911d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
912d92c0da7SBart Van Assche 		ch = &target->ch[i];
913509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
914d92c0da7SBart Van Assche 	}
915c1120f89SBart Van Assche 	cancel_work_sync(&target->tl_err_work);
9169dd69a60SBart Van Assche 	srp_rport_put(target->rport);
917d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
918d92c0da7SBart Van Assche 		ch = &target->ch[i];
919509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
920d92c0da7SBart Van Assche 	}
921d92c0da7SBart Van Assche 	kfree(target->ch);
922d92c0da7SBart Van Assche 	target->ch = NULL;
92365d7dd2fSVu Pham 
92465d7dd2fSVu Pham 	spin_lock(&target->srp_host->target_lock);
92565d7dd2fSVu Pham 	list_del(&target->list);
92665d7dd2fSVu Pham 	spin_unlock(&target->srp_host->target_lock);
92765d7dd2fSVu Pham 
928ee12d6a8SBart Van Assche 	scsi_host_put(target->scsi_host);
929ee12d6a8SBart Van Assche }
930ee12d6a8SBart Van Assche 
931c4028958SDavid Howells static void srp_remove_work(struct work_struct *work)
932aef9ec39SRoland Dreier {
933c4028958SDavid Howells 	struct srp_target_port *target =
934ef6c49d8SBart Van Assche 		container_of(work, struct srp_target_port, remove_work);
935aef9ec39SRoland Dreier 
936ef6c49d8SBart Van Assche 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
937aef9ec39SRoland Dreier 
93896fc248aSBart Van Assche 	srp_remove_target(target);
939aef9ec39SRoland Dreier }
940aef9ec39SRoland Dreier 
941dc1bdbd9SBart Van Assche static void srp_rport_delete(struct srp_rport *rport)
942dc1bdbd9SBart Van Assche {
943dc1bdbd9SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
944dc1bdbd9SBart Van Assche 
945dc1bdbd9SBart Van Assche 	srp_queue_remove_work(target);
946dc1bdbd9SBart Van Assche }
947dc1bdbd9SBart Van Assche 
948c014c8cdSBart Van Assche /**
949c014c8cdSBart Van Assche  * srp_connected_ch() - number of connected channels
950c014c8cdSBart Van Assche  * @target: SRP target port.
951c014c8cdSBart Van Assche  */
952c014c8cdSBart Van Assche static int srp_connected_ch(struct srp_target_port *target)
953c014c8cdSBart Van Assche {
954c014c8cdSBart Van Assche 	int i, c = 0;
955c014c8cdSBart Van Assche 
956c014c8cdSBart Van Assche 	for (i = 0; i < target->ch_count; i++)
957c014c8cdSBart Van Assche 		c += target->ch[i].connected;
958c014c8cdSBart Van Assche 
959c014c8cdSBart Van Assche 	return c;
960c014c8cdSBart Van Assche }
961c014c8cdSBart Van Assche 
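/**
 * srp_connect_ch() - establish a connection for an RDMA channel
 * @ch:      SRP RDMA channel.
 * @multich: Whether this is an additional channel of a multichannel target.
 *
 * Performs a path record lookup and sends login requests until the
 * connection is established, retrying on port and DLID redirects and giving
 * up on stale connections.
 */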
962d92c0da7SBart Van Assche static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
963aef9ec39SRoland Dreier {
964509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
965aef9ec39SRoland Dreier 	int ret;
966aef9ec39SRoland Dreier 
967c014c8cdSBart Van Assche 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
968294c875aSBart Van Assche 
969509c07bcSBart Van Assche 	ret = srp_lookup_path(ch);
970aef9ec39SRoland Dreier 	if (ret)
9714d59ad29SBart Van Assche 		goto out;
972aef9ec39SRoland Dreier 
973aef9ec39SRoland Dreier 	while (1) {
974509c07bcSBart Van Assche 		init_completion(&ch->done);
975d92c0da7SBart Van Assche 		ret = srp_send_req(ch, multich);
976aef9ec39SRoland Dreier 		if (ret)
9774d59ad29SBart Van Assche 			goto out;
978509c07bcSBart Van Assche 		ret = wait_for_completion_interruptible(&ch->done);
979a702adceSBart Van Assche 		if (ret < 0)
9804d59ad29SBart Van Assche 			goto out;
981aef9ec39SRoland Dreier 
982aef9ec39SRoland Dreier 		/*
983aef9ec39SRoland Dreier 		 * The CM event handling code will set status to
984aef9ec39SRoland Dreier 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
985aef9ec39SRoland Dreier 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
986aef9ec39SRoland Dreier 		 * redirect REJ back.
987aef9ec39SRoland Dreier 		 */
9884d59ad29SBart Van Assche 		ret = ch->status;
9894d59ad29SBart Van Assche 		switch (ret) {
990aef9ec39SRoland Dreier 		case 0:
991c014c8cdSBart Van Assche 			ch->connected = true;
9924d59ad29SBart Van Assche 			goto out;
993aef9ec39SRoland Dreier 
994aef9ec39SRoland Dreier 		case SRP_PORT_REDIRECT:
995509c07bcSBart Van Assche 			ret = srp_lookup_path(ch);
996aef9ec39SRoland Dreier 			if (ret)
9974d59ad29SBart Van Assche 				goto out;
998aef9ec39SRoland Dreier 			break;
999aef9ec39SRoland Dreier 
1000aef9ec39SRoland Dreier 		case SRP_DLID_REDIRECT:
1001aef9ec39SRoland Dreier 			break;
1002aef9ec39SRoland Dreier 
10039fe4bcf4SDavid Dillow 		case SRP_STALE_CONN:
10049fe4bcf4SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host, PFX
10059fe4bcf4SDavid Dillow 				     "giving up on stale connection\n");
10064d59ad29SBart Van Assche 			ret = -ECONNRESET;
10074d59ad29SBart Van Assche 			goto out;
10089fe4bcf4SDavid Dillow 
1009aef9ec39SRoland Dreier 		default:
10104d59ad29SBart Van Assche 			goto out;
1011aef9ec39SRoland Dreier 		}
1012aef9ec39SRoland Dreier 	}
10134d59ad29SBart Van Assche 
10144d59ad29SBart Van Assche out:
10154d59ad29SBart Van Assche 	return ret <= 0 ? ret : -ENODEV;
1016aef9ec39SRoland Dreier }
1017aef9ec39SRoland Dreier 
10181dc7b1f1SChristoph Hellwig static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
10191dc7b1f1SChristoph Hellwig {
10201dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "INV RKEY");
10211dc7b1f1SChristoph Hellwig }
10221dc7b1f1SChristoph Hellwig 
10231dc7b1f1SChristoph Hellwig static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
10241dc7b1f1SChristoph Hellwig 		u32 rkey)
10255cfb1782SBart Van Assche {
10265cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
10275cfb1782SBart Van Assche 	struct ib_send_wr wr = {
10285cfb1782SBart Van Assche 		.opcode		    = IB_WR_LOCAL_INV,
10295cfb1782SBart Van Assche 		.next		    = NULL,
10305cfb1782SBart Van Assche 		.num_sge	    = 0,
10315cfb1782SBart Van Assche 		.send_flags	    = 0,
10325cfb1782SBart Van Assche 		.ex.invalidate_rkey = rkey,
10335cfb1782SBart Van Assche 	};
10345cfb1782SBart Van Assche 
10351dc7b1f1SChristoph Hellwig 	wr.wr_cqe = &req->reg_cqe;
10361dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_inv_rkey_err_done;
1037509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
10385cfb1782SBart Van Assche }
10395cfb1782SBart Van Assche 
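/**
 * srp_unmap_data() - undo the memory registration and DMA mapping of a command
 * @scmnd: SCSI command.
 * @ch:    SRP RDMA channel.
 * @req:   SRP request associated with @scmnd.
 *
 * Invalidates or unmaps the FR or FMR descriptors used by @req and unmaps
 * the scatterlist of @scmnd.
 */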
1040d945e1dfSRoland Dreier static void srp_unmap_data(struct scsi_cmnd *scmnd,
1041509c07bcSBart Van Assche 			   struct srp_rdma_ch *ch,
1042d945e1dfSRoland Dreier 			   struct srp_request *req)
1043d945e1dfSRoland Dreier {
1044509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
10455cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
10465cfb1782SBart Van Assche 	struct ib_device *ibdev = dev->dev;
10475cfb1782SBart Van Assche 	int i, res;
10488f26c9ffSDavid Dillow 
1049bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) ||
1050d945e1dfSRoland Dreier 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1051d945e1dfSRoland Dreier 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1052d945e1dfSRoland Dreier 		return;
1053d945e1dfSRoland Dreier 
10545cfb1782SBart Van Assche 	if (dev->use_fast_reg) {
10555cfb1782SBart Van Assche 		struct srp_fr_desc **pfr;
10565cfb1782SBart Van Assche 
10575cfb1782SBart Van Assche 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
10581dc7b1f1SChristoph Hellwig 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
10595cfb1782SBart Van Assche 			if (res < 0) {
10605cfb1782SBart Van Assche 				shost_printk(KERN_ERR, target->scsi_host, PFX
10615cfb1782SBart Van Assche 				  "Queueing INV WR for rkey %#x failed (%d)\n",
10625cfb1782SBart Van Assche 				  (*pfr)->mr->rkey, res);
10635cfb1782SBart Van Assche 				queue_work(system_long_wq,
10645cfb1782SBart Van Assche 					   &target->tl_err_work);
10655cfb1782SBart Van Assche 			}
10665cfb1782SBart Van Assche 		}
10675cfb1782SBart Van Assche 		if (req->nmdesc)
1068509c07bcSBart Van Assche 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
10695cfb1782SBart Van Assche 					req->nmdesc);
1070002f1567SBart Van Assche 	} else if (dev->use_fmr) {
10715cfb1782SBart Van Assche 		struct ib_pool_fmr **pfmr;
10725cfb1782SBart Van Assche 
10735cfb1782SBart Van Assche 		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
10745cfb1782SBart Van Assche 			ib_fmr_pool_unmap(*pfmr);
10755cfb1782SBart Van Assche 	}
1076f5358a17SRoland Dreier 
10778f26c9ffSDavid Dillow 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
10788f26c9ffSDavid Dillow 			scmnd->sc_data_direction);
1079d945e1dfSRoland Dreier }
1080d945e1dfSRoland Dreier 
108122032991SBart Van Assche /**
108222032991SBart Van Assche  * srp_claim_req - Take ownership of the scmnd associated with a request.
1083509c07bcSBart Van Assche  * @ch: SRP RDMA channel.
108422032991SBart Van Assche  * @req: SRP request.
1085b3fe628dSBart Van Assche  * @sdev: If not NULL, only take ownership for this SCSI device.
108622032991SBart Van Assche  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
108722032991SBart Van Assche  *         ownership of @req->scmnd if it equals @scmnd.
108822032991SBart Van Assche  *
108922032991SBart Van Assche  * Return value:
109022032991SBart Van Assche  * Either NULL or a pointer to the SCSI command the caller became owner of.
109122032991SBart Van Assche  */
1092509c07bcSBart Van Assche static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
109322032991SBart Van Assche 				       struct srp_request *req,
1094b3fe628dSBart Van Assche 				       struct scsi_device *sdev,
109522032991SBart Van Assche 				       struct scsi_cmnd *scmnd)
1096526b4caaSIshai Rabinovitz {
109794a9174cSBart Van Assche 	unsigned long flags;
109894a9174cSBart Van Assche 
1099509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1100b3fe628dSBart Van Assche 	if (req->scmnd &&
1101b3fe628dSBart Van Assche 	    (!sdev || req->scmnd->device == sdev) &&
1102b3fe628dSBart Van Assche 	    (!scmnd || req->scmnd == scmnd)) {
110322032991SBart Van Assche 		scmnd = req->scmnd;
110422032991SBart Van Assche 		req->scmnd = NULL;
110522032991SBart Van Assche 	} else {
110622032991SBart Van Assche 		scmnd = NULL;
110722032991SBart Van Assche 	}
1108509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
110922032991SBart Van Assche 
111022032991SBart Van Assche 	return scmnd;
111122032991SBart Van Assche }
111222032991SBart Van Assche 
111322032991SBart Van Assche /**
11146ec2ba02SBart Van Assche  * srp_free_req() - Unmap data and adjust ch->req_lim.
1115509c07bcSBart Van Assche  * @ch:     SRP RDMA channel.
1116af24663bSBart Van Assche  * @req:    Request to be freed.
1117af24663bSBart Van Assche  * @scmnd:  SCSI command associated with @req.
1118af24663bSBart Van Assche  * @req_lim_delta: Amount to be added to @ch->req_lim.
111922032991SBart Van Assche  */
1120509c07bcSBart Van Assche static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1121509c07bcSBart Van Assche 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
112222032991SBart Van Assche {
112322032991SBart Van Assche 	unsigned long flags;
112422032991SBart Van Assche 
1125509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
112622032991SBart Van Assche 
1127509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1128509c07bcSBart Van Assche 	ch->req_lim += req_lim_delta;
1129509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
1130526b4caaSIshai Rabinovitz }
1131526b4caaSIshai Rabinovitz 
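/*
 * srp_finish_req() claims the SCSI command associated with @req (if any),
 * unmaps its data buffers and completes it with @result. It is used when
 * terminating or resetting outstanding I/O.
 */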
1132509c07bcSBart Van Assche static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1133509c07bcSBart Van Assche 			   struct scsi_device *sdev, int result)
1134526b4caaSIshai Rabinovitz {
1135509c07bcSBart Van Assche 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
113622032991SBart Van Assche 
113722032991SBart Van Assche 	if (scmnd) {
1138509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd, 0);
1139ed9b2264SBart Van Assche 		scmnd->result = result;
114022032991SBart Van Assche 		scmnd->scsi_done(scmnd);
114122032991SBart Van Assche 	}
1142526b4caaSIshai Rabinovitz }
1143526b4caaSIshai Rabinovitz 
1144ed9b2264SBart Van Assche static void srp_terminate_io(struct srp_rport *rport)
1145aef9ec39SRoland Dreier {
1146ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1147d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1148b3fe628dSBart Van Assche 	struct Scsi_Host *shost = target->scsi_host;
1149b3fe628dSBart Van Assche 	struct scsi_device *sdev;
1150d92c0da7SBart Van Assche 	int i, j;
1151aef9ec39SRoland Dreier 
1152b3fe628dSBart Van Assche 	/*
1153b3fe628dSBart Van Assche 	 * Invoking srp_terminate_io() while srp_queuecommand() is running
1154b3fe628dSBart Van Assche 	 * is not safe. Hence the warning statement below.
1155b3fe628dSBart Van Assche 	 */
1156b3fe628dSBart Van Assche 	shost_for_each_device(sdev, shost)
1157b3fe628dSBart Van Assche 		WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1158b3fe628dSBart Van Assche 
1159d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1160d92c0da7SBart Van Assche 		ch = &target->ch[i];
1161509c07bcSBart Van Assche 
1162d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1163d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1164d92c0da7SBart Van Assche 
1165d92c0da7SBart Van Assche 			srp_finish_req(ch, req, NULL,
1166d92c0da7SBart Van Assche 				       DID_TRANSPORT_FAILFAST << 16);
1167d92c0da7SBart Van Assche 		}
1168ed9b2264SBart Van Assche 	}
1169ed9b2264SBart Van Assche }
1170ed9b2264SBart Van Assche 
1171ed9b2264SBart Van Assche /*
1172ed9b2264SBart Van Assche  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1173ed9b2264SBart Van Assche  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1174ed9b2264SBart Van Assche  * srp_reset_device() or srp_reset_host() calls will occur while this function
1175ed9b2264SBart Van Assche  * is in progress. One way to achieve this is not to call this function
1176ed9b2264SBart Van Assche  * directly but to call srp_reconnect_rport() instead, since that function
1177ed9b2264SBart Van Assche  * serializes calls of this function via rport->mutex and also blocks
1178ed9b2264SBart Van Assche  * srp_queuecommand() calls before invoking this function.
1179ed9b2264SBart Van Assche  */
1180ed9b2264SBart Van Assche static int srp_rport_reconnect(struct srp_rport *rport)
1181ed9b2264SBart Van Assche {
1182ed9b2264SBart Van Assche 	struct srp_target_port *target = rport->lld_data;
1183d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
1184d92c0da7SBart Van Assche 	int i, j, ret = 0;
1185d92c0da7SBart Van Assche 	bool multich = false;
118609be70a2SBart Van Assche 
1187aef9ec39SRoland Dreier 	srp_disconnect_target(target);
118834aa654eSBart Van Assche 
118934aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
119034aa654eSBart Van Assche 		return -ENODEV;
119134aa654eSBart Van Assche 
1192aef9ec39SRoland Dreier 	/*
1193c7c4e7ffSBart Van Assche 	 * Now get a new local CM ID so that we avoid confusing the target in
1194c7c4e7ffSBart Van Assche 	 * case things are really fouled up. Doing so also ensures that all CM
1195c7c4e7ffSBart Van Assche 	 * callbacks will have finished before a new QP is allocated.
1196aef9ec39SRoland Dreier 	 */
1197d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1198d92c0da7SBart Van Assche 		ch = &target->ch[i];
1199d92c0da7SBart Van Assche 		ret += srp_new_cm_id(ch);
1200d92c0da7SBart Van Assche 	}
1201d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1202d92c0da7SBart Van Assche 		ch = &target->ch[i];
1203d92c0da7SBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
1204d92c0da7SBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
1205509c07bcSBart Van Assche 
1206509c07bcSBart Van Assche 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1207536ae14eSBart Van Assche 		}
1208d92c0da7SBart Van Assche 	}
1209d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1210d92c0da7SBart Van Assche 		ch = &target->ch[i];
12115cfb1782SBart Van Assche 		/*
12125cfb1782SBart Van Assche 		 * Whether or not creating a new CM ID succeeded, create a new
1213d92c0da7SBart Van Assche 		 * QP. This guarantees that all completion callback function
1214d92c0da7SBart Van Assche 		 * invocations have finished before request resetting starts.
12155cfb1782SBart Van Assche 		 */
1216509c07bcSBart Van Assche 		ret += srp_create_ch_ib(ch);
12175cfb1782SBart Van Assche 
1218509c07bcSBart Van Assche 		INIT_LIST_HEAD(&ch->free_tx);
1219d92c0da7SBart Van Assche 		for (j = 0; j < target->queue_size; ++j)
1220d92c0da7SBart Van Assche 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1221d92c0da7SBart Van Assche 	}
12228de9fe3aSBart Van Assche 
12238de9fe3aSBart Van Assche 	target->qp_in_error = false;
12248de9fe3aSBart Van Assche 
1225d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
1226d92c0da7SBart Van Assche 		ch = &target->ch[i];
1227bbac5ccfSBart Van Assche 		if (ret)
1228d92c0da7SBart Van Assche 			break;
1229d92c0da7SBart Van Assche 		ret = srp_connect_ch(ch, multich);
1230d92c0da7SBart Van Assche 		multich = true;
1231d92c0da7SBart Van Assche 	}
123209be70a2SBart Van Assche 
1233ed9b2264SBart Van Assche 	if (ret == 0)
1234ed9b2264SBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
1235ed9b2264SBart Van Assche 			     PFX "reconnect succeeded\n");
1236aef9ec39SRoland Dreier 
1237aef9ec39SRoland Dreier 	return ret;
1238aef9ec39SRoland Dreier }
1239aef9ec39SRoland Dreier 
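/*
 * srp_map_desc() appends one SRP direct data buffer descriptor (virtual
 * address, rkey and length, all big-endian) to the mapping state and
 * updates the running totals used to build the indirect descriptor table.
 */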
12408f26c9ffSDavid Dillow static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
12418f26c9ffSDavid Dillow 			 unsigned int dma_len, u32 rkey)
1242f5358a17SRoland Dreier {
12438f26c9ffSDavid Dillow 	struct srp_direct_buf *desc = state->desc;
12448f26c9ffSDavid Dillow 
12453ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
12463ae95da8SBart Van Assche 
12478f26c9ffSDavid Dillow 	desc->va = cpu_to_be64(dma_addr);
12488f26c9ffSDavid Dillow 	desc->key = cpu_to_be32(rkey);
12498f26c9ffSDavid Dillow 	desc->len = cpu_to_be32(dma_len);
12508f26c9ffSDavid Dillow 
12518f26c9ffSDavid Dillow 	state->total_len += dma_len;
12528f26c9ffSDavid Dillow 	state->desc++;
12538f26c9ffSDavid Dillow 	state->ndesc++;
12548f26c9ffSDavid Dillow }
12558f26c9ffSDavid Dillow 
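/*
 * srp_map_finish_fmr() flushes the pages accumulated in @state into a
 * single mapping: a one-page mapping with a global MR available is
 * described directly with the global rkey, otherwise the pages are mapped
 * through the channel's FMR pool and the resulting FMR rkey is used.
 */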
12568f26c9ffSDavid Dillow static int srp_map_finish_fmr(struct srp_map_state *state,
1257509c07bcSBart Van Assche 			      struct srp_rdma_ch *ch)
12588f26c9ffSDavid Dillow {
1259186fbc66SBart Van Assche 	struct srp_target_port *target = ch->target;
1260186fbc66SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
12618f26c9ffSDavid Dillow 	struct ib_pool_fmr *fmr;
1262f5358a17SRoland Dreier 	u64 io_addr = 0;
12638f26c9ffSDavid Dillow 
1264f731ed62SBart Van Assche 	if (state->fmr.next >= state->fmr.end)
1265f731ed62SBart Van Assche 		return -ENOMEM;
1266f731ed62SBart Van Assche 
126726630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fmr);
126826630e8aSSagi Grimberg 
126926630e8aSSagi Grimberg 	if (state->npages == 0)
127026630e8aSSagi Grimberg 		return 0;
127126630e8aSSagi Grimberg 
127226630e8aSSagi Grimberg 	if (state->npages == 1 && target->global_mr) {
127326630e8aSSagi Grimberg 		srp_map_desc(state, state->base_dma_addr, state->dma_len,
127426630e8aSSagi Grimberg 			     target->global_mr->rkey);
127526630e8aSSagi Grimberg 		goto reset_state;
127626630e8aSSagi Grimberg 	}
127726630e8aSSagi Grimberg 
1278509c07bcSBart Van Assche 	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
12798f26c9ffSDavid Dillow 				   state->npages, io_addr);
12808f26c9ffSDavid Dillow 	if (IS_ERR(fmr))
12818f26c9ffSDavid Dillow 		return PTR_ERR(fmr);
12828f26c9ffSDavid Dillow 
1283f731ed62SBart Van Assche 	*state->fmr.next++ = fmr;
128452ede08fSBart Van Assche 	state->nmdesc++;
12858f26c9ffSDavid Dillow 
1286186fbc66SBart Van Assche 	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1287186fbc66SBart Van Assche 		     state->dma_len, fmr->fmr->rkey);
1288539dde6fSBart Van Assche 
128926630e8aSSagi Grimberg reset_state:
129026630e8aSSagi Grimberg 	state->npages = 0;
129126630e8aSSagi Grimberg 	state->dma_len = 0;
129226630e8aSSagi Grimberg 
12938f26c9ffSDavid Dillow 	return 0;
12948f26c9ffSDavid Dillow }
12958f26c9ffSDavid Dillow 
12961dc7b1f1SChristoph Hellwig static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
12971dc7b1f1SChristoph Hellwig {
12981dc7b1f1SChristoph Hellwig 	srp_handle_qp_err(cq, wc, "FAST REG");
12991dc7b1f1SChristoph Hellwig }
13001dc7b1f1SChristoph Hellwig 
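/*
 * srp_map_finish_fr() registers up to @sg_nents scatterlist entries
 * through a fast registration descriptor: it takes an srp_fr_desc from
 * the channel pool, bumps the rkey with ib_inc_rkey(), maps the entries
 * with ib_map_mr_sg() and posts an IB_WR_REG_MR work request. On success
 * it returns the number of scatterlist entries that were mapped. A single
 * entry with a global MR available bypasses registration entirely.
 */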
13015cfb1782SBart Van Assche static int srp_map_finish_fr(struct srp_map_state *state,
13021dc7b1f1SChristoph Hellwig 			     struct srp_request *req,
130357b0be9cSBart Van Assche 			     struct srp_rdma_ch *ch, int sg_nents)
13045cfb1782SBart Van Assche {
1305509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
13065cfb1782SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
13075cfb1782SBart Van Assche 	struct ib_send_wr *bad_wr;
1308f7f7aab1SSagi Grimberg 	struct ib_reg_wr wr;
13095cfb1782SBart Van Assche 	struct srp_fr_desc *desc;
13105cfb1782SBart Van Assche 	u32 rkey;
1311f7f7aab1SSagi Grimberg 	int n, err;
13125cfb1782SBart Van Assche 
1313f731ed62SBart Van Assche 	if (state->fr.next >= state->fr.end)
1314f731ed62SBart Van Assche 		return -ENOMEM;
1315f731ed62SBart Van Assche 
131626630e8aSSagi Grimberg 	WARN_ON_ONCE(!dev->use_fast_reg);
131726630e8aSSagi Grimberg 
131857b0be9cSBart Van Assche 	if (sg_nents == 1 && target->global_mr) {
1319f7f7aab1SSagi Grimberg 		srp_map_desc(state, sg_dma_address(state->sg),
1320f7f7aab1SSagi Grimberg 			     sg_dma_len(state->sg),
132126630e8aSSagi Grimberg 			     target->global_mr->rkey);
1322f7f7aab1SSagi Grimberg 		return 1;
132326630e8aSSagi Grimberg 	}
132426630e8aSSagi Grimberg 
1325509c07bcSBart Van Assche 	desc = srp_fr_pool_get(ch->fr_pool);
13265cfb1782SBart Van Assche 	if (!desc)
13275cfb1782SBart Van Assche 		return -ENOMEM;
13285cfb1782SBart Van Assche 
13295cfb1782SBart Van Assche 	rkey = ib_inc_rkey(desc->mr->rkey);
13305cfb1782SBart Van Assche 	ib_update_fast_reg_key(desc->mr, rkey);
13315cfb1782SBart Van Assche 
1332*ff2ba993SChristoph Hellwig 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, 0, dev->mr_page_size);
1333f7f7aab1SSagi Grimberg 	if (unlikely(n < 0))
1334f7f7aab1SSagi Grimberg 		return n;
13355cfb1782SBart Van Assche 
13361dc7b1f1SChristoph Hellwig 	req->reg_cqe.done = srp_reg_mr_err_done;
13371dc7b1f1SChristoph Hellwig 
1338f7f7aab1SSagi Grimberg 	wr.wr.next = NULL;
1339f7f7aab1SSagi Grimberg 	wr.wr.opcode = IB_WR_REG_MR;
13401dc7b1f1SChristoph Hellwig 	wr.wr.wr_cqe = &req->reg_cqe;
1341f7f7aab1SSagi Grimberg 	wr.wr.num_sge = 0;
1342f7f7aab1SSagi Grimberg 	wr.wr.send_flags = 0;
1343f7f7aab1SSagi Grimberg 	wr.mr = desc->mr;
1344f7f7aab1SSagi Grimberg 	wr.key = desc->mr->rkey;
1345f7f7aab1SSagi Grimberg 	wr.access = (IB_ACCESS_LOCAL_WRITE |
13465cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_READ |
13475cfb1782SBart Van Assche 		     IB_ACCESS_REMOTE_WRITE);
13485cfb1782SBart Van Assche 
1349f731ed62SBart Van Assche 	*state->fr.next++ = desc;
13505cfb1782SBart Van Assche 	state->nmdesc++;
13515cfb1782SBart Van Assche 
1352f7f7aab1SSagi Grimberg 	srp_map_desc(state, desc->mr->iova,
1353f7f7aab1SSagi Grimberg 		     desc->mr->length, desc->mr->rkey);
13545cfb1782SBart Van Assche 
135526630e8aSSagi Grimberg 	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1356f7f7aab1SSagi Grimberg 	if (unlikely(err))
135726630e8aSSagi Grimberg 		return err;
135826630e8aSSagi Grimberg 
1359f7f7aab1SSagi Grimberg 	return n;
13605cfb1782SBart Van Assche }
13615cfb1782SBart Van Assche 
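/*
 * srp_map_sg_entry() (FMR path) splits one DMA-mapped scatterlist entry
 * into mr_page_size sized chunks and accumulates them in @state. Whenever
 * the page array is full, or the current address is not page aligned, the
 * pages gathered so far are flushed via srp_map_finish_fmr(). The mapping
 * is also closed out here if the entry ends with a partial page, since
 * mappings can only be merged at page boundaries.
 */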
13628f26c9ffSDavid Dillow static int srp_map_sg_entry(struct srp_map_state *state,
1363509c07bcSBart Van Assche 			    struct srp_rdma_ch *ch,
13643ae95da8SBart Van Assche 			    struct scatterlist *sg, int sg_index)
13658f26c9ffSDavid Dillow {
1366509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
136705321937SGreg Kroah-Hartman 	struct srp_device *dev = target->srp_host->srp_dev;
136885507bccSRalph Campbell 	struct ib_device *ibdev = dev->dev;
13698f26c9ffSDavid Dillow 	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1370bb350d1dSFUJITA Tomonori 	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
13713ae95da8SBart Van Assche 	unsigned int len = 0;
13728f26c9ffSDavid Dillow 	int ret;
137385507bccSRalph Campbell 
13743ae95da8SBart Van Assche 	WARN_ON_ONCE(!dma_len);
1375f5358a17SRoland Dreier 
13768f26c9ffSDavid Dillow 	while (dma_len) {
13775cfb1782SBart Van Assche 		unsigned offset = dma_addr & ~dev->mr_page_mask;
13785cfb1782SBart Van Assche 		if (state->npages == dev->max_pages_per_mr || offset != 0) {
1379f7f7aab1SSagi Grimberg 			ret = srp_map_finish_fmr(state, ch);
13808f26c9ffSDavid Dillow 			if (ret)
13818f26c9ffSDavid Dillow 				return ret;
138285507bccSRalph Campbell 		}
1383f5358a17SRoland Dreier 
13845cfb1782SBart Van Assche 		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
13858f26c9ffSDavid Dillow 
13868f26c9ffSDavid Dillow 		if (!state->npages)
13878f26c9ffSDavid Dillow 			state->base_dma_addr = dma_addr;
13885cfb1782SBart Van Assche 		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
138952ede08fSBart Van Assche 		state->dma_len += len;
13908f26c9ffSDavid Dillow 		dma_addr += len;
13918f26c9ffSDavid Dillow 		dma_len -= len;
1392f5358a17SRoland Dreier 	}
1393f5358a17SRoland Dreier 
13945cfb1782SBart Van Assche 	/*
13955cfb1782SBart Van Assche 	 * If the last entry of the MR wasn't a full page, then we need to
13968f26c9ffSDavid Dillow 	 * close it out and start a new one -- we can only merge at page
13971d3d98c4SBart Van Assche 	 * boundaries.
13988f26c9ffSDavid Dillow 	 */
1399f5358a17SRoland Dreier 	ret = 0;
14000e0d3a48SBart Van Assche 	if (len != dev->mr_page_size)
1401f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(state, ch);
1402f5358a17SRoland Dreier 	return ret;
1403f5358a17SRoland Dreier }
1404f5358a17SRoland Dreier 
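/*
 * srp_map_sg_fmr() implements the FMR mapping path: it walks the
 * scatterlist entry by entry through srp_map_sg_entry() and closes out
 * any remaining pages with a final srp_map_finish_fmr() call.
 */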
140526630e8aSSagi Grimberg static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
140626630e8aSSagi Grimberg 			  struct srp_request *req, struct scatterlist *scat,
140726630e8aSSagi Grimberg 			  int count)
140826630e8aSSagi Grimberg {
140926630e8aSSagi Grimberg 	struct scatterlist *sg;
141026630e8aSSagi Grimberg 	int i, ret;
141126630e8aSSagi Grimberg 
141226630e8aSSagi Grimberg 	state->pages = req->map_page;
141326630e8aSSagi Grimberg 	state->fmr.next = req->fmr_list;
141426630e8aSSagi Grimberg 	state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
141526630e8aSSagi Grimberg 
141626630e8aSSagi Grimberg 	for_each_sg(scat, sg, count, i) {
141726630e8aSSagi Grimberg 		ret = srp_map_sg_entry(state, ch, sg, i);
141826630e8aSSagi Grimberg 		if (ret)
141926630e8aSSagi Grimberg 			return ret;
142026630e8aSSagi Grimberg 	}
142126630e8aSSagi Grimberg 
1422f7f7aab1SSagi Grimberg 	ret = srp_map_finish_fmr(state, ch);
142326630e8aSSagi Grimberg 	if (ret)
142426630e8aSSagi Grimberg 		return ret;
142526630e8aSSagi Grimberg 
142626630e8aSSagi Grimberg 	return 0;
142726630e8aSSagi Grimberg }
142826630e8aSSagi Grimberg 
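/*
 * srp_map_sg_fr() implements the fast registration mapping path: each
 * srp_map_finish_fr() call registers as many scatterlist entries as fit
 * into one memory region and reports how many it consumed, and the loop
 * advances state->sg past the consumed entries until the whole list has
 * been mapped.
 */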
142926630e8aSSagi Grimberg static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
143026630e8aSSagi Grimberg 			 struct srp_request *req, struct scatterlist *scat,
143126630e8aSSagi Grimberg 			 int count)
143226630e8aSSagi Grimberg {
143326630e8aSSagi Grimberg 	state->desc = req->indirect_desc;
1434f7f7aab1SSagi Grimberg 	state->fr.next = req->fr_list;
1435f7f7aab1SSagi Grimberg 	state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1436f7f7aab1SSagi Grimberg 	state->sg = scat;
143726630e8aSSagi Grimberg 
14383b59b7a6SBart Van Assche 	if (count == 0)
14393b59b7a6SBart Van Assche 		return 0;
14403b59b7a6SBart Van Assche 
144157b0be9cSBart Van Assche 	while (count) {
1442f7f7aab1SSagi Grimberg 		int i, n;
1443f7f7aab1SSagi Grimberg 
1444c6333f9fSDoug Ledford 		n = srp_map_finish_fr(state, req, ch, count);
1445f7f7aab1SSagi Grimberg 		if (unlikely(n < 0))
1446f7f7aab1SSagi Grimberg 			return n;
1447f7f7aab1SSagi Grimberg 
144857b0be9cSBart Van Assche 		count -= n;
1449f7f7aab1SSagi Grimberg 		for (i = 0; i < n; i++)
1450f7f7aab1SSagi Grimberg 			state->sg = sg_next(state->sg);
145126630e8aSSagi Grimberg 	}
145226630e8aSSagi Grimberg 
145326630e8aSSagi Grimberg 	return 0;
145426630e8aSSagi Grimberg }
145526630e8aSSagi Grimberg 
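/*
 * srp_map_sg_dma() is the no-registration path: it emits one direct
 * descriptor per DMA-mapped scatterlist entry, all referencing the
 * global memory region's rkey.
 */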
145626630e8aSSagi Grimberg static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1457509c07bcSBart Van Assche 			  struct srp_request *req, struct scatterlist *scat,
1458509c07bcSBart Van Assche 			  int count)
145976bc1e1dSBart Van Assche {
1460509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
146176bc1e1dSBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
146276bc1e1dSBart Van Assche 	struct scatterlist *sg;
146326630e8aSSagi Grimberg 	int i;
146476bc1e1dSBart Van Assche 
146576bc1e1dSBart Van Assche 	state->desc = req->indirect_desc;
14663ae95da8SBart Van Assche 	for_each_sg(scat, sg, count, i) {
14673ae95da8SBart Van Assche 		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
146803f6fb93SBart Van Assche 			     ib_sg_dma_len(dev->dev, sg),
146903f6fb93SBart Van Assche 			     target->global_mr->rkey);
14703ae95da8SBart Van Assche 	}
147176bc1e1dSBart Van Assche 
147226630e8aSSagi Grimberg 	return 0;
147376bc1e1dSBart Van Assche }
147476bc1e1dSBart Van Assche 
1475330179f2SBart Van Assche /*
1476330179f2SBart Van Assche  * Register the indirect data buffer descriptor with the HCA.
1477330179f2SBart Van Assche  *
1478330179f2SBart Van Assche  * Note: since the indirect data buffer descriptor has been allocated with
1479330179f2SBart Van Assche  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1480330179f2SBart Van Assche  * memory buffer.
1481330179f2SBart Van Assche  */
1482330179f2SBart Van Assche static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1483330179f2SBart Van Assche 		       void **next_mr, void **end_mr, u32 idb_len,
1484330179f2SBart Van Assche 		       __be32 *idb_rkey)
1485330179f2SBart Van Assche {
1486330179f2SBart Van Assche 	struct srp_target_port *target = ch->target;
1487330179f2SBart Van Assche 	struct srp_device *dev = target->srp_host->srp_dev;
1488330179f2SBart Van Assche 	struct srp_map_state state;
1489330179f2SBart Van Assche 	struct srp_direct_buf idb_desc;
1490330179f2SBart Van Assche 	u64 idb_pages[1];
1491f7f7aab1SSagi Grimberg 	struct scatterlist idb_sg[1];
1492330179f2SBart Van Assche 	int ret;
1493330179f2SBart Van Assche 
1494330179f2SBart Van Assche 	memset(&state, 0, sizeof(state));
1495330179f2SBart Van Assche 	memset(&idb_desc, 0, sizeof(idb_desc));
1496330179f2SBart Van Assche 	state.gen.next = next_mr;
1497330179f2SBart Van Assche 	state.gen.end = end_mr;
1498330179f2SBart Van Assche 	state.desc = &idb_desc;
1499f7f7aab1SSagi Grimberg 	state.base_dma_addr = req->indirect_dma_addr;
1500f7f7aab1SSagi Grimberg 	state.dma_len = idb_len;
1501f7f7aab1SSagi Grimberg 
1502f7f7aab1SSagi Grimberg 	if (dev->use_fast_reg) {
1503f7f7aab1SSagi Grimberg 		state.sg = idb_sg;
1504f7f7aab1SSagi Grimberg 		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1505f7f7aab1SSagi Grimberg 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1506fc925518SChristoph Hellwig #ifdef CONFIG_NEED_SG_DMA_LENGTH
1507fc925518SChristoph Hellwig 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1508fc925518SChristoph Hellwig #endif
1509c6333f9fSDoug Ledford 		ret = srp_map_finish_fr(&state, req, ch, 1);
1510f7f7aab1SSagi Grimberg 		if (ret < 0)
1511f7f7aab1SSagi Grimberg 			return ret;
1512f7f7aab1SSagi Grimberg 	} else if (dev->use_fmr) {
1513330179f2SBart Van Assche 		state.pages = idb_pages;
1514330179f2SBart Van Assche 		state.pages[0] = (req->indirect_dma_addr &
1515330179f2SBart Van Assche 				  dev->mr_page_mask);
1516330179f2SBart Van Assche 		state.npages = 1;
1517f7f7aab1SSagi Grimberg 		ret = srp_map_finish_fmr(&state, ch);
1518330179f2SBart Van Assche 		if (ret < 0)
1519f7f7aab1SSagi Grimberg 			return ret;
1520f7f7aab1SSagi Grimberg 	} else {
1521f7f7aab1SSagi Grimberg 		return -EINVAL;
1522f7f7aab1SSagi Grimberg 	}
1523330179f2SBart Van Assche 
1524330179f2SBart Van Assche 	*idb_rkey = idb_desc.key;
1525330179f2SBart Van Assche 
1526f7f7aab1SSagi Grimberg 	return 0;
1527330179f2SBart Van Assche }
1528330179f2SBart Van Assche 
152977269cdfSBart Van Assche /**
153077269cdfSBart Van Assche  * srp_map_data() - map SCSI data buffer onto an SRP request
153177269cdfSBart Van Assche  * @scmnd: SCSI command to map
153277269cdfSBart Van Assche  * @ch: SRP RDMA channel
153377269cdfSBart Van Assche  * @req: SRP request
153477269cdfSBart Van Assche  *
153577269cdfSBart Van Assche  * Returns the length in bytes of the SRP_CMD IU or a negative value if
153677269cdfSBart Van Assche  * mapping failed.
153777269cdfSBart Van Assche  */
1538509c07bcSBart Van Assche static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1539aef9ec39SRoland Dreier 			struct srp_request *req)
1540aef9ec39SRoland Dreier {
1541509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
154276bc1e1dSBart Van Assche 	struct scatterlist *scat;
1543aef9ec39SRoland Dreier 	struct srp_cmd *cmd = req->cmd->buf;
1544330179f2SBart Van Assche 	int len, nents, count, ret;
154585507bccSRalph Campbell 	struct srp_device *dev;
154685507bccSRalph Campbell 	struct ib_device *ibdev;
15478f26c9ffSDavid Dillow 	struct srp_map_state state;
15488f26c9ffSDavid Dillow 	struct srp_indirect_buf *indirect_hdr;
1549330179f2SBart Van Assche 	u32 idb_len, table_len;
1550330179f2SBart Van Assche 	__be32 idb_rkey;
15518f26c9ffSDavid Dillow 	u8 fmt;
1552aef9ec39SRoland Dreier 
1553bb350d1dSFUJITA Tomonori 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1554aef9ec39SRoland Dreier 		return sizeof (struct srp_cmd);
1555aef9ec39SRoland Dreier 
1556aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1557aef9ec39SRoland Dreier 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
15587aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
15597aa54bd7SDavid Dillow 			     PFX "Unhandled data direction %d\n",
1560aef9ec39SRoland Dreier 			     scmnd->sc_data_direction);
1561aef9ec39SRoland Dreier 		return -EINVAL;
1562aef9ec39SRoland Dreier 	}
1563aef9ec39SRoland Dreier 
1564bb350d1dSFUJITA Tomonori 	nents = scsi_sg_count(scmnd);
1565bb350d1dSFUJITA Tomonori 	scat  = scsi_sglist(scmnd);
1566aef9ec39SRoland Dreier 
156705321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev;
156885507bccSRalph Campbell 	ibdev = dev->dev;
156985507bccSRalph Campbell 
157085507bccSRalph Campbell 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
15718f26c9ffSDavid Dillow 	if (unlikely(count == 0))
15728f26c9ffSDavid Dillow 		return -EIO;
1573aef9ec39SRoland Dreier 
1574aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_DIRECT;
1575f5358a17SRoland Dreier 	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);
1576f5358a17SRoland Dreier 
157703f6fb93SBart Van Assche 	if (count == 1 && target->global_mr) {
1578f5358a17SRoland Dreier 		/*
1579f5358a17SRoland Dreier 		 * The midlayer only generated a single gather/scatter
1580f5358a17SRoland Dreier 		 * entry, or DMA mapping coalesced everything to a
1581f5358a17SRoland Dreier 		 * single entry.  So a direct descriptor along with
1582f5358a17SRoland Dreier 		 * the DMA MR suffices.
1583f5358a17SRoland Dreier 		 */
1584f5358a17SRoland Dreier 		struct srp_direct_buf *buf = (void *) cmd->add_data;
1585aef9ec39SRoland Dreier 
158685507bccSRalph Campbell 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
158703f6fb93SBart Van Assche 		buf->key = cpu_to_be32(target->global_mr->rkey);
158885507bccSRalph Campbell 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
15898f26c9ffSDavid Dillow 
159052ede08fSBart Van Assche 		req->nmdesc = 0;
15918f26c9ffSDavid Dillow 		goto map_complete;
15928f26c9ffSDavid Dillow 	}
15938f26c9ffSDavid Dillow 
15945cfb1782SBart Van Assche 	/*
15955cfb1782SBart Van Assche 	 * We have more than one scatter/gather entry, so build our indirect
15965cfb1782SBart Van Assche 	 * descriptor table, trying to merge as many entries as we can.
1597f5358a17SRoland Dreier 	 */
15988f26c9ffSDavid Dillow 	indirect_hdr = (void *) cmd->add_data;
15998f26c9ffSDavid Dillow 
1600c07d424dSDavid Dillow 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1601c07d424dSDavid Dillow 				   target->indirect_size, DMA_TO_DEVICE);
1602c07d424dSDavid Dillow 
16038f26c9ffSDavid Dillow 	memset(&state, 0, sizeof(state));
160426630e8aSSagi Grimberg 	if (dev->use_fast_reg)
1605e012f363SBart Van Assche 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
160626630e8aSSagi Grimberg 	else if (dev->use_fmr)
1607e012f363SBart Van Assche 		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
160826630e8aSSagi Grimberg 	else
1609e012f363SBart Van Assche 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1610e012f363SBart Van Assche 	req->nmdesc = state.nmdesc;
1611e012f363SBart Van Assche 	if (ret < 0)
1612e012f363SBart Van Assche 		goto unmap;
16138f26c9ffSDavid Dillow 
1614c07d424dSDavid Dillow 	/* We've mapped the request, now pull as much of the indirect
1615c07d424dSDavid Dillow 	 * descriptor table as we can into the command buffer. If this
1616c07d424dSDavid Dillow 	 * target is not using an external indirect table, we are
1617c07d424dSDavid Dillow 	 * guaranteed to fit into the command, as the SCSI layer won't
1618c07d424dSDavid Dillow 	 * give us more S/G entries than we allow.
16198f26c9ffSDavid Dillow 	 */
16208f26c9ffSDavid Dillow 	if (state.ndesc == 1) {
16215cfb1782SBart Van Assche 		/*
16225cfb1782SBart Van Assche 		 * Memory registration collapsed the sg-list into one entry,
16238f26c9ffSDavid Dillow 		 * so use a direct descriptor.
16248f26c9ffSDavid Dillow 		 */
16258f26c9ffSDavid Dillow 		struct srp_direct_buf *buf = (void *) cmd->add_data;
16268f26c9ffSDavid Dillow 
1627c07d424dSDavid Dillow 		*buf = req->indirect_desc[0];
16288f26c9ffSDavid Dillow 		goto map_complete;
16298f26c9ffSDavid Dillow 	}
16308f26c9ffSDavid Dillow 
1631c07d424dSDavid Dillow 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1632c07d424dSDavid Dillow 						!target->allow_ext_sg)) {
1633c07d424dSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
1634c07d424dSDavid Dillow 			     "Could not fit S/G list into SRP_CMD\n");
1635e012f363SBart Van Assche 		ret = -EIO;
1636e012f363SBart Van Assche 		goto unmap;
1637c07d424dSDavid Dillow 	}
1638c07d424dSDavid Dillow 
1639c07d424dSDavid Dillow 	count = min(state.ndesc, target->cmd_sg_cnt);
16408f26c9ffSDavid Dillow 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1641330179f2SBart Van Assche 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1642aef9ec39SRoland Dreier 
1643aef9ec39SRoland Dreier 	fmt = SRP_DATA_DESC_INDIRECT;
16448f26c9ffSDavid Dillow 	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1645c07d424dSDavid Dillow 	len += count * sizeof (struct srp_direct_buf);
1646f5358a17SRoland Dreier 
1647c07d424dSDavid Dillow 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1648c07d424dSDavid Dillow 	       count * sizeof (struct srp_direct_buf));
164985507bccSRalph Campbell 
165003f6fb93SBart Van Assche 	if (!target->global_mr) {
1651330179f2SBart Van Assche 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1652330179f2SBart Van Assche 				  idb_len, &idb_rkey);
1653330179f2SBart Van Assche 		if (ret < 0)
1654e012f363SBart Van Assche 			goto unmap;
1655330179f2SBart Van Assche 		req->nmdesc++;
1656330179f2SBart Van Assche 	} else {
1657a745f4f4SBart Van Assche 		idb_rkey = cpu_to_be32(target->global_mr->rkey);
1658330179f2SBart Van Assche 	}
1659330179f2SBart Van Assche 
1660c07d424dSDavid Dillow 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1661330179f2SBart Van Assche 	indirect_hdr->table_desc.key = idb_rkey;
16628f26c9ffSDavid Dillow 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
16638f26c9ffSDavid Dillow 	indirect_hdr->len = cpu_to_be32(state.total_len);
1664aef9ec39SRoland Dreier 
1665aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1666c07d424dSDavid Dillow 		cmd->data_out_desc_cnt = count;
1667aef9ec39SRoland Dreier 	else
1668c07d424dSDavid Dillow 		cmd->data_in_desc_cnt = count;
1669c07d424dSDavid Dillow 
1670c07d424dSDavid Dillow 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1671c07d424dSDavid Dillow 				      DMA_TO_DEVICE);
1672aef9ec39SRoland Dreier 
16738f26c9ffSDavid Dillow map_complete:
1674aef9ec39SRoland Dreier 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1675aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt << 4;
1676aef9ec39SRoland Dreier 	else
1677aef9ec39SRoland Dreier 		cmd->buf_fmt = fmt;
1678aef9ec39SRoland Dreier 
1679aef9ec39SRoland Dreier 	return len;
1680e012f363SBart Van Assche 
1681e012f363SBart Van Assche unmap:
1682e012f363SBart Van Assche 	srp_unmap_data(scmnd, ch, req);
1683ffc548bbSBart Van Assche 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1684ffc548bbSBart Van Assche 		ret = -E2BIG;
1685e012f363SBart Van Assche 	return ret;
1686aef9ec39SRoland Dreier }
1687aef9ec39SRoland Dreier 
168805a1d750SDavid Dillow /*
168976c75b25SBart Van Assche  * Return an IU and possibly a credit to the free pool
169076c75b25SBart Van Assche  */
1691509c07bcSBart Van Assche static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
169276c75b25SBart Van Assche 			  enum srp_iu_type iu_type)
169376c75b25SBart Van Assche {
169476c75b25SBart Van Assche 	unsigned long flags;
169576c75b25SBart Van Assche 
1696509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1697509c07bcSBart Van Assche 	list_add(&iu->list, &ch->free_tx);
169876c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP)
1699509c07bcSBart Van Assche 		++ch->req_lim;
1700509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
170176c75b25SBart Van Assche }
170276c75b25SBart Van Assche 
170376c75b25SBart Van Assche /*
1704509c07bcSBart Van Assche  * Must be called with ch->lock held to protect req_lim and free_tx.
1705e9684678SBart Van Assche  * If IU is not sent, it must be returned using srp_put_tx_iu().
170605a1d750SDavid Dillow  *
170705a1d750SDavid Dillow  * Note:
170805a1d750SDavid Dillow  * An upper limit for the number of allocated information units for each
170905a1d750SDavid Dillow  * request type is:
171005a1d750SDavid Dillow  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
171105a1d750SDavid Dillow  *   more than Scsi_Host.can_queue requests.
171205a1d750SDavid Dillow  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
171305a1d750SDavid Dillow  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
171405a1d750SDavid Dillow  *   one unanswered SRP request to an initiator.
171505a1d750SDavid Dillow  */
1716509c07bcSBart Van Assche static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
171705a1d750SDavid Dillow 				      enum srp_iu_type iu_type)
171805a1d750SDavid Dillow {
1719509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
172005a1d750SDavid Dillow 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
172105a1d750SDavid Dillow 	struct srp_iu *iu;
172205a1d750SDavid Dillow 
17231dc7b1f1SChristoph Hellwig 	ib_process_cq_direct(ch->send_cq, -1);
172405a1d750SDavid Dillow 
1725509c07bcSBart Van Assche 	if (list_empty(&ch->free_tx))
172605a1d750SDavid Dillow 		return NULL;
172705a1d750SDavid Dillow 
172805a1d750SDavid Dillow 	/* Initiator responses to target requests do not consume credits */
172976c75b25SBart Van Assche 	if (iu_type != SRP_IU_RSP) {
1730509c07bcSBart Van Assche 		if (ch->req_lim <= rsv) {
173105a1d750SDavid Dillow 			++target->zero_req_lim;
173205a1d750SDavid Dillow 			return NULL;
173305a1d750SDavid Dillow 		}
173405a1d750SDavid Dillow 
1735509c07bcSBart Van Assche 		--ch->req_lim;
173676c75b25SBart Van Assche 	}
173776c75b25SBart Van Assche 
1738509c07bcSBart Van Assche 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
173976c75b25SBart Van Assche 	list_del(&iu->list);
174005a1d750SDavid Dillow 	return iu;
174105a1d750SDavid Dillow }
174205a1d750SDavid Dillow 
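/*
 * srp_send_done() is the send completion handler. Note that it adds the
 * IU back to ch->free_tx without taking ch->lock; this appears to be safe
 * because send completions are only processed via the
 * ib_process_cq_direct() call in __srp_get_tx_iu(), which its callers
 * invoke with ch->lock held.
 */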
17431dc7b1f1SChristoph Hellwig static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
17441dc7b1f1SChristoph Hellwig {
17451dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
17461dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
17471dc7b1f1SChristoph Hellwig 
17481dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
17491dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "SEND");
17501dc7b1f1SChristoph Hellwig 		return;
17511dc7b1f1SChristoph Hellwig 	}
17521dc7b1f1SChristoph Hellwig 
17531dc7b1f1SChristoph Hellwig 	list_add(&iu->list, &ch->free_tx);
17541dc7b1f1SChristoph Hellwig }
17551dc7b1f1SChristoph Hellwig 
1756509c07bcSBart Van Assche static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
175705a1d750SDavid Dillow {
1758509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
175905a1d750SDavid Dillow 	struct ib_sge list;
176005a1d750SDavid Dillow 	struct ib_send_wr wr, *bad_wr;
176105a1d750SDavid Dillow 
176205a1d750SDavid Dillow 	list.addr   = iu->dma;
176305a1d750SDavid Dillow 	list.length = len;
17649af76271SDavid Dillow 	list.lkey   = target->lkey;
176505a1d750SDavid Dillow 
17661dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_send_done;
17671dc7b1f1SChristoph Hellwig 
176805a1d750SDavid Dillow 	wr.next       = NULL;
17691dc7b1f1SChristoph Hellwig 	wr.wr_cqe     = &iu->cqe;
177005a1d750SDavid Dillow 	wr.sg_list    = &list;
177105a1d750SDavid Dillow 	wr.num_sge    = 1;
177205a1d750SDavid Dillow 	wr.opcode     = IB_WR_SEND;
177305a1d750SDavid Dillow 	wr.send_flags = IB_SEND_SIGNALED;
177405a1d750SDavid Dillow 
1775509c07bcSBart Van Assche 	return ib_post_send(ch->qp, &wr, &bad_wr);
177605a1d750SDavid Dillow }
177705a1d750SDavid Dillow 
1778509c07bcSBart Van Assche static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1779c996bb47SBart Van Assche {
1780509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1781c996bb47SBart Van Assche 	struct ib_recv_wr wr, *bad_wr;
1782dcb4cb85SBart Van Assche 	struct ib_sge list;
1783c996bb47SBart Van Assche 
1784c996bb47SBart Van Assche 	list.addr   = iu->dma;
1785c996bb47SBart Van Assche 	list.length = iu->size;
17869af76271SDavid Dillow 	list.lkey   = target->lkey;
1787c996bb47SBart Van Assche 
17881dc7b1f1SChristoph Hellwig 	iu->cqe.done = srp_recv_done;
17891dc7b1f1SChristoph Hellwig 
1790c996bb47SBart Van Assche 	wr.next     = NULL;
17911dc7b1f1SChristoph Hellwig 	wr.wr_cqe   = &iu->cqe;
1792c996bb47SBart Van Assche 	wr.sg_list  = &list;
1793c996bb47SBart Van Assche 	wr.num_sge  = 1;
1794c996bb47SBart Van Assche 
1795509c07bcSBart Van Assche 	return ib_post_recv(ch->qp, &wr, &bad_wr);
1796c996bb47SBart Van Assche }
1797c996bb47SBart Van Assche 
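/*
 * srp_process_rsp() handles an SRP_RSP information unit. Task management
 * responses (tag has SRP_TAG_TSK_MGMT set) only update the request limit
 * and complete ch->tsk_mgmt_done. For command responses the matching
 * request is looked up by tag and claimed; status, sense data and
 * residual counts are copied into the SCSI command, the request limit
 * delta is credited back, and the command is completed.
 */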
1798509c07bcSBart Van Assche static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1799aef9ec39SRoland Dreier {
1800509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1801aef9ec39SRoland Dreier 	struct srp_request *req;
1802aef9ec39SRoland Dreier 	struct scsi_cmnd *scmnd;
1803aef9ec39SRoland Dreier 	unsigned long flags;
1804aef9ec39SRoland Dreier 
1805aef9ec39SRoland Dreier 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1806509c07bcSBart Van Assche 		spin_lock_irqsave(&ch->lock, flags);
1807509c07bcSBart Van Assche 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1808509c07bcSBart Van Assche 		spin_unlock_irqrestore(&ch->lock, flags);
180994a9174cSBart Van Assche 
1810509c07bcSBart Van Assche 		ch->tsk_mgmt_status = -1;
1811f8b6e31eSDavid Dillow 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
1812509c07bcSBart Van Assche 			ch->tsk_mgmt_status = rsp->data[3];
1813509c07bcSBart Van Assche 		complete(&ch->tsk_mgmt_done);
1814aef9ec39SRoland Dreier 	} else {
181577f2c1a4SBart Van Assche 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
181677f2c1a4SBart Van Assche 		if (scmnd) {
181777f2c1a4SBart Van Assche 			req = (void *)scmnd->host_scribble;
181877f2c1a4SBart Van Assche 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
181977f2c1a4SBart Van Assche 		}
182022032991SBart Van Assche 		if (!scmnd) {
18217aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
1822d92c0da7SBart Van Assche 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1823d92c0da7SBart Van Assche 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
182422032991SBart Van Assche 
1825509c07bcSBart Van Assche 			spin_lock_irqsave(&ch->lock, flags);
1826509c07bcSBart Van Assche 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1827509c07bcSBart Van Assche 			spin_unlock_irqrestore(&ch->lock, flags);
182822032991SBart Van Assche 
182922032991SBart Van Assche 			return;
183022032991SBart Van Assche 		}
1831aef9ec39SRoland Dreier 		scmnd->result = rsp->status;
1832aef9ec39SRoland Dreier 
1833aef9ec39SRoland Dreier 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1834aef9ec39SRoland Dreier 			memcpy(scmnd->sense_buffer, rsp->data +
1835aef9ec39SRoland Dreier 			       be32_to_cpu(rsp->resp_data_len),
1836aef9ec39SRoland Dreier 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1837aef9ec39SRoland Dreier 				     SCSI_SENSE_BUFFERSIZE));
1838aef9ec39SRoland Dreier 		}
1839aef9ec39SRoland Dreier 
1840e714531aSBart Van Assche 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1841bb350d1dSFUJITA Tomonori 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1842e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1843e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1844e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1845e714531aSBart Van Assche 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1846e714531aSBart Van Assche 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1847e714531aSBart Van Assche 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1848aef9ec39SRoland Dreier 
1849509c07bcSBart Van Assche 		srp_free_req(ch, req, scmnd,
185022032991SBart Van Assche 			     be32_to_cpu(rsp->req_lim_delta));
185122032991SBart Van Assche 
1852f8b6e31eSDavid Dillow 		scmnd->host_scribble = NULL;
1853aef9ec39SRoland Dreier 		scmnd->scsi_done(scmnd);
1854aef9ec39SRoland Dreier 	}
1855aef9ec39SRoland Dreier }
1856aef9ec39SRoland Dreier 
1857509c07bcSBart Van Assche static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1858bb12588aSDavid Dillow 			       void *rsp, int len)
1859bb12588aSDavid Dillow {
1860509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
186176c75b25SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1862bb12588aSDavid Dillow 	unsigned long flags;
1863bb12588aSDavid Dillow 	struct srp_iu *iu;
186476c75b25SBart Van Assche 	int err;
1865bb12588aSDavid Dillow 
1866509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
1867509c07bcSBart Van Assche 	ch->req_lim += req_delta;
1868509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1869509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
187076c75b25SBart Van Assche 
1871bb12588aSDavid Dillow 	if (!iu) {
1872bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1873bb12588aSDavid Dillow 			     "no IU available to send response\n");
187476c75b25SBart Van Assche 		return 1;
1875bb12588aSDavid Dillow 	}
1876bb12588aSDavid Dillow 
1877bb12588aSDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1878bb12588aSDavid Dillow 	memcpy(iu->buf, rsp, len);
1879bb12588aSDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1880bb12588aSDavid Dillow 
1881509c07bcSBart Van Assche 	err = srp_post_send(ch, iu, len);
188276c75b25SBart Van Assche 	if (err) {
1883bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1884bb12588aSDavid Dillow 			     "unable to post response: %d\n", err);
1885509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
188676c75b25SBart Van Assche 	}
1887bb12588aSDavid Dillow 
1888bb12588aSDavid Dillow 	return err;
1889bb12588aSDavid Dillow }
1890bb12588aSDavid Dillow 
1891509c07bcSBart Van Assche static void srp_process_cred_req(struct srp_rdma_ch *ch,
1892bb12588aSDavid Dillow 				 struct srp_cred_req *req)
1893bb12588aSDavid Dillow {
1894bb12588aSDavid Dillow 	struct srp_cred_rsp rsp = {
1895bb12588aSDavid Dillow 		.opcode = SRP_CRED_RSP,
1896bb12588aSDavid Dillow 		.tag = req->tag,
1897bb12588aSDavid Dillow 	};
1898bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1899bb12588aSDavid Dillow 
1900509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1901509c07bcSBart Van Assche 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1902bb12588aSDavid Dillow 			     "problems processing SRP_CRED_REQ\n");
1903bb12588aSDavid Dillow }
1904bb12588aSDavid Dillow 
1905509c07bcSBart Van Assche static void srp_process_aer_req(struct srp_rdma_ch *ch,
1906bb12588aSDavid Dillow 				struct srp_aer_req *req)
1907bb12588aSDavid Dillow {
1908509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1909bb12588aSDavid Dillow 	struct srp_aer_rsp rsp = {
1910bb12588aSDavid Dillow 		.opcode = SRP_AER_RSP,
1911bb12588aSDavid Dillow 		.tag = req->tag,
1912bb12588aSDavid Dillow 	};
1913bb12588aSDavid Dillow 	s32 delta = be32_to_cpu(req->req_lim_delta);
1914bb12588aSDavid Dillow 
1915bb12588aSDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX
1916985aa495SBart Van Assche 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1917bb12588aSDavid Dillow 
1918509c07bcSBart Van Assche 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1919bb12588aSDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX
1920bb12588aSDavid Dillow 			     "problems processing SRP_AER_REQ\n");
1921bb12588aSDavid Dillow }
1922bb12588aSDavid Dillow 
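/*
 * srp_recv_done() is the receive completion handler: it syncs the receive
 * buffer for the CPU, dispatches on the SRP opcode (SRP_RSP, SRP_CRED_REQ,
 * SRP_AER_REQ, SRP_T_LOGOUT) and finally reposts the IU on the receive
 * queue.
 */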
19231dc7b1f1SChristoph Hellwig static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1924aef9ec39SRoland Dreier {
19251dc7b1f1SChristoph Hellwig 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
19261dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
1927509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
1928dcb4cb85SBart Van Assche 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1929c996bb47SBart Van Assche 	int res;
1930aef9ec39SRoland Dreier 	u8 opcode;
1931aef9ec39SRoland Dreier 
19321dc7b1f1SChristoph Hellwig 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
19331dc7b1f1SChristoph Hellwig 		srp_handle_qp_err(cq, wc, "RECV");
19341dc7b1f1SChristoph Hellwig 		return;
19351dc7b1f1SChristoph Hellwig 	}
19361dc7b1f1SChristoph Hellwig 
1937509c07bcSBart Van Assche 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
193885507bccSRalph Campbell 				   DMA_FROM_DEVICE);
1939aef9ec39SRoland Dreier 
1940aef9ec39SRoland Dreier 	opcode = *(u8 *) iu->buf;
1941aef9ec39SRoland Dreier 
1942aef9ec39SRoland Dreier 	if (0) {
19437aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
19447aa54bd7SDavid Dillow 			     PFX "recv completion, opcode 0x%02x\n", opcode);
19457a700811SBart Van Assche 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
19467a700811SBart Van Assche 			       iu->buf, wc->byte_len, true);
1947aef9ec39SRoland Dreier 	}
1948aef9ec39SRoland Dreier 
1949aef9ec39SRoland Dreier 	switch (opcode) {
1950aef9ec39SRoland Dreier 	case SRP_RSP:
1951509c07bcSBart Van Assche 		srp_process_rsp(ch, iu->buf);
1952aef9ec39SRoland Dreier 		break;
1953aef9ec39SRoland Dreier 
1954bb12588aSDavid Dillow 	case SRP_CRED_REQ:
1955509c07bcSBart Van Assche 		srp_process_cred_req(ch, iu->buf);
1956bb12588aSDavid Dillow 		break;
1957bb12588aSDavid Dillow 
1958bb12588aSDavid Dillow 	case SRP_AER_REQ:
1959509c07bcSBart Van Assche 		srp_process_aer_req(ch, iu->buf);
1960bb12588aSDavid Dillow 		break;
1961bb12588aSDavid Dillow 
1962aef9ec39SRoland Dreier 	case SRP_T_LOGOUT:
1963aef9ec39SRoland Dreier 		/* XXX Handle target logout */
19647aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19657aa54bd7SDavid Dillow 			     PFX "Got target logout request\n");
1966aef9ec39SRoland Dreier 		break;
1967aef9ec39SRoland Dreier 
1968aef9ec39SRoland Dreier 	default:
19697aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
19707aa54bd7SDavid Dillow 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1971aef9ec39SRoland Dreier 		break;
1972aef9ec39SRoland Dreier 	}
1973aef9ec39SRoland Dreier 
1974509c07bcSBart Van Assche 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
197585507bccSRalph Campbell 				      DMA_FROM_DEVICE);
1976c996bb47SBart Van Assche 
1977509c07bcSBart Van Assche 	res = srp_post_recv(ch, iu);
1978c996bb47SBart Van Assche 	if (res != 0)
1979c996bb47SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
1980c996bb47SBart Van Assche 			     PFX "Recv failed with error code %d\n", res);
1981aef9ec39SRoland Dreier }
1982aef9ec39SRoland Dreier 
1983c1120f89SBart Van Assche /**
1984c1120f89SBart Van Assche  * srp_tl_err_work() - handle a transport layer error
1985af24663bSBart Van Assche  * @work: Work structure embedded in an SRP target port.
1986c1120f89SBart Van Assche  *
1987c1120f89SBart Van Assche  * Note: This function may get invoked before the rport has been created,
1988c1120f89SBart Van Assche  * hence the target->rport test.
1989c1120f89SBart Van Assche  */
1990c1120f89SBart Van Assche static void srp_tl_err_work(struct work_struct *work)
1991c1120f89SBart Van Assche {
1992c1120f89SBart Van Assche 	struct srp_target_port *target;
1993c1120f89SBart Van Assche 
1994c1120f89SBart Van Assche 	target = container_of(work, struct srp_target_port, tl_err_work);
1995c1120f89SBart Van Assche 	if (target->rport)
1996c1120f89SBart Van Assche 		srp_start_tl_fail_timers(target->rport);
1997c1120f89SBart Van Assche }
1998c1120f89SBart Van Assche 
19991dc7b1f1SChristoph Hellwig static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
20001dc7b1f1SChristoph Hellwig 		const char *opname)
2001948d1e88SBart Van Assche {
20021dc7b1f1SChristoph Hellwig 	struct srp_rdma_ch *ch = cq->cq_context;
20037dad6b2eSBart Van Assche 	struct srp_target_port *target = ch->target;
20047dad6b2eSBart Van Assche 
2005c014c8cdSBart Van Assche 	if (ch->connected && !target->qp_in_error) {
20065cfb1782SBart Van Assche 		shost_printk(KERN_ERR, target->scsi_host,
20071dc7b1f1SChristoph Hellwig 			     PFX "failed %s status %s (%d) for CQE %p\n",
20081dc7b1f1SChristoph Hellwig 			     opname, ib_wc_status_msg(wc->status), wc->status,
20091dc7b1f1SChristoph Hellwig 			     wc->wr_cqe);
2010c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
20114f0af697SBart Van Assche 	}
2012948d1e88SBart Van Assche 	target->qp_in_error = true;
2013948d1e88SBart Van Assche }
2014948d1e88SBart Van Assche 
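/*
 * srp_queuecommand() derives both the RDMA channel and the request slot
 * from the block layer tag: blk_mq_unique_tag() encodes the hardware
 * queue number and the per-queue tag, the former selects target->ch[]
 * and the latter indexes ch->req_ring[].
 */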
201576c75b25SBart Van Assche static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2016aef9ec39SRoland Dreier {
201776c75b25SBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2018a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
2019509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2020aef9ec39SRoland Dreier 	struct srp_request *req;
2021aef9ec39SRoland Dreier 	struct srp_iu *iu;
2022aef9ec39SRoland Dreier 	struct srp_cmd *cmd;
202385507bccSRalph Campbell 	struct ib_device *dev;
202476c75b25SBart Van Assche 	unsigned long flags;
202577f2c1a4SBart Van Assche 	u32 tag;
202677f2c1a4SBart Van Assche 	u16 idx;
2027d1b4289eSBart Van Assche 	int len, ret;
2028a95cadb9SBart Van Assche 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2029a95cadb9SBart Van Assche 
2030a95cadb9SBart Van Assche 	/*
2031a95cadb9SBart Van Assche 	 * The SCSI EH thread is the only context from which srp_queuecommand()
2032a95cadb9SBart Van Assche 	 * can get invoked for blocked devices (SDEV_BLOCK /
2033a95cadb9SBart Van Assche 	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2034a95cadb9SBart Van Assche 	 * locking the rport mutex if invoked from inside the SCSI EH.
2035a95cadb9SBart Van Assche 	 */
2036a95cadb9SBart Van Assche 	if (in_scsi_eh)
2037a95cadb9SBart Van Assche 		mutex_lock(&rport->mutex);
2038aef9ec39SRoland Dreier 
2039d1b4289eSBart Van Assche 	scmnd->result = srp_chkready(target->rport);
2040d1b4289eSBart Van Assche 	if (unlikely(scmnd->result))
2041d1b4289eSBart Van Assche 		goto err;
20422ce19e72SBart Van Assche 
204377f2c1a4SBart Van Assche 	WARN_ON_ONCE(scmnd->request->tag < 0);
204477f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2045d92c0da7SBart Van Assche 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
204677f2c1a4SBart Van Assche 	idx = blk_mq_unique_tag_to_tag(tag);
204777f2c1a4SBart Van Assche 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
204877f2c1a4SBart Van Assche 		  dev_name(&shost->shost_gendev), tag, idx,
204977f2c1a4SBart Van Assche 		  target->req_ring_size);
2050509c07bcSBart Van Assche 
2051509c07bcSBart Van Assche 	spin_lock_irqsave(&ch->lock, flags);
2052509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2053509c07bcSBart Van Assche 	spin_unlock_irqrestore(&ch->lock, flags);
2054aef9ec39SRoland Dreier 
205577f2c1a4SBart Van Assche 	if (!iu)
205677f2c1a4SBart Van Assche 		goto err;
205777f2c1a4SBart Van Assche 
205877f2c1a4SBart Van Assche 	req = &ch->req_ring[idx];
205905321937SGreg Kroah-Hartman 	dev = target->srp_host->srp_dev->dev;
206049248644SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
206185507bccSRalph Campbell 				   DMA_TO_DEVICE);
2062aef9ec39SRoland Dreier 
2063f8b6e31eSDavid Dillow 	scmnd->host_scribble = (void *) req;
2064aef9ec39SRoland Dreier 
2065aef9ec39SRoland Dreier 	cmd = iu->buf;
2066aef9ec39SRoland Dreier 	memset(cmd, 0, sizeof *cmd);
2067aef9ec39SRoland Dreier 
2068aef9ec39SRoland Dreier 	cmd->opcode = SRP_CMD;
2069985aa495SBart Van Assche 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
207077f2c1a4SBart Van Assche 	cmd->tag    = tag;
2071aef9ec39SRoland Dreier 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2072aef9ec39SRoland Dreier 
2073aef9ec39SRoland Dreier 	req->scmnd    = scmnd;
2074aef9ec39SRoland Dreier 	req->cmd      = iu;
2075aef9ec39SRoland Dreier 
2076509c07bcSBart Van Assche 	len = srp_map_data(scmnd, ch, req);
2077aef9ec39SRoland Dreier 	if (len < 0) {
20787aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
2079d1b4289eSBart Van Assche 			     PFX "Failed to map data (%d)\n", len);
2080d1b4289eSBart Van Assche 		/*
2081d1b4289eSBart Van Assche 		 * If we ran out of memory descriptors (-ENOMEM) because an
2082d1b4289eSBart Van Assche 		 * application is queuing many requests with more than
208352ede08fSBart Van Assche 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2084d1b4289eSBart Van Assche 		 * to reduce queue depth temporarily.
2085d1b4289eSBart Van Assche 		 */
2086d1b4289eSBart Van Assche 		scmnd->result = len == -ENOMEM ?
2087d1b4289eSBart Van Assche 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
208876c75b25SBart Van Assche 		goto err_iu;
2089aef9ec39SRoland Dreier 	}
2090aef9ec39SRoland Dreier 
209149248644SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
209285507bccSRalph Campbell 				      DMA_TO_DEVICE);
2093aef9ec39SRoland Dreier 
2094509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, len)) {
20957aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2096aef9ec39SRoland Dreier 		goto err_unmap;
2097aef9ec39SRoland Dreier 	}
2098aef9ec39SRoland Dreier 
2099d1b4289eSBart Van Assche 	ret = 0;
2100d1b4289eSBart Van Assche 
2101a95cadb9SBart Van Assche unlock_rport:
2102a95cadb9SBart Van Assche 	if (in_scsi_eh)
2103a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2104a95cadb9SBart Van Assche 
2105d1b4289eSBart Van Assche 	return ret;
2106aef9ec39SRoland Dreier 
2107aef9ec39SRoland Dreier err_unmap:
2108509c07bcSBart Van Assche 	srp_unmap_data(scmnd, ch, req);
2109aef9ec39SRoland Dreier 
211076c75b25SBart Van Assche err_iu:
2111509c07bcSBart Van Assche 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
211276c75b25SBart Van Assche 
2113024ca901SBart Van Assche 	/*
2114024ca901SBart Van Assche 	 * Ensure that the loops that iterate over the request ring cannot
2115024ca901SBart Van Assche 	 * encounter a dangling SCSI command pointer.
2116024ca901SBart Van Assche 	 */
2117024ca901SBart Van Assche 	req->scmnd = NULL;
2118024ca901SBart Van Assche 
2119d1b4289eSBart Van Assche err:
2120d1b4289eSBart Van Assche 	if (scmnd->result) {
2121d1b4289eSBart Van Assche 		scmnd->scsi_done(scmnd);
2122d1b4289eSBart Van Assche 		ret = 0;
2123d1b4289eSBart Van Assche 	} else {
2124d1b4289eSBart Van Assche 		ret = SCSI_MLQUEUE_HOST_BUSY;
2125d1b4289eSBart Van Assche 	}
2126a95cadb9SBart Van Assche 
2127d1b4289eSBart Van Assche 	goto unlock_rport;
2128aef9ec39SRoland Dreier }
2129aef9ec39SRoland Dreier 
21304d73f95fSBart Van Assche /*
21314d73f95fSBart Van Assche  * Note: the resources allocated in this function are freed in
2132509c07bcSBart Van Assche  * srp_free_ch_ib().
21334d73f95fSBart Van Assche  */
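/*
 * Both rings hold target->queue_size information units: RX IUs are sized for
 * the largest target-to-initiator IU negotiated at login (ch->max_ti_iu_len)
 * and TX IUs for the largest initiator-to-target IU (target->max_iu_len).
 * Newly allocated TX IUs start out on the ch->free_tx list.
 */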
2134509c07bcSBart Van Assche static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2135aef9ec39SRoland Dreier {
2136509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2137aef9ec39SRoland Dreier 	int i;
2138aef9ec39SRoland Dreier 
2139509c07bcSBart Van Assche 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
21404d73f95fSBart Van Assche 			      GFP_KERNEL);
2141509c07bcSBart Van Assche 	if (!ch->rx_ring)
21424d73f95fSBart Van Assche 		goto err_no_ring;
2143509c07bcSBart Van Assche 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
21444d73f95fSBart Van Assche 			      GFP_KERNEL);
2145509c07bcSBart Van Assche 	if (!ch->tx_ring)
21464d73f95fSBart Van Assche 		goto err_no_ring;
21474d73f95fSBart Van Assche 
21484d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2149509c07bcSBart Van Assche 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2150509c07bcSBart Van Assche 					      ch->max_ti_iu_len,
2151aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_FROM_DEVICE);
2152509c07bcSBart Van Assche 		if (!ch->rx_ring[i])
2153aef9ec39SRoland Dreier 			goto err;
2154aef9ec39SRoland Dreier 	}
2155aef9ec39SRoland Dreier 
21564d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2157509c07bcSBart Van Assche 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
215849248644SDavid Dillow 					      target->max_iu_len,
2159aef9ec39SRoland Dreier 					      GFP_KERNEL, DMA_TO_DEVICE);
2160509c07bcSBart Van Assche 		if (!ch->tx_ring[i])
2161aef9ec39SRoland Dreier 			goto err;
2162dcb4cb85SBart Van Assche 
2163509c07bcSBart Van Assche 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2164aef9ec39SRoland Dreier 	}
2165aef9ec39SRoland Dreier 
2166aef9ec39SRoland Dreier 	return 0;
2167aef9ec39SRoland Dreier 
2168aef9ec39SRoland Dreier err:
21694d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; ++i) {
2170509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2171509c07bcSBart Van Assche 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2172aef9ec39SRoland Dreier 	}
2173aef9ec39SRoland Dreier 
21754d73f95fSBart Van Assche err_no_ring:
2176509c07bcSBart Van Assche 	kfree(ch->tx_ring);
2177509c07bcSBart Van Assche 	ch->tx_ring = NULL;
2178509c07bcSBart Van Assche 	kfree(ch->rx_ring);
2179509c07bcSBart Van Assche 	ch->rx_ring = NULL;
2180aef9ec39SRoland Dreier 
2181aef9ec39SRoland Dreier 	return -ENOMEM;
2182aef9ec39SRoland Dreier }
2183aef9ec39SRoland Dreier 
2184c9b03c1aSBart Van Assche static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2185c9b03c1aSBart Van Assche {
2186c9b03c1aSBart Van Assche 	uint64_t T_tr_ns, max_compl_time_ms;
2187c9b03c1aSBart Van Assche 	uint32_t rq_tmo_jiffies;
2188c9b03c1aSBart Van Assche 
2189c9b03c1aSBart Van Assche 	/*
2190c9b03c1aSBart Van Assche 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2191c9b03c1aSBart Van Assche 	 * table 91), both the QP timeout and the retry count have to be set
2192c9b03c1aSBart Van Assche 	 * for RC QPs during the RTR to RTS transition.
2193c9b03c1aSBart Van Assche 	 */
2194c9b03c1aSBart Van Assche 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2195c9b03c1aSBart Van Assche 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2196c9b03c1aSBart Van Assche 
2197c9b03c1aSBart Van Assche 	/*
2198c9b03c1aSBart Van Assche 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2199c9b03c1aSBart Van Assche 	 * it can take before an error completion is generated. See also
2200c9b03c1aSBart Van Assche 	 * C9-140..142 in the IBTA spec for more information about how to
2201c9b03c1aSBart Van Assche 	 * convert the QP Local ACK Timeout value to nanoseconds.
2202c9b03c1aSBart Van Assche 	 */
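	/*
	 * Worked example (illustrative values): with qp_attr->timeout == 14
	 * and qp_attr->retry_cnt == 7, T_tr = 4096 * 2^14 ns ~= 67 ms, the
	 * largest completion time is 7 * 4 * 67 ms ~= 1.9 s and the resulting
	 * rq_tmo corresponds to roughly 2.9 seconds.
	 */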
2203c9b03c1aSBart Van Assche 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2204c9b03c1aSBart Van Assche 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2205c9b03c1aSBart Van Assche 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2206c9b03c1aSBart Van Assche 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2207c9b03c1aSBart Van Assche 
2208c9b03c1aSBart Van Assche 	return rq_tmo_jiffies;
2209c9b03c1aSBart Van Assche }
2210c9b03c1aSBart Van Assche 
2211961e0be8SDavid Dillow static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2212e6300cbdSBart Van Assche 			       const struct srp_login_rsp *lrsp,
2213509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2214961e0be8SDavid Dillow {
2215509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2216961e0be8SDavid Dillow 	struct ib_qp_attr *qp_attr = NULL;
2217961e0be8SDavid Dillow 	int attr_mask = 0;
2218961e0be8SDavid Dillow 	int ret;
2219961e0be8SDavid Dillow 	int i;
2220961e0be8SDavid Dillow 
2221961e0be8SDavid Dillow 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2222509c07bcSBart Van Assche 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2223509c07bcSBart Van Assche 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2224961e0be8SDavid Dillow 
2225961e0be8SDavid Dillow 		/*
2226961e0be8SDavid Dillow 		 * Reserve credits for task management so we don't
2227961e0be8SDavid Dillow 		 * bounce requests back to the SCSI mid-layer.
2228961e0be8SDavid Dillow 		 */
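		/*
		 * E.g. if the target grants N request limit credits at login,
		 * at most N - SRP_TSK_MGMT_SQ_SIZE of them are exposed to the
		 * SCSI mid-layer through can_queue.
		 */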
2229961e0be8SDavid Dillow 		target->scsi_host->can_queue
2230509c07bcSBart Van Assche 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2231961e0be8SDavid Dillow 			      target->scsi_host->can_queue);
22324d73f95fSBart Van Assche 		target->scsi_host->cmd_per_lun
22334d73f95fSBart Van Assche 			= min_t(int, target->scsi_host->can_queue,
22344d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun);
2235961e0be8SDavid Dillow 	} else {
2236961e0be8SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
2237961e0be8SDavid Dillow 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2238961e0be8SDavid Dillow 		ret = -ECONNRESET;
2239961e0be8SDavid Dillow 		goto error;
2240961e0be8SDavid Dillow 	}
2241961e0be8SDavid Dillow 
2242509c07bcSBart Van Assche 	if (!ch->rx_ring) {
2243509c07bcSBart Van Assche 		ret = srp_alloc_iu_bufs(ch);
2244961e0be8SDavid Dillow 		if (ret)
2245961e0be8SDavid Dillow 			goto error;
2246961e0be8SDavid Dillow 	}
2247961e0be8SDavid Dillow 
2248961e0be8SDavid Dillow 	ret = -ENOMEM;
2249961e0be8SDavid Dillow 	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2250961e0be8SDavid Dillow 	if (!qp_attr)
2251961e0be8SDavid Dillow 		goto error;
2252961e0be8SDavid Dillow 
2253961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTR;
2254961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2255961e0be8SDavid Dillow 	if (ret)
2256961e0be8SDavid Dillow 		goto error_free;
2257961e0be8SDavid Dillow 
2258509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2259961e0be8SDavid Dillow 	if (ret)
2260961e0be8SDavid Dillow 		goto error_free;
2261961e0be8SDavid Dillow 
22624d73f95fSBart Van Assche 	for (i = 0; i < target->queue_size; i++) {
2263509c07bcSBart Van Assche 		struct srp_iu *iu = ch->rx_ring[i];
2264509c07bcSBart Van Assche 
2265509c07bcSBart Van Assche 		ret = srp_post_recv(ch, iu);
2266961e0be8SDavid Dillow 		if (ret)
2267961e0be8SDavid Dillow 			goto error_free;
2268961e0be8SDavid Dillow 	}
2269961e0be8SDavid Dillow 
2270961e0be8SDavid Dillow 	qp_attr->qp_state = IB_QPS_RTS;
2271961e0be8SDavid Dillow 	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2272961e0be8SDavid Dillow 	if (ret)
2273961e0be8SDavid Dillow 		goto error_free;
2274961e0be8SDavid Dillow 
2275c9b03c1aSBart Van Assche 	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2276c9b03c1aSBart Van Assche 
2277509c07bcSBart Van Assche 	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2278961e0be8SDavid Dillow 	if (ret)
2279961e0be8SDavid Dillow 		goto error_free;
2280961e0be8SDavid Dillow 
2281961e0be8SDavid Dillow 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2282961e0be8SDavid Dillow 
2283961e0be8SDavid Dillow error_free:
2284961e0be8SDavid Dillow 	kfree(qp_attr);
2285961e0be8SDavid Dillow 
2286961e0be8SDavid Dillow error:
2287509c07bcSBart Van Assche 	ch->status = ret;
2288961e0be8SDavid Dillow }
2289961e0be8SDavid Dillow 
2290aef9ec39SRoland Dreier static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2291aef9ec39SRoland Dreier 			       struct ib_cm_event *event,
2292509c07bcSBart Van Assche 			       struct srp_rdma_ch *ch)
2293aef9ec39SRoland Dreier {
2294509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
22957aa54bd7SDavid Dillow 	struct Scsi_Host *shost = target->scsi_host;
2296aef9ec39SRoland Dreier 	struct ib_class_port_info *cpi;
2297aef9ec39SRoland Dreier 	int opcode;
2298aef9ec39SRoland Dreier 
2299aef9ec39SRoland Dreier 	switch (event->param.rej_rcvd.reason) {
2300aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_CM_REDIRECT:
2301aef9ec39SRoland Dreier 		cpi = event->param.rej_rcvd.ari;
2302509c07bcSBart Van Assche 		ch->path.dlid = cpi->redirect_lid;
2303509c07bcSBart Van Assche 		ch->path.pkey = cpi->redirect_pkey;
2304aef9ec39SRoland Dreier 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2305509c07bcSBart Van Assche 		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2306aef9ec39SRoland Dreier 
2307509c07bcSBart Van Assche 		ch->status = ch->path.dlid ?
2308aef9ec39SRoland Dreier 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2309aef9ec39SRoland Dreier 		break;
2310aef9ec39SRoland Dreier 
2311aef9ec39SRoland Dreier 	case IB_CM_REJ_PORT_REDIRECT:
23125d7cbfd6SRoland Dreier 		if (srp_target_is_topspin(target)) {
2313aef9ec39SRoland Dreier 			/*
2314aef9ec39SRoland Dreier 			 * Topspin/Cisco SRP gateways incorrectly send
2315aef9ec39SRoland Dreier 			 * reject reason code 25 when they mean 24
2316aef9ec39SRoland Dreier 			 * (port redirect).
2317aef9ec39SRoland Dreier 			 */
2318509c07bcSBart Van Assche 			memcpy(ch->path.dgid.raw,
2319aef9ec39SRoland Dreier 			       event->param.rej_rcvd.ari, 16);
2320aef9ec39SRoland Dreier 
23217aa54bd7SDavid Dillow 			shost_printk(KERN_DEBUG, shost,
23227aa54bd7SDavid Dillow 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2323509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2324509c07bcSBart Van Assche 				     be64_to_cpu(ch->path.dgid.global.interface_id));
2325aef9ec39SRoland Dreier 
2326509c07bcSBart Van Assche 			ch->status = SRP_PORT_REDIRECT;
2327aef9ec39SRoland Dreier 		} else {
23287aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23297aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2330509c07bcSBart Van Assche 			ch->status = -ECONNRESET;
2331aef9ec39SRoland Dreier 		}
2332aef9ec39SRoland Dreier 		break;
2333aef9ec39SRoland Dreier 
2334aef9ec39SRoland Dreier 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
23357aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost,
23367aa54bd7SDavid Dillow 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2337509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2338aef9ec39SRoland Dreier 		break;
2339aef9ec39SRoland Dreier 
2340aef9ec39SRoland Dreier 	case IB_CM_REJ_CONSUMER_DEFINED:
2341aef9ec39SRoland Dreier 		opcode = *(u8 *) event->private_data;
2342aef9ec39SRoland Dreier 		if (opcode == SRP_LOGIN_REJ) {
2343aef9ec39SRoland Dreier 			struct srp_login_rej *rej = event->private_data;
2344aef9ec39SRoland Dreier 			u32 reason = be32_to_cpu(rej->reason);
2345aef9ec39SRoland Dreier 
2346aef9ec39SRoland Dreier 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
23477aa54bd7SDavid Dillow 				shost_printk(KERN_WARNING, shost,
23487aa54bd7SDavid Dillow 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2349aef9ec39SRoland Dreier 			else
2350e7ffde01SBart Van Assche 				shost_printk(KERN_WARNING, shost, PFX
2351e7ffde01SBart Van Assche 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2352747fe000SBart Van Assche 					     target->sgid.raw,
2353747fe000SBart Van Assche 					     target->orig_dgid.raw, reason);
2354aef9ec39SRoland Dreier 		} else
23557aa54bd7SDavid Dillow 			shost_printk(KERN_WARNING, shost,
23567aa54bd7SDavid Dillow 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2357aef9ec39SRoland Dreier 				     " opcode 0x%02x\n", opcode);
2358509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2359aef9ec39SRoland Dreier 		break;
2360aef9ec39SRoland Dreier 
23619fe4bcf4SDavid Dillow 	case IB_CM_REJ_STALE_CONN:
23629fe4bcf4SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2363509c07bcSBart Van Assche 		ch->status = SRP_STALE_CONN;
23649fe4bcf4SDavid Dillow 		break;
23659fe4bcf4SDavid Dillow 
2366aef9ec39SRoland Dreier 	default:
23677aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2368aef9ec39SRoland Dreier 			     event->param.rej_rcvd.reason);
2369509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2370aef9ec39SRoland Dreier 	}
2371aef9ec39SRoland Dreier }
2372aef9ec39SRoland Dreier 
2373aef9ec39SRoland Dreier static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2374aef9ec39SRoland Dreier {
2375509c07bcSBart Van Assche 	struct srp_rdma_ch *ch = cm_id->context;
2376509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2377aef9ec39SRoland Dreier 	int comp = 0;
2378aef9ec39SRoland Dreier 
2379aef9ec39SRoland Dreier 	switch (event->event) {
2380aef9ec39SRoland Dreier 	case IB_CM_REQ_ERROR:
23817aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host,
23827aa54bd7SDavid Dillow 			     PFX "Sending CM REQ failed\n");
2383aef9ec39SRoland Dreier 		comp = 1;
2384509c07bcSBart Van Assche 		ch->status = -ECONNRESET;
2385aef9ec39SRoland Dreier 		break;
2386aef9ec39SRoland Dreier 
2387aef9ec39SRoland Dreier 	case IB_CM_REP_RECEIVED:
2388aef9ec39SRoland Dreier 		comp = 1;
2389509c07bcSBart Van Assche 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2390aef9ec39SRoland Dreier 		break;
2391aef9ec39SRoland Dreier 
2392aef9ec39SRoland Dreier 	case IB_CM_REJ_RECEIVED:
23937aa54bd7SDavid Dillow 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2394aef9ec39SRoland Dreier 		comp = 1;
2395aef9ec39SRoland Dreier 
2396509c07bcSBart Van Assche 		srp_cm_rej_handler(cm_id, event, ch);
2397aef9ec39SRoland Dreier 		break;
2398aef9ec39SRoland Dreier 
2399b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_RECEIVED:
24007aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24017aa54bd7SDavid Dillow 			     PFX "DREQ received - connection closed\n");
2402c014c8cdSBart Van Assche 		ch->connected = false;
2403b7ac4ab4SIshai Rabinovitz 		if (ib_send_cm_drep(cm_id, NULL, 0))
24047aa54bd7SDavid Dillow 			shost_printk(KERN_ERR, target->scsi_host,
24057aa54bd7SDavid Dillow 				     PFX "Sending CM DREP failed\n");
2406c1120f89SBart Van Assche 		queue_work(system_long_wq, &target->tl_err_work);
2407aef9ec39SRoland Dreier 		break;
2408aef9ec39SRoland Dreier 
2409aef9ec39SRoland Dreier 	case IB_CM_TIMEWAIT_EXIT:
24107aa54bd7SDavid Dillow 		shost_printk(KERN_ERR, target->scsi_host,
24117aa54bd7SDavid Dillow 			     PFX "connection closed\n");
2412ac72d766SBart Van Assche 		comp = 1;
2413aef9ec39SRoland Dreier 
2414509c07bcSBart Van Assche 		ch->status = 0;
2415aef9ec39SRoland Dreier 		break;
2416aef9ec39SRoland Dreier 
2417b7ac4ab4SIshai Rabinovitz 	case IB_CM_MRA_RECEIVED:
2418b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREQ_ERROR:
2419b7ac4ab4SIshai Rabinovitz 	case IB_CM_DREP_RECEIVED:
2420b7ac4ab4SIshai Rabinovitz 		break;
2421b7ac4ab4SIshai Rabinovitz 
2422aef9ec39SRoland Dreier 	default:
24237aa54bd7SDavid Dillow 		shost_printk(KERN_WARNING, target->scsi_host,
24247aa54bd7SDavid Dillow 			     PFX "Unhandled CM event %d\n", event->event);
2425aef9ec39SRoland Dreier 		break;
2426aef9ec39SRoland Dreier 	}
2427aef9ec39SRoland Dreier 
2428aef9ec39SRoland Dreier 	if (comp)
2429509c07bcSBart Van Assche 		complete(&ch->done);
2430aef9ec39SRoland Dreier 
2431aef9ec39SRoland Dreier 	return 0;
2432aef9ec39SRoland Dreier }
2433aef9ec39SRoland Dreier 
243471444b97SJack Wang /**
243571444b97SJack Wang  * srp_change_queue_depth - set the device queue depth
243671444b97SJack Wang  * @sdev: scsi device struct
243771444b97SJack Wang  * @qdepth: requested queue depth
243871444b97SJack Wang  *
243971444b97SJack Wang  * Returns the new queue depth.
244071444b97SJack Wang  */
244171444b97SJack Wang static int
2442db5ed4dfSChristoph Hellwig srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
244371444b97SJack Wang {
244471444b97SJack Wang 	if (!sdev->tagged_supported)
24451e6f2416SChristoph Hellwig 		qdepth = 1;
2446db5ed4dfSChristoph Hellwig 	return scsi_change_queue_depth(sdev, qdepth);
244771444b97SJack Wang }
244871444b97SJack Wang 
2449985aa495SBart Van Assche static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2450985aa495SBart Van Assche 			     u8 func)
2451aef9ec39SRoland Dreier {
2452509c07bcSBart Van Assche 	struct srp_target_port *target = ch->target;
2453a95cadb9SBart Van Assche 	struct srp_rport *rport = target->rport;
245419081f31SDavid Dillow 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2455aef9ec39SRoland Dreier 	struct srp_iu *iu;
2456aef9ec39SRoland Dreier 	struct srp_tsk_mgmt *tsk_mgmt;
2457aef9ec39SRoland Dreier 
2458c014c8cdSBart Van Assche 	if (!ch->connected || target->qp_in_error)
24593780d1f0SBart Van Assche 		return -1;
24603780d1f0SBart Van Assche 
2461509c07bcSBart Van Assche 	init_completion(&ch->tsk_mgmt_done);
2462aef9ec39SRoland Dreier 
2463a95cadb9SBart Van Assche 	/*
2464509c07bcSBart Van Assche 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2465a95cadb9SBart Van Assche 	 * invoked while a task management function is being sent.
2466a95cadb9SBart Van Assche 	 */
2467a95cadb9SBart Van Assche 	mutex_lock(&rport->mutex);
2468509c07bcSBart Van Assche 	spin_lock_irq(&ch->lock);
2469509c07bcSBart Van Assche 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2470509c07bcSBart Van Assche 	spin_unlock_irq(&ch->lock);
247176c75b25SBart Van Assche 
2472a95cadb9SBart Van Assche 	if (!iu) {
2473a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2474a95cadb9SBart Van Assche 
247576c75b25SBart Van Assche 		return -1;
2476a95cadb9SBart Van Assche 	}
2477aef9ec39SRoland Dreier 
247819081f31SDavid Dillow 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
247919081f31SDavid Dillow 				   DMA_TO_DEVICE);
2480aef9ec39SRoland Dreier 	tsk_mgmt = iu->buf;
2481aef9ec39SRoland Dreier 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2482aef9ec39SRoland Dreier 
2483aef9ec39SRoland Dreier 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2484985aa495SBart Van Assche 	int_to_scsilun(lun, &tsk_mgmt->lun);
2485f8b6e31eSDavid Dillow 	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
2486aef9ec39SRoland Dreier 	tsk_mgmt->tsk_mgmt_func = func;
2487f8b6e31eSDavid Dillow 	tsk_mgmt->task_tag	= req_tag;
2488aef9ec39SRoland Dreier 
248919081f31SDavid Dillow 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
249019081f31SDavid Dillow 				      DMA_TO_DEVICE);
2491509c07bcSBart Van Assche 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2492509c07bcSBart Van Assche 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2493a95cadb9SBart Van Assche 		mutex_unlock(&rport->mutex);
2494a95cadb9SBart Van Assche 
249576c75b25SBart Van Assche 		return -1;
249676c75b25SBart Van Assche 	}
2497a95cadb9SBart Van Assche 	mutex_unlock(&rport->mutex);
2498d945e1dfSRoland Dreier 
2499509c07bcSBart Van Assche 	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2500aef9ec39SRoland Dreier 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2501d945e1dfSRoland Dreier 		return -1;
2502aef9ec39SRoland Dreier 
2503d945e1dfSRoland Dreier 	return 0;
2504d945e1dfSRoland Dreier }
2505d945e1dfSRoland Dreier 
2506aef9ec39SRoland Dreier static int srp_abort(struct scsi_cmnd *scmnd)
2507aef9ec39SRoland Dreier {
2508d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2509f8b6e31eSDavid Dillow 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
251077f2c1a4SBart Van Assche 	u32 tag;
2511d92c0da7SBart Van Assche 	u16 ch_idx;
2512509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
2513086f44f5SBart Van Assche 	int ret;
2514d945e1dfSRoland Dreier 
25157aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2516aef9ec39SRoland Dreier 
2517d92c0da7SBart Van Assche 	if (!req)
251899b6697aSBart Van Assche 		return SUCCESS;
251977f2c1a4SBart Van Assche 	tag = blk_mq_unique_tag(scmnd->request);
2520d92c0da7SBart Van Assche 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2521d92c0da7SBart Van Assche 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2522d92c0da7SBart Van Assche 		return SUCCESS;
2523d92c0da7SBart Van Assche 	ch = &target->ch[ch_idx];
2524d92c0da7SBart Van Assche 	if (!srp_claim_req(ch, req, NULL, scmnd))
2525d92c0da7SBart Van Assche 		return SUCCESS;
2526d92c0da7SBart Van Assche 	shost_printk(KERN_ERR, target->scsi_host,
2527d92c0da7SBart Van Assche 		     "Sending SRP abort for tag %#x\n", tag);
252877f2c1a4SBart Van Assche 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
252980d5e8a2SBart Van Assche 			      SRP_TSK_ABORT_TASK) == 0)
2530086f44f5SBart Van Assche 		ret = SUCCESS;
2531ed9b2264SBart Van Assche 	else if (target->rport->state == SRP_RPORT_LOST)
253299e1c139SBart Van Assche 		ret = FAST_IO_FAIL;
2533086f44f5SBart Van Assche 	else
2534086f44f5SBart Van Assche 		ret = FAILED;
2535509c07bcSBart Van Assche 	srp_free_req(ch, req, scmnd, 0);
2536d945e1dfSRoland Dreier 	scmnd->result = DID_ABORT << 16;
2537d8536670SBart Van Assche 	scmnd->scsi_done(scmnd);
2538d945e1dfSRoland Dreier 
2539086f44f5SBart Van Assche 	return ret;
2540aef9ec39SRoland Dreier }
2541aef9ec39SRoland Dreier 
2542aef9ec39SRoland Dreier static int srp_reset_device(struct scsi_cmnd *scmnd)
2543aef9ec39SRoland Dreier {
2544d945e1dfSRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2545d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2546536ae14eSBart Van Assche 	int i, j;
2547d945e1dfSRoland Dreier 
25487aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2549aef9ec39SRoland Dreier 
2550d92c0da7SBart Van Assche 	ch = &target->ch[0];
2551509c07bcSBart Van Assche 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2552f8b6e31eSDavid Dillow 			      SRP_TSK_LUN_RESET))
2553d945e1dfSRoland Dreier 		return FAILED;
2554509c07bcSBart Van Assche 	if (ch->tsk_mgmt_status)
2555d945e1dfSRoland Dreier 		return FAILED;
2556d945e1dfSRoland Dreier 
2557d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2558d92c0da7SBart Van Assche 		ch = &target->ch[i];
25594d73f95fSBart Van Assche 		for (j = 0; j < target->req_ring_size; ++j) {
2560509c07bcSBart Van Assche 			struct srp_request *req = &ch->req_ring[j];
2561509c07bcSBart Van Assche 
2562509c07bcSBart Van Assche 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2563536ae14eSBart Van Assche 		}
2564d92c0da7SBart Van Assche 	}
2565d945e1dfSRoland Dreier 
2566d945e1dfSRoland Dreier 	return SUCCESS;
2567aef9ec39SRoland Dreier }
2568aef9ec39SRoland Dreier 
2569aef9ec39SRoland Dreier static int srp_reset_host(struct scsi_cmnd *scmnd)
2570aef9ec39SRoland Dreier {
2571aef9ec39SRoland Dreier 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2572aef9ec39SRoland Dreier 
25737aa54bd7SDavid Dillow 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2574aef9ec39SRoland Dreier 
2575ed9b2264SBart Van Assche 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2576aef9ec39SRoland Dreier }
2577aef9ec39SRoland Dreier 
2578c9b03c1aSBart Van Assche static int srp_slave_configure(struct scsi_device *sdev)
2579c9b03c1aSBart Van Assche {
2580c9b03c1aSBart Van Assche 	struct Scsi_Host *shost = sdev->host;
2581c9b03c1aSBart Van Assche 	struct srp_target_port *target = host_to_target(shost);
2582c9b03c1aSBart Van Assche 	struct request_queue *q = sdev->request_queue;
2583c9b03c1aSBart Van Assche 	unsigned long timeout;
2584c9b03c1aSBart Van Assche 
2585c9b03c1aSBart Van Assche 	if (sdev->type == TYPE_DISK) {
2586c9b03c1aSBart Van Assche 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2587c9b03c1aSBart Van Assche 		blk_queue_rq_timeout(q, timeout);
2588c9b03c1aSBart Van Assche 	}
2589c9b03c1aSBart Van Assche 
2590c9b03c1aSBart Van Assche 	return 0;
2591c9b03c1aSBart Van Assche }
2592c9b03c1aSBart Van Assche 
2593ee959b00STony Jones static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2594ee959b00STony Jones 			   char *buf)
25956ecb0c84SRoland Dreier {
2596ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
25976ecb0c84SRoland Dreier 
259845c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
25996ecb0c84SRoland Dreier }
26006ecb0c84SRoland Dreier 
2601ee959b00STony Jones static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2602ee959b00STony Jones 			     char *buf)
26036ecb0c84SRoland Dreier {
2604ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26056ecb0c84SRoland Dreier 
260645c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
26076ecb0c84SRoland Dreier }
26086ecb0c84SRoland Dreier 
2609ee959b00STony Jones static ssize_t show_service_id(struct device *dev,
2610ee959b00STony Jones 			       struct device_attribute *attr, char *buf)
26116ecb0c84SRoland Dreier {
2612ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26136ecb0c84SRoland Dreier 
261445c37cadSBart Van Assche 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
26156ecb0c84SRoland Dreier }
26166ecb0c84SRoland Dreier 
2617ee959b00STony Jones static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2618ee959b00STony Jones 			 char *buf)
26196ecb0c84SRoland Dreier {
2620ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26216ecb0c84SRoland Dreier 
2622747fe000SBart Van Assche 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
26236ecb0c84SRoland Dreier }
26246ecb0c84SRoland Dreier 
2625848b3082SBart Van Assche static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2626848b3082SBart Van Assche 			 char *buf)
2627848b3082SBart Van Assche {
2628848b3082SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2629848b3082SBart Van Assche 
2630747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2631848b3082SBart Van Assche }
2632848b3082SBart Van Assche 
2633ee959b00STony Jones static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2634ee959b00STony Jones 			 char *buf)
26356ecb0c84SRoland Dreier {
2636ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2637d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch = &target->ch[0];
26386ecb0c84SRoland Dreier 
2639509c07bcSBart Van Assche 	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
26406ecb0c84SRoland Dreier }
26416ecb0c84SRoland Dreier 
2642ee959b00STony Jones static ssize_t show_orig_dgid(struct device *dev,
2643ee959b00STony Jones 			      struct device_attribute *attr, char *buf)
26443633b3d0SIshai Rabinovitz {
2645ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26463633b3d0SIshai Rabinovitz 
2647747fe000SBart Van Assche 	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
26483633b3d0SIshai Rabinovitz }
26493633b3d0SIshai Rabinovitz 
265089de7486SBart Van Assche static ssize_t show_req_lim(struct device *dev,
265189de7486SBart Van Assche 			    struct device_attribute *attr, char *buf)
265289de7486SBart Van Assche {
265389de7486SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2654d92c0da7SBart Van Assche 	struct srp_rdma_ch *ch;
2655d92c0da7SBart Van Assche 	int i, req_lim = INT_MAX;
265689de7486SBart Van Assche 
2657d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
2658d92c0da7SBart Van Assche 		ch = &target->ch[i];
2659d92c0da7SBart Van Assche 		req_lim = min(req_lim, ch->req_lim);
2660d92c0da7SBart Van Assche 	}
2661d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", req_lim);
266289de7486SBart Van Assche }
266389de7486SBart Van Assche 
2664ee959b00STony Jones static ssize_t show_zero_req_lim(struct device *dev,
2665ee959b00STony Jones 				 struct device_attribute *attr, char *buf)
26666bfa24faSRoland Dreier {
2667ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
26686bfa24faSRoland Dreier 
26696bfa24faSRoland Dreier 	return sprintf(buf, "%d\n", target->zero_req_lim);
26706bfa24faSRoland Dreier }
26716bfa24faSRoland Dreier 
2672ee959b00STony Jones static ssize_t show_local_ib_port(struct device *dev,
2673ee959b00STony Jones 				  struct device_attribute *attr, char *buf)
2674ded7f1a1SIshai Rabinovitz {
2675ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2676ded7f1a1SIshai Rabinovitz 
2677ded7f1a1SIshai Rabinovitz 	return sprintf(buf, "%d\n", target->srp_host->port);
2678ded7f1a1SIshai Rabinovitz }
2679ded7f1a1SIshai Rabinovitz 
2680ee959b00STony Jones static ssize_t show_local_ib_device(struct device *dev,
2681ee959b00STony Jones 				    struct device_attribute *attr, char *buf)
2682ded7f1a1SIshai Rabinovitz {
2683ee959b00STony Jones 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2684ded7f1a1SIshai Rabinovitz 
268505321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2686ded7f1a1SIshai Rabinovitz }
2687ded7f1a1SIshai Rabinovitz 
2688d92c0da7SBart Van Assche static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2689d92c0da7SBart Van Assche 			     char *buf)
2690d92c0da7SBart Van Assche {
2691d92c0da7SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2692d92c0da7SBart Van Assche 
2693d92c0da7SBart Van Assche 	return sprintf(buf, "%d\n", target->ch_count);
2694d92c0da7SBart Van Assche }
2695d92c0da7SBart Van Assche 
26964b5e5f41SBart Van Assche static ssize_t show_comp_vector(struct device *dev,
26974b5e5f41SBart Van Assche 				struct device_attribute *attr, char *buf)
26984b5e5f41SBart Van Assche {
26994b5e5f41SBart Van Assche 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27004b5e5f41SBart Van Assche 
27014b5e5f41SBart Van Assche 	return sprintf(buf, "%d\n", target->comp_vector);
27024b5e5f41SBart Van Assche }
27034b5e5f41SBart Van Assche 
27047bb312e4SVu Pham static ssize_t show_tl_retry_count(struct device *dev,
27057bb312e4SVu Pham 				   struct device_attribute *attr, char *buf)
27067bb312e4SVu Pham {
27077bb312e4SVu Pham 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
27087bb312e4SVu Pham 
27097bb312e4SVu Pham 	return sprintf(buf, "%d\n", target->tl_retry_count);
27107bb312e4SVu Pham }
27117bb312e4SVu Pham 
271249248644SDavid Dillow static ssize_t show_cmd_sg_entries(struct device *dev,
271349248644SDavid Dillow 				   struct device_attribute *attr, char *buf)
271449248644SDavid Dillow {
271549248644SDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
271649248644SDavid Dillow 
271749248644SDavid Dillow 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
271849248644SDavid Dillow }
271949248644SDavid Dillow 
2720c07d424dSDavid Dillow static ssize_t show_allow_ext_sg(struct device *dev,
2721c07d424dSDavid Dillow 				 struct device_attribute *attr, char *buf)
2722c07d424dSDavid Dillow {
2723c07d424dSDavid Dillow 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2724c07d424dSDavid Dillow 
2725c07d424dSDavid Dillow 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2726c07d424dSDavid Dillow }
2727c07d424dSDavid Dillow 
2728ee959b00STony Jones static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
2729ee959b00STony Jones static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
2730ee959b00STony Jones static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
2731ee959b00STony Jones static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
2732848b3082SBart Van Assche static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
2733ee959b00STony Jones static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
2734ee959b00STony Jones static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
273589de7486SBart Van Assche static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2736ee959b00STony Jones static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
2737ee959b00STony Jones static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2738ee959b00STony Jones static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2739d92c0da7SBart Van Assche static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
27404b5e5f41SBart Van Assche static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
27417bb312e4SVu Pham static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
274249248644SDavid Dillow static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2743c07d424dSDavid Dillow static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
27446ecb0c84SRoland Dreier 
2745ee959b00STony Jones static struct device_attribute *srp_host_attrs[] = {
2746ee959b00STony Jones 	&dev_attr_id_ext,
2747ee959b00STony Jones 	&dev_attr_ioc_guid,
2748ee959b00STony Jones 	&dev_attr_service_id,
2749ee959b00STony Jones 	&dev_attr_pkey,
2750848b3082SBart Van Assche 	&dev_attr_sgid,
2751ee959b00STony Jones 	&dev_attr_dgid,
2752ee959b00STony Jones 	&dev_attr_orig_dgid,
275389de7486SBart Van Assche 	&dev_attr_req_lim,
2754ee959b00STony Jones 	&dev_attr_zero_req_lim,
2755ee959b00STony Jones 	&dev_attr_local_ib_port,
2756ee959b00STony Jones 	&dev_attr_local_ib_device,
2757d92c0da7SBart Van Assche 	&dev_attr_ch_count,
27584b5e5f41SBart Van Assche 	&dev_attr_comp_vector,
27597bb312e4SVu Pham 	&dev_attr_tl_retry_count,
276049248644SDavid Dillow 	&dev_attr_cmd_sg_entries,
2761c07d424dSDavid Dillow 	&dev_attr_allow_ext_sg,
27626ecb0c84SRoland Dreier 	NULL
27636ecb0c84SRoland Dreier };
27646ecb0c84SRoland Dreier 
2765aef9ec39SRoland Dreier static struct scsi_host_template srp_template = {
2766aef9ec39SRoland Dreier 	.module				= THIS_MODULE,
2767b7f008fdSRoland Dreier 	.name				= "InfiniBand SRP initiator",
2768b7f008fdSRoland Dreier 	.proc_name			= DRV_NAME,
2769c9b03c1aSBart Van Assche 	.slave_configure		= srp_slave_configure,
2770aef9ec39SRoland Dreier 	.info				= srp_target_info,
2771aef9ec39SRoland Dreier 	.queuecommand			= srp_queuecommand,
277271444b97SJack Wang 	.change_queue_depth             = srp_change_queue_depth,
2773aef9ec39SRoland Dreier 	.eh_abort_handler		= srp_abort,
2774aef9ec39SRoland Dreier 	.eh_device_reset_handler	= srp_reset_device,
2775aef9ec39SRoland Dreier 	.eh_host_reset_handler		= srp_reset_host,
27762742c1daSBart Van Assche 	.skip_settle_delay		= true,
277749248644SDavid Dillow 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
27784d73f95fSBart Van Assche 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
2779aef9ec39SRoland Dreier 	.this_id			= -1,
27804d73f95fSBart Van Assche 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
27816ecb0c84SRoland Dreier 	.use_clustering			= ENABLE_CLUSTERING,
278277f2c1a4SBart Van Assche 	.shost_attrs			= srp_host_attrs,
2783c40ecc12SChristoph Hellwig 	.track_queue_depth		= 1,
2784aef9ec39SRoland Dreier };
2785aef9ec39SRoland Dreier 
278634aa654eSBart Van Assche static int srp_sdev_count(struct Scsi_Host *host)
278734aa654eSBart Van Assche {
278834aa654eSBart Van Assche 	struct scsi_device *sdev;
278934aa654eSBart Van Assche 	int c = 0;
279034aa654eSBart Van Assche 
279134aa654eSBart Van Assche 	shost_for_each_device(sdev, host)
279234aa654eSBart Van Assche 		c++;
279334aa654eSBart Van Assche 
279434aa654eSBart Van Assche 	return c;
279534aa654eSBart Van Assche }
279634aa654eSBart Van Assche 
2797bc44bd1dSBart Van Assche /*
2798bc44bd1dSBart Van Assche  * Return values:
2799bc44bd1dSBart Van Assche  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2800bc44bd1dSBart Van Assche  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2801bc44bd1dSBart Van Assche  *    removal has been scheduled.
2802bc44bd1dSBart Van Assche  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2803bc44bd1dSBart Van Assche  */
2804aef9ec39SRoland Dreier static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2805aef9ec39SRoland Dreier {
28063236822bSFUJITA Tomonori 	struct srp_rport_identifiers ids;
28073236822bSFUJITA Tomonori 	struct srp_rport *rport;
28083236822bSFUJITA Tomonori 
280934aa654eSBart Van Assche 	target->state = SRP_TARGET_SCANNING;
2810aef9ec39SRoland Dreier 	sprintf(target->target_name, "SRP.T10:%016llX",
281145c37cadSBart Van Assche 		be64_to_cpu(target->id_ext));
2812aef9ec39SRoland Dreier 
281305321937SGreg Kroah-Hartman 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2814aef9ec39SRoland Dreier 		return -ENODEV;
2815aef9ec39SRoland Dreier 
28163236822bSFUJITA Tomonori 	memcpy(ids.port_id, &target->id_ext, 8);
28173236822bSFUJITA Tomonori 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2818aebd5e47SFUJITA Tomonori 	ids.roles = SRP_RPORT_ROLE_TARGET;
28193236822bSFUJITA Tomonori 	rport = srp_rport_add(target->scsi_host, &ids);
28203236822bSFUJITA Tomonori 	if (IS_ERR(rport)) {
28213236822bSFUJITA Tomonori 		scsi_remove_host(target->scsi_host);
28223236822bSFUJITA Tomonori 		return PTR_ERR(rport);
28233236822bSFUJITA Tomonori 	}
28243236822bSFUJITA Tomonori 
2825dc1bdbd9SBart Van Assche 	rport->lld_data = target;
28269dd69a60SBart Van Assche 	target->rport = rport;
2827dc1bdbd9SBart Van Assche 
2828b3589fd4SMatthew Wilcox 	spin_lock(&host->target_lock);
2829aef9ec39SRoland Dreier 	list_add_tail(&target->list, &host->target_list);
2830b3589fd4SMatthew Wilcox 	spin_unlock(&host->target_lock);
2831aef9ec39SRoland Dreier 
2832aef9ec39SRoland Dreier 	scsi_scan_target(&target->scsi_host->shost_gendev,
28331962a4a1SMatthew Wilcox 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
2834aef9ec39SRoland Dreier 
2835c014c8cdSBart Van Assche 	if (srp_connected_ch(target) < target->ch_count ||
2836c014c8cdSBart Van Assche 	    target->qp_in_error) {
283734aa654eSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
283834aa654eSBart Van Assche 			     PFX "SCSI scan failed - removing SCSI host\n");
283934aa654eSBart Van Assche 		srp_queue_remove_work(target);
284034aa654eSBart Van Assche 		goto out;
284134aa654eSBart Van Assche 	}
284234aa654eSBart Van Assche 
284334aa654eSBart Van Assche 	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
284434aa654eSBart Van Assche 		 dev_name(&target->scsi_host->shost_gendev),
284534aa654eSBart Van Assche 		 srp_sdev_count(target->scsi_host));
284634aa654eSBart Van Assche 
284734aa654eSBart Van Assche 	spin_lock_irq(&target->lock);
284834aa654eSBart Van Assche 	if (target->state == SRP_TARGET_SCANNING)
284934aa654eSBart Van Assche 		target->state = SRP_TARGET_LIVE;
285034aa654eSBart Van Assche 	spin_unlock_irq(&target->lock);
285134aa654eSBart Van Assche 
285234aa654eSBart Van Assche out:
2853aef9ec39SRoland Dreier 	return 0;
2854aef9ec39SRoland Dreier }
2855aef9ec39SRoland Dreier 
2856ee959b00STony Jones static void srp_release_dev(struct device *dev)
2857aef9ec39SRoland Dreier {
2858aef9ec39SRoland Dreier 	struct srp_host *host =
2859ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
2860aef9ec39SRoland Dreier 
2861aef9ec39SRoland Dreier 	complete(&host->released);
2862aef9ec39SRoland Dreier }
2863aef9ec39SRoland Dreier 
2864aef9ec39SRoland Dreier static struct class srp_class = {
2865aef9ec39SRoland Dreier 	.name    = "infiniband_srp",
2866ee959b00STony Jones 	.dev_release = srp_release_dev
2867aef9ec39SRoland Dreier };
2868aef9ec39SRoland Dreier 
286996fc248aSBart Van Assche /**
287096fc248aSBart Van Assche  * srp_conn_unique() - check whether the connection to a target is unique
2871af24663bSBart Van Assche  * @host:   SRP host.
2872af24663bSBart Van Assche  * @target: SRP target port.
287396fc248aSBart Van Assche  */
287496fc248aSBart Van Assche static bool srp_conn_unique(struct srp_host *host,
287596fc248aSBart Van Assche 			    struct srp_target_port *target)
287696fc248aSBart Van Assche {
287796fc248aSBart Van Assche 	struct srp_target_port *t;
287896fc248aSBart Van Assche 	bool ret = false;
287996fc248aSBart Van Assche 
288096fc248aSBart Van Assche 	if (target->state == SRP_TARGET_REMOVED)
288196fc248aSBart Van Assche 		goto out;
288296fc248aSBart Van Assche 
288396fc248aSBart Van Assche 	ret = true;
288496fc248aSBart Van Assche 
288596fc248aSBart Van Assche 	spin_lock(&host->target_lock);
288696fc248aSBart Van Assche 	list_for_each_entry(t, &host->target_list, list) {
288796fc248aSBart Van Assche 		if (t != target &&
288896fc248aSBart Van Assche 		    target->id_ext == t->id_ext &&
288996fc248aSBart Van Assche 		    target->ioc_guid == t->ioc_guid &&
289096fc248aSBart Van Assche 		    target->initiator_ext == t->initiator_ext) {
289196fc248aSBart Van Assche 			ret = false;
289296fc248aSBart Van Assche 			break;
289396fc248aSBart Van Assche 		}
289496fc248aSBart Van Assche 	}
289596fc248aSBart Van Assche 	spin_unlock(&host->target_lock);
289696fc248aSBart Van Assche 
289796fc248aSBart Van Assche out:
289896fc248aSBart Van Assche 	return ret;
289996fc248aSBart Van Assche }
290096fc248aSBart Van Assche 
2901aef9ec39SRoland Dreier /*
2902aef9ec39SRoland Dreier  * Target ports are added by writing
2903aef9ec39SRoland Dreier  *
2904aef9ec39SRoland Dreier  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2905aef9ec39SRoland Dreier  *     pkey=<P_Key>,service_id=<service ID>
2906aef9ec39SRoland Dreier  *
2907aef9ec39SRoland Dreier  * to the add_target sysfs attribute.
2908aef9ec39SRoland Dreier  */
2909aef9ec39SRoland Dreier enum {
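/*
 * For example (illustrative identifiers; the GIDs, IDs and the srp-<hca>-<port>
 * sysfs directory name depend on the local HCA and fabric):
 *
 *   echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" \
 *       > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */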
2910aef9ec39SRoland Dreier 	SRP_OPT_ERR		= 0,
2911aef9ec39SRoland Dreier 	SRP_OPT_ID_EXT		= 1 << 0,
2912aef9ec39SRoland Dreier 	SRP_OPT_IOC_GUID	= 1 << 1,
2913aef9ec39SRoland Dreier 	SRP_OPT_DGID		= 1 << 2,
2914aef9ec39SRoland Dreier 	SRP_OPT_PKEY		= 1 << 3,
2915aef9ec39SRoland Dreier 	SRP_OPT_SERVICE_ID	= 1 << 4,
2916aef9ec39SRoland Dreier 	SRP_OPT_MAX_SECT	= 1 << 5,
291752fb2b50SVu Pham 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
29180c0450dbSRamachandra K 	SRP_OPT_IO_CLASS	= 1 << 7,
291901cb9bcbSIshai Rabinovitz 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
292049248644SDavid Dillow 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
2921c07d424dSDavid Dillow 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
2922c07d424dSDavid Dillow 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
29234b5e5f41SBart Van Assche 	SRP_OPT_COMP_VECTOR	= 1 << 12,
29247bb312e4SVu Pham 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
29254d73f95fSBart Van Assche 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
2926aef9ec39SRoland Dreier 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
2927aef9ec39SRoland Dreier 				   SRP_OPT_IOC_GUID	|
2928aef9ec39SRoland Dreier 				   SRP_OPT_DGID		|
2929aef9ec39SRoland Dreier 				   SRP_OPT_PKEY		|
2930aef9ec39SRoland Dreier 				   SRP_OPT_SERVICE_ID),
2931aef9ec39SRoland Dreier };
2932aef9ec39SRoland Dreier 
2933a447c093SSteven Whitehouse static const match_table_t srp_opt_tokens = {
2934aef9ec39SRoland Dreier 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
2935aef9ec39SRoland Dreier 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
2936aef9ec39SRoland Dreier 	{ SRP_OPT_DGID,			"dgid=%s" 		},
2937aef9ec39SRoland Dreier 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
2938aef9ec39SRoland Dreier 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
2939aef9ec39SRoland Dreier 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
294052fb2b50SVu Pham 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
29410c0450dbSRamachandra K 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
294201cb9bcbSIshai Rabinovitz 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
294349248644SDavid Dillow 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
2944c07d424dSDavid Dillow 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
2945c07d424dSDavid Dillow 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
29464b5e5f41SBart Van Assche 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
29477bb312e4SVu Pham 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
29484d73f95fSBart Van Assche 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
2949aef9ec39SRoland Dreier 	{ SRP_OPT_ERR,			NULL 			}
2950aef9ec39SRoland Dreier };
2951aef9ec39SRoland Dreier 
2952aef9ec39SRoland Dreier static int srp_parse_options(const char *buf, struct srp_target_port *target)
2953aef9ec39SRoland Dreier {
2954aef9ec39SRoland Dreier 	char *options, *sep_opt;
2955aef9ec39SRoland Dreier 	char *p;
2956aef9ec39SRoland Dreier 	char dgid[3];
2957aef9ec39SRoland Dreier 	substring_t args[MAX_OPT_ARGS];
2958aef9ec39SRoland Dreier 	int opt_mask = 0;
2959aef9ec39SRoland Dreier 	int token;
2960aef9ec39SRoland Dreier 	int ret = -EINVAL;
2961aef9ec39SRoland Dreier 	int i;
2962aef9ec39SRoland Dreier 
2963aef9ec39SRoland Dreier 	options = kstrdup(buf, GFP_KERNEL);
2964aef9ec39SRoland Dreier 	if (!options)
2965aef9ec39SRoland Dreier 		return -ENOMEM;
2966aef9ec39SRoland Dreier 
2967aef9ec39SRoland Dreier 	sep_opt = options;
29687dcf9c19SSagi Grimberg 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2969aef9ec39SRoland Dreier 		if (!*p)
2970aef9ec39SRoland Dreier 			continue;
2971aef9ec39SRoland Dreier 
2972aef9ec39SRoland Dreier 		token = match_token(p, srp_opt_tokens, args);
2973aef9ec39SRoland Dreier 		opt_mask |= token;
2974aef9ec39SRoland Dreier 
2975aef9ec39SRoland Dreier 		switch (token) {
2976aef9ec39SRoland Dreier 		case SRP_OPT_ID_EXT:
2977aef9ec39SRoland Dreier 			p = match_strdup(args);
2978a20f3a6dSIshai Rabinovitz 			if (!p) {
2979a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2980a20f3a6dSIshai Rabinovitz 				goto out;
2981a20f3a6dSIshai Rabinovitz 			}
2982aef9ec39SRoland Dreier 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2983aef9ec39SRoland Dreier 			kfree(p);
2984aef9ec39SRoland Dreier 			break;
2985aef9ec39SRoland Dreier 
2986aef9ec39SRoland Dreier 		case SRP_OPT_IOC_GUID:
2987aef9ec39SRoland Dreier 			p = match_strdup(args);
2988a20f3a6dSIshai Rabinovitz 			if (!p) {
2989a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
2990a20f3a6dSIshai Rabinovitz 				goto out;
2991a20f3a6dSIshai Rabinovitz 			}
2992aef9ec39SRoland Dreier 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2993aef9ec39SRoland Dreier 			kfree(p);
2994aef9ec39SRoland Dreier 			break;
2995aef9ec39SRoland Dreier 
2996aef9ec39SRoland Dreier 		case SRP_OPT_DGID:
2997aef9ec39SRoland Dreier 			p = match_strdup(args);
2998a20f3a6dSIshai Rabinovitz 			if (!p) {
2999a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3000a20f3a6dSIshai Rabinovitz 				goto out;
3001a20f3a6dSIshai Rabinovitz 			}
3002aef9ec39SRoland Dreier 			if (strlen(p) != 32) {
3003e0bda7d8SBart Van Assche 				pr_warn("bad dest GID parameter '%s'\n", p);
3004ce1823f0SRoland Dreier 				kfree(p);
3005aef9ec39SRoland Dreier 				goto out;
3006aef9ec39SRoland Dreier 			}
3007aef9ec39SRoland Dreier 
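			/*
			 * Convert the 32 hex digit GID two characters (one
			 * byte) at a time; dgid[] holds each two-character
			 * chunk plus its terminating NUL.
			 */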
3008aef9ec39SRoland Dreier 			for (i = 0; i < 16; ++i) {
3009747fe000SBart Van Assche 				strlcpy(dgid, p + i * 2, sizeof(dgid));
3010747fe000SBart Van Assche 				if (sscanf(dgid, "%hhx",
3011747fe000SBart Van Assche 					   &target->orig_dgid.raw[i]) < 1) {
3012747fe000SBart Van Assche 					ret = -EINVAL;
3013747fe000SBart Van Assche 					kfree(p);
3014747fe000SBart Van Assche 					goto out;
3015747fe000SBart Van Assche 				}
3016aef9ec39SRoland Dreier 			}
3017bf17c1c7SRoland Dreier 			kfree(p);
3018aef9ec39SRoland Dreier 			break;
3019aef9ec39SRoland Dreier 
3020aef9ec39SRoland Dreier 		case SRP_OPT_PKEY:
3021aef9ec39SRoland Dreier 			if (match_hex(args, &token)) {
3022e0bda7d8SBart Van Assche 				pr_warn("bad P_Key parameter '%s'\n", p);
3023aef9ec39SRoland Dreier 				goto out;
3024aef9ec39SRoland Dreier 			}
3025747fe000SBart Van Assche 			target->pkey = cpu_to_be16(token);
3026aef9ec39SRoland Dreier 			break;
3027aef9ec39SRoland Dreier 
3028aef9ec39SRoland Dreier 		case SRP_OPT_SERVICE_ID:
3029aef9ec39SRoland Dreier 			p = match_strdup(args);
3030a20f3a6dSIshai Rabinovitz 			if (!p) {
3031a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3032a20f3a6dSIshai Rabinovitz 				goto out;
3033a20f3a6dSIshai Rabinovitz 			}
3034aef9ec39SRoland Dreier 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3035aef9ec39SRoland Dreier 			kfree(p);
3036aef9ec39SRoland Dreier 			break;
3037aef9ec39SRoland Dreier 
3038aef9ec39SRoland Dreier 		case SRP_OPT_MAX_SECT:
3039aef9ec39SRoland Dreier 			if (match_int(args, &token)) {
3040e0bda7d8SBart Van Assche 				pr_warn("bad max sect parameter '%s'\n", p);
3041aef9ec39SRoland Dreier 				goto out;
3042aef9ec39SRoland Dreier 			}
3043aef9ec39SRoland Dreier 			target->scsi_host->max_sectors = token;
3044aef9ec39SRoland Dreier 			break;
3045aef9ec39SRoland Dreier 
30464d73f95fSBart Van Assche 		case SRP_OPT_QUEUE_SIZE:
30474d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
30484d73f95fSBart Van Assche 				pr_warn("bad queue_size parameter '%s'\n", p);
30494d73f95fSBart Van Assche 				goto out;
30504d73f95fSBart Van Assche 			}
30514d73f95fSBart Van Assche 			target->scsi_host->can_queue = token;
30524d73f95fSBart Van Assche 			target->queue_size = token + SRP_RSP_SQ_SIZE +
30534d73f95fSBart Van Assche 					     SRP_TSK_MGMT_SQ_SIZE;
30544d73f95fSBart Van Assche 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
30554d73f95fSBart Van Assche 				target->scsi_host->cmd_per_lun = token;
30564d73f95fSBart Van Assche 			break;
30574d73f95fSBart Van Assche 
305852fb2b50SVu Pham 		case SRP_OPT_MAX_CMD_PER_LUN:
30594d73f95fSBart Van Assche 			if (match_int(args, &token) || token < 1) {
3060e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3061e0bda7d8SBart Van Assche 					p);
306252fb2b50SVu Pham 				goto out;
306352fb2b50SVu Pham 			}
30644d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun = token;
306552fb2b50SVu Pham 			break;
306652fb2b50SVu Pham 
30670c0450dbSRamachandra K 		case SRP_OPT_IO_CLASS:
30680c0450dbSRamachandra K 			if (match_hex(args, &token)) {
3069e0bda7d8SBart Van Assche 				pr_warn("bad IO class parameter '%s'\n", p);
30700c0450dbSRamachandra K 				goto out;
30710c0450dbSRamachandra K 			}
30720c0450dbSRamachandra K 			if (token != SRP_REV10_IB_IO_CLASS &&
30730c0450dbSRamachandra K 			    token != SRP_REV16A_IB_IO_CLASS) {
3074e0bda7d8SBart Van Assche 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3075e0bda7d8SBart Van Assche 					token, SRP_REV10_IB_IO_CLASS,
3076e0bda7d8SBart Van Assche 					SRP_REV16A_IB_IO_CLASS);
30770c0450dbSRamachandra K 				goto out;
30780c0450dbSRamachandra K 			}
30790c0450dbSRamachandra K 			target->io_class = token;
30800c0450dbSRamachandra K 			break;
30810c0450dbSRamachandra K 
308201cb9bcbSIshai Rabinovitz 		case SRP_OPT_INITIATOR_EXT:
308301cb9bcbSIshai Rabinovitz 			p = match_strdup(args);
3084a20f3a6dSIshai Rabinovitz 			if (!p) {
3085a20f3a6dSIshai Rabinovitz 				ret = -ENOMEM;
3086a20f3a6dSIshai Rabinovitz 				goto out;
3087a20f3a6dSIshai Rabinovitz 			}
308801cb9bcbSIshai Rabinovitz 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
308901cb9bcbSIshai Rabinovitz 			kfree(p);
309001cb9bcbSIshai Rabinovitz 			break;
309101cb9bcbSIshai Rabinovitz 
309249248644SDavid Dillow 		case SRP_OPT_CMD_SG_ENTRIES:
309349248644SDavid Dillow 			if (match_int(args, &token) || token < 1 || token > 255) {
3094e0bda7d8SBart Van Assche 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3095e0bda7d8SBart Van Assche 					p);
309649248644SDavid Dillow 				goto out;
309749248644SDavid Dillow 			}
309849248644SDavid Dillow 			target->cmd_sg_cnt = token;
309949248644SDavid Dillow 			break;
310049248644SDavid Dillow 
3101c07d424dSDavid Dillow 		case SRP_OPT_ALLOW_EXT_SG:
3102c07d424dSDavid Dillow 			if (match_int(args, &token)) {
3103e0bda7d8SBart Van Assche 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3104c07d424dSDavid Dillow 				goto out;
3105c07d424dSDavid Dillow 			}
3106c07d424dSDavid Dillow 			target->allow_ext_sg = !!token;
3107c07d424dSDavid Dillow 			break;
3108c07d424dSDavid Dillow 
3109c07d424dSDavid Dillow 		case SRP_OPT_SG_TABLESIZE:
3110c07d424dSDavid Dillow 			if (match_int(args, &token) || token < 1 ||
3111c07d424dSDavid Dillow 					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3112e0bda7d8SBart Van Assche 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3113e0bda7d8SBart Van Assche 					p);
3114c07d424dSDavid Dillow 				goto out;
3115c07d424dSDavid Dillow 			}
3116c07d424dSDavid Dillow 			target->sg_tablesize = token;
3117c07d424dSDavid Dillow 			break;
3118c07d424dSDavid Dillow 
31194b5e5f41SBart Van Assche 		case SRP_OPT_COMP_VECTOR:
31204b5e5f41SBart Van Assche 			if (match_int(args, &token) || token < 0) {
31214b5e5f41SBart Van Assche 				pr_warn("bad comp_vector parameter '%s'\n", p);
31224b5e5f41SBart Van Assche 				goto out;
31234b5e5f41SBart Van Assche 			}
31244b5e5f41SBart Van Assche 			target->comp_vector = token;
31254b5e5f41SBart Van Assche 			break;
31264b5e5f41SBart Van Assche 
31277bb312e4SVu Pham 		case SRP_OPT_TL_RETRY_COUNT:
31287bb312e4SVu Pham 			if (match_int(args, &token) || token < 2 || token > 7) {
31297bb312e4SVu Pham 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
31307bb312e4SVu Pham 					p);
31317bb312e4SVu Pham 				goto out;
31327bb312e4SVu Pham 			}
31337bb312e4SVu Pham 			target->tl_retry_count = token;
31347bb312e4SVu Pham 			break;
31357bb312e4SVu Pham 
3136aef9ec39SRoland Dreier 		default:
3137e0bda7d8SBart Van Assche 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3138e0bda7d8SBart Van Assche 				p);
3139aef9ec39SRoland Dreier 			goto out;
3140aef9ec39SRoland Dreier 		}
3141aef9ec39SRoland Dreier 	}
3142aef9ec39SRoland Dreier 
3143aef9ec39SRoland Dreier 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3144aef9ec39SRoland Dreier 		ret = 0;
3145aef9ec39SRoland Dreier 	else
3146aef9ec39SRoland Dreier 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3147aef9ec39SRoland Dreier 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3148aef9ec39SRoland Dreier 			    !(srp_opt_tokens[i].token & opt_mask))
3149e0bda7d8SBart Van Assche 				pr_warn("target creation request is missing parameter '%s'\n",
3150aef9ec39SRoland Dreier 					srp_opt_tokens[i].pattern);
3151aef9ec39SRoland Dreier 
31524d73f95fSBart Van Assche 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
31534d73f95fSBart Van Assche 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
31544d73f95fSBart Van Assche 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
31554d73f95fSBart Van Assche 			target->scsi_host->cmd_per_lun,
31564d73f95fSBart Van Assche 			target->scsi_host->can_queue);
31574d73f95fSBart Van Assche 
3158aef9ec39SRoland Dreier out:
3159aef9ec39SRoland Dreier 	kfree(options);
3160aef9ec39SRoland Dreier 	return ret;
3161aef9ec39SRoland Dreier }
3162aef9ec39SRoland Dreier 
3163ee959b00STony Jones static ssize_t srp_create_target(struct device *dev,
3164ee959b00STony Jones 				 struct device_attribute *attr,
3165aef9ec39SRoland Dreier 				 const char *buf, size_t count)
3166aef9ec39SRoland Dreier {
3167aef9ec39SRoland Dreier 	struct srp_host *host =
3168ee959b00STony Jones 		container_of(dev, struct srp_host, dev);
3169aef9ec39SRoland Dreier 	struct Scsi_Host *target_host;
3170aef9ec39SRoland Dreier 	struct srp_target_port *target;
3171509c07bcSBart Van Assche 	struct srp_rdma_ch *ch;
3172d1b4289eSBart Van Assche 	struct srp_device *srp_dev = host->srp_dev;
3173d1b4289eSBart Van Assche 	struct ib_device *ibdev = srp_dev->dev;
3174d92c0da7SBart Van Assche 	int ret, node_idx, node, cpu, i;
3175d92c0da7SBart Van Assche 	bool multich = false;
3176aef9ec39SRoland Dreier 
3177aef9ec39SRoland Dreier 	target_host = scsi_host_alloc(&srp_template,
3178aef9ec39SRoland Dreier 				      sizeof (struct srp_target_port));
3179aef9ec39SRoland Dreier 	if (!target_host)
3180aef9ec39SRoland Dreier 		return -ENOMEM;
3181aef9ec39SRoland Dreier 
31823236822bSFUJITA Tomonori 	target_host->transportt  = ib_srp_transport_template;
3183fd1b6c4aSBart Van Assche 	target_host->max_channel = 0;
3184fd1b6c4aSBart Van Assche 	target_host->max_id      = 1;
3185985aa495SBart Van Assche 	target_host->max_lun     = -1LL;
31863c8edf0eSArne Redlich 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
31875f068992SRoland Dreier 
3188aef9ec39SRoland Dreier 	target = host_to_target(target_host);
3189aef9ec39SRoland Dreier 
31900c0450dbSRamachandra K 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3191aef9ec39SRoland Dreier 	target->scsi_host	= target_host;
3192aef9ec39SRoland Dreier 	target->srp_host	= host;
3193e6bf5f48SJason Gunthorpe 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
319403f6fb93SBart Van Assche 	target->global_mr	= host->srp_dev->global_mr;
319549248644SDavid Dillow 	target->cmd_sg_cnt	= cmd_sg_entries;
3196c07d424dSDavid Dillow 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3197c07d424dSDavid Dillow 	target->allow_ext_sg	= allow_ext_sg;
31987bb312e4SVu Pham 	target->tl_retry_count	= 7;
31994d73f95fSBart Van Assche 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3200aef9ec39SRoland Dreier 
320134aa654eSBart Van Assche 	/*
320234aa654eSBart Van Assche 	 * Prevent the SCSI host from being removed by srp_remove_target()
320334aa654eSBart Van Assche 	 * before this function returns.
320434aa654eSBart Van Assche 	 */
320534aa654eSBart Van Assche 	scsi_host_get(target->scsi_host);
320634aa654eSBart Van Assche 
32072d7091bcSBart Van Assche 	mutex_lock(&host->add_target_mutex);
32082d7091bcSBart Van Assche 
3209aef9ec39SRoland Dreier 	ret = srp_parse_options(buf, target);
3210aef9ec39SRoland Dreier 	if (ret)
3211fb49c8bbSBart Van Assche 		goto out;
3212aef9ec39SRoland Dreier 
32134d73f95fSBart Van Assche 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
32144d73f95fSBart Van Assche 
321596fc248aSBart Van Assche 	if (!srp_conn_unique(target->srp_host, target)) {
321696fc248aSBart Van Assche 		shost_printk(KERN_INFO, target->scsi_host,
321796fc248aSBart Van Assche 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
321896fc248aSBart Van Assche 			     be64_to_cpu(target->id_ext),
321996fc248aSBart Van Assche 			     be64_to_cpu(target->ioc_guid),
322096fc248aSBart Van Assche 			     be64_to_cpu(target->initiator_ext));
322196fc248aSBart Van Assche 		ret = -EEXIST;
3222fb49c8bbSBart Van Assche 		goto out;
322396fc248aSBart Van Assche 	}
322496fc248aSBart Van Assche 
32255cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3226c07d424dSDavid Dillow 	    target->cmd_sg_cnt < target->sg_tablesize) {
32275cfb1782SBart Van Assche 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3228c07d424dSDavid Dillow 		target->sg_tablesize = target->cmd_sg_cnt;
3229c07d424dSDavid Dillow 	}
3230c07d424dSDavid Dillow 
3231c07d424dSDavid Dillow 	target_host->sg_tablesize = target->sg_tablesize;
3232fa9863f8SBart Van Assche 	target->mr_pool_size = target->scsi_host->can_queue;
3233c07d424dSDavid Dillow 	target->indirect_size = target->sg_tablesize *
3234c07d424dSDavid Dillow 				sizeof (struct srp_direct_buf);
323549248644SDavid Dillow 	target->max_iu_len = sizeof (struct srp_cmd) +
323649248644SDavid Dillow 			     sizeof (struct srp_indirect_buf) +
323749248644SDavid Dillow 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
323849248644SDavid Dillow 
3239c1120f89SBart Van Assche 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3240ef6c49d8SBart Van Assche 	INIT_WORK(&target->remove_work, srp_remove_work);
32418f26c9ffSDavid Dillow 	spin_lock_init(&target->lock);
324255ee3ab2SMatan Barak 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
32432088ca66SSagi Grimberg 	if (ret)
3244fb49c8bbSBart Van Assche 		goto out;
3245d92c0da7SBart Van Assche 
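	/*
	 * Pick the number of RDMA channels: ch_count if it was set, otherwise
	 * the smaller of 4 * num_online_nodes() and the number of completion
	 * vectors; the result is capped by the number of online CPUs and
	 * raised to at least one channel per online NUMA node.
	 */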
3246d92c0da7SBart Van Assche 	ret = -ENOMEM;
3247d92c0da7SBart Van Assche 	target->ch_count = max_t(unsigned, num_online_nodes(),
3248d92c0da7SBart Van Assche 				 min(ch_count ? :
3249d92c0da7SBart Van Assche 				     min(4 * num_online_nodes(),
3250d92c0da7SBart Van Assche 					 ibdev->num_comp_vectors),
3251d92c0da7SBart Van Assche 				     num_online_cpus()));
3252d92c0da7SBart Van Assche 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3253d92c0da7SBart Van Assche 			     GFP_KERNEL);
3254d92c0da7SBart Van Assche 	if (!target->ch)
3255fb49c8bbSBart Van Assche 		goto out;
3256d92c0da7SBart Van Assche 
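	/*
	 * Distribute the channels evenly over the online NUMA nodes and,
	 * within each node, over that node's online CPUs.  Each channel gets
	 * a completion vector from its node's share of the vectors, rotated
	 * by the comp_vector target option.
	 */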
3257d92c0da7SBart Van Assche 	node_idx = 0;
3258d92c0da7SBart Van Assche 	for_each_online_node(node) {
3259d92c0da7SBart Van Assche 		const int ch_start = (node_idx * target->ch_count /
3260d92c0da7SBart Van Assche 				      num_online_nodes());
3261d92c0da7SBart Van Assche 		const int ch_end = ((node_idx + 1) * target->ch_count /
3262d92c0da7SBart Van Assche 				    num_online_nodes());
3263d92c0da7SBart Van Assche 		const int cv_start = (node_idx * ibdev->num_comp_vectors /
3264d92c0da7SBart Van Assche 				      num_online_nodes() + target->comp_vector)
3265d92c0da7SBart Van Assche 				     % ibdev->num_comp_vectors;
3266d92c0da7SBart Van Assche 		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3267d92c0da7SBart Van Assche 				    num_online_nodes() + target->comp_vector)
3268d92c0da7SBart Van Assche 				   % ibdev->num_comp_vectors;
3269d92c0da7SBart Van Assche 		int cpu_idx = 0;
3270d92c0da7SBart Van Assche 
3271d92c0da7SBart Van Assche 		for_each_online_cpu(cpu) {
3272d92c0da7SBart Van Assche 			if (cpu_to_node(cpu) != node)
3273d92c0da7SBart Van Assche 				continue;
3274d92c0da7SBart Van Assche 			if (ch_start + cpu_idx >= ch_end)
3275d92c0da7SBart Van Assche 				continue;
3276d92c0da7SBart Van Assche 			ch = &target->ch[ch_start + cpu_idx];
3277d92c0da7SBart Van Assche 			ch->target = target;
3278d92c0da7SBart Van Assche 			ch->comp_vector = cv_start == cv_end ? cv_start :
3279d92c0da7SBart Van Assche 				cv_start + cpu_idx % (cv_end - cv_start);
3280d92c0da7SBart Van Assche 			spin_lock_init(&ch->lock);
3281d92c0da7SBart Van Assche 			INIT_LIST_HEAD(&ch->free_tx);
3282d92c0da7SBart Van Assche 			ret = srp_new_cm_id(ch);
3283d92c0da7SBart Van Assche 			if (ret)
3284d92c0da7SBart Van Assche 				goto err_disconnect;
3285aef9ec39SRoland Dreier 
3286509c07bcSBart Van Assche 			ret = srp_create_ch_ib(ch);
3287aef9ec39SRoland Dreier 			if (ret)
3288d92c0da7SBart Van Assche 				goto err_disconnect;
3289aef9ec39SRoland Dreier 
3290d92c0da7SBart Van Assche 			ret = srp_alloc_req_data(ch);
32919fe4bcf4SDavid Dillow 			if (ret)
3292d92c0da7SBart Van Assche 				goto err_disconnect;
3293aef9ec39SRoland Dreier 
3294d92c0da7SBart Van Assche 			ret = srp_connect_ch(ch, multich);
3295aef9ec39SRoland Dreier 			if (ret) {
32967aa54bd7SDavid Dillow 				shost_printk(KERN_ERR, target->scsi_host,
3297d92c0da7SBart Van Assche 					     PFX "Connection %d/%d failed\n",
3298d92c0da7SBart Van Assche 					     ch_start + cpu_idx,
3299d92c0da7SBart Van Assche 					     target->ch_count);
3300d92c0da7SBart Van Assche 				if (node_idx == 0 && cpu_idx == 0) {
3301d92c0da7SBart Van Assche 					goto err_disconnect;
3302d92c0da7SBart Van Assche 				} else {
3303d92c0da7SBart Van Assche 					srp_free_ch_ib(target, ch);
3304d92c0da7SBart Van Assche 					srp_free_req_data(target, ch);
3305d92c0da7SBart Van Assche 					target->ch_count = ch - target->ch;
3306c257ea6fSBart Van Assche 					goto connected;
3307aef9ec39SRoland Dreier 				}
3308d92c0da7SBart Van Assche 			}
3309d92c0da7SBart Van Assche 
3310d92c0da7SBart Van Assche 			multich = true;
3311d92c0da7SBart Van Assche 			cpu_idx++;
3312d92c0da7SBart Van Assche 		}
3313d92c0da7SBart Van Assche 		node_idx++;
3314d92c0da7SBart Van Assche 	}
3315d92c0da7SBart Van Assche 
3316c257ea6fSBart Van Assche connected:
3317d92c0da7SBart Van Assche 	target->scsi_host->nr_hw_queues = target->ch_count;
3318aef9ec39SRoland Dreier 
3319aef9ec39SRoland Dreier 	ret = srp_add_target(host, target);
3320aef9ec39SRoland Dreier 	if (ret)
3321aef9ec39SRoland Dreier 		goto err_disconnect;
3322aef9ec39SRoland Dreier 
332334aa654eSBart Van Assche 	if (target->state != SRP_TARGET_REMOVED) {
3324e7ffde01SBart Van Assche 		shost_printk(KERN_DEBUG, target->scsi_host, PFX
3325e7ffde01SBart Van Assche 			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3326e7ffde01SBart Van Assche 			     be64_to_cpu(target->id_ext),
3327e7ffde01SBart Van Assche 			     be64_to_cpu(target->ioc_guid),
3328747fe000SBart Van Assche 			     be16_to_cpu(target->pkey),
3329e7ffde01SBart Van Assche 			     be64_to_cpu(target->service_id),
3330747fe000SBart Van Assche 			     target->sgid.raw, target->orig_dgid.raw);
333134aa654eSBart Van Assche 	}
3332e7ffde01SBart Van Assche 
33332d7091bcSBart Van Assche 	ret = count;
33342d7091bcSBart Van Assche 
33352d7091bcSBart Van Assche out:
33362d7091bcSBart Van Assche 	mutex_unlock(&host->add_target_mutex);
333734aa654eSBart Van Assche 
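	/*
	 * The first put balances the scsi_host_get() above.  On failure
	 * (ret < 0) also drop the reference obtained by scsi_host_alloc()
	 * so that the SCSI host is freed.
	 */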
333834aa654eSBart Van Assche 	scsi_host_put(target->scsi_host);
3339bc44bd1dSBart Van Assche 	if (ret < 0)
3340bc44bd1dSBart Van Assche 		scsi_host_put(target->scsi_host);
334134aa654eSBart Van Assche 
33422d7091bcSBart Van Assche 	return ret;
3343aef9ec39SRoland Dreier 
3344aef9ec39SRoland Dreier err_disconnect:
3345aef9ec39SRoland Dreier 	srp_disconnect_target(target);
3346aef9ec39SRoland Dreier 
3347d92c0da7SBart Van Assche 	for (i = 0; i < target->ch_count; i++) {
3348d92c0da7SBart Van Assche 		ch = &target->ch[i];
3349509c07bcSBart Van Assche 		srp_free_ch_ib(target, ch);
3350509c07bcSBart Van Assche 		srp_free_req_data(target, ch);
3351d92c0da7SBart Van Assche 	}
3352d92c0da7SBart Van Assche 
3353d92c0da7SBart Van Assche 	kfree(target->ch);
33542d7091bcSBart Van Assche 	goto out;
3355aef9ec39SRoland Dreier }
3356aef9ec39SRoland Dreier 
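/*
 * Writing a parameter string to the add_target attribute below creates and
 * connects a new target port.  Illustrative invocation with placeholder
 * values; the mandatory parameter set is defined by SRP_OPT_ALL earlier in
 * this file, and optional parameters such as comp_vector, tl_retry_count
 * and allow_ext_sg are parsed in srp_parse_options() above:
 *
 *   echo "id_ext=<x>,ioc_guid=<x>,dgid=<x>,pkey=<x>,service_id=<x>" \
 *       > /sys/class/infiniband_srp/srp-<ibdev>-<port>/add_target
 */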
3357ee959b00STony Jones static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3358aef9ec39SRoland Dreier 
3359ee959b00STony Jones static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3360ee959b00STony Jones 			  char *buf)
3361aef9ec39SRoland Dreier {
3362ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3363aef9ec39SRoland Dreier 
336405321937SGreg Kroah-Hartman 	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3365aef9ec39SRoland Dreier }
3366aef9ec39SRoland Dreier 
3367ee959b00STony Jones static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3368aef9ec39SRoland Dreier 
3369ee959b00STony Jones static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3370ee959b00STony Jones 			 char *buf)
3371aef9ec39SRoland Dreier {
3372ee959b00STony Jones 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3373aef9ec39SRoland Dreier 
3374aef9ec39SRoland Dreier 	return sprintf(buf, "%d\n", host->port);
3375aef9ec39SRoland Dreier }
3376aef9ec39SRoland Dreier 
3377ee959b00STony Jones static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3378aef9ec39SRoland Dreier 
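/*
 * Allocate and register the srp_host for one port of an RDMA device.  The
 * device created here appears in the infiniband_srp sysfs class and carries
 * the add_target, ibdev and port attributes defined above.
 */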
3379f5358a17SRoland Dreier static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3380aef9ec39SRoland Dreier {
3381aef9ec39SRoland Dreier 	struct srp_host *host;
3382aef9ec39SRoland Dreier 
3383aef9ec39SRoland Dreier 	host = kzalloc(sizeof *host, GFP_KERNEL);
3384aef9ec39SRoland Dreier 	if (!host)
3385aef9ec39SRoland Dreier 		return NULL;
3386aef9ec39SRoland Dreier 
3387aef9ec39SRoland Dreier 	INIT_LIST_HEAD(&host->target_list);
3388b3589fd4SMatthew Wilcox 	spin_lock_init(&host->target_lock);
3389aef9ec39SRoland Dreier 	init_completion(&host->released);
33902d7091bcSBart Van Assche 	mutex_init(&host->add_target_mutex);
339105321937SGreg Kroah-Hartman 	host->srp_dev = device;
3392aef9ec39SRoland Dreier 	host->port = port;
3393aef9ec39SRoland Dreier 
3394ee959b00STony Jones 	host->dev.class = &srp_class;
3395ee959b00STony Jones 	host->dev.parent = device->dev->dma_device;
3396d927e38cSKay Sievers 	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3397aef9ec39SRoland Dreier 
3398ee959b00STony Jones 	if (device_register(&host->dev))
3399f5358a17SRoland Dreier 		goto free_host;
3400ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_add_target))
3401aef9ec39SRoland Dreier 		goto err_class;
3402ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3403aef9ec39SRoland Dreier 		goto err_class;
3404ee959b00STony Jones 	if (device_create_file(&host->dev, &dev_attr_port))
3405aef9ec39SRoland Dreier 		goto err_class;
3406aef9ec39SRoland Dreier 
3407aef9ec39SRoland Dreier 	return host;
3408aef9ec39SRoland Dreier 
3409aef9ec39SRoland Dreier err_class:
3410ee959b00STony Jones 	device_unregister(&host->dev);
3411aef9ec39SRoland Dreier 
3412f5358a17SRoland Dreier free_host:
3413aef9ec39SRoland Dreier 	kfree(host);
3414aef9ec39SRoland Dreier 
3415aef9ec39SRoland Dreier 	return NULL;
3416aef9ec39SRoland Dreier }
3417aef9ec39SRoland Dreier 
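/*
 * Add callback of the IB client registered in srp_init_module().  Detects
 * which memory registration schemes the device supports, computes the MR
 * page size and the maximum number of pages per MR, allocates a protection
 * domain (and, when allowed, a global DMA MR) and creates an srp_host for
 * every port of the device.
 */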
3418aef9ec39SRoland Dreier static void srp_add_one(struct ib_device *device)
3419aef9ec39SRoland Dreier {
3420f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3421aef9ec39SRoland Dreier 	struct srp_host *host;
34224139032bSHal Rosenstock 	int mr_page_shift, p;
342352ede08fSBart Van Assche 	u64 max_pages_per_mr;
3424aef9ec39SRoland Dreier 
3425f5358a17SRoland Dreier 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3426f5358a17SRoland Dreier 	if (!srp_dev)
34274a061b28SOr Gerlitz 		return;
3428f5358a17SRoland Dreier 
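	/*
	 * A device may support FMR (the legacy alloc_fmr/map_phys_fmr verbs),
	 * fast registration (IB_DEVICE_MEM_MGT_EXTENSIONS), both or neither.
	 * Fast registration is used if it is available and either FMR is not
	 * or prefer_fr has been set; otherwise fall back to FMR.
	 */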
3429d1b4289eSBart Van Assche 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3430d1b4289eSBart Van Assche 			    device->map_phys_fmr && device->unmap_fmr);
34314a061b28SOr Gerlitz 	srp_dev->has_fr = (device->attrs.device_cap_flags &
34325cfb1782SBart Van Assche 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
34335cfb1782SBart Van Assche 	if (!srp_dev->has_fmr && !srp_dev->has_fr)
34345cfb1782SBart Van Assche 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
34355cfb1782SBart Van Assche 
34365cfb1782SBart Van Assche 	srp_dev->use_fast_reg = (srp_dev->has_fr &&
34375cfb1782SBart Van Assche 				 (!srp_dev->has_fmr || prefer_fr));
3438002f1567SBart Van Assche 	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3439d1b4289eSBart Van Assche 
3440f5358a17SRoland Dreier 	/*
3441f5358a17SRoland Dreier 	 * Use the smallest page size supported by the HCA, down to a
34428f26c9ffSDavid Dillow 	 * minimum of 4096 bytes. We're unlikely to build large sglists
34438f26c9ffSDavid Dillow 	 * out of smaller entries.
3444f5358a17SRoland Dreier 	 */
34454a061b28SOr Gerlitz 	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
344652ede08fSBart Van Assche 	srp_dev->mr_page_size	= 1 << mr_page_shift;
344752ede08fSBart Van Assche 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
34484a061b28SOr Gerlitz 	max_pages_per_mr	= device->attrs.max_mr_size;
344952ede08fSBart Van Assche 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
345052ede08fSBart Van Assche 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
345152ede08fSBart Van Assche 					  max_pages_per_mr);
34525cfb1782SBart Van Assche 	if (srp_dev->use_fast_reg) {
34535cfb1782SBart Van Assche 		srp_dev->max_pages_per_mr =
34545cfb1782SBart Van Assche 			min_t(u32, srp_dev->max_pages_per_mr,
34554a061b28SOr Gerlitz 			      device->attrs.max_fast_reg_page_list_len);
34565cfb1782SBart Van Assche 	}
345752ede08fSBart Van Assche 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
345852ede08fSBart Van Assche 				   srp_dev->max_pages_per_mr;
34594a061b28SOr Gerlitz 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
34604a061b28SOr Gerlitz 		 device->name, mr_page_shift, device->attrs.max_mr_size,
34614a061b28SOr Gerlitz 		 device->attrs.max_fast_reg_page_list_len,
346252ede08fSBart Van Assche 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3463f5358a17SRoland Dreier 
3464f5358a17SRoland Dreier 	INIT_LIST_HEAD(&srp_dev->dev_list);
3465f5358a17SRoland Dreier 
3466f5358a17SRoland Dreier 	srp_dev->dev = device;
3467f5358a17SRoland Dreier 	srp_dev->pd  = ib_alloc_pd(device);
3468f5358a17SRoland Dreier 	if (IS_ERR(srp_dev->pd))
3469f5358a17SRoland Dreier 		goto free_dev;
3470f5358a17SRoland Dreier 
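	/*
	 * Create a global DMA MR with remote read/write access when per-I/O
	 * registration is not enforced (register_always is clear) or when the
	 * device supports neither FMR nor fast registration; otherwise leave
	 * global_mr NULL so that data buffers are registered per command.
	 */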
347103f6fb93SBart Van Assche 	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
347203f6fb93SBart Van Assche 		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3473f5358a17SRoland Dreier 						   IB_ACCESS_LOCAL_WRITE |
3474f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_READ |
3475f5358a17SRoland Dreier 						   IB_ACCESS_REMOTE_WRITE);
347603f6fb93SBart Van Assche 		if (IS_ERR(srp_dev->global_mr))
3477f5358a17SRoland Dreier 			goto err_pd;
347803f6fb93SBart Van Assche 	} else {
347903f6fb93SBart Van Assche 		srp_dev->global_mr = NULL;
348003f6fb93SBart Van Assche 	}
3481f5358a17SRoland Dreier 
34824139032bSHal Rosenstock 	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3483f5358a17SRoland Dreier 		host = srp_add_port(srp_dev, p);
3484aef9ec39SRoland Dreier 		if (host)
3485f5358a17SRoland Dreier 			list_add_tail(&host->list, &srp_dev->dev_list);
3486aef9ec39SRoland Dreier 	}
3487aef9ec39SRoland Dreier 
3488f5358a17SRoland Dreier 	ib_set_client_data(device, &srp_client, srp_dev);
34894a061b28SOr Gerlitz 	return;
3490f5358a17SRoland Dreier 
3491f5358a17SRoland Dreier err_pd:
3492f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3493f5358a17SRoland Dreier 
3494f5358a17SRoland Dreier free_dev:
3495f5358a17SRoland Dreier 	kfree(srp_dev);
3496aef9ec39SRoland Dreier }
3497aef9ec39SRoland Dreier 
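/*
 * Remove callback, the counterpart of srp_add_one().  Unregisters the
 * per-port sysfs devices, queues removal of every target port that was
 * connected through this device, waits for the removal work to finish and
 * releases the global MR (if any) and the protection domain.
 */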
34987c1eb45aSHaggai Eran static void srp_remove_one(struct ib_device *device, void *client_data)
3499aef9ec39SRoland Dreier {
3500f5358a17SRoland Dreier 	struct srp_device *srp_dev;
3501aef9ec39SRoland Dreier 	struct srp_host *host, *tmp_host;
3502ef6c49d8SBart Van Assche 	struct srp_target_port *target;
3503aef9ec39SRoland Dreier 
35047c1eb45aSHaggai Eran 	srp_dev = client_data;
35051fe0cb84SDotan Barak 	if (!srp_dev)
35061fe0cb84SDotan Barak 		return;
3507aef9ec39SRoland Dreier 
3508f5358a17SRoland Dreier 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3509ee959b00STony Jones 		device_unregister(&host->dev);
3510aef9ec39SRoland Dreier 		/*
3511aef9ec39SRoland Dreier 		 * Wait for the sysfs entry to go away, so that no new
3512aef9ec39SRoland Dreier 		 * target ports can be created.
3513aef9ec39SRoland Dreier 		 */
3514aef9ec39SRoland Dreier 		wait_for_completion(&host->released);
3515aef9ec39SRoland Dreier 
3516aef9ec39SRoland Dreier 		/*
3517ef6c49d8SBart Van Assche 		 * Remove all target ports.
3518aef9ec39SRoland Dreier 		 */
3519b3589fd4SMatthew Wilcox 		spin_lock(&host->target_lock);
3520ef6c49d8SBart Van Assche 		list_for_each_entry(target, &host->target_list, list)
3521ef6c49d8SBart Van Assche 			srp_queue_remove_work(target);
3522b3589fd4SMatthew Wilcox 		spin_unlock(&host->target_lock);
3523aef9ec39SRoland Dreier 
3524aef9ec39SRoland Dreier 		/*
3525bcc05910SBart Van Assche 	 * Wait for tl_err work and the target port removal work to finish.
3526aef9ec39SRoland Dreier 		 */
3527ef6c49d8SBart Van Assche 		flush_workqueue(system_long_wq);
3528bcc05910SBart Van Assche 		flush_workqueue(srp_remove_wq);
3529aef9ec39SRoland Dreier 
3530aef9ec39SRoland Dreier 		kfree(host);
3531aef9ec39SRoland Dreier 	}
3532aef9ec39SRoland Dreier 
353303f6fb93SBart Van Assche 	if (srp_dev->global_mr)
353403f6fb93SBart Van Assche 		ib_dereg_mr(srp_dev->global_mr);
3535f5358a17SRoland Dreier 	ib_dealloc_pd(srp_dev->pd);
3536f5358a17SRoland Dreier 
3537f5358a17SRoland Dreier 	kfree(srp_dev);
3538aef9ec39SRoland Dreier }
3539aef9ec39SRoland Dreier 
35403236822bSFUJITA Tomonori static struct srp_function_template ib_srp_transport_functions = {
3541ed9b2264SBart Van Assche 	.has_rport_state	 = true,
3542ed9b2264SBart Van Assche 	.reset_timer_if_blocked	 = true,
3543a95cadb9SBart Van Assche 	.reconnect_delay	 = &srp_reconnect_delay,
3544ed9b2264SBart Van Assche 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
3545ed9b2264SBart Van Assche 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
3546ed9b2264SBart Van Assche 	.reconnect		 = srp_rport_reconnect,
3547dc1bdbd9SBart Van Assche 	.rport_delete		 = srp_rport_delete,
3548ed9b2264SBart Van Assche 	.terminate_rport_io	 = srp_terminate_io,
35493236822bSFUJITA Tomonori };
35503236822bSFUJITA Tomonori 
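/*
 * Module initialization: validate the scatter/gather module parameters,
 * create the workqueue used for target removal, register the SRP transport
 * template, the infiniband_srp sysfs class, the SA client and finally the
 * IB client so that srp_add_one() runs for every RDMA device.
 */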
3551aef9ec39SRoland Dreier static int __init srp_init_module(void)
3552aef9ec39SRoland Dreier {
3553aef9ec39SRoland Dreier 	int ret;
3554aef9ec39SRoland Dreier 
355549248644SDavid Dillow 	if (srp_sg_tablesize) {
3556e0bda7d8SBart Van Assche 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
355749248644SDavid Dillow 		if (!cmd_sg_entries)
355849248644SDavid Dillow 			cmd_sg_entries = srp_sg_tablesize;
355949248644SDavid Dillow 	}
356049248644SDavid Dillow 
356149248644SDavid Dillow 	if (!cmd_sg_entries)
356249248644SDavid Dillow 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
356349248644SDavid Dillow 
356449248644SDavid Dillow 	if (cmd_sg_entries > 255) {
3565e0bda7d8SBart Van Assche 		pr_warn("Clamping cmd_sg_entries to 255\n");
356649248644SDavid Dillow 		cmd_sg_entries = 255;
35671e89a194SDavid Dillow 	}
35681e89a194SDavid Dillow 
3569c07d424dSDavid Dillow 	if (!indirect_sg_entries)
3570c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3571c07d424dSDavid Dillow 	else if (indirect_sg_entries < cmd_sg_entries) {
3572e0bda7d8SBart Van Assche 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3573e0bda7d8SBart Van Assche 			cmd_sg_entries);
3574c07d424dSDavid Dillow 		indirect_sg_entries = cmd_sg_entries;
3575c07d424dSDavid Dillow 	}
3576c07d424dSDavid Dillow 
3577bcc05910SBart Van Assche 	srp_remove_wq = create_workqueue("srp_remove");
3578da05be29SWei Yongjun 	if (!srp_remove_wq) {
3579da05be29SWei Yongjun 		ret = -ENOMEM;
3580bcc05910SBart Van Assche 		goto out;
3581bcc05910SBart Van Assche 	}
3582bcc05910SBart Van Assche 
3583bcc05910SBart Van Assche 	ret = -ENOMEM;
35843236822bSFUJITA Tomonori 	ib_srp_transport_template =
35853236822bSFUJITA Tomonori 		srp_attach_transport(&ib_srp_transport_functions);
35863236822bSFUJITA Tomonori 	if (!ib_srp_transport_template)
3587bcc05910SBart Van Assche 		goto destroy_wq;
35883236822bSFUJITA Tomonori 
3589aef9ec39SRoland Dreier 	ret = class_register(&srp_class);
3590aef9ec39SRoland Dreier 	if (ret) {
3591e0bda7d8SBart Van Assche 		pr_err("couldn't register class infiniband_srp\n");
3592bcc05910SBart Van Assche 		goto release_tr;
3593aef9ec39SRoland Dreier 	}
3594aef9ec39SRoland Dreier 
3595c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&srp_sa_client);
3596c1a0b23bSMichael S. Tsirkin 
3597aef9ec39SRoland Dreier 	ret = ib_register_client(&srp_client);
3598aef9ec39SRoland Dreier 	if (ret) {
3599e0bda7d8SBart Van Assche 		pr_err("couldn't register IB client\n");
3600bcc05910SBart Van Assche 		goto unreg_sa;
3601aef9ec39SRoland Dreier 	}
3602aef9ec39SRoland Dreier 
3603bcc05910SBart Van Assche out:
3604bcc05910SBart Van Assche 	return ret;
3605bcc05910SBart Van Assche 
3606bcc05910SBart Van Assche unreg_sa:
3607bcc05910SBart Van Assche 	ib_sa_unregister_client(&srp_sa_client);
3608bcc05910SBart Van Assche 	class_unregister(&srp_class);
3609bcc05910SBart Van Assche 
3610bcc05910SBart Van Assche release_tr:
3611bcc05910SBart Van Assche 	srp_release_transport(ib_srp_transport_template);
3612bcc05910SBart Van Assche 
3613bcc05910SBart Van Assche destroy_wq:
3614bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3615bcc05910SBart Van Assche 	goto out;
3616aef9ec39SRoland Dreier }
3617aef9ec39SRoland Dreier 
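/* Tear everything down in the reverse order of srp_init_module(). */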
3618aef9ec39SRoland Dreier static void __exit srp_cleanup_module(void)
3619aef9ec39SRoland Dreier {
3620aef9ec39SRoland Dreier 	ib_unregister_client(&srp_client);
3621c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&srp_sa_client);
3622aef9ec39SRoland Dreier 	class_unregister(&srp_class);
36233236822bSFUJITA Tomonori 	srp_release_transport(ib_srp_transport_template);
3624bcc05910SBart Van Assche 	destroy_workqueue(srp_remove_wq);
3625aef9ec39SRoland Dreier }
3626aef9ec39SRoland Dreier 
3627aef9ec39SRoland Dreier module_init(srp_init_module);
3628aef9ec39SRoland Dreier module_exit(srp_cleanup_module);
3629