xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision d4a379a52c3c2dc44366c4f6722c063a7d0de179)
1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/hex.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/err.h>
40 #include <linux/string.h>
41 #include <linux/parser.h>
42 #include <linux/random.h>
43 #include <linux/jiffies.h>
44 #include <linux/lockdep.h>
45 #include <linux/inet.h>
46 #include <rdma/ib_cache.h>
47 
48 #include <linux/atomic.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_dbg.h>
53 #include <scsi/scsi_tcq.h>
54 #include <scsi/srp.h>
55 #include <scsi/scsi_transport_srp.h>
56 
57 #include "ib_srp.h"
58 
59 #define DRV_NAME	"ib_srp"
60 #define PFX		DRV_NAME ": "
61 
62 MODULE_AUTHOR("Roland Dreier");
63 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
64 MODULE_LICENSE("Dual BSD/GPL");
65 
66 static unsigned int srp_sg_tablesize;
67 static unsigned int cmd_sg_entries;
68 static unsigned int indirect_sg_entries;
69 static bool allow_ext_sg;
70 static bool register_always = true;
71 static bool never_register;
72 static int topspin_workarounds = 1;
73 
74 module_param(srp_sg_tablesize, uint, 0444);
75 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
76 
77 module_param(cmd_sg_entries, uint, 0444);
78 MODULE_PARM_DESC(cmd_sg_entries,
79 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
80 
81 module_param(indirect_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(indirect_sg_entries,
83 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
84 
85 module_param(allow_ext_sg, bool, 0444);
86 MODULE_PARM_DESC(allow_ext_sg,
87 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
88 
89 module_param(topspin_workarounds, int, 0444);
90 MODULE_PARM_DESC(topspin_workarounds,
91 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
92 
93 module_param(register_always, bool, 0444);
94 MODULE_PARM_DESC(register_always,
95 		 "Use memory registration even for contiguous memory regions");
96 
97 module_param(never_register, bool, 0444);
98 MODULE_PARM_DESC(never_register, "Never register memory");
99 
100 static const struct kernel_param_ops srp_tmo_ops;
101 
102 static int srp_reconnect_delay = 10;
103 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
104 		S_IRUGO | S_IWUSR);
105 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
106 
107 static int srp_fast_io_fail_tmo = 15;
108 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
109 		S_IRUGO | S_IWUSR);
110 MODULE_PARM_DESC(fast_io_fail_tmo,
111 		 "Number of seconds between the observation of a transport"
112 		 " layer error and failing all I/O. \"off\" means that this"
113 		 " functionality is disabled.");
114 
115 static int srp_dev_loss_tmo = 600;
116 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
117 		S_IRUGO | S_IWUSR);
118 MODULE_PARM_DESC(dev_loss_tmo,
119 		 "Maximum number of seconds that the SRP transport should"
120 		 " insulate transport layer errors. After this time has been"
121 		 " exceeded the SCSI host is removed. Should be"
122 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
123 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
124 		 " this functionality is disabled.");
125 
126 static bool srp_use_imm_data = true;
127 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
128 MODULE_PARM_DESC(use_imm_data,
129 		 "Whether or not to request permission to use immediate data during SRP login.");
130 
131 static unsigned int srp_max_imm_data = 8 * 1024;
132 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
133 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
134 
135 static unsigned ch_count;
136 module_param(ch_count, uint, 0444);
137 MODULE_PARM_DESC(ch_count,
138 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
139 
140 static int srp_add_one(struct ib_device *device);
141 static void srp_remove_one(struct ib_device *device, void *client_data);
142 static void srp_rename_dev(struct ib_device *device, void *client_data);
143 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
144 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
145 		const char *opname);
146 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
147 			     const struct ib_cm_event *event);
148 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
149 			       struct rdma_cm_event *event);
150 
151 static struct scsi_transport_template *ib_srp_transport_template;
152 static struct workqueue_struct *srp_remove_wq;
153 
154 static struct ib_client srp_client = {
155 	.name   = "srp",
156 	.add    = srp_add_one,
157 	.remove = srp_remove_one,
158 	.rename = srp_rename_dev
159 };
160 
161 static struct ib_sa_client srp_sa_client;
162 
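/*
 * Getter/setter for the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo
 * module parameters. A negative timeout is reported as "off" and
 * srp_tmo_set() validates the combination of the three values before
 * updating a parameter.
 */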
163 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
164 {
165 	int tmo = *(int *)kp->arg;
166 
167 	if (tmo >= 0)
168 		return sysfs_emit(buffer, "%d\n", tmo);
169 	else
170 		return sysfs_emit(buffer, "off\n");
171 }
172 
173 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
174 {
175 	int tmo, res;
176 
177 	res = srp_parse_tmo(&tmo, val);
178 	if (res)
179 		goto out;
180 
181 	if (kp->arg == &srp_reconnect_delay)
182 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
183 				    srp_dev_loss_tmo);
184 	else if (kp->arg == &srp_fast_io_fail_tmo)
185 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
186 	else
187 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
188 				    tmo);
189 	if (res)
190 		goto out;
191 	*(int *)kp->arg = tmo;
192 
193 out:
194 	return res;
195 }
196 
197 static const struct kernel_param_ops srp_tmo_ops = {
198 	.get = srp_tmo_get,
199 	.set = srp_tmo_set,
200 };
201 
202 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
203 {
204 	return (struct srp_target_port *) host->hostdata;
205 }
206 
207 static const char *srp_target_info(struct Scsi_Host *host)
208 {
209 	return host_to_target(host)->target_name;
210 }
211 
212 static int srp_target_is_topspin(struct srp_target_port *target)
213 {
214 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
215 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
216 
217 	return topspin_workarounds &&
218 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
219 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
220 }
221 
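/*
 * Allocate an information unit: the srp_iu structure plus a zeroed buffer of
 * @size bytes that is DMA-mapped for @direction.
 */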
222 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
223 				   gfp_t gfp_mask,
224 				   enum dma_data_direction direction)
225 {
226 	struct srp_iu *iu;
227 
228 	iu = kmalloc(sizeof *iu, gfp_mask);
229 	if (!iu)
230 		goto out;
231 
232 	iu->buf = kzalloc(size, gfp_mask);
233 	if (!iu->buf)
234 		goto out_free_iu;
235 
236 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
237 				    direction);
238 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
239 		goto out_free_buf;
240 
241 	iu->size      = size;
242 	iu->direction = direction;
243 
244 	return iu;
245 
246 out_free_buf:
247 	kfree(iu->buf);
248 out_free_iu:
249 	kfree(iu);
250 out:
251 	return NULL;
252 }
253 
254 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
255 {
256 	if (!iu)
257 		return;
258 
259 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
260 			    iu->direction);
261 	kfree(iu->buf);
262 	kfree(iu);
263 }
264 
265 static void srp_qp_event(struct ib_event *event, void *context)
266 {
267 	pr_debug("QP event %s (%d)\n",
268 		 ib_event_msg(event->event), event->event);
269 }
270 
271 static int srp_init_ib_qp(struct srp_target_port *target,
272 			  struct ib_qp *qp)
273 {
274 	struct ib_qp_attr *attr;
275 	int ret;
276 
277 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
278 	if (!attr)
279 		return -ENOMEM;
280 
281 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
282 				  target->srp_host->port,
283 				  be16_to_cpu(target->ib_cm.pkey),
284 				  &attr->pkey_index);
285 	if (ret)
286 		goto out;
287 
288 	attr->qp_state        = IB_QPS_INIT;
289 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
290 				    IB_ACCESS_REMOTE_WRITE);
291 	attr->port_num        = target->srp_host->port;
292 
293 	ret = ib_modify_qp(qp, attr,
294 			   IB_QP_STATE		|
295 			   IB_QP_PKEY_INDEX	|
296 			   IB_QP_ACCESS_FLAGS	|
297 			   IB_QP_PORT);
298 
299 out:
300 	kfree(attr);
301 	return ret;
302 }
303 
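/*
 * Allocate a new IB CM ID for @ch, destroy the previous one if present, and
 * initialize the path record fields from the target port information.
 */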
304 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
305 {
306 	struct srp_target_port *target = ch->target;
307 	struct ib_cm_id *new_cm_id;
308 
309 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
310 				    srp_ib_cm_handler, ch);
311 	if (IS_ERR(new_cm_id))
312 		return PTR_ERR(new_cm_id);
313 
314 	if (ch->ib_cm.cm_id)
315 		ib_destroy_cm_id(ch->ib_cm.cm_id);
316 	ch->ib_cm.cm_id = new_cm_id;
317 	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
318 			    target->srp_host->port))
319 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
320 	else
321 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
322 	ch->ib_cm.path.sgid = target->sgid;
323 	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
324 	ch->ib_cm.path.pkey = target->ib_cm.pkey;
325 	ch->ib_cm.path.service_id = target->ib_cm.service_id;
326 
327 	return 0;
328 }
329 
330 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
331 {
332 	struct srp_target_port *target = ch->target;
333 	struct rdma_cm_id *new_cm_id;
334 	int ret;
335 
336 	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
337 				   RDMA_PS_TCP, IB_QPT_RC);
338 	if (IS_ERR(new_cm_id)) {
339 		ret = PTR_ERR(new_cm_id);
340 		new_cm_id = NULL;
341 		goto out;
342 	}
343 
344 	init_completion(&ch->done);
345 	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
346 				&target->rdma_cm.src.sa : NULL,
347 				&target->rdma_cm.dst.sa,
348 				SRP_PATH_REC_TIMEOUT_MS);
349 	if (ret) {
350 		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
351 		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
352 		goto out;
353 	}
354 	ret = wait_for_completion_interruptible(&ch->done);
355 	if (ret < 0)
356 		goto out;
357 
358 	ret = ch->status;
359 	if (ret) {
360 		pr_err("Resolving address %pISpsc failed (%d)\n",
361 		       &target->rdma_cm.dst, ret);
362 		goto out;
363 	}
364 
365 	swap(ch->rdma_cm.cm_id, new_cm_id);
366 
367 out:
368 	if (new_cm_id)
369 		rdma_destroy_id(new_cm_id);
370 
371 	return ret;
372 }
373 
374 static int srp_new_cm_id(struct srp_rdma_ch *ch)
375 {
376 	struct srp_target_port *target = ch->target;
377 
378 	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
379 		srp_new_ib_cm_id(ch);
380 }
381 
382 /**
383  * srp_destroy_fr_pool() - free the resources owned by a pool
384  * @pool: Fast registration pool to be destroyed.
385  */
386 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
387 {
388 	int i;
389 	struct srp_fr_desc *d;
390 
391 	if (!pool)
392 		return;
393 
394 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
395 		if (d->mr)
396 			ib_dereg_mr(d->mr);
397 	}
398 	kfree(pool);
399 }
400 
401 /**
402  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
403  * @device:            IB device to allocate fast registration descriptors for.
404  * @pd:                Protection domain associated with the FR descriptors.
405  * @pool_size:         Number of descriptors to allocate.
406  * @max_page_list_len: Maximum fast registration work request page list length.
407  */
408 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
409 					      struct ib_pd *pd, int pool_size,
410 					      int max_page_list_len)
411 {
412 	struct srp_fr_pool *pool;
413 	struct srp_fr_desc *d;
414 	struct ib_mr *mr;
415 	int i, ret = -EINVAL;
416 	enum ib_mr_type mr_type;
417 
418 	if (pool_size <= 0)
419 		goto err;
420 	ret = -ENOMEM;
421 	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
422 	if (!pool)
423 		goto err;
424 	pool->size = pool_size;
425 	pool->max_page_list_len = max_page_list_len;
426 	spin_lock_init(&pool->lock);
427 	INIT_LIST_HEAD(&pool->free_list);
428 
429 	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
430 		mr_type = IB_MR_TYPE_SG_GAPS;
431 	else
432 		mr_type = IB_MR_TYPE_MEM_REG;
433 
434 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
435 		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
436 		if (IS_ERR(mr)) {
437 			ret = PTR_ERR(mr);
438 			if (ret == -ENOMEM)
439 				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
440 					dev_name(&device->dev));
441 			goto destroy_pool;
442 		}
443 		d->mr = mr;
444 		list_add_tail(&d->entry, &pool->free_list);
445 	}
446 
447 out:
448 	return pool;
449 
450 destroy_pool:
451 	srp_destroy_fr_pool(pool);
452 
453 err:
454 	pool = ERR_PTR(ret);
455 	goto out;
456 }
457 
458 /**
459  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
460  * @pool: Pool to obtain descriptor from.
461  */
462 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
463 {
464 	struct srp_fr_desc *d = NULL;
465 	unsigned long flags;
466 
467 	spin_lock_irqsave(&pool->lock, flags);
468 	if (!list_empty(&pool->free_list)) {
469 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
470 		list_del(&d->entry);
471 	}
472 	spin_unlock_irqrestore(&pool->lock, flags);
473 
474 	return d;
475 }
476 
477 /**
478  * srp_fr_pool_put() - put an FR descriptor back in the free list
479  * @pool: Pool the descriptor was allocated from.
480  * @desc: Pointer to an array of fast registration descriptor pointers.
481  * @n:    Number of descriptors to put back.
482  *
483  * Note: The caller must already have queued an invalidation request for
484  * desc->mr->rkey before calling this function.
485  */
486 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
487 			    int n)
488 {
489 	unsigned long flags;
490 	int i;
491 
492 	spin_lock_irqsave(&pool->lock, flags);
493 	for (i = 0; i < n; i++)
494 		list_add(&desc[i]->entry, &pool->free_list);
495 	spin_unlock_irqrestore(&pool->lock, flags);
496 }
497 
498 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
499 {
500 	struct srp_device *dev = target->srp_host->srp_dev;
501 
502 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
503 				  dev->max_pages_per_mr);
504 }
505 
506 /**
507  * srp_destroy_qp() - destroy an RDMA queue pair
508  * @ch: SRP RDMA channel.
509  *
510  * Drain the QP before destroying it. This prevents the receive
511  * completion handler from accessing the queue pair while it is
512  * being destroyed.
513  */
514 static void srp_destroy_qp(struct srp_rdma_ch *ch)
515 {
516 	spin_lock_irq(&ch->lock);
517 	ib_process_cq_direct(ch->send_cq, -1);
518 	spin_unlock_irq(&ch->lock);
519 
520 	ib_drain_qp(ch->qp);
521 	ib_destroy_qp(ch->qp);
522 }
523 
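/*
 * Allocate the receive and send completion queues, the RC queue pair and,
 * when fast registration is used, the FR pool for an RDMA channel. Existing
 * channel resources are only released once the new ones have been allocated
 * successfully.
 */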
524 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
525 {
526 	struct srp_target_port *target = ch->target;
527 	struct srp_device *dev = target->srp_host->srp_dev;
528 	const struct ib_device_attr *attr = &dev->dev->attrs;
529 	struct ib_qp_init_attr *init_attr;
530 	struct ib_cq *recv_cq, *send_cq;
531 	struct ib_qp *qp;
532 	struct srp_fr_pool *fr_pool = NULL;
533 	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
534 	int ret;
535 
536 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
537 	if (!init_attr)
538 		return -ENOMEM;
539 
540 	/* queue_size + 1 for ib_drain_rq() */
541 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
542 				ch->comp_vector, IB_POLL_SOFTIRQ);
543 	if (IS_ERR(recv_cq)) {
544 		ret = PTR_ERR(recv_cq);
545 		goto err;
546 	}
547 
548 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
549 				ch->comp_vector, IB_POLL_DIRECT);
550 	if (IS_ERR(send_cq)) {
551 		ret = PTR_ERR(send_cq);
552 		goto err_recv_cq;
553 	}
554 
555 	init_attr->event_handler       = srp_qp_event;
556 	init_attr->cap.max_send_wr     = m * target->queue_size;
557 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
558 	init_attr->cap.max_recv_sge    = 1;
559 	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
560 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
561 	init_attr->qp_type             = IB_QPT_RC;
562 	init_attr->send_cq             = send_cq;
563 	init_attr->recv_cq             = recv_cq;
564 
565 	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
566 
567 	if (target->using_rdma_cm) {
568 		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
569 		qp = ch->rdma_cm.cm_id->qp;
570 	} else {
571 		qp = ib_create_qp(dev->pd, init_attr);
572 		if (!IS_ERR(qp)) {
573 			ret = srp_init_ib_qp(target, qp);
574 			if (ret)
575 				ib_destroy_qp(qp);
576 		} else {
577 			ret = PTR_ERR(qp);
578 		}
579 	}
580 	if (ret) {
581 		pr_err("QP creation failed for dev %s: %d\n",
582 		       dev_name(&dev->dev->dev), ret);
583 		goto err_send_cq;
584 	}
585 
586 	if (dev->use_fast_reg) {
587 		fr_pool = srp_alloc_fr_pool(target);
588 		if (IS_ERR(fr_pool)) {
589 			ret = PTR_ERR(fr_pool);
590 			shost_printk(KERN_WARNING, target->scsi_host, PFX
591 				     "FR pool allocation failed (%d)\n", ret);
592 			goto err_qp;
593 		}
594 	}
595 
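	/*
	 * The new resources have been allocated successfully, so release any
	 * resources left over from a previous connection attempt before
	 * installing the new ones.
	 */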
596 	if (ch->qp)
597 		srp_destroy_qp(ch);
598 	if (ch->recv_cq)
599 		ib_free_cq(ch->recv_cq);
600 	if (ch->send_cq)
601 		ib_free_cq(ch->send_cq);
602 
603 	ch->qp = qp;
604 	ch->recv_cq = recv_cq;
605 	ch->send_cq = send_cq;
606 
607 	if (dev->use_fast_reg) {
608 		if (ch->fr_pool)
609 			srp_destroy_fr_pool(ch->fr_pool);
610 		ch->fr_pool = fr_pool;
611 	}
612 
613 	kfree(init_attr);
614 	return 0;
615 
616 err_qp:
617 	if (target->using_rdma_cm)
618 		rdma_destroy_qp(ch->rdma_cm.cm_id);
619 	else
620 		ib_destroy_qp(qp);
621 
622 err_send_cq:
623 	ib_free_cq(send_cq);
624 
625 err_recv_cq:
626 	ib_free_cq(recv_cq);
627 
628 err:
629 	kfree(init_attr);
630 	return ret;
631 }
632 
633 /*
634  * Note: this function may be called without srp_alloc_iu_bufs() having been
635  * invoked. Hence the ch->[rt]x_ring checks.
636  */
637 static void srp_free_ch_ib(struct srp_target_port *target,
638 			   struct srp_rdma_ch *ch)
639 {
640 	struct srp_device *dev = target->srp_host->srp_dev;
641 	int i;
642 
643 	if (!ch->target)
644 		return;
645 
646 	if (target->using_rdma_cm) {
647 		if (ch->rdma_cm.cm_id) {
648 			rdma_destroy_id(ch->rdma_cm.cm_id);
649 			ch->rdma_cm.cm_id = NULL;
650 		}
651 	} else {
652 		if (ch->ib_cm.cm_id) {
653 			ib_destroy_cm_id(ch->ib_cm.cm_id);
654 			ch->ib_cm.cm_id = NULL;
655 		}
656 	}
657 
658 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
659 	if (!ch->qp)
660 		return;
661 
662 	if (dev->use_fast_reg) {
663 		if (ch->fr_pool)
664 			srp_destroy_fr_pool(ch->fr_pool);
665 	}
666 
667 	srp_destroy_qp(ch);
668 	ib_free_cq(ch->send_cq);
669 	ib_free_cq(ch->recv_cq);
670 
671 	/*
672 	 * Prevent the SCSI error handler from using this channel after it
673 	 * has been freed. The error handler may keep trying to perform
674 	 * recovery actions after scsi_remove_host() has
675 	 * returned.
676 	 */
677 	ch->target = NULL;
678 
679 	ch->qp = NULL;
680 	ch->send_cq = ch->recv_cq = NULL;
681 
682 	if (ch->rx_ring) {
683 		for (i = 0; i < target->queue_size; ++i)
684 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
685 		kfree(ch->rx_ring);
686 		ch->rx_ring = NULL;
687 	}
688 	if (ch->tx_ring) {
689 		for (i = 0; i < target->queue_size; ++i)
690 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
691 		kfree(ch->tx_ring);
692 		ch->tx_ring = NULL;
693 	}
694 }
695 
696 static void srp_path_rec_completion(int status,
697 				    struct sa_path_rec *pathrec,
698 				    unsigned int num_paths, void *ch_ptr)
699 {
700 	struct srp_rdma_ch *ch = ch_ptr;
701 	struct srp_target_port *target = ch->target;
702 
703 	ch->status = status;
704 	if (status)
705 		shost_printk(KERN_ERR, target->scsi_host,
706 			     PFX "Got failed path rec status %d\n", status);
707 	else
708 		ch->ib_cm.path = *pathrec;
709 	complete(&ch->done);
710 }
711 
712 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
713 {
714 	struct srp_target_port *target = ch->target;
715 	int ret;
716 
717 	ch->ib_cm.path.numb_path = 1;
718 
719 	init_completion(&ch->done);
720 
721 	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
722 					       target->srp_host->srp_dev->dev,
723 					       target->srp_host->port,
724 					       &ch->ib_cm.path,
725 					       IB_SA_PATH_REC_SERVICE_ID |
726 					       IB_SA_PATH_REC_DGID	 |
727 					       IB_SA_PATH_REC_SGID	 |
728 					       IB_SA_PATH_REC_NUMB_PATH	 |
729 					       IB_SA_PATH_REC_PKEY,
730 					       SRP_PATH_REC_TIMEOUT_MS,
731 					       GFP_KERNEL,
732 					       srp_path_rec_completion,
733 					       ch, &ch->ib_cm.path_query);
734 	if (ch->ib_cm.path_query_id < 0)
735 		return ch->ib_cm.path_query_id;
736 
737 	ret = wait_for_completion_interruptible(&ch->done);
738 	if (ret < 0)
739 		return ret;
740 
741 	if (ch->status < 0)
742 		shost_printk(KERN_WARNING, target->scsi_host,
743 			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
744 			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
745 			     be16_to_cpu(target->ib_cm.pkey),
746 			     be64_to_cpu(target->ib_cm.service_id));
747 
748 	return ch->status;
749 }
750 
751 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
752 {
753 	struct srp_target_port *target = ch->target;
754 	int ret;
755 
756 	init_completion(&ch->done);
757 
758 	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
759 	if (ret)
760 		return ret;
761 
762 	wait_for_completion_interruptible(&ch->done);
763 
764 	if (ch->status != 0)
765 		shost_printk(KERN_WARNING, target->scsi_host,
766 			     PFX "Path resolution failed\n");
767 
768 	return ch->status;
769 }
770 
771 static int srp_lookup_path(struct srp_rdma_ch *ch)
772 {
773 	struct srp_target_port *target = ch->target;
774 
775 	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
776 		srp_ib_lookup_path(ch);
777 }
778 
779 static u8 srp_get_subnet_timeout(struct srp_host *host)
780 {
781 	struct ib_port_attr attr;
782 	int ret;
783 	u8 subnet_timeout = 18;
784 
785 	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
786 	if (ret == 0)
787 		subnet_timeout = attr.subnet_timeout;
788 
789 	if (unlikely(subnet_timeout < 15))
790 		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
791 			dev_name(&host->srp_dev->dev->dev), subnet_timeout);
792 
793 	return subnet_timeout;
794 }
795 
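/*
 * Build an SRP_LOGIN_REQ information unit and send it to the target, either
 * through the RDMA CM (rdma_connect()) or through the IB CM
 * (ib_send_cm_req()), depending on whether the RDMA CM is used for this
 * target.
 */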
796 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
797 			bool multich)
798 {
799 	struct srp_target_port *target = ch->target;
800 	struct {
801 		struct rdma_conn_param	  rdma_param;
802 		struct srp_login_req_rdma rdma_req;
803 		struct ib_cm_req_param	  ib_param;
804 		struct srp_login_req	  ib_req;
805 	} *req = NULL;
806 	char *ipi, *tpi;
807 	int status;
808 
809 	req = kzalloc(sizeof *req, GFP_KERNEL);
810 	if (!req)
811 		return -ENOMEM;
812 
813 	req->ib_param.flow_control = 1;
814 	req->ib_param.retry_count = target->tl_retry_count;
815 
816 	/*
817 	 * Pick some arbitrary defaults here; we could make these
818 	 * module parameters if anyone cared about setting them.
819 	 */
820 	req->ib_param.responder_resources = 4;
821 	req->ib_param.rnr_retry_count = 7;
822 	req->ib_param.max_cm_retries = 15;
823 
824 	req->ib_req.opcode = SRP_LOGIN_REQ;
825 	req->ib_req.tag = 0;
826 	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
827 	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
828 					      SRP_BUF_FORMAT_INDIRECT);
829 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
830 				 SRP_MULTICHAN_SINGLE);
831 	if (srp_use_imm_data) {
832 		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
833 		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
834 	}
835 
836 	if (target->using_rdma_cm) {
837 		req->rdma_param.flow_control = req->ib_param.flow_control;
838 		req->rdma_param.responder_resources =
839 			req->ib_param.responder_resources;
840 		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
841 		req->rdma_param.retry_count = req->ib_param.retry_count;
842 		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
843 		req->rdma_param.private_data = &req->rdma_req;
844 		req->rdma_param.private_data_len = sizeof(req->rdma_req);
845 
846 		req->rdma_req.opcode = req->ib_req.opcode;
847 		req->rdma_req.tag = req->ib_req.tag;
848 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
849 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
850 		req->rdma_req.req_flags	= req->ib_req.req_flags;
851 		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
852 
853 		ipi = req->rdma_req.initiator_port_id;
854 		tpi = req->rdma_req.target_port_id;
855 	} else {
856 		u8 subnet_timeout;
857 
858 		subnet_timeout = srp_get_subnet_timeout(target->srp_host);
859 
860 		req->ib_param.primary_path = &ch->ib_cm.path;
861 		req->ib_param.alternate_path = NULL;
862 		req->ib_param.service_id = target->ib_cm.service_id;
863 		get_random_bytes(&req->ib_param.starting_psn, 4);
864 		req->ib_param.starting_psn &= 0xffffff;
865 		req->ib_param.qp_num = ch->qp->qp_num;
866 		req->ib_param.qp_type = ch->qp->qp_type;
867 		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
868 		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
869 		req->ib_param.private_data = &req->ib_req;
870 		req->ib_param.private_data_len = sizeof(req->ib_req);
871 
872 		ipi = req->ib_req.initiator_port_id;
873 		tpi = req->ib_req.target_port_id;
874 	}
875 
876 	/*
877 	 * In the published SRP specification (draft rev. 16a), the
878 	 * port identifier format is 8 bytes of ID extension followed
879 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
880 	 * opposite order, so that the GUID comes first.
881 	 *
882 	 * Targets conforming to these obsolete drafts can be
883 	 * recognized by the I/O Class they report.
884 	 */
885 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
886 		memcpy(ipi,     &target->sgid.global.interface_id, 8);
887 		memcpy(ipi + 8, &target->initiator_ext, 8);
888 		memcpy(tpi,     &target->ioc_guid, 8);
889 		memcpy(tpi + 8, &target->id_ext, 8);
890 	} else {
891 		memcpy(ipi,     &target->initiator_ext, 8);
892 		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
893 		memcpy(tpi,     &target->id_ext, 8);
894 		memcpy(tpi + 8, &target->ioc_guid, 8);
895 	}
896 
897 	/*
898 	 * Topspin/Cisco SRP targets will reject our login unless we
899 	 * zero out the first 8 bytes of our initiator port ID and set
900 	 * the second 8 bytes to the local node GUID.
901 	 */
902 	if (srp_target_is_topspin(target)) {
903 		shost_printk(KERN_DEBUG, target->scsi_host,
904 			     PFX "Topspin/Cisco initiator port ID workaround "
905 			     "activated for target GUID %016llx\n",
906 			     be64_to_cpu(target->ioc_guid));
907 		memset(ipi, 0, 8);
908 		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
909 	}
910 
911 	if (target->using_rdma_cm)
912 		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
913 	else
914 		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
915 
916 	kfree(req);
917 
918 	return status;
919 }
920 
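/*
 * Move the target to the SRP_TARGET_REMOVED state and queue the removal
 * work, but only for the first caller. Returns whether the state changed.
 */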
921 static bool srp_queue_remove_work(struct srp_target_port *target)
922 {
923 	bool changed = false;
924 
925 	spin_lock_irq(&target->lock);
926 	if (target->state != SRP_TARGET_REMOVED) {
927 		target->state = SRP_TARGET_REMOVED;
928 		changed = true;
929 	}
930 	spin_unlock_irq(&target->lock);
931 
932 	if (changed)
933 		queue_work(srp_remove_wq, &target->remove_work);
934 
935 	return changed;
936 }
937 
938 static void srp_disconnect_target(struct srp_target_port *target)
939 {
940 	struct srp_rdma_ch *ch;
941 	int i, ret;
942 
943 	/* XXX should send SRP_I_LOGOUT request */
944 
945 	for (i = 0; i < target->ch_count; i++) {
946 		ch = &target->ch[i];
947 		ch->connected = false;
948 		ret = 0;
949 		if (target->using_rdma_cm) {
950 			if (ch->rdma_cm.cm_id)
951 				rdma_disconnect(ch->rdma_cm.cm_id);
952 		} else {
953 			if (ch->ib_cm.cm_id)
954 				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
955 						      NULL, 0);
956 		}
957 		if (ret < 0) {
958 			shost_printk(KERN_DEBUG, target->scsi_host,
959 				     PFX "Sending CM DREQ failed\n");
960 		}
961 	}
962 }
963 
964 static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
965 {
966 	struct srp_target_port *target = host_to_target(shost);
967 	struct srp_device *dev = target->srp_host->srp_dev;
968 	struct ib_device *ibdev = dev->dev;
969 	struct srp_request *req = scsi_cmd_priv(cmd);
970 
971 	kfree(req->fr_list);
972 	if (req->indirect_dma_addr) {
973 		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
974 				    target->indirect_size,
975 				    DMA_TO_DEVICE);
976 	}
977 	kfree(req->indirect_desc);
978 
979 	return 0;
980 }
981 
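/*
 * Allocate the per-command resources used by this driver: the FR descriptor
 * list (only when fast registration is used) and the DMA-mapped indirect
 * descriptor buffer. srp_exit_cmd_priv() releases these resources again.
 */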
982 static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
983 {
984 	struct srp_target_port *target = host_to_target(shost);
985 	struct srp_device *srp_dev = target->srp_host->srp_dev;
986 	struct ib_device *ibdev = srp_dev->dev;
987 	struct srp_request *req = scsi_cmd_priv(cmd);
988 	dma_addr_t dma_addr;
989 	int ret = -ENOMEM;
990 
991 	if (srp_dev->use_fast_reg) {
992 		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
993 					GFP_KERNEL);
994 		if (!req->fr_list)
995 			goto out;
996 	}
997 	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
998 	if (!req->indirect_desc)
999 		goto out;
1000 
1001 	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1002 				     target->indirect_size,
1003 				     DMA_TO_DEVICE);
1004 	if (ib_dma_mapping_error(ibdev, dma_addr)) {
1005 		srp_exit_cmd_priv(shost, cmd);
1006 		goto out;
1007 	}
1008 
1009 	req->indirect_dma_addr = dma_addr;
1010 	ret = 0;
1011 
1012 out:
1013 	return ret;
1014 }
1015 
1016 /**
1017  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1018  * @shost: SCSI host whose attributes to remove from sysfs.
1019  *
1020  * Note: Any attributes defined in the host template that did not exist
1021  * before this function was invoked will be ignored.
1022  */
1023 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1024 {
1025 	const struct attribute_group **g;
1026 	struct attribute **attr;
1027 
1028 	for (g = shost->hostt->shost_groups; *g; ++g) {
1029 		for (attr = (*g)->attrs; *attr; ++attr) {
1030 			struct device_attribute *dev_attr =
1031 				container_of(*attr, typeof(*dev_attr), attr);
1032 
1033 			device_remove_file(&shost->shost_dev, dev_attr);
1034 		}
1035 	}
1036 }
1037 
1038 static void srp_remove_target(struct srp_target_port *target)
1039 {
1040 	struct srp_rdma_ch *ch;
1041 	int i;
1042 
1043 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1044 
1045 	srp_del_scsi_host_attr(target->scsi_host);
1046 	srp_rport_get(target->rport);
1047 	srp_remove_host(target->scsi_host);
1048 	scsi_remove_host(target->scsi_host);
1049 	srp_stop_rport_timers(target->rport);
1050 	srp_disconnect_target(target);
1051 	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1052 	for (i = 0; i < target->ch_count; i++) {
1053 		ch = &target->ch[i];
1054 		srp_free_ch_ib(target, ch);
1055 	}
1056 	cancel_work_sync(&target->tl_err_work);
1057 	srp_rport_put(target->rport);
1058 	kfree(target->ch);
1059 	target->ch = NULL;
1060 
1061 	spin_lock(&target->srp_host->target_lock);
1062 	list_del(&target->list);
1063 	spin_unlock(&target->srp_host->target_lock);
1064 
1065 	scsi_host_put(target->scsi_host);
1066 }
1067 
1068 static void srp_remove_work(struct work_struct *work)
1069 {
1070 	struct srp_target_port *target =
1071 		container_of(work, struct srp_target_port, remove_work);
1072 
1073 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1074 
1075 	srp_remove_target(target);
1076 }
1077 
1078 static void srp_rport_delete(struct srp_rport *rport)
1079 {
1080 	struct srp_target_port *target = rport->lld_data;
1081 
1082 	srp_queue_remove_work(target);
1083 }
1084 
1085 /**
1086  * srp_connected_ch() - number of connected channels
1087  * @target: SRP target port.
1088  */
1089 static int srp_connected_ch(struct srp_target_port *target)
1090 {
1091 	int i, c = 0;
1092 
1093 	for (i = 0; i < target->ch_count; i++)
1094 		c += target->ch[i].connected;
1095 
1096 	return c;
1097 }
1098 
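/*
 * Resolve a path to the target and log in over @ch. Port and LID/QP
 * redirect rejections cause the login to be retried; a stale connection or
 * any other error terminates the connection attempt.
 */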
1099 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1100 			  bool multich)
1101 {
1102 	struct srp_target_port *target = ch->target;
1103 	int ret;
1104 
1105 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1106 
1107 	ret = srp_lookup_path(ch);
1108 	if (ret)
1109 		goto out;
1110 
1111 	while (1) {
1112 		init_completion(&ch->done);
1113 		ret = srp_send_req(ch, max_iu_len, multich);
1114 		if (ret)
1115 			goto out;
1116 		ret = wait_for_completion_interruptible(&ch->done);
1117 		if (ret < 0)
1118 			goto out;
1119 
1120 		/*
1121 		 * The CM event handling code will set status to
1122 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1123 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1124 		 * redirect REJ back.
1125 		 */
1126 		ret = ch->status;
1127 		switch (ret) {
1128 		case 0:
1129 			ch->connected = true;
1130 			goto out;
1131 
1132 		case SRP_PORT_REDIRECT:
1133 			ret = srp_lookup_path(ch);
1134 			if (ret)
1135 				goto out;
1136 			break;
1137 
1138 		case SRP_DLID_REDIRECT:
1139 			break;
1140 
1141 		case SRP_STALE_CONN:
1142 			shost_printk(KERN_ERR, target->scsi_host, PFX
1143 				     "giving up on stale connection\n");
1144 			ret = -ECONNRESET;
1145 			goto out;
1146 
1147 		default:
1148 			goto out;
1149 		}
1150 	}
1151 
1152 out:
1153 	return ret <= 0 ? ret : -ENODEV;
1154 }
1155 
1156 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1157 {
1158 	srp_handle_qp_err(cq, wc, "INV RKEY");
1159 }
1160 
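/* Post an unsignaled local invalidation work request for @rkey on ch->qp. */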
1161 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1162 		u32 rkey)
1163 {
1164 	struct ib_send_wr wr = {
1165 		.opcode		    = IB_WR_LOCAL_INV,
1166 		.next		    = NULL,
1167 		.num_sge	    = 0,
1168 		.send_flags	    = 0,
1169 		.ex.invalidate_rkey = rkey,
1170 	};
1171 
1172 	wr.wr_cqe = &req->reg_cqe;
1173 	req->reg_cqe.done = srp_inv_rkey_err_done;
1174 	return ib_post_send(ch->qp, &wr, NULL);
1175 }
1176 
1177 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1178 			   struct srp_rdma_ch *ch,
1179 			   struct srp_request *req)
1180 {
1181 	struct srp_target_port *target = ch->target;
1182 	struct srp_device *dev = target->srp_host->srp_dev;
1183 	struct ib_device *ibdev = dev->dev;
1184 	int i, res;
1185 
1186 	if (!scsi_sglist(scmnd) ||
1187 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1188 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1189 		return;
1190 
1191 	if (dev->use_fast_reg) {
1192 		struct srp_fr_desc **pfr;
1193 
1194 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1195 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1196 			if (res < 0) {
1197 				shost_printk(KERN_ERR, target->scsi_host, PFX
1198 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1199 				  (*pfr)->mr->rkey, res);
1200 				queue_work(system_long_wq,
1201 					   &target->tl_err_work);
1202 			}
1203 		}
1204 		if (req->nmdesc)
1205 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1206 					req->nmdesc);
1207 	}
1208 
1209 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1210 			scmnd->sc_data_direction);
1211 }
1212 
1213 /**
1214  * srp_claim_req - Take ownership of the scmnd associated with a request.
1215  * @ch: SRP RDMA channel.
1216  * @req: SRP request.
1217  * @sdev: If not NULL, only take ownership for this SCSI device.
1218  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1219  *         ownership of @req->scmnd if it equals @scmnd.
1220  *
1221  * Return value:
1222  * Either NULL or a pointer to the SCSI command the caller became owner of.
1223  */
1224 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1225 				       struct srp_request *req,
1226 				       struct scsi_device *sdev,
1227 				       struct scsi_cmnd *scmnd)
1228 {
1229 	unsigned long flags;
1230 
1231 	spin_lock_irqsave(&ch->lock, flags);
1232 	if (req->scmnd &&
1233 	    (!sdev || req->scmnd->device == sdev) &&
1234 	    (!scmnd || req->scmnd == scmnd)) {
1235 		scmnd = req->scmnd;
1236 		req->scmnd = NULL;
1237 	} else {
1238 		scmnd = NULL;
1239 	}
1240 	spin_unlock_irqrestore(&ch->lock, flags);
1241 
1242 	return scmnd;
1243 }
1244 
1245 /**
1246  * srp_free_req() - Unmap data and adjust ch->req_lim.
1247  * @ch:     SRP RDMA channel.
1248  * @req:    Request to be freed.
1249  * @scmnd:  SCSI command associated with @req.
1250  * @req_lim_delta: Amount to be added to @target->req_lim.
1251  */
1252 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1253 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1254 {
1255 	unsigned long flags;
1256 
1257 	srp_unmap_data(scmnd, ch, req);
1258 
1259 	spin_lock_irqsave(&ch->lock, flags);
1260 	ch->req_lim += req_lim_delta;
1261 	spin_unlock_irqrestore(&ch->lock, flags);
1262 }
1263 
1264 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1265 			   struct scsi_device *sdev, int result)
1266 {
1267 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1268 
1269 	if (scmnd) {
1270 		srp_free_req(ch, req, scmnd, 0);
1271 		scmnd->result = result;
1272 		scsi_done(scmnd);
1273 	}
1274 }
1275 
1276 struct srp_terminate_context {
1277 	struct srp_target_port *srp_target;
1278 	int scsi_result;
1279 };
1280 
1281 static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
1282 {
1283 	struct srp_terminate_context *context = context_ptr;
1284 	struct srp_target_port *target = context->srp_target;
1285 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
1286 	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
1287 	struct srp_request *req = scsi_cmd_priv(scmnd);
1288 
1289 	srp_finish_req(ch, req, NULL, context->scsi_result);
1290 
1291 	return true;
1292 }
1293 
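/*
 * Finish all outstanding SCSI commands for the target associated with
 * @rport with the DID_TRANSPORT_FAILFAST result.
 */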
1294 static void srp_terminate_io(struct srp_rport *rport)
1295 {
1296 	struct srp_target_port *target = rport->lld_data;
1297 	struct srp_terminate_context context = { .srp_target = target,
1298 		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };
1299 
1300 	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
1301 }
1302 
1303 /* Calculate maximum initiator to target information unit length. */
1304 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1305 				  uint32_t max_it_iu_size)
1306 {
1307 	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1308 		sizeof(struct srp_indirect_buf) +
1309 		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1310 
1311 	if (use_imm_data)
1312 		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1313 				 srp_max_imm_data);
1314 
1315 	if (max_it_iu_size)
1316 		max_iu_len = min(max_iu_len, max_it_iu_size);
1317 
1318 	pr_debug("max_iu_len = %d\n", max_iu_len);
1319 
1320 	return max_iu_len;
1321 }
1322 
1323 /*
1324  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1325  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1326  * srp_reset_device() or srp_reset_host() calls will occur while this function
1327  * is in progress. One way to achieve that is not to call this function
1328  * directly but to call srp_reconnect_rport() instead, since that function
1329  * serializes calls to this function via rport->mutex and also blocks
1330  * srp_queuecommand() calls before invoking this function.
1331  */
1332 static int srp_rport_reconnect(struct srp_rport *rport)
1333 {
1334 	struct srp_target_port *target = rport->lld_data;
1335 	struct srp_rdma_ch *ch;
1336 	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1337 						srp_use_imm_data,
1338 						target->max_it_iu_size);
1339 	int i, j, ret = 0;
1340 	bool multich = false;
1341 
1342 	srp_disconnect_target(target);
1343 
1344 	if (target->state == SRP_TARGET_SCANNING)
1345 		return -ENODEV;
1346 
1347 	/*
1348 	 * Now get a new local CM ID so that we avoid confusing the target in
1349 	 * case things are really fouled up. Doing so also ensures that all CM
1350 	 * callbacks will have finished before a new QP is allocated.
1351 	 */
1352 	for (i = 0; i < target->ch_count; i++) {
1353 		ch = &target->ch[i];
1354 		ret += srp_new_cm_id(ch);
1355 	}
1356 	{
1357 		struct srp_terminate_context context = {
1358 			.srp_target = target, .scsi_result = DID_RESET << 16};
1359 
1360 		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
1361 				    &context);
1362 	}
1363 	for (i = 0; i < target->ch_count; i++) {
1364 		ch = &target->ch[i];
1365 		/*
1366 		 * Whether or not creating a new CM ID succeeded, create a new
1367 		 * QP. This guarantees that all completion callback function
1368 		 * invocations have finished before request resetting starts.
1369 		 */
1370 		ret += srp_create_ch_ib(ch);
1371 
1372 		INIT_LIST_HEAD(&ch->free_tx);
1373 		for (j = 0; j < target->queue_size; ++j)
1374 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1375 	}
1376 
1377 	target->qp_in_error = false;
1378 
1379 	for (i = 0; i < target->ch_count; i++) {
1380 		ch = &target->ch[i];
1381 		if (ret)
1382 			break;
1383 		ret = srp_connect_ch(ch, max_iu_len, multich);
1384 		multich = true;
1385 	}
1386 
1387 	if (ret == 0)
1388 		shost_printk(KERN_INFO, target->scsi_host,
1389 			     PFX "reconnect succeeded\n");
1390 
1391 	return ret;
1392 }
1393 
1394 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1395 			 unsigned int dma_len, u32 rkey)
1396 {
1397 	struct srp_direct_buf *desc = state->desc;
1398 
1399 	WARN_ON_ONCE(!dma_len);
1400 
1401 	desc->va = cpu_to_be64(dma_addr);
1402 	desc->key = cpu_to_be32(rkey);
1403 	desc->len = cpu_to_be32(dma_len);
1404 
1405 	state->total_len += dma_len;
1406 	state->desc++;
1407 	state->ndesc++;
1408 }
1409 
1410 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1411 {
1412 	srp_handle_qp_err(cq, wc, "FAST REG");
1413 }
1414 
1415 /*
1416  * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1417  * where to start in the first element. If sg_offset_p != NULL then
1418  * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1419  * byte that has not yet been mapped.
1420  */
1421 static int srp_map_finish_fr(struct srp_map_state *state,
1422 			     struct srp_request *req,
1423 			     struct srp_rdma_ch *ch, int sg_nents,
1424 			     unsigned int *sg_offset_p)
1425 {
1426 	struct srp_target_port *target = ch->target;
1427 	struct srp_device *dev = target->srp_host->srp_dev;
1428 	struct ib_reg_wr wr;
1429 	struct srp_fr_desc *desc;
1430 	u32 rkey;
1431 	int n, err;
1432 
1433 	if (state->fr.next >= state->fr.end) {
1434 		shost_printk(KERN_ERR, ch->target->scsi_host,
1435 			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1436 			     ch->target->mr_per_cmd);
1437 		return -ENOMEM;
1438 	}
1439 
1440 	WARN_ON_ONCE(!dev->use_fast_reg);
1441 
1442 	if (sg_nents == 1 && target->global_rkey) {
1443 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1444 
1445 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1446 			     sg_dma_len(state->sg) - sg_offset,
1447 			     target->global_rkey);
1448 		if (sg_offset_p)
1449 			*sg_offset_p = 0;
1450 		return 1;
1451 	}
1452 
1453 	desc = srp_fr_pool_get(ch->fr_pool);
1454 	if (!desc)
1455 		return -ENOMEM;
1456 
1457 	rkey = ib_inc_rkey(desc->mr->rkey);
1458 	ib_update_fast_reg_key(desc->mr, rkey);
1459 
1460 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1461 			 dev->mr_page_size);
1462 	if (unlikely(n < 0)) {
1463 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1464 		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1465 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1466 			 sg_offset_p ? *sg_offset_p : -1, n);
1467 		return n;
1468 	}
1469 
1470 	WARN_ON_ONCE(desc->mr->length == 0);
1471 
1472 	req->reg_cqe.done = srp_reg_mr_err_done;
1473 
1474 	wr.wr.next = NULL;
1475 	wr.wr.opcode = IB_WR_REG_MR;
1476 	wr.wr.wr_cqe = &req->reg_cqe;
1477 	wr.wr.num_sge = 0;
1478 	wr.wr.send_flags = 0;
1479 	wr.mr = desc->mr;
1480 	wr.key = desc->mr->rkey;
1481 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1482 		     IB_ACCESS_REMOTE_READ |
1483 		     IB_ACCESS_REMOTE_WRITE);
1484 
1485 	*state->fr.next++ = desc;
1486 	state->nmdesc++;
1487 
1488 	srp_map_desc(state, desc->mr->iova,
1489 		     desc->mr->length, desc->mr->rkey);
1490 
1491 	err = ib_post_send(ch->qp, &wr.wr, NULL);
1492 	if (unlikely(err)) {
1493 		WARN_ON_ONCE(err == -ENOMEM);
1494 		return err;
1495 	}
1496 
1497 	return n;
1498 }
1499 
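/*
 * Map an S/G list using fast registration. Every srp_map_finish_fr() call
 * registers as many S/G elements as fit into a single memory region.
 */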
1500 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1501 			 struct srp_request *req, struct scatterlist *scat,
1502 			 int count)
1503 {
1504 	unsigned int sg_offset = 0;
1505 
1506 	state->fr.next = req->fr_list;
1507 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1508 	state->sg = scat;
1509 
1510 	if (count == 0)
1511 		return 0;
1512 
1513 	while (count) {
1514 		int i, n;
1515 
1516 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1517 		if (unlikely(n < 0))
1518 			return n;
1519 
1520 		count -= n;
1521 		for (i = 0; i < n; i++)
1522 			state->sg = sg_next(state->sg);
1523 	}
1524 
1525 	return 0;
1526 }
1527 
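/*
 * Describe each S/G element with a direct descriptor that uses the global
 * rkey; no memory registration is performed.
 */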
1528 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1529 			  struct srp_request *req, struct scatterlist *scat,
1530 			  int count)
1531 {
1532 	struct srp_target_port *target = ch->target;
1533 	struct scatterlist *sg;
1534 	int i;
1535 
1536 	for_each_sg(scat, sg, count, i) {
1537 		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1538 			     target->global_rkey);
1539 	}
1540 
1541 	return 0;
1542 }
1543 
1544 /*
1545  * Register the indirect data buffer descriptor with the HCA.
1546  *
1547  * Note: since the indirect data buffer descriptor has been allocated with
1548  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1549  * memory buffer.
1550  */
1551 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1552 		       void **next_mr, void **end_mr, u32 idb_len,
1553 		       __be32 *idb_rkey)
1554 {
1555 	struct srp_target_port *target = ch->target;
1556 	struct srp_device *dev = target->srp_host->srp_dev;
1557 	struct srp_map_state state;
1558 	struct srp_direct_buf idb_desc;
1559 	struct scatterlist idb_sg[1];
1560 	int ret;
1561 
1562 	memset(&state, 0, sizeof(state));
1563 	memset(&idb_desc, 0, sizeof(idb_desc));
1564 	state.gen.next = next_mr;
1565 	state.gen.end = end_mr;
1566 	state.desc = &idb_desc;
1567 	state.base_dma_addr = req->indirect_dma_addr;
1568 	state.dma_len = idb_len;
1569 
1570 	if (dev->use_fast_reg) {
1571 		state.sg = idb_sg;
1572 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1573 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1574 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1575 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1576 #endif
1577 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1578 		if (ret < 0)
1579 			return ret;
1580 		WARN_ON_ONCE(ret < 1);
1581 	} else {
1582 		return -EINVAL;
1583 	}
1584 
1585 	*idb_rkey = idb_desc.key;
1586 
1587 	return 0;
1588 }
1589 
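/*
 * Consistency check used by srp_map_data(): the sum of the descriptor
 * lengths must equal the SCSI buffer length and the total registered MR
 * length must not exceed it.
 */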
1590 static void srp_check_mapping(struct srp_map_state *state,
1591 			      struct srp_rdma_ch *ch, struct srp_request *req,
1592 			      struct scatterlist *scat, int count)
1593 {
1594 	struct srp_device *dev = ch->target->srp_host->srp_dev;
1595 	struct srp_fr_desc **pfr;
1596 	u64 desc_len = 0, mr_len = 0;
1597 	int i;
1598 
1599 	for (i = 0; i < state->ndesc; i++)
1600 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1601 	if (dev->use_fast_reg)
1602 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1603 			mr_len += (*pfr)->mr->length;
1604 	if (desc_len != scsi_bufflen(req->scmnd) ||
1605 	    mr_len > scsi_bufflen(req->scmnd))
1606 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1607 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1608 		       state->ndesc, state->nmdesc);
1609 }
1610 
1611 /**
1612  * srp_map_data() - map SCSI data buffer onto an SRP request
1613  * @scmnd: SCSI command to map
1614  * @ch: SRP RDMA channel
1615  * @req: SRP request
1616  *
1617  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1618  * mapping failed. The size of any immediate data is not included in the
1619  * return value.
1620  */
1621 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1622 			struct srp_request *req)
1623 {
1624 	struct srp_target_port *target = ch->target;
1625 	struct scatterlist *scat, *sg;
1626 	struct srp_cmd *cmd = req->cmd->buf;
1627 	int i, len, nents, count, ret;
1628 	struct srp_device *dev;
1629 	struct ib_device *ibdev;
1630 	struct srp_map_state state;
1631 	struct srp_indirect_buf *indirect_hdr;
1632 	u64 data_len;
1633 	u32 idb_len, table_len;
1634 	__be32 idb_rkey;
1635 	u8 fmt;
1636 
1637 	req->cmd->num_sge = 1;
1638 
1639 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1640 		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1641 
1642 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1643 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1644 		shost_printk(KERN_WARNING, target->scsi_host,
1645 			     PFX "Unhandled data direction %d\n",
1646 			     scmnd->sc_data_direction);
1647 		return -EINVAL;
1648 	}
1649 
1650 	nents = scsi_sg_count(scmnd);
1651 	scat  = scsi_sglist(scmnd);
1652 	data_len = scsi_bufflen(scmnd);
1653 
1654 	dev = target->srp_host->srp_dev;
1655 	ibdev = dev->dev;
1656 
1657 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1658 	if (unlikely(count == 0))
1659 		return -EIO;
1660 
1661 	if (ch->use_imm_data &&
1662 	    count <= ch->max_imm_sge &&
1663 	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1664 	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1665 		struct srp_imm_buf *buf;
1666 		struct ib_sge *sge = &req->cmd->sge[1];
1667 
1668 		fmt = SRP_DATA_DESC_IMM;
1669 		len = SRP_IMM_DATA_OFFSET;
1670 		req->nmdesc = 0;
1671 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1672 		buf->len = cpu_to_be32(data_len);
1673 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1674 		for_each_sg(scat, sg, count, i) {
1675 			sge[i].addr   = sg_dma_address(sg);
1676 			sge[i].length = sg_dma_len(sg);
1677 			sge[i].lkey   = target->lkey;
1678 		}
1679 		req->cmd->num_sge += count;
1680 		goto map_complete;
1681 	}
1682 
1683 	fmt = SRP_DATA_DESC_DIRECT;
1684 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1685 		sizeof(struct srp_direct_buf);
1686 
1687 	if (count == 1 && target->global_rkey) {
1688 		/*
1689 		 * The midlayer only generated a single gather/scatter
1690 		 * entry, or DMA mapping coalesced everything to a
1691 		 * single entry.  So a direct descriptor along with
1692 		 * the DMA MR suffices.
1693 		 */
1694 		struct srp_direct_buf *buf;
1695 
1696 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1697 		buf->va  = cpu_to_be64(sg_dma_address(scat));
1698 		buf->key = cpu_to_be32(target->global_rkey);
1699 		buf->len = cpu_to_be32(sg_dma_len(scat));
1700 
1701 		req->nmdesc = 0;
1702 		goto map_complete;
1703 	}
1704 
1705 	/*
1706 	 * We have more than one scatter/gather entry, so build our indirect
1707 	 * descriptor table, trying to merge as many entries as we can.
1708 	 */
1709 	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1710 
1711 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1712 				   target->indirect_size, DMA_TO_DEVICE);
1713 
1714 	memset(&state, 0, sizeof(state));
1715 	state.desc = req->indirect_desc;
1716 	if (dev->use_fast_reg)
1717 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1718 	else
1719 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1720 	req->nmdesc = state.nmdesc;
1721 	if (ret < 0)
1722 		goto unmap;
1723 
1724 	{
1725 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1726 			"Memory mapping consistency check");
1727 		if (DYNAMIC_DEBUG_BRANCH(ddm))
1728 			srp_check_mapping(&state, ch, req, scat, count);
1729 	}
1730 
1731 	/* We've mapped the request; now pull as much of the indirect
1732 	 * descriptor table as we can into the command buffer. If this
1733 	 * target is not using an external indirect table, we are
1734 	 * guaranteed to fit into the command, as the SCSI layer won't
1735 	 * give us more S/G entries than we allow.
1736 	 */
1737 	if (state.ndesc == 1) {
1738 		/*
1739 		 * Memory registration collapsed the sg-list into one entry,
1740 		 * so use a direct descriptor.
1741 		 */
1742 		struct srp_direct_buf *buf;
1743 
1744 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1745 		*buf = req->indirect_desc[0];
1746 		goto map_complete;
1747 	}
1748 
1749 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1750 						!target->allow_ext_sg)) {
1751 		shost_printk(KERN_ERR, target->scsi_host,
1752 			     "Could not fit S/G list into SRP_CMD\n");
1753 		ret = -EIO;
1754 		goto unmap;
1755 	}
1756 
1757 	count = min(state.ndesc, target->cmd_sg_cnt);
1758 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1759 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1760 
1761 	fmt = SRP_DATA_DESC_INDIRECT;
1762 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1763 		sizeof(struct srp_indirect_buf);
1764 	len += count * sizeof (struct srp_direct_buf);
1765 
1766 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1767 	       count * sizeof (struct srp_direct_buf));
1768 
1769 	if (!target->global_rkey) {
1770 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1771 				  idb_len, &idb_rkey);
1772 		if (ret < 0)
1773 			goto unmap;
1774 		req->nmdesc++;
1775 	} else {
1776 		idb_rkey = cpu_to_be32(target->global_rkey);
1777 	}
1778 
1779 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1780 	indirect_hdr->table_desc.key = idb_rkey;
1781 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1782 	indirect_hdr->len = cpu_to_be32(state.total_len);
1783 
1784 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1785 		cmd->data_out_desc_cnt = count;
1786 	else
1787 		cmd->data_in_desc_cnt = count;
1788 
1789 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1790 				      DMA_TO_DEVICE);
1791 
1792 map_complete:
1793 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1794 		cmd->buf_fmt = fmt << 4;
1795 	else
1796 		cmd->buf_fmt = fmt;
1797 
1798 	return len;
1799 
1800 unmap:
1801 	srp_unmap_data(scmnd, ch, req);
1802 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1803 		ret = -E2BIG;
1804 	return ret;
1805 }
1806 
1807 /*
1808  * Return an IU, and possibly a credit, to the free pool
1809  */
1810 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1811 			  enum srp_iu_type iu_type)
1812 {
1813 	unsigned long flags;
1814 
1815 	spin_lock_irqsave(&ch->lock, flags);
1816 	list_add(&iu->list, &ch->free_tx);
1817 	if (iu_type != SRP_IU_RSP)
1818 		++ch->req_lim;
1819 	spin_unlock_irqrestore(&ch->lock, flags);
1820 }
1821 
1822 /*
1823  * Must be called with ch->lock held to protect req_lim and free_tx.
1824  * If IU is not sent, it must be returned using srp_put_tx_iu().
1825  *
1826  * Note:
1827  * An upper limit for the number of allocated information units for each
1828  * request type is:
1829  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1830  *   more than Scsi_Host.can_queue requests.
1831  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1832  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1833  *   one unanswered SRP request to an initiator.
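 *
 * A minimal usage sketch, modelled on srp_queuecommand() below (error
 * handling omitted):
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	if (iu && srp_post_send(ch, iu, len))
 *		srp_put_tx_iu(ch, iu, SRP_IU_CMD);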
1834  */
1835 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1836 				      enum srp_iu_type iu_type)
1837 {
1838 	struct srp_target_port *target = ch->target;
1839 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1840 	struct srp_iu *iu;
1841 
1842 	lockdep_assert_held(&ch->lock);
1843 
1844 	ib_process_cq_direct(ch->send_cq, -1);
1845 
1846 	if (list_empty(&ch->free_tx))
1847 		return NULL;
1848 
1849 	/* Initiator responses to target requests do not consume credits */
1850 	if (iu_type != SRP_IU_RSP) {
1851 		if (ch->req_lim <= rsv) {
1852 			++target->zero_req_lim;
1853 			return NULL;
1854 		}
1855 
1856 		--ch->req_lim;
1857 	}
1858 
1859 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1860 	list_del(&iu->list);
1861 	return iu;
1862 }
1863 
1864 /*
1865  * Note: if this function is called from inside ib_drain_sq() then it will
1866  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1867  * with status IB_WC_SUCCESS then that's a bug.
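 * (During a drain the dequeued WQEs are expected to carry a flush error
 * status such as IB_WC_WR_FLUSH_ERR, so the list_add() below is never
 * reached without ch->lock being held.)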
1868  */
1869 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1870 {
1871 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1872 	struct srp_rdma_ch *ch = cq->cq_context;
1873 
1874 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1875 		srp_handle_qp_err(cq, wc, "SEND");
1876 		return;
1877 	}
1878 
1879 	lockdep_assert_held(&ch->lock);
1880 
1881 	list_add(&iu->list, &ch->free_tx);
1882 }
1883 
1884 /**
1885  * srp_post_send() - send an SRP information unit
1886  * @ch: RDMA channel over which to send the information unit.
1887  * @iu: Information unit to send.
1888  * @len: Length of the information unit excluding immediate data.
1889  */
1890 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1891 {
1892 	struct srp_target_port *target = ch->target;
1893 	struct ib_send_wr wr;
1894 
1895 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1896 		return -EINVAL;
1897 
1898 	iu->sge[0].addr   = iu->dma;
1899 	iu->sge[0].length = len;
1900 	iu->sge[0].lkey   = target->lkey;
1901 
1902 	iu->cqe.done = srp_send_done;
1903 
1904 	wr.next       = NULL;
1905 	wr.wr_cqe     = &iu->cqe;
1906 	wr.sg_list    = &iu->sge[0];
1907 	wr.num_sge    = iu->num_sge;
1908 	wr.opcode     = IB_WR_SEND;
1909 	wr.send_flags = IB_SEND_SIGNALED;
1910 
1911 	return ib_post_send(ch->qp, &wr, NULL);
1912 }
1913 
1914 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1915 {
1916 	struct srp_target_port *target = ch->target;
1917 	struct ib_recv_wr wr;
1918 	struct ib_sge list;
1919 
1920 	list.addr   = iu->dma;
1921 	list.length = iu->size;
1922 	list.lkey   = target->lkey;
1923 
1924 	iu->cqe.done = srp_recv_done;
1925 
1926 	wr.next     = NULL;
1927 	wr.wr_cqe   = &iu->cqe;
1928 	wr.sg_list  = &list;
1929 	wr.num_sge  = 1;
1930 
1931 	return ib_post_recv(ch->qp, &wr, NULL);
1932 }
1933 
1934 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1935 {
1936 	struct srp_target_port *target = ch->target;
1937 	struct srp_request *req;
1938 	struct scsi_cmnd *scmnd;
1939 	unsigned long flags;
1940 
1941 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1942 		spin_lock_irqsave(&ch->lock, flags);
1943 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1944 		if (rsp->tag == ch->tsk_mgmt_tag) {
1945 			ch->tsk_mgmt_status = -1;
1946 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1947 				ch->tsk_mgmt_status = rsp->data[3];
1948 			complete(&ch->tsk_mgmt_done);
1949 		} else {
1950 			shost_printk(KERN_ERR, target->scsi_host,
1951 				     "Received tsk mgmt response too late for tag %#llx\n",
1952 				     rsp->tag);
1953 		}
1954 		spin_unlock_irqrestore(&ch->lock, flags);
1955 	} else {
1956 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1957 		if (scmnd) {
1958 			req = scsi_cmd_priv(scmnd);
1959 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1960 		}
1961 		if (!scmnd) {
1962 			shost_printk(KERN_ERR, target->scsi_host,
1963 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1964 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1965 
1966 			spin_lock_irqsave(&ch->lock, flags);
1967 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1968 			spin_unlock_irqrestore(&ch->lock, flags);
1969 
1970 			return;
1971 		}
1972 		scmnd->result = rsp->status;
1973 
1974 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1975 			memcpy(scmnd->sense_buffer, rsp->data +
1976 			       be32_to_cpu(rsp->resp_data_len),
1977 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1978 				     SCSI_SENSE_BUFFERSIZE));
1979 		}
1980 
1981 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1982 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1983 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1984 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1985 
1986 		srp_free_req(ch, req, scmnd,
1987 			     be32_to_cpu(rsp->req_lim_delta));
1988 
1989 		scsi_done(scmnd);
1990 	}
1991 }
1992 
1993 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1994 			       void *rsp, int len)
1995 {
1996 	struct srp_target_port *target = ch->target;
1997 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1998 	unsigned long flags;
1999 	struct srp_iu *iu;
2000 	int err;
2001 
2002 	spin_lock_irqsave(&ch->lock, flags);
2003 	ch->req_lim += req_delta;
2004 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2005 	spin_unlock_irqrestore(&ch->lock, flags);
2006 
2007 	if (!iu) {
2008 		shost_printk(KERN_ERR, target->scsi_host, PFX
2009 			     "no IU available to send response\n");
2010 		return 1;
2011 	}
2012 
2013 	iu->num_sge = 1;
2014 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2015 	memcpy(iu->buf, rsp, len);
2016 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2017 
2018 	err = srp_post_send(ch, iu, len);
2019 	if (err) {
2020 		shost_printk(KERN_ERR, target->scsi_host, PFX
2021 			     "unable to post response: %d\n", err);
2022 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2023 	}
2024 
2025 	return err;
2026 }
2027 
2028 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2029 				 struct srp_cred_req *req)
2030 {
2031 	struct srp_cred_rsp rsp = {
2032 		.opcode = SRP_CRED_RSP,
2033 		.tag = req->tag,
2034 	};
2035 	s32 delta = be32_to_cpu(req->req_lim_delta);
2036 
2037 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2038 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2039 			     "problems processing SRP_CRED_REQ\n");
2040 }
2041 
2042 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2043 				struct srp_aer_req *req)
2044 {
2045 	struct srp_target_port *target = ch->target;
2046 	struct srp_aer_rsp rsp = {
2047 		.opcode = SRP_AER_RSP,
2048 		.tag = req->tag,
2049 	};
2050 	s32 delta = be32_to_cpu(req->req_lim_delta);
2051 
2052 	shost_printk(KERN_ERR, target->scsi_host, PFX
2053 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2054 
2055 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2056 		shost_printk(KERN_ERR, target->scsi_host, PFX
2057 			     "problems processing SRP_AER_REQ\n");
2058 }
2059 
2060 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2061 {
2062 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2063 	struct srp_rdma_ch *ch = cq->cq_context;
2064 	struct srp_target_port *target = ch->target;
2065 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2066 	int res;
2067 	u8 opcode;
2068 
2069 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2070 		srp_handle_qp_err(cq, wc, "RECV");
2071 		return;
2072 	}
2073 
2074 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2075 				   DMA_FROM_DEVICE);
2076 
2077 	opcode = *(u8 *) iu->buf;
2078 
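	/* Change 0 into 1 below to dump every received IU while debugging. */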
2079 	if (0) {
2080 		shost_printk(KERN_ERR, target->scsi_host,
2081 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2082 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2083 			       iu->buf, wc->byte_len, true);
2084 	}
2085 
2086 	switch (opcode) {
2087 	case SRP_RSP:
2088 		srp_process_rsp(ch, iu->buf);
2089 		break;
2090 
2091 	case SRP_CRED_REQ:
2092 		srp_process_cred_req(ch, iu->buf);
2093 		break;
2094 
2095 	case SRP_AER_REQ:
2096 		srp_process_aer_req(ch, iu->buf);
2097 		break;
2098 
2099 	case SRP_T_LOGOUT:
2100 		/* XXX Handle target logout */
2101 		shost_printk(KERN_WARNING, target->scsi_host,
2102 			     PFX "Got target logout request\n");
2103 		break;
2104 
2105 	default:
2106 		shost_printk(KERN_WARNING, target->scsi_host,
2107 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2108 		break;
2109 	}
2110 
2111 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2112 				      DMA_FROM_DEVICE);
2113 
2114 	res = srp_post_recv(ch, iu);
2115 	if (res != 0)
2116 		shost_printk(KERN_ERR, target->scsi_host,
2117 			     PFX "Recv failed with error code %d\n", res);
2118 }
2119 
2120 /**
2121  * srp_tl_err_work() - handle a transport layer error
2122  * @work: Work structure embedded in an SRP target port.
2123  *
2124  * Note: This function may get invoked before the rport has been created,
2125  * hence the target->rport test.
2126  */
2127 static void srp_tl_err_work(struct work_struct *work)
2128 {
2129 	struct srp_target_port *target;
2130 
2131 	target = container_of(work, struct srp_target_port, tl_err_work);
2132 	if (target->rport)
2133 		srp_start_tl_fail_timers(target->rport);
2134 }
2135 
2136 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2137 		const char *opname)
2138 {
2139 	struct srp_rdma_ch *ch = cq->cq_context;
2140 	struct srp_target_port *target = ch->target;
2141 
2142 	if (ch->connected && !target->qp_in_error) {
2143 		shost_printk(KERN_ERR, target->scsi_host,
2144 			     PFX "failed %s status %s (%d) for CQE %p\n",
2145 			     opname, ib_wc_status_msg(wc->status), wc->status,
2146 			     wc->wr_cqe);
2147 		queue_work(system_long_wq, &target->tl_err_work);
2148 	}
2149 	target->qp_in_error = true;
2150 }
2151 
2152 static enum scsi_qc_status srp_queuecommand(struct Scsi_Host *shost,
2153 					    struct scsi_cmnd *scmnd)
2154 {
2155 	struct request *rq = scsi_cmd_to_rq(scmnd);
2156 	struct srp_target_port *target = host_to_target(shost);
2157 	struct srp_rdma_ch *ch;
2158 	struct srp_request *req = scsi_cmd_priv(scmnd);
2159 	struct srp_iu *iu;
2160 	struct srp_cmd *cmd;
2161 	struct ib_device *dev;
2162 	unsigned long flags;
2163 	u32 tag;
2164 	int len, ret;
2165 
2166 	scmnd->result = srp_chkready(target->rport);
2167 	if (unlikely(scmnd->result))
2168 		goto err;
2169 
2170 	WARN_ON_ONCE(rq->tag < 0);
2171 	tag = blk_mq_unique_tag(rq);
2172 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2173 
2174 	spin_lock_irqsave(&ch->lock, flags);
2175 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2176 	spin_unlock_irqrestore(&ch->lock, flags);
2177 
2178 	if (!iu)
2179 		goto err;
2180 
2181 	dev = target->srp_host->srp_dev->dev;
2182 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2183 				   DMA_TO_DEVICE);
2184 
2185 	cmd = iu->buf;
2186 	memset(cmd, 0, sizeof *cmd);
2187 
2188 	cmd->opcode = SRP_CMD;
2189 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2190 	cmd->tag    = tag;
2191 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2192 	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2193 		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2194 					    4);
2195 		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2196 			goto err_iu;
2197 	}
2198 
2199 	req->scmnd    = scmnd;
2200 	req->cmd      = iu;
2201 
2202 	len = srp_map_data(scmnd, ch, req);
2203 	if (len < 0) {
2204 		shost_printk(KERN_ERR, target->scsi_host,
2205 			     PFX "Failed to map data (%d)\n", len);
2206 		/*
2207 		 * If we ran out of memory descriptors (-ENOMEM) because an
2208 		 * application is queuing many requests with more than
2209 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2210 		 * to reduce queue depth temporarily.
2211 		 */
2212 		scmnd->result = len == -ENOMEM ?
2213 			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2214 		goto err_iu;
2215 	}
2216 
2217 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2218 				      DMA_TO_DEVICE);
2219 
2220 	if (srp_post_send(ch, iu, len)) {
2221 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2222 		scmnd->result = DID_ERROR << 16;
2223 		goto err_unmap;
2224 	}
2225 
2226 	return 0;
2227 
2228 err_unmap:
2229 	srp_unmap_data(scmnd, ch, req);
2230 
2231 err_iu:
2232 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2233 
2234 	/*
2235 	 * Prevent the loops that iterate over the request ring from
2236 	 * encountering a dangling SCSI command pointer.
2237 	 */
2238 	req->scmnd = NULL;
2239 
2240 err:
2241 	if (scmnd->result) {
2242 		scsi_done(scmnd);
2243 		ret = 0;
2244 	} else {
2245 		ret = SCSI_MLQUEUE_HOST_BUSY;
2246 	}
2247 
2248 	return ret;
2249 }
2250 
2251 /*
2252  * Note: the resources allocated in this function are freed in
2253  * srp_free_ch_ib().
2254  */
2255 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2256 {
2257 	struct srp_target_port *target = ch->target;
2258 	int i;
2259 
2260 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2261 			      GFP_KERNEL);
2262 	if (!ch->rx_ring)
2263 		goto err_no_ring;
2264 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2265 			      GFP_KERNEL);
2266 	if (!ch->tx_ring)
2267 		goto err_no_ring;
2268 
2269 	for (i = 0; i < target->queue_size; ++i) {
2270 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2271 					      ch->max_ti_iu_len,
2272 					      GFP_KERNEL, DMA_FROM_DEVICE);
2273 		if (!ch->rx_ring[i])
2274 			goto err;
2275 	}
2276 
2277 	for (i = 0; i < target->queue_size; ++i) {
2278 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2279 					      ch->max_it_iu_len,
2280 					      GFP_KERNEL, DMA_TO_DEVICE);
2281 		if (!ch->tx_ring[i])
2282 			goto err;
2283 
2284 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2285 	}
2286 
2287 	return 0;
2288 
2289 err:
2290 	for (i = 0; i < target->queue_size; ++i) {
2291 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2292 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2293 	}
2294 
2296 err_no_ring:
2297 	kfree(ch->tx_ring);
2298 	ch->tx_ring = NULL;
2299 	kfree(ch->rx_ring);
2300 	ch->rx_ring = NULL;
2301 
2302 	return -ENOMEM;
2303 }
2304 
2305 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2306 {
2307 	uint64_t T_tr_ns, max_compl_time_ms;
2308 	uint32_t rq_tmo_jiffies;
2309 
2310 	/*
2311 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2312 	 * table 91), both the QP timeout and the retry count have to be set
2313 	 * for RC QP's during the RTR to RTS transition.
2314 	 */
2315 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2316 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2317 
2318 	/*
2319 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2320 	 * it can take before an error completion is generated. See also
2321 	 * C9-140..142 in the IBTA spec for more information about how to
2322 	 * convert the QP Local ACK Timeout value to nanoseconds.
2323 	 */
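	/*
	 * Worked example with illustrative values: for qp_attr->timeout == 18
	 * and qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^18 ns ~= 1.07 s and
	 * max_compl_time_ms ~= 7 * 4 * 1.07 s ~= 30 s, so the value returned
	 * below corresponds to roughly 31 seconds.
	 */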
2324 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2325 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2326 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2327 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2328 
2329 	return rq_tmo_jiffies;
2330 }
2331 
2332 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2333 			       const struct srp_login_rsp *lrsp,
2334 			       struct srp_rdma_ch *ch)
2335 {
2336 	struct srp_target_port *target = ch->target;
2337 	struct ib_qp_attr *qp_attr = NULL;
2338 	int attr_mask = 0;
2339 	int ret = 0;
2340 	int i;
2341 
2342 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2343 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2344 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2345 		ch->use_imm_data  = srp_use_imm_data &&
2346 			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2347 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2348 						      ch->use_imm_data,
2349 						      target->max_it_iu_size);
2350 		WARN_ON_ONCE(ch->max_it_iu_len >
2351 			     be32_to_cpu(lrsp->max_it_iu_len));
2352 
2353 		if (ch->use_imm_data)
2354 			shost_printk(KERN_DEBUG, target->scsi_host,
2355 				     PFX "using immediate data\n");
2356 
2357 		/*
2358 		 * Reserve credits for task management so we don't
2359 		 * bounce requests back to the SCSI mid-layer.
2360 		 */
2361 		target->scsi_host->can_queue
2362 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2363 			      target->scsi_host->can_queue);
2364 		target->scsi_host->cmd_per_lun
2365 			= min_t(int, target->scsi_host->can_queue,
2366 				target->scsi_host->cmd_per_lun);
2367 	} else {
2368 		shost_printk(KERN_WARNING, target->scsi_host,
2369 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2370 		ret = -ECONNRESET;
2371 		goto error;
2372 	}
2373 
2374 	if (!ch->rx_ring) {
2375 		ret = srp_alloc_iu_bufs(ch);
2376 		if (ret)
2377 			goto error;
2378 	}
2379 
2380 	for (i = 0; i < target->queue_size; i++) {
2381 		struct srp_iu *iu = ch->rx_ring[i];
2382 
2383 		ret = srp_post_recv(ch, iu);
2384 		if (ret)
2385 			goto error;
2386 	}
2387 
2388 	if (!target->using_rdma_cm) {
2389 		ret = -ENOMEM;
2390 		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2391 		if (!qp_attr)
2392 			goto error;
2393 
2394 		qp_attr->qp_state = IB_QPS_RTR;
2395 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2396 		if (ret)
2397 			goto error_free;
2398 
2399 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2400 		if (ret)
2401 			goto error_free;
2402 
2403 		qp_attr->qp_state = IB_QPS_RTS;
2404 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2405 		if (ret)
2406 			goto error_free;
2407 
2408 		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2409 
2410 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2411 		if (ret)
2412 			goto error_free;
2413 
2414 		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2415 	}
2416 
2417 error_free:
2418 	kfree(qp_attr);
2419 
2420 error:
2421 	ch->status = ret;
2422 }
2423 
2424 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2425 				  const struct ib_cm_event *event,
2426 				  struct srp_rdma_ch *ch)
2427 {
2428 	struct srp_target_port *target = ch->target;
2429 	struct Scsi_Host *shost = target->scsi_host;
2430 	struct ib_class_port_info *cpi;
2431 	int opcode;
2432 	u16 dlid;
2433 
2434 	switch (event->param.rej_rcvd.reason) {
2435 	case IB_CM_REJ_PORT_CM_REDIRECT:
2436 		cpi = event->param.rej_rcvd.ari;
2437 		dlid = be16_to_cpu(cpi->redirect_lid);
2438 		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2439 		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2440 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2441 		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2442 
2443 		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2444 		break;
2445 
2446 	case IB_CM_REJ_PORT_REDIRECT:
2447 		if (srp_target_is_topspin(target)) {
2448 			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2449 
2450 			/*
2451 			 * Topspin/Cisco SRP gateways incorrectly send
2452 			 * reject reason code 25 when they mean 24
2453 			 * (port redirect).
2454 			 */
2455 			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2456 
2457 			shost_printk(KERN_DEBUG, shost,
2458 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2459 				     be64_to_cpu(dgid->global.subnet_prefix),
2460 				     be64_to_cpu(dgid->global.interface_id));
2461 
2462 			ch->status = SRP_PORT_REDIRECT;
2463 		} else {
2464 			shost_printk(KERN_WARNING, shost,
2465 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2466 			ch->status = -ECONNRESET;
2467 		}
2468 		break;
2469 
2470 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2471 		shost_printk(KERN_WARNING, shost,
2472 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2473 		ch->status = -ECONNRESET;
2474 		break;
2475 
2476 	case IB_CM_REJ_CONSUMER_DEFINED:
2477 		opcode = *(u8 *) event->private_data;
2478 		if (opcode == SRP_LOGIN_REJ) {
2479 			struct srp_login_rej *rej = event->private_data;
2480 			u32 reason = be32_to_cpu(rej->reason);
2481 
2482 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2483 				shost_printk(KERN_WARNING, shost,
2484 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2485 			else
2486 				shost_printk(KERN_WARNING, shost, PFX
2487 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2488 					     target->sgid.raw,
2489 					     target->ib_cm.orig_dgid.raw,
2490 					     reason);
2491 		} else
2492 			shost_printk(KERN_WARNING, shost,
2493 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2494 				     opcode);
2495 		ch->status = -ECONNRESET;
2496 		break;
2497 
2498 	case IB_CM_REJ_STALE_CONN:
2499 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2500 		ch->status = SRP_STALE_CONN;
2501 		break;
2502 
2503 	default:
2504 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2505 			     event->param.rej_rcvd.reason);
2506 		ch->status = -ECONNRESET;
2507 	}
2508 }
2509 
2510 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2511 			     const struct ib_cm_event *event)
2512 {
2513 	struct srp_rdma_ch *ch = cm_id->context;
2514 	struct srp_target_port *target = ch->target;
2515 	int comp = 0;
2516 
2517 	switch (event->event) {
2518 	case IB_CM_REQ_ERROR:
2519 		shost_printk(KERN_DEBUG, target->scsi_host,
2520 			     PFX "Sending CM REQ failed\n");
2521 		comp = 1;
2522 		ch->status = -ECONNRESET;
2523 		break;
2524 
2525 	case IB_CM_REP_RECEIVED:
2526 		comp = 1;
2527 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2528 		break;
2529 
2530 	case IB_CM_REJ_RECEIVED:
2531 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2532 		comp = 1;
2533 
2534 		srp_ib_cm_rej_handler(cm_id, event, ch);
2535 		break;
2536 
2537 	case IB_CM_DREQ_RECEIVED:
2538 		shost_printk(KERN_WARNING, target->scsi_host,
2539 			     PFX "DREQ received - connection closed\n");
2540 		ch->connected = false;
2541 		if (ib_send_cm_drep(cm_id, NULL, 0))
2542 			shost_printk(KERN_ERR, target->scsi_host,
2543 				     PFX "Sending CM DREP failed\n");
2544 		queue_work(system_long_wq, &target->tl_err_work);
2545 		break;
2546 
2547 	case IB_CM_TIMEWAIT_EXIT:
2548 		shost_printk(KERN_ERR, target->scsi_host,
2549 			     PFX "connection closed\n");
2550 		comp = 1;
2551 
2552 		ch->status = 0;
2553 		break;
2554 
2555 	case IB_CM_MRA_RECEIVED:
2556 	case IB_CM_DREQ_ERROR:
2557 	case IB_CM_DREP_RECEIVED:
2558 		break;
2559 
2560 	default:
2561 		shost_printk(KERN_WARNING, target->scsi_host,
2562 			     PFX "Unhandled CM event %d\n", event->event);
2563 		break;
2564 	}
2565 
2566 	if (comp)
2567 		complete(&ch->done);
2568 
2569 	return 0;
2570 }
2571 
2572 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2573 				    struct rdma_cm_event *event)
2574 {
2575 	struct srp_target_port *target = ch->target;
2576 	struct Scsi_Host *shost = target->scsi_host;
2577 	int opcode;
2578 
2579 	switch (event->status) {
2580 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2581 		shost_printk(KERN_WARNING, shost,
2582 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2583 		ch->status = -ECONNRESET;
2584 		break;
2585 
2586 	case IB_CM_REJ_CONSUMER_DEFINED:
2587 		opcode = *(u8 *) event->param.conn.private_data;
2588 		if (opcode == SRP_LOGIN_REJ) {
2589 			struct srp_login_rej *rej =
2590 				(struct srp_login_rej *)
2591 				event->param.conn.private_data;
2592 			u32 reason = be32_to_cpu(rej->reason);
2593 
2594 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2595 				shost_printk(KERN_WARNING, shost,
2596 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2597 			else
2598 				shost_printk(KERN_WARNING, shost,
2599 					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2600 		} else {
2601 			shost_printk(KERN_WARNING, shost,
2602 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2603 				     opcode);
2604 		}
2605 		ch->status = -ECONNRESET;
2606 		break;
2607 
2608 	case IB_CM_REJ_STALE_CONN:
2609 		shost_printk(KERN_WARNING, shost,
2610 			     "  REJ reason: stale connection\n");
2611 		ch->status = SRP_STALE_CONN;
2612 		break;
2613 
2614 	default:
2615 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2616 			     event->status);
2617 		ch->status = -ECONNRESET;
2618 		break;
2619 	}
2620 }
2621 
2622 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2623 			       struct rdma_cm_event *event)
2624 {
2625 	struct srp_rdma_ch *ch = cm_id->context;
2626 	struct srp_target_port *target = ch->target;
2627 	int comp = 0;
2628 
2629 	switch (event->event) {
2630 	case RDMA_CM_EVENT_ADDR_RESOLVED:
2631 		ch->status = 0;
2632 		comp = 1;
2633 		break;
2634 
2635 	case RDMA_CM_EVENT_ADDR_ERROR:
2636 		ch->status = -ENXIO;
2637 		comp = 1;
2638 		break;
2639 
2640 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2641 		ch->status = 0;
2642 		comp = 1;
2643 		break;
2644 
2645 	case RDMA_CM_EVENT_ROUTE_ERROR:
2646 	case RDMA_CM_EVENT_UNREACHABLE:
2647 		ch->status = -EHOSTUNREACH;
2648 		comp = 1;
2649 		break;
2650 
2651 	case RDMA_CM_EVENT_CONNECT_ERROR:
2652 		shost_printk(KERN_DEBUG, target->scsi_host,
2653 			     PFX "Sending CM REQ failed\n");
2654 		comp = 1;
2655 		ch->status = -ECONNRESET;
2656 		break;
2657 
2658 	case RDMA_CM_EVENT_ESTABLISHED:
2659 		comp = 1;
2660 		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2661 		break;
2662 
2663 	case RDMA_CM_EVENT_REJECTED:
2664 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2665 		comp = 1;
2666 
2667 		srp_rdma_cm_rej_handler(ch, event);
2668 		break;
2669 
2670 	case RDMA_CM_EVENT_DISCONNECTED:
2671 		if (ch->connected) {
2672 			shost_printk(KERN_WARNING, target->scsi_host,
2673 				     PFX "received DREQ\n");
2674 			rdma_disconnect(ch->rdma_cm.cm_id);
2675 			comp = 1;
2676 			ch->status = 0;
2677 			queue_work(system_long_wq, &target->tl_err_work);
2678 		}
2679 		break;
2680 
2681 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2682 		shost_printk(KERN_ERR, target->scsi_host,
2683 			     PFX "connection closed\n");
2684 
2685 		comp = 1;
2686 		ch->status = 0;
2687 		break;
2688 
2689 	default:
2690 		shost_printk(KERN_WARNING, target->scsi_host,
2691 			     PFX "Unhandled CM event %d\n", event->event);
2692 		break;
2693 	}
2694 
2695 	if (comp)
2696 		complete(&ch->done);
2697 
2698 	return 0;
2699 }
2700 
2701 /**
2702  * srp_change_queue_depth - set the device queue depth
2703  * @sdev: scsi device struct
2704  * @qdepth: requested queue depth
2705  *
2706  * Returns queue depth.
2707  */
2708 static int
2709 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2710 {
2711 	if (!sdev->tagged_supported)
2712 		qdepth = 1;
2713 	return scsi_change_queue_depth(sdev, qdepth);
2714 }
2715 
2716 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2717 			     u8 func, u8 *status)
2718 {
2719 	struct srp_target_port *target = ch->target;
2720 	struct srp_rport *rport = target->rport;
2721 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2722 	struct srp_iu *iu;
2723 	struct srp_tsk_mgmt *tsk_mgmt;
2724 	int res;
2725 
2726 	if (!ch->connected || target->qp_in_error)
2727 		return -1;
2728 
2729 	/*
2730 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2731 	 * invoked while a task management function is being sent.
2732 	 */
2733 	mutex_lock(&rport->mutex);
2734 	spin_lock_irq(&ch->lock);
2735 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2736 	spin_unlock_irq(&ch->lock);
2737 
2738 	if (!iu) {
2739 		mutex_unlock(&rport->mutex);
2740 
2741 		return -1;
2742 	}
2743 
2744 	iu->num_sge = 1;
2745 
2746 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2747 				   DMA_TO_DEVICE);
2748 	tsk_mgmt = iu->buf;
2749 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2750 
2751 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
2752 	int_to_scsilun(lun, &tsk_mgmt->lun);
2753 	tsk_mgmt->tsk_mgmt_func = func;
2754 	tsk_mgmt->task_tag	= req_tag;
2755 
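	/*
	 * Task management tags have the SRP_TAG_TSK_MGMT bit set so that
	 * srp_process_rsp() can tell them apart from SCSI command tags.
	 */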
2756 	spin_lock_irq(&ch->lock);
2757 	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2758 	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2759 	spin_unlock_irq(&ch->lock);
2760 
2761 	init_completion(&ch->tsk_mgmt_done);
2762 
2763 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2764 				      DMA_TO_DEVICE);
2765 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2766 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2767 		mutex_unlock(&rport->mutex);
2768 
2769 		return -1;
2770 	}
2771 	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2772 					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2773 	if (res > 0 && status)
2774 		*status = ch->tsk_mgmt_status;
2775 	mutex_unlock(&rport->mutex);
2776 
2777 	WARN_ON_ONCE(res < 0);
2778 
2779 	return res > 0 ? 0 : -1;
2780 }
2781 
2782 static int srp_abort(struct scsi_cmnd *scmnd)
2783 {
2784 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2785 	struct srp_request *req = scsi_cmd_priv(scmnd);
2786 	u32 tag;
2787 	u16 ch_idx;
2788 	struct srp_rdma_ch *ch;
2789 
2790 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2791 
2792 	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2793 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2794 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2795 		return SUCCESS;
2796 	ch = &target->ch[ch_idx];
2797 	if (!srp_claim_req(ch, req, NULL, scmnd))
2798 		return SUCCESS;
2799 	shost_printk(KERN_ERR, target->scsi_host,
2800 		     "Sending SRP abort for tag %#x\n", tag);
2801 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2802 			      SRP_TSK_ABORT_TASK, NULL) == 0) {
2803 		srp_free_req(ch, req, scmnd, 0);
2804 		return SUCCESS;
2805 	}
2806 	if (target->rport->state == SRP_RPORT_LOST)
2807 		return FAST_IO_FAIL;
2808 
2809 	return FAILED;
2810 }
2811 
2812 static int srp_reset_device(struct scsi_cmnd *scmnd)
2813 {
2814 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2815 	struct srp_rdma_ch *ch;
2816 	u8 status;
2817 
2818 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2819 
2820 	ch = &target->ch[0];
2821 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2822 			      SRP_TSK_LUN_RESET, &status))
2823 		return FAILED;
2824 	if (status)
2825 		return FAILED;
2826 
2827 	return SUCCESS;
2828 }
2829 
2830 static int srp_reset_host(struct scsi_cmnd *scmnd)
2831 {
2832 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2833 
2834 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2835 
2836 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2837 }
2838 
2839 static int srp_target_alloc(struct scsi_target *starget)
2840 {
2841 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2842 	struct srp_target_port *target = host_to_target(shost);
2843 
2844 	if (target->target_can_queue)
2845 		starget->can_queue = target->target_can_queue;
2846 	return 0;
2847 }
2848 
2849 static int srp_sdev_configure(struct scsi_device *sdev,
2850 			      struct queue_limits *lim)
2851 {
2852 	struct Scsi_Host *shost = sdev->host;
2853 	struct srp_target_port *target = host_to_target(shost);
2854 	struct request_queue *q = sdev->request_queue;
2855 	unsigned long timeout;
2856 
2857 	if (sdev->type == TYPE_DISK) {
2858 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2859 		blk_queue_rq_timeout(q, timeout);
2860 	}
2861 
2862 	return 0;
2863 }
2864 
2865 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2866 			   char *buf)
2867 {
2868 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2869 
2870 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2871 }
2872 
2873 static DEVICE_ATTR_RO(id_ext);
2874 
2875 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2876 			     char *buf)
2877 {
2878 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2879 
2880 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2881 }
2882 
2883 static DEVICE_ATTR_RO(ioc_guid);
2884 
2885 static ssize_t service_id_show(struct device *dev,
2886 			       struct device_attribute *attr, char *buf)
2887 {
2888 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2889 
2890 	if (target->using_rdma_cm)
2891 		return -ENOENT;
2892 	return sysfs_emit(buf, "0x%016llx\n",
2893 			  be64_to_cpu(target->ib_cm.service_id));
2894 }
2895 
2896 static DEVICE_ATTR_RO(service_id);
2897 
2898 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2899 			 char *buf)
2900 {
2901 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2902 
2903 	if (target->using_rdma_cm)
2904 		return -ENOENT;
2905 
2906 	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2907 }
2908 
2909 static DEVICE_ATTR_RO(pkey);
2910 
2911 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2912 			 char *buf)
2913 {
2914 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2915 
2916 	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2917 }
2918 
2919 static DEVICE_ATTR_RO(sgid);
2920 
2921 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2922 			 char *buf)
2923 {
2924 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2925 	struct srp_rdma_ch *ch = &target->ch[0];
2926 
2927 	if (target->using_rdma_cm)
2928 		return -ENOENT;
2929 
2930 	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2931 }
2932 
2933 static DEVICE_ATTR_RO(dgid);
2934 
2935 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2936 			      char *buf)
2937 {
2938 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2939 
2940 	if (target->using_rdma_cm)
2941 		return -ENOENT;
2942 
2943 	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2944 }
2945 
2946 static DEVICE_ATTR_RO(orig_dgid);
2947 
2948 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2949 			    char *buf)
2950 {
2951 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2952 	struct srp_rdma_ch *ch;
2953 	int i, req_lim = INT_MAX;
2954 
2955 	for (i = 0; i < target->ch_count; i++) {
2956 		ch = &target->ch[i];
2957 		req_lim = min(req_lim, ch->req_lim);
2958 	}
2959 
2960 	return sysfs_emit(buf, "%d\n", req_lim);
2961 }
2962 
2963 static DEVICE_ATTR_RO(req_lim);
2964 
2965 static ssize_t zero_req_lim_show(struct device *dev,
2966 				 struct device_attribute *attr, char *buf)
2967 {
2968 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2969 
2970 	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2971 }
2972 
2973 static DEVICE_ATTR_RO(zero_req_lim);
2974 
2975 static ssize_t local_ib_port_show(struct device *dev,
2976 				  struct device_attribute *attr, char *buf)
2977 {
2978 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2979 
2980 	return sysfs_emit(buf, "%u\n", target->srp_host->port);
2981 }
2982 
2983 static DEVICE_ATTR_RO(local_ib_port);
2984 
2985 static ssize_t local_ib_device_show(struct device *dev,
2986 				    struct device_attribute *attr, char *buf)
2987 {
2988 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2989 
2990 	return sysfs_emit(buf, "%s\n",
2991 			  dev_name(&target->srp_host->srp_dev->dev->dev));
2992 }
2993 
2994 static DEVICE_ATTR_RO(local_ib_device);
2995 
2996 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
2997 			     char *buf)
2998 {
2999 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3000 
3001 	return sysfs_emit(buf, "%d\n", target->ch_count);
3002 }
3003 
3004 static DEVICE_ATTR_RO(ch_count);
3005 
3006 static ssize_t comp_vector_show(struct device *dev,
3007 				struct device_attribute *attr, char *buf)
3008 {
3009 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3010 
3011 	return sysfs_emit(buf, "%d\n", target->comp_vector);
3012 }
3013 
3014 static DEVICE_ATTR_RO(comp_vector);
3015 
3016 static ssize_t tl_retry_count_show(struct device *dev,
3017 				   struct device_attribute *attr, char *buf)
3018 {
3019 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3020 
3021 	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3022 }
3023 
3024 static DEVICE_ATTR_RO(tl_retry_count);
3025 
3026 static ssize_t cmd_sg_entries_show(struct device *dev,
3027 				   struct device_attribute *attr, char *buf)
3028 {
3029 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3030 
3031 	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3032 }
3033 
3034 static DEVICE_ATTR_RO(cmd_sg_entries);
3035 
3036 static ssize_t allow_ext_sg_show(struct device *dev,
3037 				 struct device_attribute *attr, char *buf)
3038 {
3039 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3040 
3041 	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3042 }
3043 
3044 static DEVICE_ATTR_RO(allow_ext_sg);
3045 
3046 static struct attribute *srp_host_attrs[] = {
3047 	&dev_attr_id_ext.attr,
3048 	&dev_attr_ioc_guid.attr,
3049 	&dev_attr_service_id.attr,
3050 	&dev_attr_pkey.attr,
3051 	&dev_attr_sgid.attr,
3052 	&dev_attr_dgid.attr,
3053 	&dev_attr_orig_dgid.attr,
3054 	&dev_attr_req_lim.attr,
3055 	&dev_attr_zero_req_lim.attr,
3056 	&dev_attr_local_ib_port.attr,
3057 	&dev_attr_local_ib_device.attr,
3058 	&dev_attr_ch_count.attr,
3059 	&dev_attr_comp_vector.attr,
3060 	&dev_attr_tl_retry_count.attr,
3061 	&dev_attr_cmd_sg_entries.attr,
3062 	&dev_attr_allow_ext_sg.attr,
3063 	NULL
3064 };
3065 
3066 ATTRIBUTE_GROUPS(srp_host);
3067 
3068 static const struct scsi_host_template srp_template = {
3069 	.module				= THIS_MODULE,
3070 	.name				= "InfiniBand SRP initiator",
3071 	.proc_name			= DRV_NAME,
3072 	.target_alloc			= srp_target_alloc,
3073 	.sdev_configure			= srp_sdev_configure,
3074 	.info				= srp_target_info,
3075 	.init_cmd_priv			= srp_init_cmd_priv,
3076 	.exit_cmd_priv			= srp_exit_cmd_priv,
3077 	.queuecommand			= srp_queuecommand,
3078 	.change_queue_depth             = srp_change_queue_depth,
3079 	.eh_timed_out			= srp_timed_out,
3080 	.eh_abort_handler		= srp_abort,
3081 	.eh_device_reset_handler	= srp_reset_device,
3082 	.eh_host_reset_handler		= srp_reset_host,
3083 	.skip_settle_delay		= true,
3084 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3085 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3086 	.this_id			= -1,
3087 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3088 	.shost_groups			= srp_host_groups,
3089 	.track_queue_depth		= 1,
3090 	.cmd_size			= sizeof(struct srp_request),
3091 };
3092 
3093 static int srp_sdev_count(struct Scsi_Host *host)
3094 {
3095 	struct scsi_device *sdev;
3096 	int c = 0;
3097 
3098 	shost_for_each_device(sdev, host)
3099 		c++;
3100 
3101 	return c;
3102 }
3103 
3104 /*
3105  * Return values:
3106  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3107  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3108  *    removal has been scheduled.
3109  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3110  */
3111 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3112 {
3113 	struct srp_rport_identifiers ids;
3114 	struct srp_rport *rport;
3115 
3116 	target->state = SRP_TARGET_SCANNING;
3117 	sprintf(target->target_name, "SRP.T10:%016llX",
3118 		be64_to_cpu(target->id_ext));
3119 
3120 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3121 		return -ENODEV;
3122 
3123 	memcpy(ids.port_id, &target->id_ext, 8);
3124 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3125 	ids.roles = SRP_RPORT_ROLE_TARGET;
3126 	rport = srp_rport_add(target->scsi_host, &ids);
3127 	if (IS_ERR(rport)) {
3128 		scsi_remove_host(target->scsi_host);
3129 		return PTR_ERR(rport);
3130 	}
3131 
3132 	rport->lld_data = target;
3133 	target->rport = rport;
3134 
3135 	spin_lock(&host->target_lock);
3136 	list_add_tail(&target->list, &host->target_list);
3137 	spin_unlock(&host->target_lock);
3138 
3139 	scsi_scan_target(&target->scsi_host->shost_gendev,
3140 			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3141 
3142 	if (srp_connected_ch(target) < target->ch_count ||
3143 	    target->qp_in_error) {
3144 		shost_printk(KERN_INFO, target->scsi_host,
3145 			     PFX "SCSI scan failed - removing SCSI host\n");
3146 		srp_queue_remove_work(target);
3147 		goto out;
3148 	}
3149 
3150 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3151 		 dev_name(&target->scsi_host->shost_gendev),
3152 		 srp_sdev_count(target->scsi_host));
3153 
3154 	spin_lock_irq(&target->lock);
3155 	if (target->state == SRP_TARGET_SCANNING)
3156 		target->state = SRP_TARGET_LIVE;
3157 	spin_unlock_irq(&target->lock);
3158 
3159 out:
3160 	return 0;
3161 }
3162 
3163 static void srp_release_dev(struct device *dev)
3164 {
3165 	struct srp_host *host =
3166 		container_of(dev, struct srp_host, dev);
3167 
3168 	kfree(host);
3169 }
3170 
3171 static struct attribute *srp_class_attrs[];
3172 
3173 ATTRIBUTE_GROUPS(srp_class);
3174 
3175 static struct class srp_class = {
3176 	.name    = "infiniband_srp",
3177 	.dev_groups = srp_class_groups,
3178 	.dev_release = srp_release_dev
3179 };
3180 
3181 /**
3182  * srp_conn_unique() - check whether the connection to a target is unique
3183  * @host:   SRP host.
3184  * @target: SRP target port.
3185  */
3186 static bool srp_conn_unique(struct srp_host *host,
3187 			    struct srp_target_port *target)
3188 {
3189 	struct srp_target_port *t;
3190 	bool ret = false;
3191 
3192 	if (target->state == SRP_TARGET_REMOVED)
3193 		goto out;
3194 
3195 	ret = true;
3196 
3197 	spin_lock(&host->target_lock);
3198 	list_for_each_entry(t, &host->target_list, list) {
3199 		if (t != target &&
3200 		    target->id_ext == t->id_ext &&
3201 		    target->ioc_guid == t->ioc_guid &&
3202 		    target->initiator_ext == t->initiator_ext) {
3203 			ret = false;
3204 			break;
3205 		}
3206 	}
3207 	spin_unlock(&host->target_lock);
3208 
3209 out:
3210 	return ret;
3211 }
3212 
3213 /*
3214  * Target ports are added by writing
3215  *
3216  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3217  *     pkey=<P_Key>,service_id=<service ID>
3218  * or
3219  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3220  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3221  *
3222  * to the add_target sysfs attribute.
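 *
 * A minimal example using the RDMA/CM (IP based) form, with illustrative
 * values only (the sysfs directory name depends on the HCA name and port):
 *
 *	echo id_ext=200100110034be90,ioc_guid=00117500000000e4,dest=192.168.1.5:5555 \
 *		> /sys/class/infiniband_srp/srp-mlx5_0-1/add_target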
3223  */
3224 enum {
3225 	SRP_OPT_ERR		= 0,
3226 	SRP_OPT_ID_EXT		= 1 << 0,
3227 	SRP_OPT_IOC_GUID	= 1 << 1,
3228 	SRP_OPT_DGID		= 1 << 2,
3229 	SRP_OPT_PKEY		= 1 << 3,
3230 	SRP_OPT_SERVICE_ID	= 1 << 4,
3231 	SRP_OPT_MAX_SECT	= 1 << 5,
3232 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3233 	SRP_OPT_IO_CLASS	= 1 << 7,
3234 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3235 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3236 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3237 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3238 	SRP_OPT_COMP_VECTOR	= 1 << 12,
3239 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3240 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3241 	SRP_OPT_IP_SRC		= 1 << 15,
3242 	SRP_OPT_IP_DEST		= 1 << 16,
3243 	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3244 	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3245 	SRP_OPT_CH_COUNT	= 1 << 19,
3246 };
3247 
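/*
 * Either of the parameter sets below is sufficient: the first one for IB CM
 * based logins (dgid/pkey/service_id) and the second one for RDMA CM based
 * (IP addressed) logins.
 */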
3248 static unsigned int srp_opt_mandatory[] = {
3249 	SRP_OPT_ID_EXT		|
3250 	SRP_OPT_IOC_GUID	|
3251 	SRP_OPT_DGID		|
3252 	SRP_OPT_PKEY		|
3253 	SRP_OPT_SERVICE_ID,
3254 	SRP_OPT_ID_EXT		|
3255 	SRP_OPT_IOC_GUID	|
3256 	SRP_OPT_IP_DEST,
3257 };
3258 
3259 static const match_table_t srp_opt_tokens = {
3260 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3261 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3262 	{ SRP_OPT_DGID,			"dgid=%s" 		},
3263 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3264 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3265 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3266 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3267 	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3268 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3269 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3270 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3271 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3272 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3273 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3274 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3275 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3276 	{ SRP_OPT_IP_SRC,		"src=%s"		},
3277 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3278 	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
3279 	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
3280 	{ SRP_OPT_ERR,			NULL 			}
3281 };
3282 
3283 /**
3284  * srp_parse_in - parse an IP address and port number combination
3285  * @net:	   [in]  Network namespace.
3286  * @sa:		   [out] Address family, IP address and port number.
3287  * @addr_port_str: [in]  IP address and port number.
3288  * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3289  *
3290  * Parse the following address formats:
3291  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3292  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
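 *
 * Return: 0 if parsing succeeded or a negative error code otherwise.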
3293  */
3294 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3295 			const char *addr_port_str, bool *has_port)
3296 {
3297 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3298 	char *port_str;
3299 	int ret;
3300 
3301 	if (!addr)
3302 		return -ENOMEM;
3303 	port_str = strrchr(addr, ':');
3304 	if (port_str && strchr(port_str, ']'))
3305 		port_str = NULL;
3306 	if (port_str)
3307 		*port_str++ = '\0';
3308 	if (has_port)
3309 		*has_port = port_str != NULL;
3310 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3311 	if (ret && addr[0]) {
3312 		addr_end = addr + strlen(addr) - 1;
3313 		if (addr[0] == '[' && *addr_end == ']') {
3314 			*addr_end = '\0';
3315 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3316 						   port_str, sa);
3317 		}
3318 	}
3319 	kfree(addr);
3320 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3321 	return ret;
3322 }
3323 
3324 static int srp_parse_options(struct net *net, const char *buf,
3325 			     struct srp_target_port *target)
3326 {
3327 	char *options, *sep_opt;
3328 	char *p;
3329 	substring_t args[MAX_OPT_ARGS];
3330 	unsigned long long ull;
3331 	bool has_port;
3332 	int opt_mask = 0;
3333 	int token;
3334 	int ret = -EINVAL;
3335 	int i;
3336 
3337 	options = kstrdup(buf, GFP_KERNEL);
3338 	if (!options)
3339 		return -ENOMEM;
3340 
3341 	sep_opt = options;
3342 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3343 		if (!*p)
3344 			continue;
3345 
3346 		token = match_token(p, srp_opt_tokens, args);
3347 		opt_mask |= token;
3348 
3349 		switch (token) {
3350 		case SRP_OPT_ID_EXT:
3351 			p = match_strdup(args);
3352 			if (!p) {
3353 				ret = -ENOMEM;
3354 				goto out;
3355 			}
3356 			ret = kstrtoull(p, 16, &ull);
3357 			if (ret) {
3358 				pr_warn("invalid id_ext parameter '%s'\n", p);
3359 				kfree(p);
3360 				goto out;
3361 			}
3362 			target->id_ext = cpu_to_be64(ull);
3363 			kfree(p);
3364 			break;
3365 
3366 		case SRP_OPT_IOC_GUID:
3367 			p = match_strdup(args);
3368 			if (!p) {
3369 				ret = -ENOMEM;
3370 				goto out;
3371 			}
3372 			ret = kstrtoull(p, 16, &ull);
3373 			if (ret) {
3374 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3375 				kfree(p);
3376 				goto out;
3377 			}
3378 			target->ioc_guid = cpu_to_be64(ull);
3379 			kfree(p);
3380 			break;
3381 
3382 		case SRP_OPT_DGID:
3383 			p = match_strdup(args);
3384 			if (!p) {
3385 				ret = -ENOMEM;
3386 				goto out;
3387 			}
3388 			if (strlen(p) != 32) {
3389 				pr_warn("bad dest GID parameter '%s'\n", p);
3390 				kfree(p);
3391 				goto out;
3392 			}
3393 
3394 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3395 			kfree(p);
3396 			if (ret < 0)
3397 				goto out;
3398 			break;
3399 
3400 		case SRP_OPT_PKEY:
3401 			ret = match_hex(args, &token);
3402 			if (ret) {
3403 				pr_warn("bad P_Key parameter '%s'\n", p);
3404 				goto out;
3405 			}
3406 			target->ib_cm.pkey = cpu_to_be16(token);
3407 			break;
3408 
3409 		case SRP_OPT_SERVICE_ID:
3410 			p = match_strdup(args);
3411 			if (!p) {
3412 				ret = -ENOMEM;
3413 				goto out;
3414 			}
3415 			ret = kstrtoull(p, 16, &ull);
3416 			if (ret) {
3417 				pr_warn("bad service_id parameter '%s'\n", p);
3418 				kfree(p);
3419 				goto out;
3420 			}
3421 			target->ib_cm.service_id = cpu_to_be64(ull);
3422 			kfree(p);
3423 			break;
3424 
3425 		case SRP_OPT_IP_SRC:
3426 			p = match_strdup(args);
3427 			if (!p) {
3428 				ret = -ENOMEM;
3429 				goto out;
3430 			}
3431 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3432 					   NULL);
3433 			if (ret < 0) {
3434 				pr_warn("bad source parameter '%s'\n", p);
3435 				kfree(p);
3436 				goto out;
3437 			}
3438 			target->rdma_cm.src_specified = true;
3439 			kfree(p);
3440 			break;
3441 
3442 		case SRP_OPT_IP_DEST:
3443 			p = match_strdup(args);
3444 			if (!p) {
3445 				ret = -ENOMEM;
3446 				goto out;
3447 			}
3448 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3449 					   &has_port);
3450 			if (!has_port)
3451 				ret = -EINVAL;
3452 			if (ret < 0) {
3453 				pr_warn("bad dest parameter '%s'\n", p);
3454 				kfree(p);
3455 				goto out;
3456 			}
3457 			target->using_rdma_cm = true;
3458 			kfree(p);
3459 			break;
3460 
3461 		case SRP_OPT_MAX_SECT:
3462 			ret = match_int(args, &token);
3463 			if (ret) {
3464 				pr_warn("bad max sect parameter '%s'\n", p);
3465 				goto out;
3466 			}
3467 			target->scsi_host->max_sectors = token;
3468 			break;
3469 
3470 		case SRP_OPT_QUEUE_SIZE:
3471 			ret = match_int(args, &token);
3472 			if (ret) {
3473 				pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
3474 					p, ret);
3475 				goto out;
3476 			}
3477 			if (token < 1) {
3478 				pr_warn("bad queue_size parameter '%s'\n", p);
3479 				ret = -EINVAL;
3480 				goto out;
3481 			}
3482 			target->scsi_host->can_queue = token;
3483 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3484 					     SRP_TSK_MGMT_SQ_SIZE;
3485 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3486 				target->scsi_host->cmd_per_lun = token;
3487 			break;
3488 
3489 		case SRP_OPT_MAX_CMD_PER_LUN:
3490 			ret = match_int(args, &token);
3491 			if (ret) {
3492 				pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
3493 					p, ret);
3494 				goto out;
3495 			}
3496 			if (token < 1) {
3497 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3498 					p);
3499 				ret = -EINVAL;
3500 				goto out;
3501 			}
3502 			target->scsi_host->cmd_per_lun = token;
3503 			break;
3504 
3505 		case SRP_OPT_TARGET_CAN_QUEUE:
3506 			ret = match_int(args, &token);
3507 			if (ret) {
3508 				pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
3509 					p, ret);
3510 				goto out;
3511 			}
3512 			if (token < 1) {
3513 				pr_warn("bad max target_can_queue parameter '%s'\n",
3514 					p);
3515 				ret = -EINVAL;
3516 				goto out;
3517 			}
3518 			target->target_can_queue = token;
3519 			break;
3520 
3521 		case SRP_OPT_IO_CLASS:
3522 			ret = match_hex(args, &token);
3523 			if (ret) {
3524 				pr_warn("bad IO class parameter '%s'\n", p);
3525 				goto out;
3526 			}
3527 			if (token != SRP_REV10_IB_IO_CLASS &&
3528 			    token != SRP_REV16A_IB_IO_CLASS) {
3529 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3530 					token, SRP_REV10_IB_IO_CLASS,
3531 					SRP_REV16A_IB_IO_CLASS);
3532 				ret = -EINVAL;
3533 				goto out;
3534 			}
3535 			target->io_class = token;
3536 			break;
3537 
3538 		case SRP_OPT_INITIATOR_EXT:
3539 			p = match_strdup(args);
3540 			if (!p) {
3541 				ret = -ENOMEM;
3542 				goto out;
3543 			}
3544 			ret = kstrtoull(p, 16, &ull);
3545 			if (ret) {
3546 				pr_warn("bad initiator_ext value '%s'\n", p);
3547 				kfree(p);
3548 				goto out;
3549 			}
3550 			target->initiator_ext = cpu_to_be64(ull);
3551 			kfree(p);
3552 			break;
3553 
3554 		case SRP_OPT_CMD_SG_ENTRIES:
3555 			ret = match_int(args, &token);
3556 			if (ret) {
3557 				pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
3558 					p, ret);
3559 				goto out;
3560 			}
3561 			if (token < 1 || token > 255) {
3562 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3563 					p);
3564 				ret = -EINVAL;
3565 				goto out;
3566 			}
3567 			target->cmd_sg_cnt = token;
3568 			break;
3569 
3570 		case SRP_OPT_ALLOW_EXT_SG:
3571 			ret = match_int(args, &token);
3572 			if (ret) {
3573 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3574 				goto out;
3575 			}
3576 			target->allow_ext_sg = !!token;
3577 			break;
3578 
3579 		case SRP_OPT_SG_TABLESIZE:
3580 			ret = match_int(args, &token);
3581 			if (ret) {
3582 				pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
3583 					p, ret);
3584 				goto out;
3585 			}
3586 			if (token < 1 || token > SG_MAX_SEGMENTS) {
3587 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3588 					p);
3589 				ret = -EINVAL;
3590 				goto out;
3591 			}
3592 			target->sg_tablesize = token;
3593 			break;
3594 
3595 		case SRP_OPT_COMP_VECTOR:
3596 			ret = match_int(args, &token);
3597 			if (ret) {
3598 				pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
3599 					p, ret);
3600 				goto out;
3601 			}
3602 			if (token < 0) {
3603 				pr_warn("bad comp_vector parameter '%s'\n", p);
3604 				ret = -EINVAL;
3605 				goto out;
3606 			}
3607 			target->comp_vector = token;
3608 			break;
3609 
3610 		case SRP_OPT_TL_RETRY_COUNT:
3611 			ret = match_int(args, &token);
3612 			if (ret) {
3613 				pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
3614 					p, ret);
3615 				goto out;
3616 			}
3617 			if (token < 2 || token > 7) {
3618 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3619 					p);
3620 				ret = -EINVAL;
3621 				goto out;
3622 			}
3623 			target->tl_retry_count = token;
3624 			break;
3625 
3626 		case SRP_OPT_MAX_IT_IU_SIZE:
3627 			ret = match_int(args, &token);
3628 			if (ret) {
3629 				pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
3630 					p, ret);
3631 				goto out;
3632 			}
3633 			if (token < 0) {
3634 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3635 				ret = -EINVAL;
3636 				goto out;
3637 			}
3638 			target->max_it_iu_size = token;
3639 			break;
3640 
3641 		case SRP_OPT_CH_COUNT:
3642 			ret = match_int(args, &token);
3643 			if (ret) {
3644 				pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
3645 					p, ret);
3646 				goto out;
3647 			}
3648 			if (token < 1) {
3649 				pr_warn("bad channel count %s\n", p);
3650 				ret = -EINVAL;
3651 				goto out;
3652 			}
3653 			target->ch_count = token;
3654 			break;
3655 
3656 		default:
3657 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3658 				p);
3659 			ret = -EINVAL;
3660 			goto out;
3661 		}
3662 	}
3663 
3664 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3665 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3666 			ret = 0;
3667 			break;
3668 		}
3669 	}
3670 	if (ret)
3671 		pr_warn("target creation request is missing one or more parameters\n");
3672 
3673 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3674 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3675 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3676 			target->scsi_host->cmd_per_lun,
3677 			target->scsi_host->can_queue);
3678 
3679 out:
3680 	kfree(options);
3681 	return ret;
3682 }
3683 
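/*
 * Sysfs "store" method behind the per-port add_target attribute. A target
 * port is created by writing a comma-separated option string to this
 * attribute; the attribute lives under a device named after the
 * "srp-<ibdev>-<port>" pattern set up in srp_add_port() below.
 *
 * Illustrative example only (placeholder identifiers and HCA name, and
 * assuming the driver's sysfs class is exposed as infiniband_srp):
 *
 *   echo "id_ext=200100e08b000001,ioc_guid=0002c90300a1e2f0,dgid=fe800000000000000002c90300a1e2f1,pkey=ffff,service_id=0002c90300a1e2f0" \
 *       > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 */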
3684 static ssize_t add_target_store(struct device *dev,
3685 				struct device_attribute *attr, const char *buf,
3686 				size_t count)
3687 {
3688 	struct srp_host *host =
3689 		container_of(dev, struct srp_host, dev);
3690 	struct Scsi_Host *target_host;
3691 	struct srp_target_port *target;
3692 	struct srp_rdma_ch *ch;
3693 	struct srp_device *srp_dev = host->srp_dev;
3694 	struct ib_device *ibdev = srp_dev->dev;
3695 	int ret, i, ch_idx;
3696 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3697 	bool multich = false;
3698 	uint32_t max_iu_len;
3699 
3700 	target_host = scsi_host_alloc(&srp_template,
3701 				      sizeof (struct srp_target_port));
3702 	if (!target_host)
3703 		return -ENOMEM;
3704 
3705 	target_host->transportt  = ib_srp_transport_template;
3706 	target_host->max_channel = 0;
3707 	target_host->max_id      = 1;
3708 	target_host->max_lun     = -1LL;
3709 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3710 
3711 	if (ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
3712 		target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3713 	else
3714 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3715 
3716 	target = host_to_target(target_host);
3717 
3718 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3719 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3720 	target->scsi_host	= target_host;
3721 	target->srp_host	= host;
3722 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3723 	target->global_rkey	= host->srp_dev->global_rkey;
3724 	target->cmd_sg_cnt	= cmd_sg_entries;
3725 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3726 	target->allow_ext_sg	= allow_ext_sg;
3727 	target->tl_retry_count	= 7;
3728 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3729 
3730 	/*
3731 	 * Prevent the SCSI host from being removed by srp_remove_target()
3732 	 * before this function returns.
3733 	 */
3734 	scsi_host_get(target->scsi_host);
3735 
3736 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3737 	if (ret < 0)
3738 		goto put;
3739 
3740 	ret = srp_parse_options(target->net, buf, target);
3741 	if (ret)
3742 		goto out;
3743 
3744 	if (!srp_conn_unique(target->srp_host, target)) {
3745 		if (target->using_rdma_cm) {
3746 			shost_printk(KERN_INFO, target->scsi_host,
3747 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3748 				     be64_to_cpu(target->id_ext),
3749 				     be64_to_cpu(target->ioc_guid),
3750 				     &target->rdma_cm.dst);
3751 		} else {
3752 			shost_printk(KERN_INFO, target->scsi_host,
3753 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3754 				     be64_to_cpu(target->id_ext),
3755 				     be64_to_cpu(target->ioc_guid),
3756 				     be64_to_cpu(target->initiator_ext));
3757 		}
3758 		ret = -EEXIST;
3759 		goto out;
3760 	}
3761 
3762 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3763 	    target->cmd_sg_cnt < target->sg_tablesize) {
3764 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3765 		target->sg_tablesize = target->cmd_sg_cnt;
3766 	}
3767 
3768 	if (srp_dev->use_fast_reg) {
3769 		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3770 				 IBK_SG_GAPS_REG;
3771 
3772 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3773 				  (ilog2(srp_dev->mr_page_size) - 9);
3774 		if (!gaps_reg) {
3775 			/*
3776 			 * FR can only map one HCA page per entry. If the start
3777 			 * address is not aligned on an HCA page boundary, two
3778 			 * entries will be used for the head and the tail, even
3779 			 * though these two entries combined contain at most
3780 			 * one HCA page of data. Hence the "+ 1" in the
3781 			 * calculation below.
3782 			 *
3783 			 * The indirect data buffer descriptor is contiguous,
3784 			 * so the memory for that buffer will only be
3785 			 * registered if register_always is true. Hence add
3786 			 * one to mr_per_cmd if register_always has been set.
3787 			 */
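			/*
			 * Worked example with illustrative values: for
			 * mr_page_size = 4096, max_pages_per_mr = 256 and
			 * max_sectors = 4096, max_sectors_per_mr is
			 * 256 << (12 - 9) = 2048, so with register_always
			 * set this gives mr_per_cmd =
			 * 1 + (4096 + 1 + 2047) / 2048 = 4.
			 */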
3788 			mr_per_cmd = register_always +
3789 				(target->scsi_host->max_sectors + 1 +
3790 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3791 		} else {
3792 			mr_per_cmd = register_always +
3793 				(target->sg_tablesize +
3794 				 srp_dev->max_pages_per_mr - 1) /
3795 				srp_dev->max_pages_per_mr;
3796 		}
3797 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3798 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3799 			 max_sectors_per_mr, mr_per_cmd);
3800 	}
3801 
3802 	target_host->sg_tablesize = target->sg_tablesize;
3803 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3804 	target->mr_per_cmd = mr_per_cmd;
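	/*
	 * The indirect descriptor table holds one struct srp_direct_buf
	 * (16 bytes) per S/G entry, e.g. 4 KiB for sg_tablesize = 256
	 * (illustrative value).
	 */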
3805 	target->indirect_size = target->sg_tablesize *
3806 				sizeof (struct srp_direct_buf);
3807 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3808 				       srp_use_imm_data,
3809 				       target->max_it_iu_size);
3810 
3811 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3812 	INIT_WORK(&target->remove_work, srp_remove_work);
3813 	spin_lock_init(&target->lock);
3814 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3815 	if (ret)
3816 		goto out;
3817 
3818 	ret = -ENOMEM;
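	/*
	 * Pick a default channel count when neither the ch_count login
	 * option nor the driver-wide ch_count default is set: the larger of
	 * four channels per online NUMA node and the number of completion
	 * vectors, capped at the number of online CPUs. With two nodes, 16
	 * completion vectors and 32 CPUs (illustrative values) this gives
	 * min(max(8, 16), 32) = 16 channels.
	 */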
3819 	if (target->ch_count == 0) {
3820 		target->ch_count =
3821 			min(ch_count ?:
3822 				max(4 * num_online_nodes(),
3823 				    ibdev->num_comp_vectors),
3824 				num_online_cpus());
3825 	}
3826 
3827 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3828 			     GFP_KERNEL);
3829 	if (!target->ch)
3830 		goto out;
3831 
3832 	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3833 		ch = &target->ch[ch_idx];
3834 		ch->target = target;
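		/* Spread the channels round-robin over the completion vectors. */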
3835 		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3836 		spin_lock_init(&ch->lock);
3837 		INIT_LIST_HEAD(&ch->free_tx);
3838 		ret = srp_new_cm_id(ch);
3839 		if (ret)
3840 			goto err_disconnect;
3841 
3842 		ret = srp_create_ch_ib(ch);
3843 		if (ret)
3844 			goto err_disconnect;
3845 
3846 		ret = srp_connect_ch(ch, max_iu_len, multich);
3847 		if (ret) {
3848 			char dst[64];
3849 
3850 			if (target->using_rdma_cm)
3851 				snprintf(dst, sizeof(dst), "%pIS",
3852 					&target->rdma_cm.dst);
3853 			else
3854 				snprintf(dst, sizeof(dst), "%pI6",
3855 					target->ib_cm.orig_dgid.raw);
3856 			shost_printk(KERN_ERR, target->scsi_host,
3857 				PFX "Connection %d/%d to %s failed\n",
3858 				ch_idx,
3859 				target->ch_count, dst);
3860 			if (ch_idx == 0) {
3861 				goto free_ch;
3862 			} else {
3863 				srp_free_ch_ib(target, ch);
3864 				target->ch_count = ch - target->ch;
3865 				goto connected;
3866 			}
3867 		}
3868 		multich = true;
3869 	}
3870 
3871 connected:
3872 	target->scsi_host->nr_hw_queues = target->ch_count;
3873 
3874 	ret = srp_add_target(host, target);
3875 	if (ret)
3876 		goto err_disconnect;
3877 
3878 	if (target->state != SRP_TARGET_REMOVED) {
3879 		if (target->using_rdma_cm) {
3880 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3881 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3882 				     be64_to_cpu(target->id_ext),
3883 				     be64_to_cpu(target->ioc_guid),
3884 				     target->sgid.raw, &target->rdma_cm.dst);
3885 		} else {
3886 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3887 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3888 				     be64_to_cpu(target->id_ext),
3889 				     be64_to_cpu(target->ioc_guid),
3890 				     be16_to_cpu(target->ib_cm.pkey),
3891 				     be64_to_cpu(target->ib_cm.service_id),
3892 				     target->sgid.raw,
3893 				     target->ib_cm.orig_dgid.raw);
3894 		}
3895 	}
3896 
3897 	ret = count;
3898 
3899 out:
3900 	mutex_unlock(&host->add_target_mutex);
3901 
3902 put:
3903 	scsi_host_put(target->scsi_host);
3904 	if (ret < 0) {
3905 		/*
3906 		 * If a call to srp_remove_target() has not been scheduled,
3907 		 * drop the network namespace reference that was obtained
3908 		 * earlier in this function.
3909 		 */
3910 		if (target->state != SRP_TARGET_REMOVED)
3911 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3912 		scsi_host_put(target->scsi_host);
3913 	}
3914 
3915 	return ret;
3916 
3917 err_disconnect:
3918 	srp_disconnect_target(target);
3919 
3920 free_ch:
3921 	for (i = 0; i < target->ch_count; i++) {
3922 		ch = &target->ch[i];
3923 		srp_free_ch_ib(target, ch);
3924 	}
3925 
3926 	kfree(target->ch);
3927 	goto out;
3928 }
3929 
3930 static DEVICE_ATTR_WO(add_target);
3931 
3932 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3933 			  char *buf)
3934 {
3935 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3936 
3937 	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3938 }
3939 
3940 static DEVICE_ATTR_RO(ibdev);
3941 
3942 static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3943 			 char *buf)
3944 {
3945 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3946 
3947 	return sysfs_emit(buf, "%u\n", host->port);
3948 }
3949 
3950 static DEVICE_ATTR_RO(port);
3951 
3952 static struct attribute *srp_class_attrs[] = {
3953 	&dev_attr_add_target.attr,
3954 	&dev_attr_ibdev.attr,
3955 	&dev_attr_port.attr,
3956 	NULL
3957 };
3958 
3959 static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
3960 {
3961 	struct srp_host *host;
3962 
3963 	host = kzalloc(sizeof *host, GFP_KERNEL);
3964 	if (!host)
3965 		return NULL;
3966 
3967 	INIT_LIST_HEAD(&host->target_list);
3968 	spin_lock_init(&host->target_lock);
3969 	mutex_init(&host->add_target_mutex);
3970 	host->srp_dev = device;
3971 	host->port = port;
3972 
3973 	device_initialize(&host->dev);
3974 	host->dev.class = &srp_class;
3975 	host->dev.parent = device->dev->dev.parent;
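	/* Results in names like "srp-mlx5_0-1" (illustrative HCA name). */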
3976 	if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
3977 			 port))
3978 		goto put_host;
3979 	if (device_add(&host->dev))
3980 		goto put_host;
3981 
3982 	return host;
3983 
3984 put_host:
3985 	put_device(&host->dev);
3986 	return NULL;
3987 }
3988 
3989 static void srp_rename_dev(struct ib_device *device, void *client_data)
3990 {
3991 	struct srp_device *srp_dev = client_data;
3992 	struct srp_host *host, *tmp_host;
3993 
3994 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3995 		char name[IB_DEVICE_NAME_MAX + 8];
3996 
3997 		snprintf(name, sizeof(name), "srp-%s-%u",
3998 			 dev_name(&device->dev), host->port);
3999 		device_rename(&host->dev, name);
4000 	}
4001 }
4002 
4003 static int srp_add_one(struct ib_device *device)
4004 {
4005 	struct srp_device *srp_dev;
4006 	struct ib_device_attr *attr = &device->attrs;
4007 	struct srp_host *host;
4008 	int mr_page_shift;
4009 	u32 p;
4010 	u64 max_pages_per_mr;
4011 	unsigned int flags = 0;
4012 
4013 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
4014 	if (!srp_dev)
4015 		return -ENOMEM;
4016 
4017 	/*
4018 	 * Use the smallest page size supported by the HCA, down to a
4019 	 * minimum of 4096 bytes. We're unlikely to build large sglists
4020 	 * out of smaller entries.
4021 	 */
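	/*
	 * E.g. page_size_cap = 0xfffff000 (4 KiB and larger supported;
	 * illustrative value) yields ffs() - 1 = 12, i.e. mr_page_size =
	 * 4096 and mr_page_mask = ~0xfffULL.
	 */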
4022 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
4023 	srp_dev->mr_page_size	= 1 << mr_page_shift;
4024 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
4025 	max_pages_per_mr	= attr->max_mr_size;
4026 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
4027 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
4028 		 attr->max_mr_size, srp_dev->mr_page_size,
4029 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
4030 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4031 					  max_pages_per_mr);
4032 
4033 	srp_dev->has_fr = (attr->device_cap_flags &
4034 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
4035 	if (!never_register && !srp_dev->has_fr)
4036 		dev_warn(&device->dev, "FR is not supported\n");
4037 	else if (!never_register &&
4038 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4039 		srp_dev->use_fast_reg = srp_dev->has_fr;
4040 
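	/*
	 * Without per-command memory registration, fall back to the PD's
	 * unsafe global rkey, which exposes all of the initiator's memory
	 * to the target.
	 */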
4041 	if (never_register || !register_always || !srp_dev->has_fr)
4042 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4043 
4044 	if (srp_dev->use_fast_reg) {
4045 		srp_dev->max_pages_per_mr =
4046 			min_t(u32, srp_dev->max_pages_per_mr,
4047 			      attr->max_fast_reg_page_list_len);
4048 	}
4049 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
4050 				   srp_dev->max_pages_per_mr;
4051 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4052 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4053 		 attr->max_fast_reg_page_list_len,
4054 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4055 
4056 	INIT_LIST_HEAD(&srp_dev->dev_list);
4057 
4058 	srp_dev->dev = device;
4059 	srp_dev->pd  = ib_alloc_pd(device, flags);
4060 	if (IS_ERR(srp_dev->pd)) {
4061 		int ret = PTR_ERR(srp_dev->pd);
4062 
4063 		kfree(srp_dev);
4064 		return ret;
4065 	}
4066 
4067 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4068 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4069 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4070 	}
4071 
4072 	rdma_for_each_port (device, p) {
4073 		host = srp_add_port(srp_dev, p);
4074 		if (host)
4075 			list_add_tail(&host->list, &srp_dev->dev_list);
4076 	}
4077 
4078 	ib_set_client_data(device, &srp_client, srp_dev);
4079 	return 0;
4080 }
4081 
4082 static void srp_remove_one(struct ib_device *device, void *client_data)
4083 {
4084 	struct srp_device *srp_dev;
4085 	struct srp_host *host, *tmp_host;
4086 	struct srp_target_port *target;
4087 
4088 	srp_dev = client_data;
4089 
4090 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4091 		/*
4092 		 * Remove the add_target sysfs entry so that no new target ports
4093 		 * can be created.
4094 		 */
4095 		device_del(&host->dev);
4096 
4097 		/*
4098 		 * Remove all target ports.
4099 		 */
4100 		spin_lock(&host->target_lock);
4101 		list_for_each_entry(target, &host->target_list, list)
4102 			srp_queue_remove_work(target);
4103 		spin_unlock(&host->target_lock);
4104 
4105 		/*
4106 		 * srp_queue_remove_work() queues a call to
4107 		 * srp_remove_target(). The latter function cancels
4108 		 * target->tl_err_work, so waiting for the queued remove work
4109 		 * to finish is sufficient.
4110 		 */
4111 		flush_workqueue(srp_remove_wq);
4112 
4113 		put_device(&host->dev);
4114 	}
4115 
4116 	ib_dealloc_pd(srp_dev->pd);
4117 
4118 	kfree(srp_dev);
4119 }
4120 
4121 static struct srp_function_template ib_srp_transport_functions = {
4122 	.has_rport_state	 = true,
4123 	.reset_timer_if_blocked	 = true,
4124 	.reconnect_delay	 = &srp_reconnect_delay,
4125 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4126 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4127 	.reconnect		 = srp_rport_reconnect,
4128 	.rport_delete		 = srp_rport_delete,
4129 	.terminate_rport_io	 = srp_terminate_io,
4130 };
4131 
4132 static int __init srp_init_module(void)
4133 {
4134 	int ret;
4135 
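	/*
	 * Compile-time checks that the SRP wire-format structures have the
	 * sizes mandated by the SRP specification.
	 */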
4136 	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4137 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4138 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4139 	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4140 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4141 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4142 	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4143 
4144 	if (srp_sg_tablesize) {
4145 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4146 		if (!cmd_sg_entries)
4147 			cmd_sg_entries = srp_sg_tablesize;
4148 	}
4149 
4150 	if (!cmd_sg_entries)
4151 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4152 
4153 	if (cmd_sg_entries > 255) {
4154 		pr_warn("Clamping cmd_sg_entries to 255\n");
4155 		cmd_sg_entries = 255;
4156 	}
4157 
4158 	if (!indirect_sg_entries)
4159 		indirect_sg_entries = cmd_sg_entries;
4160 	else if (indirect_sg_entries < cmd_sg_entries) {
4161 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4162 			cmd_sg_entries);
4163 		indirect_sg_entries = cmd_sg_entries;
4164 	}
4165 
4166 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4167 		pr_warn("Clamping indirect_sg_entries to %u\n",
4168 			SG_MAX_SEGMENTS);
4169 		indirect_sg_entries = SG_MAX_SEGMENTS;
4170 	}
4171 
4172 	srp_remove_wq = create_workqueue("srp_remove");
4173 	if (!srp_remove_wq) {
4174 		ret = -ENOMEM;
4175 		goto out;
4176 	}
4177 
4178 	ret = -ENOMEM;
4179 	ib_srp_transport_template =
4180 		srp_attach_transport(&ib_srp_transport_functions);
4181 	if (!ib_srp_transport_template)
4182 		goto destroy_wq;
4183 
4184 	ret = class_register(&srp_class);
4185 	if (ret) {
4186 		pr_err("couldn't register class infiniband_srp\n");
4187 		goto release_tr;
4188 	}
4189 
4190 	ib_sa_register_client(&srp_sa_client);
4191 
4192 	ret = ib_register_client(&srp_client);
4193 	if (ret) {
4194 		pr_err("couldn't register IB client\n");
4195 		goto unreg_sa;
4196 	}
4197 
4198 out:
4199 	return ret;
4200 
4201 unreg_sa:
4202 	ib_sa_unregister_client(&srp_sa_client);
4203 	class_unregister(&srp_class);
4204 
4205 release_tr:
4206 	srp_release_transport(ib_srp_transport_template);
4207 
4208 destroy_wq:
4209 	destroy_workqueue(srp_remove_wq);
4210 	goto out;
4211 }
4212 
4213 static void __exit srp_cleanup_module(void)
4214 {
4215 	ib_unregister_client(&srp_client);
4216 	ib_sa_unregister_client(&srp_sa_client);
4217 	class_unregister(&srp_class);
4218 	srp_release_transport(ib_srp_transport_template);
4219 	destroy_workqueue(srp_remove_wq);
4220 }
4221 
4222 module_init(srp_init_module);
4223 module_exit(srp_cleanup_module);
4224