xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision d30aca3eeffc18452e5cc5c4e59f1a4da2bd2f12)
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/hex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

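/*
 * Illustrative example using the default values above: with
 * reconnect_delay=10, fast_io_fail_tmo=15 and dev_loss_tmo=600, a
 * transport layer error triggers a reconnect attempt every 10 seconds,
 * all I/O is failed after 15 seconds, and the SCSI host is removed only
 * if the target stays unreachable for 600 seconds.
 */
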
static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
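
/*
 * These ops let the three timeout parameters accept either a number or
 * "off". Illustrative shell usage (the sysfs path follows from the module
 * and parameter names):
 *
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *   echo 20 > /sys/module/ib_srp/parameters/reconnect_delay
 *
 * srp_tmo_set() rejects combinations that srp_tmo_valid() considers
 * inconsistent, in which case the write fails with an error code.
 */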

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

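/*
 * Sketch of the descriptor lifecycle as used elsewhere in this driver:
 * srp_map_finish_fr() obtains a descriptor with srp_fr_pool_get() and
 * posts an IB_WR_REG_MR work request for it; srp_unmap_data() later posts
 * an IB_WR_LOCAL_INV work request for the rkey and returns the descriptor
 * with srp_fr_pool_put().
 */
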
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it. This prevents the receive
 * completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the error handler may keep trying to perform
	 * recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    unsigned int num_paths, void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->ib_cm.path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}

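/*
 * Rough numbers for illustration: IB CM timeouts are encoded as
 * 4.096 us * 2^n, so the fallback subnet_timeout of 18 corresponds to
 * about one second and the "subnet_timeout + 2" used for the CM response
 * timeouts in srp_send_req() below to about four seconds.
 */
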
static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags	= req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi,     &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi,     &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi,     &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi,     &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

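	/*
	 * Worked example with hypothetical identifiers: for a rev. 16a
	 * target with id_ext 0x1 and ioc_guid 0x0002c90200402bec, the
	 * target port ID becomes 00:00:00:00:00:00:00:01 followed by
	 * 00:02:c9:02:00:40:2b:ec, i.e. ID extension first, GUID second.
	 */
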
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);

	kfree(req->fr_list);
	if (req->indirect_dma_addr) {
		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
				    target->indirect_size,
				    DMA_TO_DEVICE);
	}
	kfree(req->indirect_desc);

	return 0;
}

static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);
	dma_addr_t dma_addr;
	int ret = -ENOMEM;

	if (srp_dev->use_fast_reg) {
		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
					GFP_KERNEL);
		if (!req->fr_list)
			goto out;
	}
	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
	if (!req->indirect_desc)
		goto out;

	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
				     target->indirect_size,
				     DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr)) {
		srp_exit_cmd_priv(shost, cmd);
		goto out;
	}

	req->indirect_dma_addr = dma_addr;
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	const struct attribute_group **g;
	struct attribute **attr;

	for (g = shost->hostt->shost_groups; *g; ++g) {
		for (attr = (*g)->attrs; *attr; ++attr) {
			struct device_attribute *dev_attr =
				container_of(*attr, typeof(*dev_attr), attr);

			device_remove_file(&shost->shost_dev, dev_attr);
		}
	}
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, NULL);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
				  "Queueing INV WR for rkey %#x failed (%d)\n",
				  (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scsi_done(scmnd);
	}
}

struct srp_terminate_context {
	struct srp_target_port *srp_target;
	int scsi_result;
};

static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
	struct srp_terminate_context *context = context_ptr;
	struct srp_target_port *target = context->srp_target;
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	struct srp_request *req = scsi_cmd_priv(scmnd);

	srp_finish_req(ch, req, NULL, context->scsi_result);

	return true;
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_terminate_context context = { .srp_target = target,
		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };

	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}

/* Calculate maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{
	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
		sizeof(struct srp_indirect_buf) +
		cmd_sg_cnt * sizeof(struct srp_direct_buf);

	if (use_imm_data)
		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
				 srp_max_imm_data);

	if (max_it_iu_size)
		max_iu_len = min(max_iu_len, max_it_iu_size);

	pr_debug("max_iu_len = %d\n", max_iu_len);

	return max_iu_len;
}

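/*
 * Worked example, assuming the usual on-the-wire sizes (48-byte srp_cmd,
 * 16-byte SRP_MAX_ADD_CDB_LEN, 20-byte srp_indirect_buf, 16-byte
 * srp_direct_buf): for cmd_sg_cnt = 12 and use_imm_data = false,
 * max_iu_len = 48 + 16 + 20 + 12 * 16 = 276 bytes, which is then capped
 * by max_it_iu_size if the target specified one.
 */
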
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						srp_use_imm_data,
						target->max_it_iu_size);
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	{
		struct srp_terminate_context context = {
			.srp_target = target, .scsi_result = DID_RESET << 16};

		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
				    &context);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_rkey) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     target->global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, NULL);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}

/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * return value.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u64 data_len;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	req->cmd->num_sge = 1;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof(struct srp_cmd) + cmd->add_cdb_len;

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);
	data_len = scsi_bufflen(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	if (ch->use_imm_data &&
	    count <= ch->max_imm_sge &&
	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
		struct srp_imm_buf *buf;
		struct ib_sge *sge = &req->cmd->sge[1];

		fmt = SRP_DATA_DESC_IMM;
		len = SRP_IMM_DATA_OFFSET;
		req->nmdesc = 0;
		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->len = cpu_to_be32(data_len);
		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
		for_each_sg(scat, sg, count, i) {
			sge[i].addr   = sg_dma_address(sg);
			sge[i].length = sg_dma_len(sg);
			sge[i].lkey   = target->lkey;
		}
		req->cmd->num_sge += count;
		goto map_complete;
	}

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_direct_buf);

	if (count == 1 && target->global_rkey) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->global_rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_rkey) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}

1807 /*
1808  * Return an IU to the free pool, and a credit unless it carried a response.
1809  */
1810 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1811 			  enum srp_iu_type iu_type)
1812 {
1813 	unsigned long flags;
1814 
1815 	spin_lock_irqsave(&ch->lock, flags);
1816 	list_add(&iu->list, &ch->free_tx);
1817 	if (iu_type != SRP_IU_RSP)
1818 		++ch->req_lim;
1819 	spin_unlock_irqrestore(&ch->lock, flags);
1820 }
1821 
1822 /*
1823  * Must be called with ch->lock held to protect req_lim and free_tx.
1824  * If the IU is not sent, it must be returned using srp_put_tx_iu().
1825  *
1826  * Note:
1827  * An upper limit for the number of allocated information units for each
1828  * request type is:
1829  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1830  *   more than Scsi_Host.can_queue requests.
1831  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1832  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1833  *   one unanswered SRP request to an initiator.
1834  */
1835 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1836 				      enum srp_iu_type iu_type)
1837 {
1838 	struct srp_target_port *target = ch->target;
1839 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1840 	struct srp_iu *iu;
1841 
1842 	lockdep_assert_held(&ch->lock);
1843 
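	/*
	 * Poll the send CQ so that IUs whose send has completed are put back
	 * onto free_tx (see srp_send_done()) before the list is tested below.
	 */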
1844 	ib_process_cq_direct(ch->send_cq, -1);
1845 
1846 	if (list_empty(&ch->free_tx))
1847 		return NULL;
1848 
1849 	/* Initiator responses to target requests do not consume credits */
1850 	if (iu_type != SRP_IU_RSP) {
1851 		if (ch->req_lim <= rsv) {
1852 			++target->zero_req_lim;
1853 			return NULL;
1854 		}
1855 
1856 		--ch->req_lim;
1857 	}
1858 
1859 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1860 	list_del(&iu->list);
1861 	return iu;
1862 }
1863 
1864 /*
1865  * Note: if this function is called from inside ib_drain_sq() then it will
1866  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1867  * with status IB_WC_SUCCESS then that's a bug.
1868  */
1869 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1870 {
1871 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1872 	struct srp_rdma_ch *ch = cq->cq_context;
1873 
1874 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1875 		srp_handle_qp_err(cq, wc, "SEND");
1876 		return;
1877 	}
1878 
1879 	lockdep_assert_held(&ch->lock);
1880 
1881 	list_add(&iu->list, &ch->free_tx);
1882 }
1883 
1884 /**
1885  * srp_post_send() - send an SRP information unit
1886  * @ch: RDMA channel over which to send the information unit.
1887  * @iu: Information unit to send.
1888  * @len: Length of the information unit excluding immediate data.
1889  */
1890 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1891 {
1892 	struct srp_target_port *target = ch->target;
1893 	struct ib_send_wr wr;
1894 
1895 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1896 		return -EINVAL;
1897 
1898 	iu->sge[0].addr   = iu->dma;
1899 	iu->sge[0].length = len;
1900 	iu->sge[0].lkey   = target->lkey;
1901 
1902 	iu->cqe.done = srp_send_done;
1903 
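	/*
	 * Request a completion for every send so that srp_send_done() can
	 * return the IU to the free list.
	 */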
1904 	wr.next       = NULL;
1905 	wr.wr_cqe     = &iu->cqe;
1906 	wr.sg_list    = &iu->sge[0];
1907 	wr.num_sge    = iu->num_sge;
1908 	wr.opcode     = IB_WR_SEND;
1909 	wr.send_flags = IB_SEND_SIGNALED;
1910 
1911 	return ib_post_send(ch->qp, &wr, NULL);
1912 }
1913 
1914 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1915 {
1916 	struct srp_target_port *target = ch->target;
1917 	struct ib_recv_wr wr;
1918 	struct ib_sge list;
1919 
1920 	list.addr   = iu->dma;
1921 	list.length = iu->size;
1922 	list.lkey   = target->lkey;
1923 
1924 	iu->cqe.done = srp_recv_done;
1925 
1926 	wr.next     = NULL;
1927 	wr.wr_cqe   = &iu->cqe;
1928 	wr.sg_list  = &list;
1929 	wr.num_sge  = 1;
1930 
1931 	return ib_post_recv(ch->qp, &wr, NULL);
1932 }
1933 
1934 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1935 {
1936 	struct srp_target_port *target = ch->target;
1937 	struct srp_request *req;
1938 	struct scsi_cmnd *scmnd;
1939 	unsigned long flags;
1940 
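	/*
	 * Task management responses carry the SRP_TAG_TSK_MGMT bit in their
	 * tag (see srp_send_tsk_mgmt()) and hence never collide with SCSI
	 * command tags.
	 */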
1941 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1942 		spin_lock_irqsave(&ch->lock, flags);
1943 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1944 		if (rsp->tag == ch->tsk_mgmt_tag) {
1945 			ch->tsk_mgmt_status = -1;
1946 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1947 				ch->tsk_mgmt_status = rsp->data[3];
1948 			complete(&ch->tsk_mgmt_done);
1949 		} else {
1950 			shost_printk(KERN_ERR, target->scsi_host,
1951 				     "Received tsk mgmt response too late for tag %#llx\n",
1952 				     rsp->tag);
1953 		}
1954 		spin_unlock_irqrestore(&ch->lock, flags);
1955 	} else {
1956 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1957 		if (scmnd) {
1958 			req = scsi_cmd_priv(scmnd);
1959 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1960 		}
1961 		if (!scmnd) {
1962 			shost_printk(KERN_ERR, target->scsi_host,
1963 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1964 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1965 
1966 			spin_lock_irqsave(&ch->lock, flags);
1967 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1968 			spin_unlock_irqrestore(&ch->lock, flags);
1969 
1970 			return;
1971 		}
1972 		scmnd->result = rsp->status;
1973 
1974 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1975 			memcpy(scmnd->sense_buffer, rsp->data +
1976 			       be32_to_cpu(rsp->resp_data_len),
1977 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1978 				     SCSI_SENSE_BUFFERSIZE));
1979 		}
1980 
1981 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1982 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1983 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1984 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1985 
1986 		srp_free_req(ch, req, scmnd,
1987 			     be32_to_cpu(rsp->req_lim_delta));
1988 
1989 		scsi_done(scmnd);
1990 	}
1991 }
1992 
1993 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1994 			       void *rsp, int len)
1995 {
1996 	struct srp_target_port *target = ch->target;
1997 	struct ib_device *dev = target->srp_host->srp_dev->dev;
1998 	unsigned long flags;
1999 	struct srp_iu *iu;
2000 	int err;
2001 
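	/*
	 * ch->lock protects both req_lim and the free IU list, so apply the
	 * credit delta and grab a response IU in a single critical section.
	 */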
2002 	spin_lock_irqsave(&ch->lock, flags);
2003 	ch->req_lim += req_delta;
2004 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2005 	spin_unlock_irqrestore(&ch->lock, flags);
2006 
2007 	if (!iu) {
2008 		shost_printk(KERN_ERR, target->scsi_host, PFX
2009 			     "no IU available to send response\n");
2010 		return 1;
2011 	}
2012 
2013 	iu->num_sge = 1;
2014 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2015 	memcpy(iu->buf, rsp, len);
2016 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2017 
2018 	err = srp_post_send(ch, iu, len);
2019 	if (err) {
2020 		shost_printk(KERN_ERR, target->scsi_host, PFX
2021 			     "unable to post response: %d\n", err);
2022 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2023 	}
2024 
2025 	return err;
2026 }
2027 
2028 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2029 				 struct srp_cred_req *req)
2030 {
2031 	struct srp_cred_rsp rsp = {
2032 		.opcode = SRP_CRED_RSP,
2033 		.tag = req->tag,
2034 	};
2035 	s32 delta = be32_to_cpu(req->req_lim_delta);
2036 
2037 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2038 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2039 			     "problems processing SRP_CRED_REQ\n");
2040 }
2041 
2042 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2043 				struct srp_aer_req *req)
2044 {
2045 	struct srp_target_port *target = ch->target;
2046 	struct srp_aer_rsp rsp = {
2047 		.opcode = SRP_AER_RSP,
2048 		.tag = req->tag,
2049 	};
2050 	s32 delta = be32_to_cpu(req->req_lim_delta);
2051 
2052 	shost_printk(KERN_ERR, target->scsi_host, PFX
2053 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2054 
2055 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2056 		shost_printk(KERN_ERR, target->scsi_host, PFX
2057 			     "problems processing SRP_AER_REQ\n");
2058 }
2059 
2060 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2061 {
2062 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2063 	struct srp_rdma_ch *ch = cq->cq_context;
2064 	struct srp_target_port *target = ch->target;
2065 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2066 	int res;
2067 	u8 opcode;
2068 
2069 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2070 		srp_handle_qp_err(cq, wc, "RECV");
2071 		return;
2072 	}
2073 
2074 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2075 				   DMA_FROM_DEVICE);
2076 
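	/* The opcode is the first byte of every SRP information unit. */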
2077 	opcode = *(u8 *) iu->buf;
2078 
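	/* Change this test to if (1) to hex-dump every received IU. */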
2079 	if (0) {
2080 		shost_printk(KERN_ERR, target->scsi_host,
2081 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2082 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2083 			       iu->buf, wc->byte_len, true);
2084 	}
2085 
2086 	switch (opcode) {
2087 	case SRP_RSP:
2088 		srp_process_rsp(ch, iu->buf);
2089 		break;
2090 
2091 	case SRP_CRED_REQ:
2092 		srp_process_cred_req(ch, iu->buf);
2093 		break;
2094 
2095 	case SRP_AER_REQ:
2096 		srp_process_aer_req(ch, iu->buf);
2097 		break;
2098 
2099 	case SRP_T_LOGOUT:
2100 		/* XXX Handle target logout */
2101 		shost_printk(KERN_WARNING, target->scsi_host,
2102 			     PFX "Got target logout request\n");
2103 		break;
2104 
2105 	default:
2106 		shost_printk(KERN_WARNING, target->scsi_host,
2107 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2108 		break;
2109 	}
2110 
2111 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2112 				      DMA_FROM_DEVICE);
2113 
2114 	res = srp_post_recv(ch, iu);
2115 	if (res != 0)
2116 		shost_printk(KERN_ERR, target->scsi_host,
2117 			     PFX "Recv failed with error code %d\n", res);
2118 }
2119 
2120 /**
2121  * srp_tl_err_work() - handle a transport layer error
2122  * @work: Work structure embedded in an SRP target port.
2123  *
2124  * Note: This function may get invoked before the rport has been created,
2125  * hence the target->rport test.
2126  */
2127 static void srp_tl_err_work(struct work_struct *work)
2128 {
2129 	struct srp_target_port *target;
2130 
2131 	target = container_of(work, struct srp_target_port, tl_err_work);
2132 	if (target->rport)
2133 		srp_start_tl_fail_timers(target->rport);
2134 }
2135 
2136 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2137 		const char *opname)
2138 {
2139 	struct srp_rdma_ch *ch = cq->cq_context;
2140 	struct srp_target_port *target = ch->target;
2141 
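	/*
	 * Only report the first error on a connected channel; once
	 * qp_in_error has been set, subsequent flush errors are expected and
	 * would only flood the log.
	 */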
2142 	if (ch->connected && !target->qp_in_error) {
2143 		shost_printk(KERN_ERR, target->scsi_host,
2144 			     PFX "failed %s status %s (%d) for CQE %p\n",
2145 			     opname, ib_wc_status_msg(wc->status), wc->status,
2146 			     wc->wr_cqe);
2147 		queue_work(system_long_wq, &target->tl_err_work);
2148 	}
2149 	target->qp_in_error = true;
2150 }
2151 
2152 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2153 {
2154 	struct request *rq = scsi_cmd_to_rq(scmnd);
2155 	struct srp_target_port *target = host_to_target(shost);
2156 	struct srp_rdma_ch *ch;
2157 	struct srp_request *req = scsi_cmd_priv(scmnd);
2158 	struct srp_iu *iu;
2159 	struct srp_cmd *cmd;
2160 	struct ib_device *dev;
2161 	unsigned long flags;
2162 	u32 tag;
2163 	int len, ret;
2164 
2165 	scmnd->result = srp_chkready(target->rport);
2166 	if (unlikely(scmnd->result))
2167 		goto err;
2168 
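	/*
	 * The blk-mq hardware queue index embedded in the unique tag selects
	 * the RDMA channel: each hardware queue maps onto exactly one channel.
	 */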
2169 	WARN_ON_ONCE(rq->tag < 0);
2170 	tag = blk_mq_unique_tag(rq);
2171 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2172 
2173 	spin_lock_irqsave(&ch->lock, flags);
2174 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2175 	spin_unlock_irqrestore(&ch->lock, flags);
2176 
2177 	if (!iu)
2178 		goto err;
2179 
2180 	dev = target->srp_host->srp_dev->dev;
2181 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2182 				   DMA_TO_DEVICE);
2183 
2184 	cmd = iu->buf;
2185 	memset(cmd, 0, sizeof(*cmd));
2186 
2187 	cmd->opcode = SRP_CMD;
2188 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2189 	cmd->tag    = tag;
2190 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2191 	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2192 		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2193 					    4);
2194 		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2195 			goto err_iu;
2196 	}
2197 
2198 	req->scmnd    = scmnd;
2199 	req->cmd      = iu;
2200 
2201 	len = srp_map_data(scmnd, ch, req);
2202 	if (len < 0) {
2203 		shost_printk(KERN_ERR, target->scsi_host,
2204 			     PFX "Failed to map data (%d)\n", len);
2205 		/*
2206 		 * If we ran out of memory descriptors (-ENOMEM) because an
2207 		 * application is queuing many requests with more than
2208 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2209 		 * to reduce queue depth temporarily.
2210 		 */
2211 		scmnd->result = len == -ENOMEM ?
2212 			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2213 		goto err_iu;
2214 	}
2215 
2216 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2217 				      DMA_TO_DEVICE);
2218 
2219 	if (srp_post_send(ch, iu, len)) {
2220 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2221 		scmnd->result = DID_ERROR << 16;
2222 		goto err_unmap;
2223 	}
2224 
2225 	return 0;
2226 
2227 err_unmap:
2228 	srp_unmap_data(scmnd, ch, req);
2229 
2230 err_iu:
2231 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2232 
2233 	/*
2234 	 * Prevent the loops that iterate over the request ring from
2235 	 * encountering a dangling SCSI command pointer.
2236 	 */
2237 	req->scmnd = NULL;
2238 
2239 err:
2240 	if (scmnd->result) {
2241 		scsi_done(scmnd);
2242 		ret = 0;
2243 	} else {
2244 		ret = SCSI_MLQUEUE_HOST_BUSY;
2245 	}
2246 
2247 	return ret;
2248 }
2249 
2250 /*
2251  * Note: the resources allocated in this function are freed in
2252  * srp_free_ch_ib().
2253  */
2254 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2255 {
2256 	struct srp_target_port *target = ch->target;
2257 	int i;
2258 
2259 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2260 			      GFP_KERNEL);
2261 	if (!ch->rx_ring)
2262 		goto err_no_ring;
2263 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2264 			      GFP_KERNEL);
2265 	if (!ch->tx_ring)
2266 		goto err_no_ring;
2267 
2268 	for (i = 0; i < target->queue_size; ++i) {
2269 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2270 					      ch->max_ti_iu_len,
2271 					      GFP_KERNEL, DMA_FROM_DEVICE);
2272 		if (!ch->rx_ring[i])
2273 			goto err;
2274 	}
2275 
2276 	for (i = 0; i < target->queue_size; ++i) {
2277 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2278 					      ch->max_it_iu_len,
2279 					      GFP_KERNEL, DMA_TO_DEVICE);
2280 		if (!ch->tx_ring[i])
2281 			goto err;
2282 
2283 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2284 	}
2285 
2286 	return 0;
2287 
2288 err:
2289 	for (i = 0; i < target->queue_size; ++i) {
2290 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2291 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2292 	}
2293 
2295 err_no_ring:
2296 	kfree(ch->tx_ring);
2297 	ch->tx_ring = NULL;
2298 	kfree(ch->rx_ring);
2299 	ch->rx_ring = NULL;
2300 
2301 	return -ENOMEM;
2302 }
2303 
2304 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2305 {
2306 	uint64_t T_tr_ns, max_compl_time_ms;
2307 	uint32_t rq_tmo_jiffies;
2308 
2309 	/*
2310 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2311 	 * table 91), both the QP timeout and the retry count have to be set
2312 	 * for RC QPs during the RTR to RTS transition.
2313 	 */
2314 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2315 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2316 
2317 	/*
2318 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2319 	 * it can take before an error completion is generated. See also
2320 	 * C9-140..142 in the IBTA spec for more information about how to
2321 	 * convert the QP Local ACK Timeout value to nanoseconds.
2322 	 */
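	/* Local ACK timeout T_tr = 4.096 us * 2^timeout, expressed in ns. */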
2323 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2324 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2325 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2326 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2327 
2328 	return rq_tmo_jiffies;
2329 }
2330 
2331 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2332 			       const struct srp_login_rsp *lrsp,
2333 			       struct srp_rdma_ch *ch)
2334 {
2335 	struct srp_target_port *target = ch->target;
2336 	struct ib_qp_attr *qp_attr = NULL;
2337 	int attr_mask = 0;
2338 	int ret = 0;
2339 	int i;
2340 
2341 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2342 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2343 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2344 		ch->use_imm_data  = srp_use_imm_data &&
2345 			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2346 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2347 						      ch->use_imm_data,
2348 						      target->max_it_iu_size);
2349 		WARN_ON_ONCE(ch->max_it_iu_len >
2350 			     be32_to_cpu(lrsp->max_it_iu_len));
2351 
2352 		if (ch->use_imm_data)
2353 			shost_printk(KERN_DEBUG, target->scsi_host,
2354 				     PFX "using immediate data\n");
2355 
2356 		/*
2357 		 * Reserve credits for task management so we don't
2358 		 * bounce requests back to the SCSI mid-layer.
2359 		 */
2360 		target->scsi_host->can_queue
2361 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2362 			      target->scsi_host->can_queue);
2363 		target->scsi_host->cmd_per_lun
2364 			= min_t(int, target->scsi_host->can_queue,
2365 				target->scsi_host->cmd_per_lun);
2366 	} else {
2367 		shost_printk(KERN_WARNING, target->scsi_host,
2368 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2369 		ret = -ECONNRESET;
2370 		goto error;
2371 	}
2372 
2373 	if (!ch->rx_ring) {
2374 		ret = srp_alloc_iu_bufs(ch);
2375 		if (ret)
2376 			goto error;
2377 	}
2378 
2379 	for (i = 0; i < target->queue_size; i++) {
2380 		struct srp_iu *iu = ch->rx_ring[i];
2381 
2382 		ret = srp_post_recv(ch, iu);
2383 		if (ret)
2384 			goto error;
2385 	}
2386 
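	/*
	 * The RDMA CM transitions the QP to RTR/RTS itself; with the IB CM
	 * the initiator has to perform those transitions explicitly and
	 * acknowledge the REP with an RTU.
	 */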
2387 	if (!target->using_rdma_cm) {
2388 		ret = -ENOMEM;
2389 		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2390 		if (!qp_attr)
2391 			goto error;
2392 
2393 		qp_attr->qp_state = IB_QPS_RTR;
2394 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2395 		if (ret)
2396 			goto error_free;
2397 
2398 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2399 		if (ret)
2400 			goto error_free;
2401 
2402 		qp_attr->qp_state = IB_QPS_RTS;
2403 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2404 		if (ret)
2405 			goto error_free;
2406 
2407 		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2408 
2409 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2410 		if (ret)
2411 			goto error_free;
2412 
2413 		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2414 	}
2415 
2416 error_free:
2417 	kfree(qp_attr);
2418 
2419 error:
2420 	ch->status = ret;
2421 }
2422 
2423 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2424 				  const struct ib_cm_event *event,
2425 				  struct srp_rdma_ch *ch)
2426 {
2427 	struct srp_target_port *target = ch->target;
2428 	struct Scsi_Host *shost = target->scsi_host;
2429 	struct ib_class_port_info *cpi;
2430 	int opcode;
2431 	u16 dlid;
2432 
2433 	switch (event->param.rej_rcvd.reason) {
2434 	case IB_CM_REJ_PORT_CM_REDIRECT:
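		/*
		 * The Additional Reject Information (ARI) holds a
		 * ClassPortInfo structure describing where to redirect the
		 * connection attempt.
		 */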
2435 		cpi = event->param.rej_rcvd.ari;
2436 		dlid = be16_to_cpu(cpi->redirect_lid);
2437 		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2438 		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2439 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2440 		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2441 
2442 		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2443 		break;
2444 
2445 	case IB_CM_REJ_PORT_REDIRECT:
2446 		if (srp_target_is_topspin(target)) {
2447 			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2448 
2449 			/*
2450 			 * Topspin/Cisco SRP gateways incorrectly send
2451 			 * reject reason code 25 when they mean 24
2452 			 * (port redirect).
2453 			 */
2454 			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2455 
2456 			shost_printk(KERN_DEBUG, shost,
2457 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2458 				     be64_to_cpu(dgid->global.subnet_prefix),
2459 				     be64_to_cpu(dgid->global.interface_id));
2460 
2461 			ch->status = SRP_PORT_REDIRECT;
2462 		} else {
2463 			shost_printk(KERN_WARNING, shost,
2464 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2465 			ch->status = -ECONNRESET;
2466 		}
2467 		break;
2468 
2469 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2470 		shost_printk(KERN_WARNING, shost,
2471 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2472 		ch->status = -ECONNRESET;
2473 		break;
2474 
2475 	case IB_CM_REJ_CONSUMER_DEFINED:
2476 		opcode = *(u8 *) event->private_data;
2477 		if (opcode == SRP_LOGIN_REJ) {
2478 			struct srp_login_rej *rej = event->private_data;
2479 			u32 reason = be32_to_cpu(rej->reason);
2480 
2481 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2482 				shost_printk(KERN_WARNING, shost,
2483 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2484 			else
2485 				shost_printk(KERN_WARNING, shost, PFX
2486 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2487 					     target->sgid.raw,
2488 					     target->ib_cm.orig_dgid.raw,
2489 					     reason);
2490 		} else
2491 			shost_printk(KERN_WARNING, shost,
2492 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2493 				     " opcode 0x%02x\n", opcode);
2494 		ch->status = -ECONNRESET;
2495 		break;
2496 
2497 	case IB_CM_REJ_STALE_CONN:
2498 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2499 		ch->status = SRP_STALE_CONN;
2500 		break;
2501 
2502 	default:
2503 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2504 			     event->param.rej_rcvd.reason);
2505 		ch->status = -ECONNRESET;
2506 	}
2507 }
2508 
2509 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2510 			     const struct ib_cm_event *event)
2511 {
2512 	struct srp_rdma_ch *ch = cm_id->context;
2513 	struct srp_target_port *target = ch->target;
2514 	int comp = 0;
2515 
2516 	switch (event->event) {
2517 	case IB_CM_REQ_ERROR:
2518 		shost_printk(KERN_DEBUG, target->scsi_host,
2519 			     PFX "Sending CM REQ failed\n");
2520 		comp = 1;
2521 		ch->status = -ECONNRESET;
2522 		break;
2523 
2524 	case IB_CM_REP_RECEIVED:
2525 		comp = 1;
2526 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2527 		break;
2528 
2529 	case IB_CM_REJ_RECEIVED:
2530 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2531 		comp = 1;
2532 
2533 		srp_ib_cm_rej_handler(cm_id, event, ch);
2534 		break;
2535 
2536 	case IB_CM_DREQ_RECEIVED:
2537 		shost_printk(KERN_WARNING, target->scsi_host,
2538 			     PFX "DREQ received - connection closed\n");
2539 		ch->connected = false;
2540 		if (ib_send_cm_drep(cm_id, NULL, 0))
2541 			shost_printk(KERN_ERR, target->scsi_host,
2542 				     PFX "Sending CM DREP failed\n");
2543 		queue_work(system_long_wq, &target->tl_err_work);
2544 		break;
2545 
2546 	case IB_CM_TIMEWAIT_EXIT:
2547 		shost_printk(KERN_ERR, target->scsi_host,
2548 			     PFX "connection closed\n");
2549 		comp = 1;
2550 
2551 		ch->status = 0;
2552 		break;
2553 
2554 	case IB_CM_MRA_RECEIVED:
2555 	case IB_CM_DREQ_ERROR:
2556 	case IB_CM_DREP_RECEIVED:
2557 		break;
2558 
2559 	default:
2560 		shost_printk(KERN_WARNING, target->scsi_host,
2561 			     PFX "Unhandled CM event %d\n", event->event);
2562 		break;
2563 	}
2564 
2565 	if (comp)
2566 		complete(&ch->done);
2567 
2568 	return 0;
2569 }
2570 
2571 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2572 				    struct rdma_cm_event *event)
2573 {
2574 	struct srp_target_port *target = ch->target;
2575 	struct Scsi_Host *shost = target->scsi_host;
2576 	int opcode;
2577 
2578 	switch (event->status) {
2579 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2580 		shost_printk(KERN_WARNING, shost,
2581 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2582 		ch->status = -ECONNRESET;
2583 		break;
2584 
2585 	case IB_CM_REJ_CONSUMER_DEFINED:
2586 		opcode = *(u8 *) event->param.conn.private_data;
2587 		if (opcode == SRP_LOGIN_REJ) {
2588 			struct srp_login_rej *rej =
2589 				(struct srp_login_rej *)
2590 				event->param.conn.private_data;
2591 			u32 reason = be32_to_cpu(rej->reason);
2592 
2593 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2594 				shost_printk(KERN_WARNING, shost,
2595 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2596 			else
2597 				shost_printk(KERN_WARNING, shost,
2598 					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2599 		} else {
2600 			shost_printk(KERN_WARNING, shost,
2601 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2602 				     opcode);
2603 		}
2604 		ch->status = -ECONNRESET;
2605 		break;
2606 
2607 	case IB_CM_REJ_STALE_CONN:
2608 		shost_printk(KERN_WARNING, shost,
2609 			     "  REJ reason: stale connection\n");
2610 		ch->status = SRP_STALE_CONN;
2611 		break;
2612 
2613 	default:
2614 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2615 			     event->status);
2616 		ch->status = -ECONNRESET;
2617 		break;
2618 	}
2619 }
2620 
2621 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2622 			       struct rdma_cm_event *event)
2623 {
2624 	struct srp_rdma_ch *ch = cm_id->context;
2625 	struct srp_target_port *target = ch->target;
2626 	int comp = 0;
2627 
2628 	switch (event->event) {
2629 	case RDMA_CM_EVENT_ADDR_RESOLVED:
2630 		ch->status = 0;
2631 		comp = 1;
2632 		break;
2633 
2634 	case RDMA_CM_EVENT_ADDR_ERROR:
2635 		ch->status = -ENXIO;
2636 		comp = 1;
2637 		break;
2638 
2639 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2640 		ch->status = 0;
2641 		comp = 1;
2642 		break;
2643 
2644 	case RDMA_CM_EVENT_ROUTE_ERROR:
2645 	case RDMA_CM_EVENT_UNREACHABLE:
2646 		ch->status = -EHOSTUNREACH;
2647 		comp = 1;
2648 		break;
2649 
2650 	case RDMA_CM_EVENT_CONNECT_ERROR:
2651 		shost_printk(KERN_DEBUG, target->scsi_host,
2652 			     PFX "Sending CM REQ failed\n");
2653 		comp = 1;
2654 		ch->status = -ECONNRESET;
2655 		break;
2656 
2657 	case RDMA_CM_EVENT_ESTABLISHED:
2658 		comp = 1;
2659 		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2660 		break;
2661 
2662 	case RDMA_CM_EVENT_REJECTED:
2663 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2664 		comp = 1;
2665 
2666 		srp_rdma_cm_rej_handler(ch, event);
2667 		break;
2668 
2669 	case RDMA_CM_EVENT_DISCONNECTED:
2670 		if (ch->connected) {
2671 			shost_printk(KERN_WARNING, target->scsi_host,
2672 				     PFX "received DREQ\n");
2673 			rdma_disconnect(ch->rdma_cm.cm_id);
2674 			comp = 1;
2675 			ch->status = 0;
2676 			queue_work(system_long_wq, &target->tl_err_work);
2677 		}
2678 		break;
2679 
2680 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2681 		shost_printk(KERN_ERR, target->scsi_host,
2682 			     PFX "connection closed\n");
2683 
2684 		comp = 1;
2685 		ch->status = 0;
2686 		break;
2687 
2688 	default:
2689 		shost_printk(KERN_WARNING, target->scsi_host,
2690 			     PFX "Unhandled CM event %d\n", event->event);
2691 		break;
2692 	}
2693 
2694 	if (comp)
2695 		complete(&ch->done);
2696 
2697 	return 0;
2698 }
2699 
2700 /**
2701  * srp_change_queue_depth() - set the device queue depth
2702  * @sdev: SCSI device struct
2703  * @qdepth: requested queue depth
2704  *
2705  * Returns the new queue depth.
2706  */
2707 static int
2708 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2709 {
2710 	if (!sdev->tagged_supported)
2711 		qdepth = 1;
2712 	return scsi_change_queue_depth(sdev, qdepth);
2713 }
2714 
2715 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2716 			     u8 func, u8 *status)
2717 {
2718 	struct srp_target_port *target = ch->target;
2719 	struct srp_rport *rport = target->rport;
2720 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2721 	struct srp_iu *iu;
2722 	struct srp_tsk_mgmt *tsk_mgmt;
2723 	int res;
2724 
2725 	if (!ch->connected || target->qp_in_error)
2726 		return -1;
2727 
2728 	/*
2729 	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2730 	 * invoked while a task management function is being sent.
2731 	 */
2732 	mutex_lock(&rport->mutex);
2733 	spin_lock_irq(&ch->lock);
2734 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2735 	spin_unlock_irq(&ch->lock);
2736 
2737 	if (!iu) {
2738 		mutex_unlock(&rport->mutex);
2739 
2740 		return -1;
2741 	}
2742 
2743 	iu->num_sge = 1;
2744 
2745 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
2746 				   DMA_TO_DEVICE);
2747 	tsk_mgmt = iu->buf;
2748 	memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));
2749 
2750 	tsk_mgmt->opcode	= SRP_TSK_MGMT;
2751 	int_to_scsilun(lun, &tsk_mgmt->lun);
2752 	tsk_mgmt->tsk_mgmt_func = func;
2753 	tsk_mgmt->task_tag	= req_tag;
2754 
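	/*
	 * Generate a tag with the SRP_TAG_TSK_MGMT bit set so that
	 * srp_process_rsp() can tell this response apart from SCSI command
	 * responses.
	 */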
2755 	spin_lock_irq(&ch->lock);
2756 	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2757 	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2758 	spin_unlock_irq(&ch->lock);
2759 
2760 	init_completion(&ch->tsk_mgmt_done);
2761 
2762 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
2763 				      DMA_TO_DEVICE);
2764 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2765 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2766 		mutex_unlock(&rport->mutex);
2767 
2768 		return -1;
2769 	}
2770 	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2771 					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2772 	if (res > 0 && status)
2773 		*status = ch->tsk_mgmt_status;
2774 	mutex_unlock(&rport->mutex);
2775 
2776 	WARN_ON_ONCE(res < 0);
2777 
2778 	return res > 0 ? 0 : -1;
2779 }
2780 
2781 static int srp_abort(struct scsi_cmnd *scmnd)
2782 {
2783 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2784 	struct srp_request *req = scsi_cmd_priv(scmnd);
2785 	u32 tag;
2786 	u16 ch_idx;
2787 	struct srp_rdma_ch *ch;
2788 
2789 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2790 
2791 	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
2792 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2793 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2794 		return SUCCESS;
2795 	ch = &target->ch[ch_idx];
2796 	if (!srp_claim_req(ch, req, NULL, scmnd))
2797 		return SUCCESS;
2798 	shost_printk(KERN_ERR, target->scsi_host,
2799 		     "Sending SRP abort for tag %#x\n", tag);
2800 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2801 			      SRP_TSK_ABORT_TASK, NULL) == 0) {
2802 		srp_free_req(ch, req, scmnd, 0);
2803 		return SUCCESS;
2804 	}
2805 	if (target->rport->state == SRP_RPORT_LOST)
2806 		return FAST_IO_FAIL;
2807 
2808 	return FAILED;
2809 }
2810 
2811 static int srp_reset_device(struct scsi_cmnd *scmnd)
2812 {
2813 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2814 	struct srp_rdma_ch *ch;
2815 	u8 status;
2816 
2817 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2818 
2819 	ch = &target->ch[0];
2820 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2821 			      SRP_TSK_LUN_RESET, &status))
2822 		return FAILED;
2823 	if (status)
2824 		return FAILED;
2825 
2826 	return SUCCESS;
2827 }
2828 
2829 static int srp_reset_host(struct scsi_cmnd *scmnd)
2830 {
2831 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2832 
2833 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2834 
2835 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2836 }
2837 
2838 static int srp_target_alloc(struct scsi_target *starget)
2839 {
2840 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2841 	struct srp_target_port *target = host_to_target(shost);
2842 
2843 	if (target->target_can_queue)
2844 		starget->can_queue = target->target_can_queue;
2845 	return 0;
2846 }
2847 
2848 static int srp_sdev_configure(struct scsi_device *sdev,
2849 			      struct queue_limits *lim)
2850 {
2851 	struct Scsi_Host *shost = sdev->host;
2852 	struct srp_target_port *target = host_to_target(shost);
2853 	struct request_queue *q = sdev->request_queue;
2854 	unsigned long timeout;
2855 
2856 	if (sdev->type == TYPE_DISK) {
2857 		timeout = max_t(unsigned int, 30 * HZ, target->rq_tmo_jiffies);
2858 		blk_queue_rq_timeout(q, timeout);
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2865 			   char *buf)
2866 {
2867 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2868 
2869 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2870 }
2871 
2872 static DEVICE_ATTR_RO(id_ext);
2873 
2874 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2875 			     char *buf)
2876 {
2877 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2878 
2879 	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2880 }
2881 
2882 static DEVICE_ATTR_RO(ioc_guid);
2883 
2884 static ssize_t service_id_show(struct device *dev,
2885 			       struct device_attribute *attr, char *buf)
2886 {
2887 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2888 
2889 	if (target->using_rdma_cm)
2890 		return -ENOENT;
2891 	return sysfs_emit(buf, "0x%016llx\n",
2892 			  be64_to_cpu(target->ib_cm.service_id));
2893 }
2894 
2895 static DEVICE_ATTR_RO(service_id);
2896 
2897 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2898 			 char *buf)
2899 {
2900 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2901 
2902 	if (target->using_rdma_cm)
2903 		return -ENOENT;
2904 
2905 	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2906 }
2907 
2908 static DEVICE_ATTR_RO(pkey);
2909 
2910 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2911 			 char *buf)
2912 {
2913 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2914 
2915 	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2916 }
2917 
2918 static DEVICE_ATTR_RO(sgid);
2919 
2920 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2921 			 char *buf)
2922 {
2923 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2924 	struct srp_rdma_ch *ch = &target->ch[0];
2925 
2926 	if (target->using_rdma_cm)
2927 		return -ENOENT;
2928 
2929 	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2930 }
2931 
2932 static DEVICE_ATTR_RO(dgid);
2933 
2934 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2935 			      char *buf)
2936 {
2937 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2938 
2939 	if (target->using_rdma_cm)
2940 		return -ENOENT;
2941 
2942 	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2943 }
2944 
2945 static DEVICE_ATTR_RO(orig_dgid);
2946 
2947 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2948 			    char *buf)
2949 {
2950 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2951 	struct srp_rdma_ch *ch;
2952 	int i, req_lim = INT_MAX;
2953 
2954 	for (i = 0; i < target->ch_count; i++) {
2955 		ch = &target->ch[i];
2956 		req_lim = min(req_lim, ch->req_lim);
2957 	}
2958 
2959 	return sysfs_emit(buf, "%d\n", req_lim);
2960 }
2961 
2962 static DEVICE_ATTR_RO(req_lim);
2963 
2964 static ssize_t zero_req_lim_show(struct device *dev,
2965 				 struct device_attribute *attr, char *buf)
2966 {
2967 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2968 
2969 	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2970 }
2971 
2972 static DEVICE_ATTR_RO(zero_req_lim);
2973 
2974 static ssize_t local_ib_port_show(struct device *dev,
2975 				  struct device_attribute *attr, char *buf)
2976 {
2977 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2978 
2979 	return sysfs_emit(buf, "%u\n", target->srp_host->port);
2980 }
2981 
2982 static DEVICE_ATTR_RO(local_ib_port);
2983 
2984 static ssize_t local_ib_device_show(struct device *dev,
2985 				    struct device_attribute *attr, char *buf)
2986 {
2987 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2988 
2989 	return sysfs_emit(buf, "%s\n",
2990 			  dev_name(&target->srp_host->srp_dev->dev->dev));
2991 }
2992 
2993 static DEVICE_ATTR_RO(local_ib_device);
2994 
2995 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
2996 			     char *buf)
2997 {
2998 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2999 
3000 	return sysfs_emit(buf, "%d\n", target->ch_count);
3001 }
3002 
3003 static DEVICE_ATTR_RO(ch_count);
3004 
3005 static ssize_t comp_vector_show(struct device *dev,
3006 				struct device_attribute *attr, char *buf)
3007 {
3008 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3009 
3010 	return sysfs_emit(buf, "%d\n", target->comp_vector);
3011 }
3012 
3013 static DEVICE_ATTR_RO(comp_vector);
3014 
3015 static ssize_t tl_retry_count_show(struct device *dev,
3016 				   struct device_attribute *attr, char *buf)
3017 {
3018 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3019 
3020 	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3021 }
3022 
3023 static DEVICE_ATTR_RO(tl_retry_count);
3024 
3025 static ssize_t cmd_sg_entries_show(struct device *dev,
3026 				   struct device_attribute *attr, char *buf)
3027 {
3028 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3029 
3030 	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3031 }
3032 
3033 static DEVICE_ATTR_RO(cmd_sg_entries);
3034 
3035 static ssize_t allow_ext_sg_show(struct device *dev,
3036 				 struct device_attribute *attr, char *buf)
3037 {
3038 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3039 
3040 	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3041 }
3042 
3043 static DEVICE_ATTR_RO(allow_ext_sg);
3044 
3045 static struct attribute *srp_host_attrs[] = {
3046 	&dev_attr_id_ext.attr,
3047 	&dev_attr_ioc_guid.attr,
3048 	&dev_attr_service_id.attr,
3049 	&dev_attr_pkey.attr,
3050 	&dev_attr_sgid.attr,
3051 	&dev_attr_dgid.attr,
3052 	&dev_attr_orig_dgid.attr,
3053 	&dev_attr_req_lim.attr,
3054 	&dev_attr_zero_req_lim.attr,
3055 	&dev_attr_local_ib_port.attr,
3056 	&dev_attr_local_ib_device.attr,
3057 	&dev_attr_ch_count.attr,
3058 	&dev_attr_comp_vector.attr,
3059 	&dev_attr_tl_retry_count.attr,
3060 	&dev_attr_cmd_sg_entries.attr,
3061 	&dev_attr_allow_ext_sg.attr,
3062 	NULL
3063 };
3064 
3065 ATTRIBUTE_GROUPS(srp_host);
3066 
3067 static const struct scsi_host_template srp_template = {
3068 	.module				= THIS_MODULE,
3069 	.name				= "InfiniBand SRP initiator",
3070 	.proc_name			= DRV_NAME,
3071 	.target_alloc			= srp_target_alloc,
3072 	.sdev_configure			= srp_sdev_configure,
3073 	.info				= srp_target_info,
3074 	.init_cmd_priv			= srp_init_cmd_priv,
3075 	.exit_cmd_priv			= srp_exit_cmd_priv,
3076 	.queuecommand			= srp_queuecommand,
3077 	.change_queue_depth             = srp_change_queue_depth,
3078 	.eh_timed_out			= srp_timed_out,
3079 	.eh_abort_handler		= srp_abort,
3080 	.eh_device_reset_handler	= srp_reset_device,
3081 	.eh_host_reset_handler		= srp_reset_host,
3082 	.skip_settle_delay		= true,
3083 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3084 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3085 	.this_id			= -1,
3086 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3087 	.shost_groups			= srp_host_groups,
3088 	.track_queue_depth		= 1,
3089 	.cmd_size			= sizeof(struct srp_request),
3090 };
3091 
3092 static int srp_sdev_count(struct Scsi_Host *host)
3093 {
3094 	struct scsi_device *sdev;
3095 	int c = 0;
3096 
3097 	shost_for_each_device(sdev, host)
3098 		c++;
3099 
3100 	return c;
3101 }
3102 
3103 /*
3104  * Return values:
3105  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3106  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3107  *    removal has been scheduled.
3108  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3109  */
3110 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3111 {
3112 	struct srp_rport_identifiers ids;
3113 	struct srp_rport *rport;
3114 
3115 	target->state = SRP_TARGET_SCANNING;
3116 	sprintf(target->target_name, "SRP.T10:%016llX",
3117 		be64_to_cpu(target->id_ext));
3118 
3119 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3120 		return -ENODEV;
3121 
3122 	memcpy(ids.port_id, &target->id_ext, 8);
3123 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3124 	ids.roles = SRP_RPORT_ROLE_TARGET;
3125 	rport = srp_rport_add(target->scsi_host, &ids);
3126 	if (IS_ERR(rport)) {
3127 		scsi_remove_host(target->scsi_host);
3128 		return PTR_ERR(rport);
3129 	}
3130 
3131 	rport->lld_data = target;
3132 	target->rport = rport;
3133 
3134 	spin_lock(&host->target_lock);
3135 	list_add_tail(&target->list, &host->target_list);
3136 	spin_unlock(&host->target_lock);
3137 
3138 	scsi_scan_target(&target->scsi_host->shost_gendev,
3139 			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3140 
3141 	if (srp_connected_ch(target) < target->ch_count ||
3142 	    target->qp_in_error) {
3143 		shost_printk(KERN_INFO, target->scsi_host,
3144 			     PFX "SCSI scan failed - removing SCSI host\n");
3145 		srp_queue_remove_work(target);
3146 		goto out;
3147 	}
3148 
3149 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3150 		 dev_name(&target->scsi_host->shost_gendev),
3151 		 srp_sdev_count(target->scsi_host));
3152 
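	/*
	 * Only transition to SRP_TARGET_LIVE if no asynchronous removal was
	 * scheduled during the scan; in that case the state is already
	 * SRP_TARGET_REMOVED.
	 */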
3153 	spin_lock_irq(&target->lock);
3154 	if (target->state == SRP_TARGET_SCANNING)
3155 		target->state = SRP_TARGET_LIVE;
3156 	spin_unlock_irq(&target->lock);
3157 
3158 out:
3159 	return 0;
3160 }
3161 
3162 static void srp_release_dev(struct device *dev)
3163 {
3164 	struct srp_host *host =
3165 		container_of(dev, struct srp_host, dev);
3166 
3167 	kfree(host);
3168 }
3169 
3170 static struct attribute *srp_class_attrs[];
3171 
3172 ATTRIBUTE_GROUPS(srp_class);
3173 
3174 static struct class srp_class = {
3175 	.name    = "infiniband_srp",
3176 	.dev_groups = srp_class_groups,
3177 	.dev_release = srp_release_dev
3178 };
3179 
3180 /**
3181  * srp_conn_unique() - check whether the connection to a target is unique
3182  * @host:   SRP host.
3183  * @target: SRP target port.
3184  */
3185 static bool srp_conn_unique(struct srp_host *host,
3186 			    struct srp_target_port *target)
3187 {
3188 	struct srp_target_port *t;
3189 	bool ret = false;
3190 
3191 	if (target->state == SRP_TARGET_REMOVED)
3192 		goto out;
3193 
3194 	ret = true;
3195 
3196 	spin_lock(&host->target_lock);
3197 	list_for_each_entry(t, &host->target_list, list) {
3198 		if (t != target &&
3199 		    target->id_ext == t->id_ext &&
3200 		    target->ioc_guid == t->ioc_guid &&
3201 		    target->initiator_ext == t->initiator_ext) {
3202 			ret = false;
3203 			break;
3204 		}
3205 	}
3206 	spin_unlock(&host->target_lock);
3207 
3208 out:
3209 	return ret;
3210 }
3211 
3212 /*
3213  * Target ports are added by writing
3214  *
3215  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3216  *     pkey=<P_Key>,service_id=<service ID>
3217  * or
3218  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3219  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3220  *
3221  * to the add_target sysfs attribute.
3222  */
3223 enum {
3224 	SRP_OPT_ERR		= 0,
3225 	SRP_OPT_ID_EXT		= 1 << 0,
3226 	SRP_OPT_IOC_GUID	= 1 << 1,
3227 	SRP_OPT_DGID		= 1 << 2,
3228 	SRP_OPT_PKEY		= 1 << 3,
3229 	SRP_OPT_SERVICE_ID	= 1 << 4,
3230 	SRP_OPT_MAX_SECT	= 1 << 5,
3231 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3232 	SRP_OPT_IO_CLASS	= 1 << 7,
3233 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3234 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3235 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3236 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3237 	SRP_OPT_COMP_VECTOR	= 1 << 12,
3238 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3239 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3240 	SRP_OPT_IP_SRC		= 1 << 15,
3241 	SRP_OPT_IP_DEST		= 1 << 16,
3242 	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3243 	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3244 	SRP_OPT_CH_COUNT	= 1 << 19,
3245 };
3246 
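/*
 * A target creation request must contain every option in at least one of the
 * two sets below: the first set is used with the IB CM, the second with the
 * RDMA CM.
 */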
3247 static unsigned int srp_opt_mandatory[] = {
3248 	SRP_OPT_ID_EXT		|
3249 	SRP_OPT_IOC_GUID	|
3250 	SRP_OPT_DGID		|
3251 	SRP_OPT_PKEY		|
3252 	SRP_OPT_SERVICE_ID,
3253 	SRP_OPT_ID_EXT		|
3254 	SRP_OPT_IOC_GUID	|
3255 	SRP_OPT_IP_DEST,
3256 };
3257 
3258 static const match_table_t srp_opt_tokens = {
3259 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3260 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3261 	{ SRP_OPT_DGID,			"dgid=%s" 		},
3262 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3263 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3264 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3265 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3266 	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3267 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3268 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3269 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3270 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3271 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3272 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3273 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3274 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3275 	{ SRP_OPT_IP_SRC,		"src=%s"		},
3276 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3277 	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
3278 	{ SRP_OPT_CH_COUNT,		"ch_count=%u"		},
3279 	{ SRP_OPT_ERR,			NULL 			}
3280 };
3281 
3282 /**
3283  * srp_parse_in - parse an IP address and port number combination
3284  * @net:	   [in]  Network namespace.
3285  * @sa:		   [out] Address family, IP address and port number.
3286  * @addr_port_str: [in]  IP address and port number.
3287  * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3288  *
3289  * Parse the following address formats:
3290  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3291  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3292  */
3293 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3294 			const char *addr_port_str, bool *has_port)
3295 {
3296 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3297 	char *port_str;
3298 	int ret;
3299 
3300 	if (!addr)
3301 		return -ENOMEM;
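	/*
	 * Treat the last ':' as the port separator unless a ']' follows it,
	 * in which case the ':' is part of a bracketed IPv6 address without
	 * a port number.
	 */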
3302 	port_str = strrchr(addr, ':');
3303 	if (port_str && strchr(port_str, ']'))
3304 		port_str = NULL;
3305 	if (port_str)
3306 		*port_str++ = '\0';
3307 	if (has_port)
3308 		*has_port = port_str != NULL;
3309 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3310 	if (ret && addr[0]) {
3311 		addr_end = addr + strlen(addr) - 1;
3312 		if (addr[0] == '[' && *addr_end == ']') {
3313 			*addr_end = '\0';
3314 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3315 						   port_str, sa);
3316 		}
3317 	}
3318 	kfree(addr);
3319 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3320 	return ret;
3321 }
3322 
3323 static int srp_parse_options(struct net *net, const char *buf,
3324 			     struct srp_target_port *target)
3325 {
3326 	char *options, *sep_opt;
3327 	char *p;
3328 	substring_t args[MAX_OPT_ARGS];
3329 	unsigned long long ull;
3330 	bool has_port;
3331 	int opt_mask = 0;
3332 	int token;
3333 	int ret = -EINVAL;
3334 	int i;
3335 
3336 	options = kstrdup(buf, GFP_KERNEL);
3337 	if (!options)
3338 		return -ENOMEM;
3339 
3340 	sep_opt = options;
3341 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3342 		if (!*p)
3343 			continue;
3344 
3345 		token = match_token(p, srp_opt_tokens, args);
3346 		opt_mask |= token;
3347 
3348 		switch (token) {
3349 		case SRP_OPT_ID_EXT:
3350 			p = match_strdup(args);
3351 			if (!p) {
3352 				ret = -ENOMEM;
3353 				goto out;
3354 			}
3355 			ret = kstrtoull(p, 16, &ull);
3356 			if (ret) {
3357 				pr_warn("invalid id_ext parameter '%s'\n", p);
3358 				kfree(p);
3359 				goto out;
3360 			}
3361 			target->id_ext = cpu_to_be64(ull);
3362 			kfree(p);
3363 			break;
3364 
3365 		case SRP_OPT_IOC_GUID:
3366 			p = match_strdup(args);
3367 			if (!p) {
3368 				ret = -ENOMEM;
3369 				goto out;
3370 			}
3371 			ret = kstrtoull(p, 16, &ull);
3372 			if (ret) {
3373 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3374 				kfree(p);
3375 				goto out;
3376 			}
3377 			target->ioc_guid = cpu_to_be64(ull);
3378 			kfree(p);
3379 			break;
3380 
3381 		case SRP_OPT_DGID:
3382 			p = match_strdup(args);
3383 			if (!p) {
3384 				ret = -ENOMEM;
3385 				goto out;
3386 			}
3387 			if (strlen(p) != 32) {
3388 				pr_warn("bad dest GID parameter '%s'\n", p);
3389 				kfree(p);
3390 				goto out;
3391 			}
3392 
3393 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3394 			kfree(p);
3395 			if (ret < 0)
3396 				goto out;
3397 			break;
3398 
3399 		case SRP_OPT_PKEY:
3400 			ret = match_hex(args, &token);
3401 			if (ret) {
3402 				pr_warn("bad P_Key parameter '%s'\n", p);
3403 				goto out;
3404 			}
3405 			target->ib_cm.pkey = cpu_to_be16(token);
3406 			break;
3407 
3408 		case SRP_OPT_SERVICE_ID:
3409 			p = match_strdup(args);
3410 			if (!p) {
3411 				ret = -ENOMEM;
3412 				goto out;
3413 			}
3414 			ret = kstrtoull(p, 16, &ull);
3415 			if (ret) {
3416 				pr_warn("bad service_id parameter '%s'\n", p);
3417 				kfree(p);
3418 				goto out;
3419 			}
3420 			target->ib_cm.service_id = cpu_to_be64(ull);
3421 			kfree(p);
3422 			break;
3423 
3424 		case SRP_OPT_IP_SRC:
3425 			p = match_strdup(args);
3426 			if (!p) {
3427 				ret = -ENOMEM;
3428 				goto out;
3429 			}
3430 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3431 					   NULL);
3432 			if (ret < 0) {
3433 				pr_warn("bad source parameter '%s'\n", p);
3434 				kfree(p);
3435 				goto out;
3436 			}
3437 			target->rdma_cm.src_specified = true;
3438 			kfree(p);
3439 			break;
3440 
3441 		case SRP_OPT_IP_DEST:
3442 			p = match_strdup(args);
3443 			if (!p) {
3444 				ret = -ENOMEM;
3445 				goto out;
3446 			}
3447 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3448 					   &has_port);
3449 			if (!has_port)
3450 				ret = -EINVAL;
3451 			if (ret < 0) {
3452 				pr_warn("bad dest parameter '%s'\n", p);
3453 				kfree(p);
3454 				goto out;
3455 			}
3456 			target->using_rdma_cm = true;
3457 			kfree(p);
3458 			break;
3459 
3460 		case SRP_OPT_MAX_SECT:
3461 			ret = match_int(args, &token);
3462 			if (ret) {
3463 				pr_warn("bad max sect parameter '%s'\n", p);
3464 				goto out;
3465 			}
3466 			target->scsi_host->max_sectors = token;
3467 			break;
3468 
3469 		case SRP_OPT_QUEUE_SIZE:
3470 			ret = match_int(args, &token);
3471 			if (ret) {
3472 				pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
3473 					p, ret);
3474 				goto out;
3475 			}
3476 			if (token < 1) {
3477 				pr_warn("bad queue_size parameter '%s'\n", p);
3478 				ret = -EINVAL;
3479 				goto out;
3480 			}
3481 			target->scsi_host->can_queue = token;
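			/*
			 * Reserve ring slots on top of what the SCSI core may
			 * queue: SRP_RSP_SQ_SIZE for responses to
			 * target-initiated requests and SRP_TSK_MGMT_SQ_SIZE
			 * for task management functions.
			 */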
3482 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3483 					     SRP_TSK_MGMT_SQ_SIZE;
3484 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3485 				target->scsi_host->cmd_per_lun = token;
3486 			break;
3487 
3488 		case SRP_OPT_MAX_CMD_PER_LUN:
3489 			ret = match_int(args, &token);
3490 			if (ret) {
3491 				pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
3492 					p, ret);
3493 				goto out;
3494 			}
3495 			if (token < 1) {
3496 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3497 					p);
3498 				ret = -EINVAL;
3499 				goto out;
3500 			}
3501 			target->scsi_host->cmd_per_lun = token;
3502 			break;
3503 
3504 		case SRP_OPT_TARGET_CAN_QUEUE:
3505 			ret = match_int(args, &token);
3506 			if (ret) {
3507 				pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
3508 					p, ret);
3509 				goto out;
3510 			}
3511 			if (token < 1) {
3512 				pr_warn("bad max target_can_queue parameter '%s'\n",
3513 					p);
3514 				ret = -EINVAL;
3515 				goto out;
3516 			}
3517 			target->target_can_queue = token;
3518 			break;
3519 
3520 		case SRP_OPT_IO_CLASS:
3521 			ret = match_hex(args, &token);
3522 			if (ret) {
3523 				pr_warn("bad IO class parameter '%s'\n", p);
3524 				goto out;
3525 			}
3526 			if (token != SRP_REV10_IB_IO_CLASS &&
3527 			    token != SRP_REV16A_IB_IO_CLASS) {
3528 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3529 					token, SRP_REV10_IB_IO_CLASS,
3530 					SRP_REV16A_IB_IO_CLASS);
3531 				ret = -EINVAL;
3532 				goto out;
3533 			}
3534 			target->io_class = token;
3535 			break;
3536 
3537 		case SRP_OPT_INITIATOR_EXT:
3538 			p = match_strdup(args);
3539 			if (!p) {
3540 				ret = -ENOMEM;
3541 				goto out;
3542 			}
3543 			ret = kstrtoull(p, 16, &ull);
3544 			if (ret) {
3545 				pr_warn("bad initiator_ext value '%s'\n", p);
3546 				kfree(p);
3547 				goto out;
3548 			}
3549 			target->initiator_ext = cpu_to_be64(ull);
3550 			kfree(p);
3551 			break;
3552 
3553 		case SRP_OPT_CMD_SG_ENTRIES:
3554 			ret = match_int(args, &token);
3555 			if (ret) {
3556 				pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
3557 					p, ret);
3558 				goto out;
3559 			}
3560 			if (token < 1 || token > 255) {
3561 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3562 					p);
3563 				ret = -EINVAL;
3564 				goto out;
3565 			}
3566 			target->cmd_sg_cnt = token;
3567 			break;
3568 
3569 		case SRP_OPT_ALLOW_EXT_SG:
3570 			ret = match_int(args, &token);
3571 			if (ret) {
3572 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3573 				goto out;
3574 			}
3575 			target->allow_ext_sg = !!token;
3576 			break;
3577 
3578 		case SRP_OPT_SG_TABLESIZE:
3579 			ret = match_int(args, &token);
3580 			if (ret) {
3581 				pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
3582 					p, ret);
3583 				goto out;
3584 			}
3585 			if (token < 1 || token > SG_MAX_SEGMENTS) {
3586 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3587 					p);
3588 				ret = -EINVAL;
3589 				goto out;
3590 			}
3591 			target->sg_tablesize = token;
3592 			break;
3593 
3594 		case SRP_OPT_COMP_VECTOR:
3595 			ret = match_int(args, &token);
3596 			if (ret) {
3597 				pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
3598 					p, ret);
3599 				goto out;
3600 			}
3601 			if (token < 0) {
3602 				pr_warn("bad comp_vector parameter '%s'\n", p);
3603 				ret = -EINVAL;
3604 				goto out;
3605 			}
3606 			target->comp_vector = token;
3607 			break;
3608 
3609 		case SRP_OPT_TL_RETRY_COUNT:
3610 			ret = match_int(args, &token);
3611 			if (ret) {
3612 				pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
3613 					p, ret);
3614 				goto out;
3615 			}
3616 			if (token < 2 || token > 7) {
3617 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3618 					p);
3619 				ret = -EINVAL;
3620 				goto out;
3621 			}
3622 			target->tl_retry_count = token;
3623 			break;
3624 
3625 		case SRP_OPT_MAX_IT_IU_SIZE:
3626 			ret = match_int(args, &token);
3627 			if (ret) {
3628 				pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
3629 					p, ret);
3630 				goto out;
3631 			}
3632 			if (token < 0) {
3633 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3634 				ret = -EINVAL;
3635 				goto out;
3636 			}
3637 			target->max_it_iu_size = token;
3638 			break;
3639 
3640 		case SRP_OPT_CH_COUNT:
3641 			ret = match_int(args, &token);
3642 			if (ret) {
3643 				pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
3644 					p, ret);
3645 				goto out;
3646 			}
3647 			if (token < 1) {
3648 				pr_warn("bad channel count %s\n", p);
3649 				ret = -EINVAL;
3650 				goto out;
3651 			}
3652 			target->ch_count = token;
3653 			break;
3654 
3655 		default:
3656 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3657 				p);
3658 			ret = -EINVAL;
3659 			goto out;
3660 		}
3661 	}
3662 
3663 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3664 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3665 			ret = 0;
3666 			break;
3667 		}
3668 	}
3669 	if (ret)
3670 		pr_warn("target creation request is missing one or more parameters\n");
3671 
3672 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue &&
3673 	    (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3674 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3675 			target->scsi_host->cmd_per_lun,
3676 			target->scsi_host->can_queue);
3677 
3678 out:
3679 	kfree(options);
3680 	return ret;
3681 }
3682 
3683 static ssize_t add_target_store(struct device *dev,
3684 				struct device_attribute *attr, const char *buf,
3685 				size_t count)
3686 {
3687 	struct srp_host *host =
3688 		container_of(dev, struct srp_host, dev);
3689 	struct Scsi_Host *target_host;
3690 	struct srp_target_port *target;
3691 	struct srp_rdma_ch *ch;
3692 	struct srp_device *srp_dev = host->srp_dev;
3693 	struct ib_device *ibdev = srp_dev->dev;
3694 	int ret, i, ch_idx;
3695 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3696 	bool multich = false;
3697 	uint32_t max_iu_len;
3698 
3699 	target_host = scsi_host_alloc(&srp_template,
3700 				      sizeof (struct srp_target_port));
3701 	if (!target_host)
3702 		return -ENOMEM;
3703 
3704 	target_host->transportt  = ib_srp_transport_template;
3705 	target_host->max_channel = 0;
3706 	target_host->max_id      = 1;
3707 	target_host->max_lun     = -1LL;
3708 	target_host->max_cmd_len = sizeof_field(struct srp_cmd, cdb);
3709 
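	/*
	 * Devices with the IBK_SG_GAPS_REG capability can register S/G lists
	 * with arbitrarily aligned elements. All other devices require every
	 * intermediate S/G element to start and end on an HCA page boundary,
	 * which is communicated to the block layer via virt_boundary_mask.
	 */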
3710 	if (ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
3711 		target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3712 	else
3713 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3714 
3715 	target = host_to_target(target_host);
3716 
3717 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3718 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3719 	target->scsi_host	= target_host;
3720 	target->srp_host	= host;
3721 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3722 	target->global_rkey	= host->srp_dev->global_rkey;
3723 	target->cmd_sg_cnt	= cmd_sg_entries;
3724 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3725 	target->allow_ext_sg	= allow_ext_sg;
3726 	target->tl_retry_count	= 7;
3727 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3728 
3729 	/*
3730 	 * Prevent the SCSI host from being removed by srp_remove_target()
3731 	 * before this function returns.
3732 	 */
3733 	scsi_host_get(target->scsi_host);
3734 
3735 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3736 	if (ret < 0)
3737 		goto put;
3738 
3739 	ret = srp_parse_options(target->net, buf, target);
3740 	if (ret)
3741 		goto out;
3742 
3743 	if (!srp_conn_unique(target->srp_host, target)) {
3744 		if (target->using_rdma_cm) {
3745 			shost_printk(KERN_INFO, target->scsi_host,
3746 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3747 				     be64_to_cpu(target->id_ext),
3748 				     be64_to_cpu(target->ioc_guid),
3749 				     &target->rdma_cm.dst);
3750 		} else {
3751 			shost_printk(KERN_INFO, target->scsi_host,
3752 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3753 				     be64_to_cpu(target->id_ext),
3754 				     be64_to_cpu(target->ioc_guid),
3755 				     be64_to_cpu(target->initiator_ext));
3756 		}
3757 		ret = -EEXIST;
3758 		goto out;
3759 	}
3760 
3761 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3762 	    target->cmd_sg_cnt < target->sg_tablesize) {
3763 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3764 		target->sg_tablesize = target->cmd_sg_cnt;
3765 	}
3766 
3767 	if (srp_dev->use_fast_reg) {
3768 		bool gaps_reg = ibdev->attrs.kernel_cap_flags &
3769 				 IBK_SG_GAPS_REG;
3770 
3771 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3772 				  (ilog2(srp_dev->mr_page_size) - 9);
3773 		if (!gaps_reg) {
3774 			/*
3775 			 * FR can only map one HCA page per entry. If the start
3776 			 * address is not aligned on an HCA page boundary, two
3777 			 * entries will be used for the head and the tail
3778 			 * although these two entries combined contain at most
3779 			 * one HCA page of data. Hence the "+ 1" in the
3780 			 * calculation below.
3781 			 *
3782 			 * The indirect data buffer descriptor is contiguous
3783 			 * so the memory for that buffer will only be
3784 			 * registered if register_always is true. Hence add
3785 			 * one to mr_per_cmd if register_always has been set.
3786 			 */
3787 			mr_per_cmd = register_always +
3788 				(target->scsi_host->max_sectors + 1 +
3789 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3790 		} else {
3791 			mr_per_cmd = register_always +
3792 				(target->sg_tablesize +
3793 				 srp_dev->max_pages_per_mr - 1) /
3794 				srp_dev->max_pages_per_mr;
3795 		}
3796 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3797 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3798 			 max_sectors_per_mr, mr_per_cmd);
3799 	}
3800 
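	/*
	 * Size the MR pool for the worst case: mr_per_cmd registrations for
	 * each of the scsi_host->can_queue commands that may be in flight.
	 * indirect_size is the size of the table of srp_direct_buf elements
	 * referenced by an indirect data buffer descriptor, and max_iu_len
	 * is the largest initiator-to-target IU, taking optional immediate
	 * data support into account.
	 */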
3801 	target_host->sg_tablesize = target->sg_tablesize;
3802 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3803 	target->mr_per_cmd = mr_per_cmd;
3804 	target->indirect_size = target->sg_tablesize *
3805 				sizeof (struct srp_direct_buf);
3806 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3807 				       srp_use_imm_data,
3808 				       target->max_it_iu_size);
3809 
3810 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3811 	INIT_WORK(&target->remove_work, srp_remove_work);
3812 	spin_lock_init(&target->lock);
3813 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3814 	if (ret)
3815 		goto out;
3816 
3817 	ret = -ENOMEM;
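	/*
	 * If no ch_count option was given, fall back to the ch_count module
	 * parameter or, if that is zero, scale the number of RDMA channels
	 * with the NUMA node and completion vector counts, capped at the
	 * number of online CPUs.
	 */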
3818 	if (target->ch_count == 0) {
3819 		target->ch_count =
3820 			min(ch_count ?:
3821 				max(4 * num_online_nodes(),
3822 				    ibdev->num_comp_vectors),
3823 				num_online_cpus());
3824 	}
3825 
3826 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3827 			     GFP_KERNEL);
3828 	if (!target->ch)
3829 		goto out;
3830 
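	/*
	 * Create and connect one RDMA channel per hardware queue and spread
	 * the channels round-robin over the completion vectors of the HCA.
	 */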
3831 	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3832 		ch = &target->ch[ch_idx];
3833 		ch->target = target;
3834 		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3835 		spin_lock_init(&ch->lock);
3836 		INIT_LIST_HEAD(&ch->free_tx);
3837 		ret = srp_new_cm_id(ch);
3838 		if (ret)
3839 			goto err_disconnect;
3840 
3841 		ret = srp_create_ch_ib(ch);
3842 		if (ret)
3843 			goto err_disconnect;
3844 
3845 		ret = srp_connect_ch(ch, max_iu_len, multich);
3846 		if (ret) {
3847 			char dst[64];
3848 
3849 			if (target->using_rdma_cm)
3850 				snprintf(dst, sizeof(dst), "%pIS",
3851 					&target->rdma_cm.dst);
3852 			else
3853 				snprintf(dst, sizeof(dst), "%pI6",
3854 					target->ib_cm.orig_dgid.raw);
3855 			shost_printk(KERN_ERR, target->scsi_host,
3856 				PFX "Connection %d/%d to %s failed\n",
3857 				ch_idx,
3858 				target->ch_count, dst);
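			/*
			 * The first connection must succeed. If a later
			 * channel fails to connect, continue with the
			 * channels that have been established so far and
			 * shrink ch_count accordingly.
			 */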
3859 			if (ch_idx == 0) {
3860 				goto free_ch;
3861 			} else {
3862 				srp_free_ch_ib(target, ch);
3863 				target->ch_count = ch - target->ch;
3864 				goto connected;
3865 			}
3866 		}
3867 		multich = true;
3868 	}
3869 
3870 connected:
3871 	target->scsi_host->nr_hw_queues = target->ch_count;
3872 
3873 	ret = srp_add_target(host, target);
3874 	if (ret)
3875 		goto err_disconnect;
3876 
3877 	if (target->state != SRP_TARGET_REMOVED) {
3878 		if (target->using_rdma_cm) {
3879 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3880 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3881 				     be64_to_cpu(target->id_ext),
3882 				     be64_to_cpu(target->ioc_guid),
3883 				     target->sgid.raw, &target->rdma_cm.dst);
3884 		} else {
3885 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3886 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3887 				     be64_to_cpu(target->id_ext),
3888 				     be64_to_cpu(target->ioc_guid),
3889 				     be16_to_cpu(target->ib_cm.pkey),
3890 				     be64_to_cpu(target->ib_cm.service_id),
3891 				     target->sgid.raw,
3892 				     target->ib_cm.orig_dgid.raw);
3893 		}
3894 	}
3895 
3896 	ret = count;
3897 
3898 out:
3899 	mutex_unlock(&host->add_target_mutex);
3900 
3901 put:
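	/*
	 * This put pairs with the scsi_host_get() call above. The second
	 * put in the error path below drops the initial reference obtained
	 * through scsi_host_alloc() and thereby frees the SCSI host.
	 */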
3902 	scsi_host_put(target->scsi_host);
3903 	if (ret < 0) {
3904 		/*
3905 		 * If a call to srp_remove_target() has not been scheduled,
3906 		 * drop the network namespace reference that was obtained
3907 		 * earlier in this function.
3908 		 */
3909 		if (target->state != SRP_TARGET_REMOVED)
3910 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3911 		scsi_host_put(target->scsi_host);
3912 	}
3913 
3914 	return ret;
3915 
3916 err_disconnect:
3917 	srp_disconnect_target(target);
3918 
3919 free_ch:
3920 	for (i = 0; i < target->ch_count; i++) {
3921 		ch = &target->ch[i];
3922 		srp_free_ch_ib(target, ch);
3923 	}
3924 
3925 	kfree(target->ch);
3926 	goto out;
3927 }
3928 
3929 static DEVICE_ATTR_WO(add_target);
3930 
3931 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3932 			  char *buf)
3933 {
3934 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3935 
3936 	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3937 }
3938 
3939 static DEVICE_ATTR_RO(ibdev);
3940 
3941 static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3942 			 char *buf)
3943 {
3944 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3945 
3946 	return sysfs_emit(buf, "%u\n", host->port);
3947 }
3948 
3949 static DEVICE_ATTR_RO(port);
3950 
3951 static struct attribute *srp_class_attrs[] = {
3952 	&dev_attr_add_target.attr,
3953 	&dev_attr_ibdev.attr,
3954 	&dev_attr_port.attr,
3955 	NULL
3956 };
3957 
3958 static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
3959 {
3960 	struct srp_host *host;
3961 
3962 	host = kzalloc(sizeof *host, GFP_KERNEL);
3963 	if (!host)
3964 		return NULL;
3965 
3966 	INIT_LIST_HEAD(&host->target_list);
3967 	spin_lock_init(&host->target_lock);
3968 	mutex_init(&host->add_target_mutex);
3969 	host->srp_dev = device;
3970 	host->port = port;
3971 
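	/*
	 * Two-stage device registration: once device_initialize() has been
	 * called, a single put_device() is sufficient to release the host
	 * again if dev_set_name() or device_add() fails.
	 */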
3972 	device_initialize(&host->dev);
3973 	host->dev.class = &srp_class;
3974 	host->dev.parent = device->dev->dev.parent;
3975 	if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
3976 			 port))
3977 		goto put_host;
3978 	if (device_add(&host->dev))
3979 		goto put_host;
3980 
3981 	return host;
3982 
3983 put_host:
3984 	put_device(&host->dev);
3985 	return NULL;
3986 }
3987 
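/*
 * Rename client callback: keep the sysfs names of the SRP ports in sync
 * with the name of the underlying IB device.
 */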
3988 static void srp_rename_dev(struct ib_device *device, void *client_data)
3989 {
3990 	struct srp_device *srp_dev = client_data;
3991 	struct srp_host *host, *tmp_host;
3992 
3993 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3994 		char name[IB_DEVICE_NAME_MAX + 8];
3995 
3996 		snprintf(name, sizeof(name), "srp-%s-%u",
3997 			 dev_name(&device->dev), host->port);
3998 		device_rename(&host->dev, name);
3999 	}
4000 }
4001 
4002 static int srp_add_one(struct ib_device *device)
4003 {
4004 	struct srp_device *srp_dev;
4005 	struct ib_device_attr *attr = &device->attrs;
4006 	struct srp_host *host;
4007 	int mr_page_shift;
4008 	u32 p;
4009 	u64 max_pages_per_mr;
4010 	unsigned int flags = 0;
4011 
4012 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
4013 	if (!srp_dev)
4014 		return -ENOMEM;
4015 
4016 	/*
4017 	 * Use the smallest page size supported by the HCA, down to a
4018 	 * minimum of 4096 bytes. We're unlikely to build large sglists
4019 	 * out of smaller entries.
4020 	 */
4021 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
4022 	srp_dev->mr_page_size	= 1 << mr_page_shift;
4023 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
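	/*
	 * do_div() divides the 64-bit max_mr_size in place, yielding the
	 * number of HCA pages that fit in a single memory region; cap the
	 * result at SRP_MAX_PAGES_PER_MR.
	 */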
4024 	max_pages_per_mr	= attr->max_mr_size;
4025 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
4026 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
4027 		 attr->max_mr_size, srp_dev->mr_page_size,
4028 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
4029 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4030 					  max_pages_per_mr);
4031 
4032 	srp_dev->has_fr = (attr->device_cap_flags &
4033 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
4034 	if (!never_register && !srp_dev->has_fr)
4035 		dev_warn(&device->dev, "FR is not supported\n");
4036 	else if (!never_register &&
4037 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4038 		srp_dev->use_fast_reg = srp_dev->has_fr;
4039 
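	/*
	 * If memory registration cannot or must not be used, fall back to a
	 * PD-wide rkey. IB_PD_UNSAFE_GLOBAL_RKEY exposes all of host memory
	 * to the remote target, hence "unsafe".
	 */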
4040 	if (never_register || !register_always || !srp_dev->has_fr)
4041 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4042 
4043 	if (srp_dev->use_fast_reg) {
4044 		srp_dev->max_pages_per_mr =
4045 			min_t(u32, srp_dev->max_pages_per_mr,
4046 			      attr->max_fast_reg_page_list_len);
4047 	}
4048 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
4049 				   srp_dev->max_pages_per_mr;
4050 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4051 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4052 		 attr->max_fast_reg_page_list_len,
4053 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4054 
4055 	INIT_LIST_HEAD(&srp_dev->dev_list);
4056 
4057 	srp_dev->dev = device;
4058 	srp_dev->pd  = ib_alloc_pd(device, flags);
4059 	if (IS_ERR(srp_dev->pd)) {
4060 		int ret = PTR_ERR(srp_dev->pd);
4061 
4062 		kfree(srp_dev);
4063 		return ret;
4064 	}
4065 
4066 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4067 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4068 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4069 	}
4070 
4071 	rdma_for_each_port (device, p) {
4072 		host = srp_add_port(srp_dev, p);
4073 		if (host)
4074 			list_add_tail(&host->list, &srp_dev->dev_list);
4075 	}
4076 
4077 	ib_set_client_data(device, &srp_client, srp_dev);
4078 	return 0;
4079 }
4080 
4081 static void srp_remove_one(struct ib_device *device, void *client_data)
4082 {
4083 	struct srp_device *srp_dev;
4084 	struct srp_host *host, *tmp_host;
4085 	struct srp_target_port *target;
4086 
4087 	srp_dev = client_data;
4088 
4089 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4090 		/*
4091 		 * Remove the add_target sysfs entry so that no new target ports
4092 		 * can be created.
4093 		 */
4094 		device_del(&host->dev);
4095 
4096 		/*
4097 		 * Remove all target ports.
4098 		 */
4099 		spin_lock(&host->target_lock);
4100 		list_for_each_entry(target, &host->target_list, list)
4101 			srp_queue_remove_work(target);
4102 		spin_unlock(&host->target_lock);
4103 
4104 		/*
4105 		 * srp_queue_remove_work() queues a call to
4106 		 * srp_remove_target(). The latter function cancels
4107 		 * target->tl_err_work, so waiting for the remove work items
4108 		 * to finish is sufficient.
4109 		 */
4110 		flush_workqueue(srp_remove_wq);
4111 
4112 		put_device(&host->dev);
4113 	}
4114 
4115 	ib_dealloc_pd(srp_dev->pd);
4116 
4117 	kfree(srp_dev);
4118 }
4119 
4120 static struct srp_function_template ib_srp_transport_functions = {
4121 	.has_rport_state	 = true,
4122 	.reset_timer_if_blocked	 = true,
4123 	.reconnect_delay	 = &srp_reconnect_delay,
4124 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4125 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4126 	.reconnect		 = srp_rport_reconnect,
4127 	.rport_delete		 = srp_rport_delete,
4128 	.terminate_rport_io	 = srp_terminate_io,
4129 };
4130 
4131 static int __init srp_init_module(void)
4132 {
4133 	int ret;
4134 
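	/*
	 * Compile-time checks: these structures travel on the wire, so
	 * their sizes must not deviate from the protocol-defined lengths.
	 */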
4135 	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4136 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4137 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4138 	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4139 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4140 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4141 	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4142 
4143 	if (srp_sg_tablesize) {
4144 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4145 		if (!cmd_sg_entries)
4146 			cmd_sg_entries = srp_sg_tablesize;
4147 	}
4148 
4149 	if (!cmd_sg_entries)
4150 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4151 
4152 	if (cmd_sg_entries > 255) {
4153 		pr_warn("Clamping cmd_sg_entries to 255\n");
4154 		cmd_sg_entries = 255;
4155 	}
4156 
4157 	if (!indirect_sg_entries)
4158 		indirect_sg_entries = cmd_sg_entries;
4159 	else if (indirect_sg_entries < cmd_sg_entries) {
4160 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4161 			cmd_sg_entries);
4162 		indirect_sg_entries = cmd_sg_entries;
4163 	}
4164 
4165 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4166 		pr_warn("Clamping indirect_sg_entries to %u\n",
4167 			SG_MAX_SEGMENTS);
4168 		indirect_sg_entries = SG_MAX_SEGMENTS;
4169 	}
4170 
4171 	srp_remove_wq = create_workqueue("srp_remove");
4172 	if (!srp_remove_wq) {
4173 		ret = -ENOMEM;
4174 		goto out;
4175 	}
4176 
4177 	ret = -ENOMEM;
4178 	ib_srp_transport_template =
4179 		srp_attach_transport(&ib_srp_transport_functions);
4180 	if (!ib_srp_transport_template)
4181 		goto destroy_wq;
4182 
4183 	ret = class_register(&srp_class);
4184 	if (ret) {
4185 		pr_err("couldn't register class infiniband_srp\n");
4186 		goto release_tr;
4187 	}
4188 
4189 	ib_sa_register_client(&srp_sa_client);
4190 
4191 	ret = ib_register_client(&srp_client);
4192 	if (ret) {
4193 		pr_err("couldn't register IB client\n");
4194 		goto unreg_sa;
4195 	}
4196 
4197 out:
4198 	return ret;
4199 
4200 unreg_sa:
4201 	ib_sa_unregister_client(&srp_sa_client);
4202 	class_unregister(&srp_class);
4203 
4204 release_tr:
4205 	srp_release_transport(ib_srp_transport_template);
4206 
4207 destroy_wq:
4208 	destroy_workqueue(srp_remove_wq);
4209 	goto out;
4210 }
4211 
4212 static void __exit srp_cleanup_module(void)
4213 {
4214 	ib_unregister_client(&srp_client);
4215 	ib_sa_unregister_client(&srp_sa_client);
4216 	class_unregister(&srp_class);
4217 	srp_release_transport(ib_srp_transport_template);
4218 	destroy_workqueue(srp_remove_wq);
4219 }
4220 
4221 module_init(srp_init_module);
4222 module_exit(srp_cleanup_module);
4223