xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision ed4bc1890b4984d0af447ad3cc1f93541623f8f3)
1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
46 
47 #include <linux/atomic.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_device.h>
51 #include <scsi/scsi_dbg.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/srp.h>
54 #include <scsi/scsi_transport_srp.h>
55 
56 #include "ib_srp.h"
57 
58 #define DRV_NAME	"ib_srp"
59 #define PFX		DRV_NAME ": "
60 
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 
65 #if !defined(CONFIG_DYNAMIC_DEBUG)
66 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
67 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
68 #endif
69 
70 static unsigned int srp_sg_tablesize;
71 static unsigned int cmd_sg_entries;
72 static unsigned int indirect_sg_entries;
73 static bool allow_ext_sg;
74 static bool register_always = true;
75 static bool never_register;
76 static int topspin_workarounds = 1;
77 
78 module_param(srp_sg_tablesize, uint, 0444);
79 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
80 
81 module_param(cmd_sg_entries, uint, 0444);
82 MODULE_PARM_DESC(cmd_sg_entries,
83 		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
84 
85 module_param(indirect_sg_entries, uint, 0444);
86 MODULE_PARM_DESC(indirect_sg_entries,
87 		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
88 
89 module_param(allow_ext_sg, bool, 0444);
90 MODULE_PARM_DESC(allow_ext_sg,
91 		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
92 
93 module_param(topspin_workarounds, int, 0444);
94 MODULE_PARM_DESC(topspin_workarounds,
95 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
96 
97 module_param(register_always, bool, 0444);
98 MODULE_PARM_DESC(register_always,
99 		 "Use memory registration even for contiguous memory regions");
100 
101 module_param(never_register, bool, 0444);
102 MODULE_PARM_DESC(never_register, "Never register memory");
103 
104 static const struct kernel_param_ops srp_tmo_ops;
105 
106 static int srp_reconnect_delay = 10;
107 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
108 		0644);
109 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
110 
111 static int srp_fast_io_fail_tmo = 15;
112 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
113 		0644);
114 MODULE_PARM_DESC(fast_io_fail_tmo,
115 		 "Number of seconds between the observation of a transport"
116 		 " layer error and failing all I/O. \"off\" means that this"
117 		 " functionality is disabled.");
118 
119 static int srp_dev_loss_tmo = 600;
120 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
121 		0644);
122 MODULE_PARM_DESC(dev_loss_tmo,
123 		 "Maximum number of seconds that the SRP transport should"
124 		 " insulate transport layer errors. After this time has been"
125 		 " exceeded the SCSI host is removed. Should be"
126 		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
127 		 " if fast_io_fail_tmo has not been set. \"off\" means that"
128 		 " this functionality is disabled.");
129 
130 static bool srp_use_imm_data = true;
131 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
132 MODULE_PARM_DESC(use_imm_data,
133 		 "Whether or not to request permission to use immediate data during SRP login.");
134 
135 static unsigned int srp_max_imm_data = 8 * 1024;
136 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
137 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
138 
139 static unsigned ch_count;
140 module_param(ch_count, uint, 0444);
141 MODULE_PARM_DESC(ch_count,
142 		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
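/*
 * Illustrative usage (not part of the original source): the 0444
 * parameters above can only be set at module load time, e.g.:
 *
 *	modprobe ib_srp cmd_sg_entries=64 ch_count=2
 *
 * whereas parameters registered with a writable mode (such as
 * use_imm_data and the *_tmo parameters) remain writable through
 * /sys/module/ib_srp/parameters/ at runtime.
 */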
143 
144 static int srp_add_one(struct ib_device *device);
145 static void srp_remove_one(struct ib_device *device, void *client_data);
146 static void srp_rename_dev(struct ib_device *device, void *client_data);
147 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
148 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
149 		const char *opname);
150 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
151 			     const struct ib_cm_event *event);
152 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
153 			       struct rdma_cm_event *event);
154 
155 static struct scsi_transport_template *ib_srp_transport_template;
156 static struct workqueue_struct *srp_remove_wq;
157 
158 static struct ib_client srp_client = {
159 	.name   = "srp",
160 	.add    = srp_add_one,
161 	.remove = srp_remove_one,
162 	.rename = srp_rename_dev
163 };
164 
165 static struct ib_sa_client srp_sa_client;
166 
167 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
168 {
169 	int tmo = *(int *)kp->arg;
170 
171 	if (tmo >= 0)
172 		return sprintf(buffer, "%d\n", tmo);
173 	else
174 		return sprintf(buffer, "off\n");
175 }
176 
177 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
178 {
179 	int tmo, res;
180 
181 	res = srp_parse_tmo(&tmo, val);
182 	if (res)
183 		goto out;
184 
185 	if (kp->arg == &srp_reconnect_delay)
186 		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
187 				    srp_dev_loss_tmo);
188 	else if (kp->arg == &srp_fast_io_fail_tmo)
189 		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
190 	else
191 		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
192 				    tmo);
193 	if (res)
194 		goto out;
195 	*(int *)kp->arg = tmo;
196 
197 out:
198 	return res;
199 }
200 
201 static const struct kernel_param_ops srp_tmo_ops = {
202 	.get = srp_tmo_get,
203 	.set = srp_tmo_set,
204 };
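/*
 * Illustrative sysfs interaction (hedged; behavior follows from
 * srp_tmo_get()/srp_tmo_set() above): reading one of the *_tmo
 * parameters reports negative values as "off", and writes such as
 *
 *	echo 20  > /sys/module/ib_srp/parameters/reconnect_delay
 *	echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * are parsed by srp_parse_tmo() and only take effect if
 * srp_tmo_valid() accepts the resulting combination of the three
 * timeouts.
 */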
205 
206 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
207 {
208 	return (struct srp_target_port *) host->hostdata;
209 }
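/*
 * host_to_target() relies on the standard SCSI midlayer idiom of
 * embedding driver-private data in the Scsi_Host allocation. A sketch
 * of the allocation side (simplified; the real allocation happens in
 * srp_create_target() later in this file):
 *
 *	struct Scsi_Host *shost =
 *		scsi_host_alloc(&srp_template, sizeof(struct srp_target_port));
 *	struct srp_target_port *target = host_to_target(shost);
 *
 * which is why the cast of host->hostdata is safe by construction.
 */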
210 
211 static const char *srp_target_info(struct Scsi_Host *host)
212 {
213 	return host_to_target(host)->target_name;
214 }
215 
216 static int srp_target_is_topspin(struct srp_target_port *target)
217 {
218 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
219 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
220 
221 	return topspin_workarounds &&
222 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
223 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
224 }
225 
226 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
227 				   gfp_t gfp_mask,
228 				   enum dma_data_direction direction)
229 {
230 	struct srp_iu *iu;
231 
232 	iu = kmalloc(sizeof *iu, gfp_mask);
233 	if (!iu)
234 		goto out;
235 
236 	iu->buf = kzalloc(size, gfp_mask);
237 	if (!iu->buf)
238 		goto out_free_iu;
239 
240 	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
241 				    direction);
242 	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
243 		goto out_free_buf;
244 
245 	iu->size      = size;
246 	iu->direction = direction;
247 
248 	return iu;
249 
250 out_free_buf:
251 	kfree(iu->buf);
252 out_free_iu:
253 	kfree(iu);
254 out:
255 	return NULL;
256 }
257 
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
259 {
260 	if (!iu)
261 		return;
262 
263 	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 			    iu->direction);
265 	kfree(iu->buf);
266 	kfree(iu);
267 }
268 
269 static void srp_qp_event(struct ib_event *event, void *context)
270 {
271 	pr_debug("QP event %s (%d)\n",
272 		 ib_event_msg(event->event), event->event);
273 }
274 
275 static int srp_init_ib_qp(struct srp_target_port *target,
276 			  struct ib_qp *qp)
277 {
278 	struct ib_qp_attr *attr;
279 	int ret;
280 
281 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
282 	if (!attr)
283 		return -ENOMEM;
284 
285 	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
286 				  target->srp_host->port,
287 				  be16_to_cpu(target->ib_cm.pkey),
288 				  &attr->pkey_index);
289 	if (ret)
290 		goto out;
291 
292 	attr->qp_state        = IB_QPS_INIT;
293 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
294 				    IB_ACCESS_REMOTE_WRITE);
295 	attr->port_num        = target->srp_host->port;
296 
297 	ret = ib_modify_qp(qp, attr,
298 			   IB_QP_STATE		|
299 			   IB_QP_PKEY_INDEX	|
300 			   IB_QP_ACCESS_FLAGS	|
301 			   IB_QP_PORT);
302 
303 out:
304 	kfree(attr);
305 	return ret;
306 }
307 
308 static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
309 {
310 	struct srp_target_port *target = ch->target;
311 	struct ib_cm_id *new_cm_id;
312 
313 	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
314 				    srp_ib_cm_handler, ch);
315 	if (IS_ERR(new_cm_id))
316 		return PTR_ERR(new_cm_id);
317 
318 	if (ch->ib_cm.cm_id)
319 		ib_destroy_cm_id(ch->ib_cm.cm_id);
320 	ch->ib_cm.cm_id = new_cm_id;
321 	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
322 			    target->srp_host->port))
323 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
324 	else
325 		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
326 	ch->ib_cm.path.sgid = target->sgid;
327 	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
328 	ch->ib_cm.path.pkey = target->ib_cm.pkey;
329 	ch->ib_cm.path.service_id = target->ib_cm.service_id;
330 
331 	return 0;
332 }
333 
334 static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
335 {
336 	struct srp_target_port *target = ch->target;
337 	struct rdma_cm_id *new_cm_id;
338 	int ret;
339 
340 	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
341 				   RDMA_PS_TCP, IB_QPT_RC);
342 	if (IS_ERR(new_cm_id)) {
343 		ret = PTR_ERR(new_cm_id);
344 		new_cm_id = NULL;
345 		goto out;
346 	}
347 
348 	init_completion(&ch->done);
349 	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
350 				&target->rdma_cm.src.sa : NULL,
351 				&target->rdma_cm.dst.sa,
352 				SRP_PATH_REC_TIMEOUT_MS);
353 	if (ret) {
354 		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
355 		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
356 		goto out;
357 	}
358 	ret = wait_for_completion_interruptible(&ch->done);
359 	if (ret < 0)
360 		goto out;
361 
362 	ret = ch->status;
363 	if (ret) {
364 		pr_err("Resolving address %pISpsc failed (%d)\n",
365 		       &target->rdma_cm.dst, ret);
366 		goto out;
367 	}
368 
369 	swap(ch->rdma_cm.cm_id, new_cm_id);
370 
371 out:
372 	if (new_cm_id)
373 		rdma_destroy_id(new_cm_id);
374 
375 	return ret;
376 }
377 
378 static int srp_new_cm_id(struct srp_rdma_ch *ch)
379 {
380 	struct srp_target_port *target = ch->target;
381 
382 	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
383 		srp_new_ib_cm_id(ch);
384 }
385 
386 /**
387  * srp_destroy_fr_pool() - free the resources owned by a pool
388  * @pool: Fast registration pool to be destroyed.
389  */
390 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
391 {
392 	int i;
393 	struct srp_fr_desc *d;
394 
395 	if (!pool)
396 		return;
397 
398 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
399 		if (d->mr)
400 			ib_dereg_mr(d->mr);
401 	}
402 	kfree(pool);
403 }
404 
405 /**
406  * srp_create_fr_pool() - allocate and initialize a pool for fast registration
407  * @device:            IB device to allocate fast registration descriptors for.
408  * @pd:                Protection domain associated with the FR descriptors.
409  * @pool_size:         Number of descriptors to allocate.
410  * @max_page_list_len: Maximum fast registration work request page list length.
411  */
412 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
413 					      struct ib_pd *pd, int pool_size,
414 					      int max_page_list_len)
415 {
416 	struct srp_fr_pool *pool;
417 	struct srp_fr_desc *d;
418 	struct ib_mr *mr;
419 	int i, ret = -EINVAL;
420 	enum ib_mr_type mr_type;
421 
422 	if (pool_size <= 0)
423 		goto err;
424 	ret = -ENOMEM;
425 	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
426 	if (!pool)
427 		goto err;
428 	pool->size = pool_size;
429 	pool->max_page_list_len = max_page_list_len;
430 	spin_lock_init(&pool->lock);
431 	INIT_LIST_HEAD(&pool->free_list);
432 
433 	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
434 		mr_type = IB_MR_TYPE_SG_GAPS;
435 	else
436 		mr_type = IB_MR_TYPE_MEM_REG;
437 
438 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
439 		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
440 		if (IS_ERR(mr)) {
441 			ret = PTR_ERR(mr);
442 			if (ret == -ENOMEM)
443 				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
444 					dev_name(&device->dev));
445 			goto destroy_pool;
446 		}
447 		d->mr = mr;
448 		list_add_tail(&d->entry, &pool->free_list);
449 	}
450 
451 out:
452 	return pool;
453 
454 destroy_pool:
455 	srp_destroy_fr_pool(pool);
456 
457 err:
458 	pool = ERR_PTR(ret);
459 	goto out;
460 }
461 
462 /**
463  * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
464  * @pool: Pool to obtain descriptor from.
465  */
466 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
467 {
468 	struct srp_fr_desc *d = NULL;
469 	unsigned long flags;
470 
471 	spin_lock_irqsave(&pool->lock, flags);
472 	if (!list_empty(&pool->free_list)) {
473 		d = list_first_entry(&pool->free_list, typeof(*d), entry);
474 		list_del(&d->entry);
475 	}
476 	spin_unlock_irqrestore(&pool->lock, flags);
477 
478 	return d;
479 }
480 
481 /**
482  * srp_fr_pool_put() - put an FR descriptor back in the free list
483  * @pool: Pool the descriptor was allocated from.
484  * @desc: Pointer to an array of fast registration descriptor pointers.
485  * @n:    Number of descriptors to put back.
486  *
487  * Note: The caller must already have queued an invalidation request for
488  * desc->mr->rkey before calling this function.
489  */
490 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
491 			    int n)
492 {
493 	unsigned long flags;
494 	int i;
495 
496 	spin_lock_irqsave(&pool->lock, flags);
497 	for (i = 0; i < n; i++)
498 		list_add(&desc[i]->entry, &pool->free_list);
499 	spin_unlock_irqrestore(&pool->lock, flags);
500 }
501 
502 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
503 {
504 	struct srp_device *dev = target->srp_host->srp_dev;
505 
506 	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
507 				  dev->max_pages_per_mr);
508 }
509 
510 /**
511  * srp_destroy_qp() - destroy an RDMA queue pair
512  * @ch: SRP RDMA channel.
513  *
514  * Drain the QP before destroying it. This prevents the receive
515  * completion handler from accessing the queue pair while it is
516  * being destroyed.
517  */
518 static void srp_destroy_qp(struct srp_rdma_ch *ch)
519 {
520 	spin_lock_irq(&ch->lock);
521 	ib_process_cq_direct(ch->send_cq, -1);
522 	spin_unlock_irq(&ch->lock);
523 
524 	ib_drain_qp(ch->qp);
525 	ib_destroy_qp(ch->qp);
526 }
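/*
 * Ordering note (interpretation of the code above): the send CQ is
 * polled under ch->lock first because srp_send_done() moves IUs onto
 * ch->free_tx, which is protected by that lock; ib_drain_qp() then
 * flushes the remaining work requests before the QP is destroyed.
 */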
527 
528 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
529 {
530 	struct srp_target_port *target = ch->target;
531 	struct srp_device *dev = target->srp_host->srp_dev;
532 	const struct ib_device_attr *attr = &dev->dev->attrs;
533 	struct ib_qp_init_attr *init_attr;
534 	struct ib_cq *recv_cq, *send_cq;
535 	struct ib_qp *qp;
536 	struct srp_fr_pool *fr_pool = NULL;
537 	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
538 	int ret;
539 
540 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
541 	if (!init_attr)
542 		return -ENOMEM;
543 
544 	/* queue_size + 1 for ib_drain_rq() */
545 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
546 				ch->comp_vector, IB_POLL_SOFTIRQ);
547 	if (IS_ERR(recv_cq)) {
548 		ret = PTR_ERR(recv_cq);
549 		goto err;
550 	}
551 
552 	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
553 				ch->comp_vector, IB_POLL_DIRECT);
554 	if (IS_ERR(send_cq)) {
555 		ret = PTR_ERR(send_cq);
556 		goto err_recv_cq;
557 	}
558 
559 	init_attr->event_handler       = srp_qp_event;
560 	init_attr->cap.max_send_wr     = m * target->queue_size;
561 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
562 	init_attr->cap.max_recv_sge    = 1;
563 	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
564 	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
565 	init_attr->qp_type             = IB_QPT_RC;
566 	init_attr->send_cq             = send_cq;
567 	init_attr->recv_cq             = recv_cq;
568 
569 	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
570 
571 	if (target->using_rdma_cm) {
572 		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
573 		qp = ch->rdma_cm.cm_id->qp;
574 	} else {
575 		qp = ib_create_qp(dev->pd, init_attr);
576 		if (!IS_ERR(qp)) {
577 			ret = srp_init_ib_qp(target, qp);
578 			if (ret)
579 				ib_destroy_qp(qp);
580 		} else {
581 			ret = PTR_ERR(qp);
582 		}
583 	}
584 	if (ret) {
585 		pr_err("QP creation failed for dev %s: %d\n",
586 		       dev_name(&dev->dev->dev), ret);
587 		goto err_send_cq;
588 	}
589 
590 	if (dev->use_fast_reg) {
591 		fr_pool = srp_alloc_fr_pool(target);
592 		if (IS_ERR(fr_pool)) {
593 			ret = PTR_ERR(fr_pool);
594 			shost_printk(KERN_WARNING, target->scsi_host, PFX
595 				     "FR pool allocation failed (%d)\n", ret);
596 			goto err_qp;
597 		}
598 	}
599 
600 	if (ch->qp)
601 		srp_destroy_qp(ch);
602 	if (ch->recv_cq)
603 		ib_free_cq(ch->recv_cq);
604 	if (ch->send_cq)
605 		ib_free_cq(ch->send_cq);
606 
607 	ch->qp = qp;
608 	ch->recv_cq = recv_cq;
609 	ch->send_cq = send_cq;
610 
611 	if (dev->use_fast_reg) {
612 		if (ch->fr_pool)
613 			srp_destroy_fr_pool(ch->fr_pool);
614 		ch->fr_pool = fr_pool;
615 	}
616 
617 	kfree(init_attr);
618 	return 0;
619 
620 err_qp:
621 	if (target->using_rdma_cm)
622 		rdma_destroy_qp(ch->rdma_cm.cm_id);
623 	else
624 		ib_destroy_qp(qp);
625 
626 err_send_cq:
627 	ib_free_cq(send_cq);
628 
629 err_recv_cq:
630 	ib_free_cq(recv_cq);
631 
632 err:
633 	kfree(init_attr);
634 	return ret;
635 }
636 
637 /*
638  * Note: this function may be called without srp_alloc_iu_bufs() having been
639  * invoked. Hence the ch->[rt]x_ring checks.
640  */
641 static void srp_free_ch_ib(struct srp_target_port *target,
642 			   struct srp_rdma_ch *ch)
643 {
644 	struct srp_device *dev = target->srp_host->srp_dev;
645 	int i;
646 
647 	if (!ch->target)
648 		return;
649 
650 	if (target->using_rdma_cm) {
651 		if (ch->rdma_cm.cm_id) {
652 			rdma_destroy_id(ch->rdma_cm.cm_id);
653 			ch->rdma_cm.cm_id = NULL;
654 		}
655 	} else {
656 		if (ch->ib_cm.cm_id) {
657 			ib_destroy_cm_id(ch->ib_cm.cm_id);
658 			ch->ib_cm.cm_id = NULL;
659 		}
660 	}
661 
662 	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
663 	if (!ch->qp)
664 		return;
665 
666 	if (dev->use_fast_reg) {
667 		if (ch->fr_pool)
668 			srp_destroy_fr_pool(ch->fr_pool);
669 	}
670 
671 	srp_destroy_qp(ch);
672 	ib_free_cq(ch->send_cq);
673 	ib_free_cq(ch->recv_cq);
674 
675 	/*
676 	 * Prevent the SCSI error handler from using this channel after
677 	 * it has been freed: the error handler may keep trying to
678 	 * perform recovery actions after scsi_remove_host() has
679 	 * returned.
680 	 */
681 	ch->target = NULL;
682 
683 	ch->qp = NULL;
684 	ch->send_cq = ch->recv_cq = NULL;
685 
686 	if (ch->rx_ring) {
687 		for (i = 0; i < target->queue_size; ++i)
688 			srp_free_iu(target->srp_host, ch->rx_ring[i]);
689 		kfree(ch->rx_ring);
690 		ch->rx_ring = NULL;
691 	}
692 	if (ch->tx_ring) {
693 		for (i = 0; i < target->queue_size; ++i)
694 			srp_free_iu(target->srp_host, ch->tx_ring[i]);
695 		kfree(ch->tx_ring);
696 		ch->tx_ring = NULL;
697 	}
698 }
699 
700 static void srp_path_rec_completion(int status,
701 				    struct sa_path_rec *pathrec,
702 				    void *ch_ptr)
703 {
704 	struct srp_rdma_ch *ch = ch_ptr;
705 	struct srp_target_port *target = ch->target;
706 
707 	ch->status = status;
708 	if (status)
709 		shost_printk(KERN_ERR, target->scsi_host,
710 			     PFX "Got failed path rec status %d\n", status);
711 	else
712 		ch->ib_cm.path = *pathrec;
713 	complete(&ch->done);
714 }
715 
716 static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
717 {
718 	struct srp_target_port *target = ch->target;
719 	int ret;
720 
721 	ch->ib_cm.path.numb_path = 1;
722 
723 	init_completion(&ch->done);
724 
725 	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
726 					       target->srp_host->srp_dev->dev,
727 					       target->srp_host->port,
728 					       &ch->ib_cm.path,
729 					       IB_SA_PATH_REC_SERVICE_ID |
730 					       IB_SA_PATH_REC_DGID	 |
731 					       IB_SA_PATH_REC_SGID	 |
732 					       IB_SA_PATH_REC_NUMB_PATH	 |
733 					       IB_SA_PATH_REC_PKEY,
734 					       SRP_PATH_REC_TIMEOUT_MS,
735 					       GFP_KERNEL,
736 					       srp_path_rec_completion,
737 					       ch, &ch->ib_cm.path_query);
738 	if (ch->ib_cm.path_query_id < 0)
739 		return ch->ib_cm.path_query_id;
740 
741 	ret = wait_for_completion_interruptible(&ch->done);
742 	if (ret < 0)
743 		return ret;
744 
745 	if (ch->status < 0)
746 		shost_printk(KERN_WARNING, target->scsi_host,
747 			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
748 			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
749 			     be16_to_cpu(target->ib_cm.pkey),
750 			     be64_to_cpu(target->ib_cm.service_id));
751 
752 	return ch->status;
753 }
754 
755 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
756 {
757 	struct srp_target_port *target = ch->target;
758 	int ret;
759 
760 	init_completion(&ch->done);
761 
762 	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
763 	if (ret)
764 		return ret;
765 
766 	wait_for_completion_interruptible(&ch->done);
767 
768 	if (ch->status != 0)
769 		shost_printk(KERN_WARNING, target->scsi_host,
770 			     PFX "Path resolution failed\n");
771 
772 	return ch->status;
773 }
774 
775 static int srp_lookup_path(struct srp_rdma_ch *ch)
776 {
777 	struct srp_target_port *target = ch->target;
778 
779 	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
780 		srp_ib_lookup_path(ch);
781 }
782 
783 static u8 srp_get_subnet_timeout(struct srp_host *host)
784 {
785 	struct ib_port_attr attr;
786 	int ret;
787 	u8 subnet_timeout = 18;
788 
789 	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 	if (ret == 0)
791 		subnet_timeout = attr.subnet_timeout;
792 
793 	if (unlikely(subnet_timeout < 15))
794 		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
795 			dev_name(&host->srp_dev->dev->dev), subnet_timeout);
796 
797 	return subnet_timeout;
798 }
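/*
 * Background for the warning above (hedged summary of the IBTA spec):
 * a subnet timeout value n corresponds to a timeout of roughly
 * 4.096 us * 2^n, so the default of 18 yields ~1.07 s while 15 yields
 * only ~134 ms, which is why small values can make SRP logins time out
 * against slow targets. srp_send_req() adds 2 to this value when
 * setting the local and remote CM response timeouts.
 */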
799 
800 static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
801 			bool multich)
802 {
803 	struct srp_target_port *target = ch->target;
804 	struct {
805 		struct rdma_conn_param	  rdma_param;
806 		struct srp_login_req_rdma rdma_req;
807 		struct ib_cm_req_param	  ib_param;
808 		struct srp_login_req	  ib_req;
809 	} *req = NULL;
810 	char *ipi, *tpi;
811 	int status;
812 
813 	req = kzalloc(sizeof *req, GFP_KERNEL);
814 	if (!req)
815 		return -ENOMEM;
816 
817 	req->ib_param.flow_control = 1;
818 	req->ib_param.retry_count = target->tl_retry_count;
819 
820 	/*
821 	 * Pick some arbitrary defaults here; we could make these
822 	 * module parameters if anyone cared about setting them.
823 	 */
824 	req->ib_param.responder_resources = 4;
825 	req->ib_param.rnr_retry_count = 7;
826 	req->ib_param.max_cm_retries = 15;
827 
828 	req->ib_req.opcode = SRP_LOGIN_REQ;
829 	req->ib_req.tag = 0;
830 	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
831 	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
832 					      SRP_BUF_FORMAT_INDIRECT);
833 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
834 				 SRP_MULTICHAN_SINGLE);
835 	if (srp_use_imm_data) {
836 		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
837 		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
838 	}
839 
840 	if (target->using_rdma_cm) {
841 		req->rdma_param.flow_control = req->ib_param.flow_control;
842 		req->rdma_param.responder_resources =
843 			req->ib_param.responder_resources;
844 		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
845 		req->rdma_param.retry_count = req->ib_param.retry_count;
846 		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
847 		req->rdma_param.private_data = &req->rdma_req;
848 		req->rdma_param.private_data_len = sizeof(req->rdma_req);
849 
850 		req->rdma_req.opcode = req->ib_req.opcode;
851 		req->rdma_req.tag = req->ib_req.tag;
852 		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
853 		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
854 		req->rdma_req.req_flags	= req->ib_req.req_flags;
855 		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
856 
857 		ipi = req->rdma_req.initiator_port_id;
858 		tpi = req->rdma_req.target_port_id;
859 	} else {
860 		u8 subnet_timeout;
861 
862 		subnet_timeout = srp_get_subnet_timeout(target->srp_host);
863 
864 		req->ib_param.primary_path = &ch->ib_cm.path;
865 		req->ib_param.alternate_path = NULL;
866 		req->ib_param.service_id = target->ib_cm.service_id;
867 		get_random_bytes(&req->ib_param.starting_psn, 4);
868 		req->ib_param.starting_psn &= 0xffffff;
869 		req->ib_param.qp_num = ch->qp->qp_num;
870 		req->ib_param.qp_type = ch->qp->qp_type;
871 		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
872 		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
873 		req->ib_param.private_data = &req->ib_req;
874 		req->ib_param.private_data_len = sizeof(req->ib_req);
875 
876 		ipi = req->ib_req.initiator_port_id;
877 		tpi = req->ib_req.target_port_id;
878 	}
879 
880 	/*
881 	 * In the published SRP specification (draft rev. 16a), the
882 	 * port identifier format is 8 bytes of ID extension followed
883 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
884 	 * opposite order, so that the GUID comes first.
885 	 *
886 	 * Targets conforming to these obsolete drafts can be
887 	 * recognized by the I/O Class they report.
888 	 */
889 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
890 		memcpy(ipi,     &target->sgid.global.interface_id, 8);
891 		memcpy(ipi + 8, &target->initiator_ext, 8);
892 		memcpy(tpi,     &target->ioc_guid, 8);
893 		memcpy(tpi + 8, &target->id_ext, 8);
894 	} else {
895 		memcpy(ipi,     &target->initiator_ext, 8);
896 		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
897 		memcpy(tpi,     &target->id_ext, 8);
898 		memcpy(tpi + 8, &target->ioc_guid, 8);
899 	}
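	/*
	 * Resulting 16-byte port identifier layouts (derived from the
	 * assignments above):
	 *
	 *	rev. 16a (default):	bytes 0-7 = ID extension, bytes 8-15 = GUID
	 *	SRP_REV10_IB_IO_CLASS:	bytes 0-7 = GUID, bytes 8-15 = ID extension
	 */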
900 
901 	/*
902 	 * Topspin/Cisco SRP targets will reject our login unless we
903 	 * zero out the first 8 bytes of our initiator port ID and set
904 	 * the second 8 bytes to the local node GUID.
905 	 */
906 	if (srp_target_is_topspin(target)) {
907 		shost_printk(KERN_DEBUG, target->scsi_host,
908 			     PFX "Topspin/Cisco initiator port ID workaround "
909 			     "activated for target GUID %016llx\n",
910 			     be64_to_cpu(target->ioc_guid));
911 		memset(ipi, 0, 8);
912 		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
913 	}
914 
915 	if (target->using_rdma_cm)
916 		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
917 	else
918 		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
919 
920 	kfree(req);
921 
922 	return status;
923 }
924 
925 static bool srp_queue_remove_work(struct srp_target_port *target)
926 {
927 	bool changed = false;
928 
929 	spin_lock_irq(&target->lock);
930 	if (target->state != SRP_TARGET_REMOVED) {
931 		target->state = SRP_TARGET_REMOVED;
932 		changed = true;
933 	}
934 	spin_unlock_irq(&target->lock);
935 
936 	if (changed)
937 		queue_work(srp_remove_wq, &target->remove_work);
938 
939 	return changed;
940 }
941 
942 static void srp_disconnect_target(struct srp_target_port *target)
943 {
944 	struct srp_rdma_ch *ch;
945 	int i, ret;
946 
947 	/* XXX should send SRP_I_LOGOUT request */
948 
949 	for (i = 0; i < target->ch_count; i++) {
950 		ch = &target->ch[i];
951 		ch->connected = false;
952 		ret = 0;
953 		if (target->using_rdma_cm) {
954 			if (ch->rdma_cm.cm_id)
955 				rdma_disconnect(ch->rdma_cm.cm_id);
956 		} else {
957 			if (ch->ib_cm.cm_id)
958 				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
959 						      NULL, 0);
960 		}
961 		if (ret < 0) {
962 			shost_printk(KERN_DEBUG, target->scsi_host,
963 				     PFX "Sending CM DREQ failed\n");
964 		}
965 	}
966 }
967 
968 static void srp_free_req_data(struct srp_target_port *target,
969 			      struct srp_rdma_ch *ch)
970 {
971 	struct srp_device *dev = target->srp_host->srp_dev;
972 	struct ib_device *ibdev = dev->dev;
973 	struct srp_request *req;
974 	int i;
975 
976 	if (!ch->req_ring)
977 		return;
978 
979 	for (i = 0; i < target->req_ring_size; ++i) {
980 		req = &ch->req_ring[i];
981 		if (dev->use_fast_reg)
982 			kfree(req->fr_list);
983 		if (req->indirect_dma_addr) {
984 			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
985 					    target->indirect_size,
986 					    DMA_TO_DEVICE);
987 		}
988 		kfree(req->indirect_desc);
989 	}
990 
991 	kfree(ch->req_ring);
992 	ch->req_ring = NULL;
993 }
994 
995 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
996 {
997 	struct srp_target_port *target = ch->target;
998 	struct srp_device *srp_dev = target->srp_host->srp_dev;
999 	struct ib_device *ibdev = srp_dev->dev;
1000 	struct srp_request *req;
1001 	void *mr_list;
1002 	dma_addr_t dma_addr;
1003 	int i, ret = -ENOMEM;
1004 
1005 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
1006 			       GFP_KERNEL);
1007 	if (!ch->req_ring)
1008 		goto out;
1009 
1010 	for (i = 0; i < target->req_ring_size; ++i) {
1011 		req = &ch->req_ring[i];
1012 		mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
1013 					GFP_KERNEL);
1014 		if (!mr_list)
1015 			goto out;
1016 		if (srp_dev->use_fast_reg)
1017 			req->fr_list = mr_list;
1018 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
1019 		if (!req->indirect_desc)
1020 			goto out;
1021 
1022 		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
1023 					     target->indirect_size,
1024 					     DMA_TO_DEVICE);
1025 		if (ib_dma_mapping_error(ibdev, dma_addr))
1026 			goto out;
1027 
1028 		req->indirect_dma_addr = dma_addr;
1029 	}
1030 	ret = 0;
1031 
1032 out:
1033 	return ret;
1034 }
1035 
1036 /**
1037  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1038  * @shost: SCSI host whose attributes to remove from sysfs.
1039  *
1040  * Note: any attributes defined in the host template that did not exist
1041  * before this function was invoked are ignored.
1042  */
1043 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1044 {
1045 	struct device_attribute **attr;
1046 
1047 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1048 		device_remove_file(&shost->shost_dev, *attr);
1049 }
1050 
1051 static void srp_remove_target(struct srp_target_port *target)
1052 {
1053 	struct srp_rdma_ch *ch;
1054 	int i;
1055 
1056 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1057 
1058 	srp_del_scsi_host_attr(target->scsi_host);
1059 	srp_rport_get(target->rport);
1060 	srp_remove_host(target->scsi_host);
1061 	scsi_remove_host(target->scsi_host);
1062 	srp_stop_rport_timers(target->rport);
1063 	srp_disconnect_target(target);
1064 	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1065 	for (i = 0; i < target->ch_count; i++) {
1066 		ch = &target->ch[i];
1067 		srp_free_ch_ib(target, ch);
1068 	}
1069 	cancel_work_sync(&target->tl_err_work);
1070 	srp_rport_put(target->rport);
1071 	for (i = 0; i < target->ch_count; i++) {
1072 		ch = &target->ch[i];
1073 		srp_free_req_data(target, ch);
1074 	}
1075 	kfree(target->ch);
1076 	target->ch = NULL;
1077 
1078 	spin_lock(&target->srp_host->target_lock);
1079 	list_del(&target->list);
1080 	spin_unlock(&target->srp_host->target_lock);
1081 
1082 	scsi_host_put(target->scsi_host);
1083 }
1084 
1085 static void srp_remove_work(struct work_struct *work)
1086 {
1087 	struct srp_target_port *target =
1088 		container_of(work, struct srp_target_port, remove_work);
1089 
1090 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1091 
1092 	srp_remove_target(target);
1093 }
1094 
1095 static void srp_rport_delete(struct srp_rport *rport)
1096 {
1097 	struct srp_target_port *target = rport->lld_data;
1098 
1099 	srp_queue_remove_work(target);
1100 }
1101 
1102 /**
1103  * srp_connected_ch() - number of connected channels
1104  * @target: SRP target port.
1105  */
1106 static int srp_connected_ch(struct srp_target_port *target)
1107 {
1108 	int i, c = 0;
1109 
1110 	for (i = 0; i < target->ch_count; i++)
1111 		c += target->ch[i].connected;
1112 
1113 	return c;
1114 }
1115 
1116 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1117 			  bool multich)
1118 {
1119 	struct srp_target_port *target = ch->target;
1120 	int ret;
1121 
1122 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1123 
1124 	ret = srp_lookup_path(ch);
1125 	if (ret)
1126 		goto out;
1127 
1128 	while (1) {
1129 		init_completion(&ch->done);
1130 		ret = srp_send_req(ch, max_iu_len, multich);
1131 		if (ret)
1132 			goto out;
1133 		ret = wait_for_completion_interruptible(&ch->done);
1134 		if (ret < 0)
1135 			goto out;
1136 
1137 		/*
1138 		 * The CM event handling code will set status to
1139 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1140 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1141 		 * redirect REJ back.
1142 		 */
1143 		ret = ch->status;
1144 		switch (ret) {
1145 		case 0:
1146 			ch->connected = true;
1147 			goto out;
1148 
1149 		case SRP_PORT_REDIRECT:
1150 			ret = srp_lookup_path(ch);
1151 			if (ret)
1152 				goto out;
1153 			break;
1154 
1155 		case SRP_DLID_REDIRECT:
1156 			break;
1157 
1158 		case SRP_STALE_CONN:
1159 			shost_printk(KERN_ERR, target->scsi_host, PFX
1160 				     "giving up on stale connection\n");
1161 			ret = -ECONNRESET;
1162 			goto out;
1163 
1164 		default:
1165 			goto out;
1166 		}
1167 	}
1168 
1169 out:
1170 	return ret <= 0 ? ret : -ENODEV;
1171 }
1172 
1173 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1174 {
1175 	srp_handle_qp_err(cq, wc, "INV RKEY");
1176 }
1177 
1178 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1179 		u32 rkey)
1180 {
1181 	struct ib_send_wr wr = {
1182 		.opcode		    = IB_WR_LOCAL_INV,
1183 		.next		    = NULL,
1184 		.num_sge	    = 0,
1185 		.send_flags	    = 0,
1186 		.ex.invalidate_rkey = rkey,
1187 	};
1188 
1189 	wr.wr_cqe = &req->reg_cqe;
1190 	req->reg_cqe.done = srp_inv_rkey_err_done;
1191 	return ib_post_send(ch->qp, &wr, NULL);
1192 }
1193 
1194 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1195 			   struct srp_rdma_ch *ch,
1196 			   struct srp_request *req)
1197 {
1198 	struct srp_target_port *target = ch->target;
1199 	struct srp_device *dev = target->srp_host->srp_dev;
1200 	struct ib_device *ibdev = dev->dev;
1201 	int i, res;
1202 
1203 	if (!scsi_sglist(scmnd) ||
1204 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1205 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1206 		return;
1207 
1208 	if (dev->use_fast_reg) {
1209 		struct srp_fr_desc **pfr;
1210 
1211 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1212 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1213 			if (res < 0) {
1214 				shost_printk(KERN_ERR, target->scsi_host, PFX
1215 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1216 				  (*pfr)->mr->rkey, res);
1217 				queue_work(system_long_wq,
1218 					   &target->tl_err_work);
1219 			}
1220 		}
1221 		if (req->nmdesc)
1222 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1223 					req->nmdesc);
1224 	}
1225 
1226 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1227 			scmnd->sc_data_direction);
1228 }
1229 
1230 /**
1231  * srp_claim_req() - Take ownership of the scmnd associated with a request.
1232  * @ch: SRP RDMA channel.
1233  * @req: SRP request.
1234  * @sdev: If not NULL, only take ownership for this SCSI device.
1235  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1236  *         ownership of @req->scmnd if it equals @scmnd.
1237  *
1238  * Return value:
1239  * Either NULL or a pointer to the SCSI command the caller became owner of.
1240  */
1241 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1242 				       struct srp_request *req,
1243 				       struct scsi_device *sdev,
1244 				       struct scsi_cmnd *scmnd)
1245 {
1246 	unsigned long flags;
1247 
1248 	spin_lock_irqsave(&ch->lock, flags);
1249 	if (req->scmnd &&
1250 	    (!sdev || req->scmnd->device == sdev) &&
1251 	    (!scmnd || req->scmnd == scmnd)) {
1252 		scmnd = req->scmnd;
1253 		req->scmnd = NULL;
1254 	} else {
1255 		scmnd = NULL;
1256 	}
1257 	spin_unlock_irqrestore(&ch->lock, flags);
1258 
1259 	return scmnd;
1260 }
1261 
1262 /**
1263  * srp_free_req() - Unmap data and adjust ch->req_lim.
1264  * @ch:     SRP RDMA channel.
1265  * @req:    Request to be freed.
1266  * @scmnd:  SCSI command associated with @req.
1267  * @req_lim_delta: Amount to be added to @ch->req_lim.
1268  */
1269 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1270 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1271 {
1272 	unsigned long flags;
1273 
1274 	srp_unmap_data(scmnd, ch, req);
1275 
1276 	spin_lock_irqsave(&ch->lock, flags);
1277 	ch->req_lim += req_lim_delta;
1278 	spin_unlock_irqrestore(&ch->lock, flags);
1279 }
1280 
1281 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1282 			   struct scsi_device *sdev, int result)
1283 {
1284 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1285 
1286 	if (scmnd) {
1287 		srp_free_req(ch, req, scmnd, 0);
1288 		scmnd->result = result;
1289 		scmnd->scsi_done(scmnd);
1290 	}
1291 }
1292 
1293 static void srp_terminate_io(struct srp_rport *rport)
1294 {
1295 	struct srp_target_port *target = rport->lld_data;
1296 	struct srp_rdma_ch *ch;
1297 	int i, j;
1298 
1299 	for (i = 0; i < target->ch_count; i++) {
1300 		ch = &target->ch[i];
1301 
1302 		for (j = 0; j < target->req_ring_size; ++j) {
1303 			struct srp_request *req = &ch->req_ring[j];
1304 
1305 			srp_finish_req(ch, req, NULL,
1306 				       DID_TRANSPORT_FAILFAST << 16);
1307 		}
1308 	}
1309 }
1310 
1311 /* Calculate the maximum initiator-to-target information unit length. */
1312 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1313 				  uint32_t max_it_iu_size)
1314 {
1315 	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1316 		sizeof(struct srp_indirect_buf) +
1317 		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1318 
1319 	if (use_imm_data)
1320 		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1321 				 srp_max_imm_data);
1322 
1323 	if (max_it_iu_size)
1324 		max_iu_len = min(max_iu_len, max_it_iu_size);
1325 
1326 	pr_debug("max_iu_len = %d\n", max_iu_len);
1327 
1328 	return max_iu_len;
1329 }
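/*
 * Worked example (assuming the usual on-the-wire sizes: struct srp_cmd
 * is 48 bytes, SRP_MAX_ADD_CDB_LEN is 16, struct srp_indirect_buf is
 * 20 bytes and struct srp_direct_buf is 16 bytes): with cmd_sg_cnt = 12
 * the base length is 48 + 16 + 20 + 12 * 16 = 276 bytes. With
 * use_imm_data enabled and the default srp_max_imm_data of 8 KiB this
 * is raised to SRP_IMM_DATA_OFFSET + 8192 bytes, and a nonzero
 * max_it_iu_size caps the final result.
 */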
1330 
1331 /*
1332  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1333  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1334  * srp_reset_device() or srp_reset_host() calls will occur while this function
1335  * is in progress. One way to realize that is not to call this function
1336  * directly but to call srp_reconnect_rport() instead since that last function
1337  * serializes calls of this function via rport->mutex and also blocks
1338  * srp_queuecommand() calls before invoking this function.
1339  */
1340 static int srp_rport_reconnect(struct srp_rport *rport)
1341 {
1342 	struct srp_target_port *target = rport->lld_data;
1343 	struct srp_rdma_ch *ch;
1344 	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1345 						srp_use_imm_data,
1346 						target->max_it_iu_size);
1347 	int i, j, ret = 0;
1348 	bool multich = false;
1349 
1350 	srp_disconnect_target(target);
1351 
1352 	if (target->state == SRP_TARGET_SCANNING)
1353 		return -ENODEV;
1354 
1355 	/*
1356 	 * Now get a new local CM ID so that we avoid confusing the target in
1357 	 * case things are really fouled up. Doing so also ensures that all CM
1358 	 * callbacks will have finished before a new QP is allocated.
1359 	 */
1360 	for (i = 0; i < target->ch_count; i++) {
1361 		ch = &target->ch[i];
1362 		ret += srp_new_cm_id(ch);
1363 	}
1364 	for (i = 0; i < target->ch_count; i++) {
1365 		ch = &target->ch[i];
1366 		for (j = 0; j < target->req_ring_size; ++j) {
1367 			struct srp_request *req = &ch->req_ring[j];
1368 
1369 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1370 		}
1371 	}
1372 	for (i = 0; i < target->ch_count; i++) {
1373 		ch = &target->ch[i];
1374 		/*
1375 		 * Whether or not creating a new CM ID succeeded, create a new
1376 		 * QP. This guarantees that all completion callback function
1377 		 * invocations have finished before request resetting starts.
1378 		 */
1379 		ret += srp_create_ch_ib(ch);
1380 
1381 		INIT_LIST_HEAD(&ch->free_tx);
1382 		for (j = 0; j < target->queue_size; ++j)
1383 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1384 	}
1385 
1386 	target->qp_in_error = false;
1387 
1388 	for (i = 0; i < target->ch_count; i++) {
1389 		ch = &target->ch[i];
1390 		if (ret)
1391 			break;
1392 		ret = srp_connect_ch(ch, max_iu_len, multich);
1393 		multich = true;
1394 	}
1395 
1396 	if (ret == 0)
1397 		shost_printk(KERN_INFO, target->scsi_host,
1398 			     PFX "reconnect succeeded\n");
1399 
1400 	return ret;
1401 }
1402 
1403 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1404 			 unsigned int dma_len, u32 rkey)
1405 {
1406 	struct srp_direct_buf *desc = state->desc;
1407 
1408 	WARN_ON_ONCE(!dma_len);
1409 
1410 	desc->va = cpu_to_be64(dma_addr);
1411 	desc->key = cpu_to_be32(rkey);
1412 	desc->len = cpu_to_be32(dma_len);
1413 
1414 	state->total_len += dma_len;
1415 	state->desc++;
1416 	state->ndesc++;
1417 }
1418 
1419 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1420 {
1421 	srp_handle_qp_err(cq, wc, "FAST REG");
1422 }
1423 
1424 /*
1425  * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
1426  * at which to start in the first element. If sg_offset_p != NULL then
1427  * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1428  * byte that has not yet been mapped.
1429  */
1430 static int srp_map_finish_fr(struct srp_map_state *state,
1431 			     struct srp_request *req,
1432 			     struct srp_rdma_ch *ch, int sg_nents,
1433 			     unsigned int *sg_offset_p)
1434 {
1435 	struct srp_target_port *target = ch->target;
1436 	struct srp_device *dev = target->srp_host->srp_dev;
1437 	struct ib_reg_wr wr;
1438 	struct srp_fr_desc *desc;
1439 	u32 rkey;
1440 	int n, err;
1441 
1442 	if (state->fr.next >= state->fr.end) {
1443 		shost_printk(KERN_ERR, ch->target->scsi_host,
1444 			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1445 			     ch->target->mr_per_cmd);
1446 		return -ENOMEM;
1447 	}
1448 
1449 	WARN_ON_ONCE(!dev->use_fast_reg);
1450 
1451 	if (sg_nents == 1 && target->global_rkey) {
1452 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1453 
1454 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1455 			     sg_dma_len(state->sg) - sg_offset,
1456 			     target->global_rkey);
1457 		if (sg_offset_p)
1458 			*sg_offset_p = 0;
1459 		return 1;
1460 	}
1461 
1462 	desc = srp_fr_pool_get(ch->fr_pool);
1463 	if (!desc)
1464 		return -ENOMEM;
1465 
1466 	rkey = ib_inc_rkey(desc->mr->rkey);
1467 	ib_update_fast_reg_key(desc->mr, rkey);
1468 
1469 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1470 			 dev->mr_page_size);
1471 	if (unlikely(n < 0)) {
1472 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1473 		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1474 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1475 			 sg_offset_p ? *sg_offset_p : -1, n);
1476 		return n;
1477 	}
1478 
1479 	WARN_ON_ONCE(desc->mr->length == 0);
1480 
1481 	req->reg_cqe.done = srp_reg_mr_err_done;
1482 
1483 	wr.wr.next = NULL;
1484 	wr.wr.opcode = IB_WR_REG_MR;
1485 	wr.wr.wr_cqe = &req->reg_cqe;
1486 	wr.wr.num_sge = 0;
1487 	wr.wr.send_flags = 0;
1488 	wr.mr = desc->mr;
1489 	wr.key = desc->mr->rkey;
1490 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1491 		     IB_ACCESS_REMOTE_READ |
1492 		     IB_ACCESS_REMOTE_WRITE);
1493 
1494 	*state->fr.next++ = desc;
1495 	state->nmdesc++;
1496 
1497 	srp_map_desc(state, desc->mr->iova,
1498 		     desc->mr->length, desc->mr->rkey);
1499 
1500 	err = ib_post_send(ch->qp, &wr.wr, NULL);
1501 	if (unlikely(err)) {
1502 		WARN_ON_ONCE(err == -ENOMEM);
1503 		return err;
1504 	}
1505 
1506 	return n;
1507 }
1508 
1509 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1510 			 struct srp_request *req, struct scatterlist *scat,
1511 			 int count)
1512 {
1513 	unsigned int sg_offset = 0;
1514 
1515 	state->fr.next = req->fr_list;
1516 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1517 	state->sg = scat;
1518 
1519 	if (count == 0)
1520 		return 0;
1521 
1522 	while (count) {
1523 		int i, n;
1524 
1525 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1526 		if (unlikely(n < 0))
1527 			return n;
1528 
1529 		count -= n;
1530 		for (i = 0; i < n; i++)
1531 			state->sg = sg_next(state->sg);
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1538 			  struct srp_request *req, struct scatterlist *scat,
1539 			  int count)
1540 {
1541 	struct srp_target_port *target = ch->target;
1542 	struct scatterlist *sg;
1543 	int i;
1544 
1545 	for_each_sg(scat, sg, count, i) {
1546 		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1547 			     target->global_rkey);
1548 	}
1549 
1550 	return 0;
1551 }
1552 
1553 /*
1554  * Register the indirect data buffer descriptor with the HCA.
1555  *
1556  * Note: since the indirect data buffer descriptor has been allocated
1557  * with kmalloc(), it is guaranteed to be a physically contiguous
1558  * memory buffer.
1559  */
1560 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1561 		       void **next_mr, void **end_mr, u32 idb_len,
1562 		       __be32 *idb_rkey)
1563 {
1564 	struct srp_target_port *target = ch->target;
1565 	struct srp_device *dev = target->srp_host->srp_dev;
1566 	struct srp_map_state state;
1567 	struct srp_direct_buf idb_desc;
1568 	struct scatterlist idb_sg[1];
1569 	int ret;
1570 
1571 	memset(&state, 0, sizeof(state));
1572 	memset(&idb_desc, 0, sizeof(idb_desc));
1573 	state.gen.next = next_mr;
1574 	state.gen.end = end_mr;
1575 	state.desc = &idb_desc;
1576 	state.base_dma_addr = req->indirect_dma_addr;
1577 	state.dma_len = idb_len;
1578 
1579 	if (dev->use_fast_reg) {
1580 		state.sg = idb_sg;
1581 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1582 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1583 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1584 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1585 #endif
1586 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1587 		if (ret < 0)
1588 			return ret;
1589 		WARN_ON_ONCE(ret < 1);
1590 	} else {
1591 		return -EINVAL;
1592 	}
1593 
1594 	*idb_rkey = idb_desc.key;
1595 
1596 	return 0;
1597 }
1598 
1599 static void srp_check_mapping(struct srp_map_state *state,
1600 			      struct srp_rdma_ch *ch, struct srp_request *req,
1601 			      struct scatterlist *scat, int count)
1602 {
1603 	struct srp_device *dev = ch->target->srp_host->srp_dev;
1604 	struct srp_fr_desc **pfr;
1605 	u64 desc_len = 0, mr_len = 0;
1606 	int i;
1607 
1608 	for (i = 0; i < state->ndesc; i++)
1609 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1610 	if (dev->use_fast_reg)
1611 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1612 			mr_len += (*pfr)->mr->length;
1613 	if (desc_len != scsi_bufflen(req->scmnd) ||
1614 	    mr_len > scsi_bufflen(req->scmnd))
1615 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1616 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1617 		       state->ndesc, state->nmdesc);
1618 }
1619 
1620 /**
1621  * srp_map_data() - map SCSI data buffer onto an SRP request
1622  * @scmnd: SCSI command to map
1623  * @ch: SRP RDMA channel
1624  * @req: SRP request
1625  *
1626  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1627  * mapping failed. The size of any immediate data is not included in the
1628  * return value.
1629  */
1630 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1631 			struct srp_request *req)
1632 {
1633 	struct srp_target_port *target = ch->target;
1634 	struct scatterlist *scat, *sg;
1635 	struct srp_cmd *cmd = req->cmd->buf;
1636 	int i, len, nents, count, ret;
1637 	struct srp_device *dev;
1638 	struct ib_device *ibdev;
1639 	struct srp_map_state state;
1640 	struct srp_indirect_buf *indirect_hdr;
1641 	u64 data_len;
1642 	u32 idb_len, table_len;
1643 	__be32 idb_rkey;
1644 	u8 fmt;
1645 
1646 	req->cmd->num_sge = 1;
1647 
1648 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1649 		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1650 
1651 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1652 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1653 		shost_printk(KERN_WARNING, target->scsi_host,
1654 			     PFX "Unhandled data direction %d\n",
1655 			     scmnd->sc_data_direction);
1656 		return -EINVAL;
1657 	}
1658 
1659 	nents = scsi_sg_count(scmnd);
1660 	scat  = scsi_sglist(scmnd);
1661 	data_len = scsi_bufflen(scmnd);
1662 
1663 	dev = target->srp_host->srp_dev;
1664 	ibdev = dev->dev;
1665 
1666 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1667 	if (unlikely(count == 0))
1668 		return -EIO;
1669 
1670 	if (ch->use_imm_data &&
1671 	    count <= ch->max_imm_sge &&
1672 	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1673 	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1674 		struct srp_imm_buf *buf;
1675 		struct ib_sge *sge = &req->cmd->sge[1];
1676 
1677 		fmt = SRP_DATA_DESC_IMM;
1678 		len = SRP_IMM_DATA_OFFSET;
1679 		req->nmdesc = 0;
1680 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1681 		buf->len = cpu_to_be32(data_len);
1682 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1683 		for_each_sg(scat, sg, count, i) {
1684 			sge[i].addr   = sg_dma_address(sg);
1685 			sge[i].length = sg_dma_len(sg);
1686 			sge[i].lkey   = target->lkey;
1687 		}
1688 		req->cmd->num_sge += count;
1689 		goto map_complete;
1690 	}
1691 
1692 	fmt = SRP_DATA_DESC_DIRECT;
1693 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1694 		sizeof(struct srp_direct_buf);
1695 
1696 	if (count == 1 && target->global_rkey) {
1697 		/*
1698 		 * The midlayer only generated a single gather/scatter
1699 		 * entry, or DMA mapping coalesced everything to a
1700 		 * single entry.  So a direct descriptor along with
1701 		 * the DMA MR suffices.
1702 		 */
1703 		struct srp_direct_buf *buf;
1704 
1705 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1706 		buf->va  = cpu_to_be64(sg_dma_address(scat));
1707 		buf->key = cpu_to_be32(target->global_rkey);
1708 		buf->len = cpu_to_be32(sg_dma_len(scat));
1709 
1710 		req->nmdesc = 0;
1711 		goto map_complete;
1712 	}
1713 
1714 	/*
1715 	 * We have more than one scatter/gather entry, so build our indirect
1716 	 * descriptor table, trying to merge as many entries as we can.
1717 	 */
1718 	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1719 
1720 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1721 				   target->indirect_size, DMA_TO_DEVICE);
1722 
1723 	memset(&state, 0, sizeof(state));
1724 	state.desc = req->indirect_desc;
1725 	if (dev->use_fast_reg)
1726 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1727 	else
1728 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1729 	req->nmdesc = state.nmdesc;
1730 	if (ret < 0)
1731 		goto unmap;
1732 
1733 	{
1734 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1735 			"Memory mapping consistency check");
1736 		if (DYNAMIC_DEBUG_BRANCH(ddm))
1737 			srp_check_mapping(&state, ch, req, scat, count);
1738 	}
1739 
1740 	/* We've mapped the request, now pull as much of the indirect
1741 	 * descriptor table as we can into the command buffer. If this
1742 	 * target is not using an external indirect table, we are
1743 	 * guaranteed to fit into the command, as the SCSI layer won't
1744 	 * give us more S/G entries than we allow.
1745 	 */
1746 	if (state.ndesc == 1) {
1747 		/*
1748 		 * Memory registration collapsed the sg-list into one entry,
1749 		 * so use a direct descriptor.
1750 		 */
1751 		struct srp_direct_buf *buf;
1752 
1753 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1754 		*buf = req->indirect_desc[0];
1755 		goto map_complete;
1756 	}
1757 
1758 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1759 						!target->allow_ext_sg)) {
1760 		shost_printk(KERN_ERR, target->scsi_host,
1761 			     "Could not fit S/G list into SRP_CMD\n");
1762 		ret = -EIO;
1763 		goto unmap;
1764 	}
1765 
1766 	count = min(state.ndesc, target->cmd_sg_cnt);
1767 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1768 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1769 
1770 	fmt = SRP_DATA_DESC_INDIRECT;
1771 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1772 		sizeof(struct srp_indirect_buf);
1773 	len += count * sizeof (struct srp_direct_buf);
1774 
1775 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1776 	       count * sizeof (struct srp_direct_buf));
1777 
1778 	if (!target->global_rkey) {
1779 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1780 				  idb_len, &idb_rkey);
1781 		if (ret < 0)
1782 			goto unmap;
1783 		req->nmdesc++;
1784 	} else {
1785 		idb_rkey = cpu_to_be32(target->global_rkey);
1786 	}
1787 
1788 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1789 	indirect_hdr->table_desc.key = idb_rkey;
1790 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1791 	indirect_hdr->len = cpu_to_be32(state.total_len);
1792 
1793 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1794 		cmd->data_out_desc_cnt = count;
1795 	else
1796 		cmd->data_in_desc_cnt = count;
1797 
1798 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1799 				      DMA_TO_DEVICE);
1800 
1801 map_complete:
1802 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1803 		cmd->buf_fmt = fmt << 4;
1804 	else
1805 		cmd->buf_fmt = fmt;
1806 
1807 	return len;
1808 
1809 unmap:
1810 	srp_unmap_data(scmnd, ch, req);
1811 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1812 		ret = -E2BIG;
1813 	return ret;
1814 }
1815 
1816 /*
1817  * Return an IU and possible credit to the free pool
1818  */
1819 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1820 			  enum srp_iu_type iu_type)
1821 {
1822 	unsigned long flags;
1823 
1824 	spin_lock_irqsave(&ch->lock, flags);
1825 	list_add(&iu->list, &ch->free_tx);
1826 	if (iu_type != SRP_IU_RSP)
1827 		++ch->req_lim;
1828 	spin_unlock_irqrestore(&ch->lock, flags);
1829 }
1830 
1831 /*
1832  * Must be called with ch->lock held to protect req_lim and free_tx.
1833  * If IU is not sent, it must be returned using srp_put_tx_iu().
1834  *
1835  * Note:
1836  * An upper limit for the number of allocated information units for each
1837  * request type is:
1838  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1839  *   more than Scsi_Host.can_queue requests.
1840  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1841  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1842  *   one unanswered SRP request to an initiator.
1843  */
1844 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1845 				      enum srp_iu_type iu_type)
1846 {
1847 	struct srp_target_port *target = ch->target;
1848 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1849 	struct srp_iu *iu;
1850 
1851 	lockdep_assert_held(&ch->lock);
1852 
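	/*
	 * Poll the send CQ so that IUs whose send has completed are moved
	 * back onto the free_tx list before that list is tested below.
	 */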
1853 	ib_process_cq_direct(ch->send_cq, -1);
1854 
1855 	if (list_empty(&ch->free_tx))
1856 		return NULL;
1857 
1858 	/* Initiator responses to target requests do not consume credits */
1859 	if (iu_type != SRP_IU_RSP) {
1860 		if (ch->req_lim <= rsv) {
1861 			++target->zero_req_lim;
1862 			return NULL;
1863 		}
1864 
1865 		--ch->req_lim;
1866 	}
1867 
1868 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1869 	list_del(&iu->list);
1870 	return iu;
1871 }
1872 
1873 /*
1874  * Note: if this function is called from inside ib_drain_sq() then it will
1875  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1876  * with status IB_WC_SUCCESS then that's a bug.
1877  */
1878 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1879 {
1880 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1881 	struct srp_rdma_ch *ch = cq->cq_context;
1882 
1883 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1884 		srp_handle_qp_err(cq, wc, "SEND");
1885 		return;
1886 	}
1887 
1888 	lockdep_assert_held(&ch->lock);
1889 
1890 	list_add(&iu->list, &ch->free_tx);
1891 }
1892 
1893 /**
1894  * srp_post_send() - send an SRP information unit
1895  * @ch: RDMA channel over which to send the information unit.
1896  * @iu: Information unit to send.
1897  * @len: Length of the information unit excluding immediate data.
1898  */
1899 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1900 {
1901 	struct srp_target_port *target = ch->target;
1902 	struct ib_send_wr wr;
1903 
1904 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1905 		return -EINVAL;
1906 
1907 	iu->sge[0].addr   = iu->dma;
1908 	iu->sge[0].length = len;
1909 	iu->sge[0].lkey   = target->lkey;
1910 
1911 	iu->cqe.done = srp_send_done;
1912 
1913 	wr.next       = NULL;
1914 	wr.wr_cqe     = &iu->cqe;
1915 	wr.sg_list    = &iu->sge[0];
1916 	wr.num_sge    = iu->num_sge;
1917 	wr.opcode     = IB_WR_SEND;
1918 	wr.send_flags = IB_SEND_SIGNALED;
1919 
1920 	return ib_post_send(ch->qp, &wr, NULL);
1921 }
1922 
1923 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1924 {
1925 	struct srp_target_port *target = ch->target;
1926 	struct ib_recv_wr wr;
1927 	struct ib_sge list;
1928 
1929 	list.addr   = iu->dma;
1930 	list.length = iu->size;
1931 	list.lkey   = target->lkey;
1932 
1933 	iu->cqe.done = srp_recv_done;
1934 
1935 	wr.next     = NULL;
1936 	wr.wr_cqe   = &iu->cqe;
1937 	wr.sg_list  = &list;
1938 	wr.num_sge  = 1;
1939 
1940 	return ib_post_recv(ch->qp, &wr, NULL);
1941 }
1942 
1943 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1944 {
1945 	struct srp_target_port *target = ch->target;
1946 	struct srp_request *req;
1947 	struct scsi_cmnd *scmnd;
1948 	unsigned long flags;
1949 
1950 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1951 		spin_lock_irqsave(&ch->lock, flags);
1952 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1953 		if (rsp->tag == ch->tsk_mgmt_tag) {
1954 			ch->tsk_mgmt_status = -1;
1955 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1956 				ch->tsk_mgmt_status = rsp->data[3];
1957 			complete(&ch->tsk_mgmt_done);
1958 		} else {
1959 			shost_printk(KERN_ERR, target->scsi_host,
1960 				     "Received tsk mgmt response too late for tag %#llx\n",
1961 				     rsp->tag);
1962 		}
1963 		spin_unlock_irqrestore(&ch->lock, flags);
1964 	} else {
1965 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1966 		if (scmnd && scmnd->host_scribble) {
1967 			req = (void *)scmnd->host_scribble;
1968 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1969 		} else {
1970 			scmnd = NULL;
1971 		}
1972 		if (!scmnd) {
1973 			shost_printk(KERN_ERR, target->scsi_host,
1974 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1975 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1976 
1977 			spin_lock_irqsave(&ch->lock, flags);
1978 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1979 			spin_unlock_irqrestore(&ch->lock, flags);
1980 
1981 			return;
1982 		}
1983 		scmnd->result = rsp->status;
1984 
1985 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1986 			memcpy(scmnd->sense_buffer, rsp->data +
1987 			       be32_to_cpu(rsp->resp_data_len),
1988 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1989 				     SCSI_SENSE_BUFFERSIZE));
1990 		}
1991 
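		/*
		 * Pass any data underflow or overflow reported by the target
		 * on to the SCSI mid-layer; overflows are reported as
		 * negative residual counts.
		 */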
1992 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1993 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1994 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1995 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1996 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1997 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1998 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1999 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
2000 
2001 		srp_free_req(ch, req, scmnd,
2002 			     be32_to_cpu(rsp->req_lim_delta));
2003 
2004 		scmnd->host_scribble = NULL;
2005 		scmnd->scsi_done(scmnd);
2006 	}
2007 }
2008 
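/*
 * Send a response IU for a target-initiated request such as SRP_CRED_REQ
 * or SRP_AER_REQ. Updates ch->req_lim and returns zero upon success and a
 * nonzero value upon failure.
 */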
2009 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2010 			       void *rsp, int len)
2011 {
2012 	struct srp_target_port *target = ch->target;
2013 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2014 	unsigned long flags;
2015 	struct srp_iu *iu;
2016 	int err;
2017 
2018 	spin_lock_irqsave(&ch->lock, flags);
2019 	ch->req_lim += req_delta;
2020 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2021 	spin_unlock_irqrestore(&ch->lock, flags);
2022 
2023 	if (!iu) {
2024 		shost_printk(KERN_ERR, target->scsi_host, PFX
2025 			     "no IU available to send response\n");
2026 		return 1;
2027 	}
2028 
2029 	iu->num_sge = 1;
2030 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2031 	memcpy(iu->buf, rsp, len);
2032 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2033 
2034 	err = srp_post_send(ch, iu, len);
2035 	if (err) {
2036 		shost_printk(KERN_ERR, target->scsi_host, PFX
2037 			     "unable to post response: %d\n", err);
2038 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2039 	}
2040 
2041 	return err;
2042 }
2043 
2044 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2045 				 struct srp_cred_req *req)
2046 {
2047 	struct srp_cred_rsp rsp = {
2048 		.opcode = SRP_CRED_RSP,
2049 		.tag = req->tag,
2050 	};
2051 	s32 delta = be32_to_cpu(req->req_lim_delta);
2052 
2053 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2054 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2055 			     "problems processing SRP_CRED_REQ\n");
2056 }
2057 
2058 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2059 				struct srp_aer_req *req)
2060 {
2061 	struct srp_target_port *target = ch->target;
2062 	struct srp_aer_rsp rsp = {
2063 		.opcode = SRP_AER_RSP,
2064 		.tag = req->tag,
2065 	};
2066 	s32 delta = be32_to_cpu(req->req_lim_delta);
2067 
2068 	shost_printk(KERN_ERR, target->scsi_host, PFX
2069 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2070 
2071 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2072 		shost_printk(KERN_ERR, target->scsi_host, PFX
2073 			     "problems processing SRP_AER_REQ\n");
2074 }
2075 
2076 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2077 {
2078 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2079 	struct srp_rdma_ch *ch = cq->cq_context;
2080 	struct srp_target_port *target = ch->target;
2081 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2082 	int res;
2083 	u8 opcode;
2084 
2085 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2086 		srp_handle_qp_err(cq, wc, "RECV");
2087 		return;
2088 	}
2089 
2090 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2091 				   DMA_FROM_DEVICE);
2092 
2093 	opcode = *(u8 *) iu->buf;
2094 
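	/* Debugging aid: change "if (0)" into "if (1)" to dump received IUs. */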
2095 	if (0) {
2096 		shost_printk(KERN_ERR, target->scsi_host,
2097 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2098 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2099 			       iu->buf, wc->byte_len, true);
2100 	}
2101 
2102 	switch (opcode) {
2103 	case SRP_RSP:
2104 		srp_process_rsp(ch, iu->buf);
2105 		break;
2106 
2107 	case SRP_CRED_REQ:
2108 		srp_process_cred_req(ch, iu->buf);
2109 		break;
2110 
2111 	case SRP_AER_REQ:
2112 		srp_process_aer_req(ch, iu->buf);
2113 		break;
2114 
2115 	case SRP_T_LOGOUT:
2116 		/* XXX Handle target logout */
2117 		shost_printk(KERN_WARNING, target->scsi_host,
2118 			     PFX "Got target logout request\n");
2119 		break;
2120 
2121 	default:
2122 		shost_printk(KERN_WARNING, target->scsi_host,
2123 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2124 		break;
2125 	}
2126 
2127 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2128 				      DMA_FROM_DEVICE);
2129 
2130 	res = srp_post_recv(ch, iu);
2131 	if (res != 0)
2132 		shost_printk(KERN_ERR, target->scsi_host,
2133 			     PFX "Recv failed with error code %d\n", res);
2134 }
2135 
2136 /**
2137  * srp_tl_err_work() - handle a transport layer error
2138  * @work: Work structure embedded in an SRP target port.
2139  *
2140  * Note: This function may get invoked before the rport has been created,
2141  * hence the target->rport test.
2142  */
2143 static void srp_tl_err_work(struct work_struct *work)
2144 {
2145 	struct srp_target_port *target;
2146 
2147 	target = container_of(work, struct srp_target_port, tl_err_work);
2148 	if (target->rport)
2149 		srp_start_tl_fail_timers(target->rport);
2150 }
2151 
2152 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2153 		const char *opname)
2154 {
2155 	struct srp_rdma_ch *ch = cq->cq_context;
2156 	struct srp_target_port *target = ch->target;
2157 
2158 	if (ch->connected && !target->qp_in_error) {
2159 		shost_printk(KERN_ERR, target->scsi_host,
2160 			     PFX "failed %s status %s (%d) for CQE %p\n",
2161 			     opname, ib_wc_status_msg(wc->status), wc->status,
2162 			     wc->wr_cqe);
2163 		queue_work(system_long_wq, &target->tl_err_work);
2164 	}
2165 	target->qp_in_error = true;
2166 }
2167 
2168 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2169 {
2170 	struct srp_target_port *target = host_to_target(shost);
2171 	struct srp_rdma_ch *ch;
2172 	struct srp_request *req;
2173 	struct srp_iu *iu;
2174 	struct srp_cmd *cmd;
2175 	struct ib_device *dev;
2176 	unsigned long flags;
2177 	u32 tag;
2178 	u16 idx;
2179 	int len, ret;
2180 
2181 	scmnd->result = srp_chkready(target->rport);
2182 	if (unlikely(scmnd->result))
2183 		goto err;
2184 
2185 	WARN_ON_ONCE(scmnd->request->tag < 0);
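	/*
	 * blk_mq_unique_tag() encodes both the hardware queue index, which
	 * selects the RDMA channel, and the index into that channel's
	 * request ring.
	 */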
2186 	tag = blk_mq_unique_tag(scmnd->request);
2187 	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2188 	idx = blk_mq_unique_tag_to_tag(tag);
2189 	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2190 		  dev_name(&shost->shost_gendev), tag, idx,
2191 		  target->req_ring_size);
2192 
2193 	spin_lock_irqsave(&ch->lock, flags);
2194 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2195 	spin_unlock_irqrestore(&ch->lock, flags);
2196 
2197 	if (!iu)
2198 		goto err;
2199 
2200 	req = &ch->req_ring[idx];
2201 	dev = target->srp_host->srp_dev->dev;
2202 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2203 				   DMA_TO_DEVICE);
2204 
2205 	scmnd->host_scribble = (void *) req;
2206 
2207 	cmd = iu->buf;
	memset(cmd, 0, sizeof(*cmd));
2209 
2210 	cmd->opcode = SRP_CMD;
2211 	int_to_scsilun(scmnd->device->lun, &cmd->lun);
2212 	cmd->tag    = tag;
2213 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2214 	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2215 		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2216 					    4);
2217 		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2218 			goto err_iu;
2219 	}
2220 
2221 	req->scmnd    = scmnd;
2222 	req->cmd      = iu;
2223 
2224 	len = srp_map_data(scmnd, ch, req);
2225 	if (len < 0) {
2226 		shost_printk(KERN_ERR, target->scsi_host,
2227 			     PFX "Failed to map data (%d)\n", len);
2228 		/*
2229 		 * If we ran out of memory descriptors (-ENOMEM) because an
2230 		 * application is queuing many requests with more than
2231 		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2232 		 * to reduce queue depth temporarily.
2233 		 */
2234 		scmnd->result = len == -ENOMEM ?
2235 			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2236 		goto err_iu;
2237 	}
2238 
2239 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2240 				      DMA_TO_DEVICE);
2241 
2242 	if (srp_post_send(ch, iu, len)) {
2243 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2244 		scmnd->result = DID_ERROR << 16;
2245 		goto err_unmap;
2246 	}
2247 
2248 	return 0;
2249 
2250 err_unmap:
2251 	srp_unmap_data(scmnd, ch, req);
2252 
2253 err_iu:
2254 	srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2255 
2256 	/*
2257 	 * Avoid that the loops that iterate over the request ring can
2258 	 * encounter a dangling SCSI command pointer.
2259 	 */
2260 	req->scmnd = NULL;
2261 
2262 err:
2263 	if (scmnd->result) {
2264 		scmnd->scsi_done(scmnd);
2265 		ret = 0;
2266 	} else {
2267 		ret = SCSI_MLQUEUE_HOST_BUSY;
2268 	}
2269 
2270 	return ret;
2271 }
2272 
2273 /*
2274  * Note: the resources allocated in this function are freed in
2275  * srp_free_ch_ib().
2276  */
2277 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2278 {
2279 	struct srp_target_port *target = ch->target;
2280 	int i;
2281 
2282 	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2283 			      GFP_KERNEL);
2284 	if (!ch->rx_ring)
2285 		goto err_no_ring;
2286 	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2287 			      GFP_KERNEL);
2288 	if (!ch->tx_ring)
2289 		goto err_no_ring;
2290 
2291 	for (i = 0; i < target->queue_size; ++i) {
2292 		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2293 					      ch->max_ti_iu_len,
2294 					      GFP_KERNEL, DMA_FROM_DEVICE);
2295 		if (!ch->rx_ring[i])
2296 			goto err;
2297 	}
2298 
2299 	for (i = 0; i < target->queue_size; ++i) {
2300 		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2301 					      ch->max_it_iu_len,
2302 					      GFP_KERNEL, DMA_TO_DEVICE);
2303 		if (!ch->tx_ring[i])
2304 			goto err;
2305 
2306 		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2307 	}
2308 
2309 	return 0;
2310 
2311 err:
2312 	for (i = 0; i < target->queue_size; ++i) {
2313 		srp_free_iu(target->srp_host, ch->rx_ring[i]);
2314 		srp_free_iu(target->srp_host, ch->tx_ring[i]);
2315 	}
2316 
2318 err_no_ring:
2319 	kfree(ch->tx_ring);
2320 	ch->tx_ring = NULL;
2321 	kfree(ch->rx_ring);
2322 	ch->rx_ring = NULL;
2323 
2324 	return -ENOMEM;
2325 }
2326 
2327 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2328 {
2329 	uint64_t T_tr_ns, max_compl_time_ms;
2330 	uint32_t rq_tmo_jiffies;
2331 
2332 	/*
2333 	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2334 	 * table 91), both the QP timeout and the retry count have to be set
2335 	 * for RC QP's during the RTR to RTS transition.
2336 	 */
2337 	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2338 		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2339 
2340 	/*
2341 	 * Set target->rq_tmo_jiffies to one second more than the largest time
2342 	 * it can take before an error completion is generated. See also
2343 	 * C9-140..142 in the IBTA spec for more information about how to
2344 	 * convert the QP Local ACK Timeout value to nanoseconds.
2345 	 */
2346 	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
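	/* max_compl_time_ms holds nanoseconds until the do_div() below. */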
2347 	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2348 	do_div(max_compl_time_ms, NSEC_PER_MSEC);
2349 	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2350 
2351 	return rq_tmo_jiffies;
2352 }
2353 
2354 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2355 			       const struct srp_login_rsp *lrsp,
2356 			       struct srp_rdma_ch *ch)
2357 {
2358 	struct srp_target_port *target = ch->target;
2359 	struct ib_qp_attr *qp_attr = NULL;
2360 	int attr_mask = 0;
2361 	int ret = 0;
2362 	int i;
2363 
2364 	if (lrsp->opcode == SRP_LOGIN_RSP) {
2365 		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2366 		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2367 		ch->use_imm_data  = srp_use_imm_data &&
2368 			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2369 		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2370 						      ch->use_imm_data,
2371 						      target->max_it_iu_size);
2372 		WARN_ON_ONCE(ch->max_it_iu_len >
2373 			     be32_to_cpu(lrsp->max_it_iu_len));
2374 
2375 		if (ch->use_imm_data)
2376 			shost_printk(KERN_DEBUG, target->scsi_host,
2377 				     PFX "using immediate data\n");
2378 
2379 		/*
2380 		 * Reserve credits for task management so we don't
2381 		 * bounce requests back to the SCSI mid-layer.
2382 		 */
2383 		target->scsi_host->can_queue
2384 			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2385 			      target->scsi_host->can_queue);
2386 		target->scsi_host->cmd_per_lun
2387 			= min_t(int, target->scsi_host->can_queue,
2388 				target->scsi_host->cmd_per_lun);
2389 	} else {
2390 		shost_printk(KERN_WARNING, target->scsi_host,
2391 			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2392 		ret = -ECONNRESET;
2393 		goto error;
2394 	}
2395 
2396 	if (!ch->rx_ring) {
2397 		ret = srp_alloc_iu_bufs(ch);
2398 		if (ret)
2399 			goto error;
2400 	}
2401 
2402 	for (i = 0; i < target->queue_size; i++) {
2403 		struct srp_iu *iu = ch->rx_ring[i];
2404 
2405 		ret = srp_post_recv(ch, iu);
2406 		if (ret)
2407 			goto error;
2408 	}
2409 
2410 	if (!target->using_rdma_cm) {
2411 		ret = -ENOMEM;
2412 		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2413 		if (!qp_attr)
2414 			goto error;
2415 
2416 		qp_attr->qp_state = IB_QPS_RTR;
2417 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2418 		if (ret)
2419 			goto error_free;
2420 
2421 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2422 		if (ret)
2423 			goto error_free;
2424 
2425 		qp_attr->qp_state = IB_QPS_RTS;
2426 		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2427 		if (ret)
2428 			goto error_free;
2429 
2430 		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2431 
2432 		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2433 		if (ret)
2434 			goto error_free;
2435 
2436 		ret = ib_send_cm_rtu(cm_id, NULL, 0);
2437 	}
2438 
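	/*
	 * These labels are also reached on success: kfree(NULL) is a no-op
	 * and ch->status must be set in all cases.
	 */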
2439 error_free:
2440 	kfree(qp_attr);
2441 
2442 error:
2443 	ch->status = ret;
2444 }
2445 
2446 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2447 				  const struct ib_cm_event *event,
2448 				  struct srp_rdma_ch *ch)
2449 {
2450 	struct srp_target_port *target = ch->target;
2451 	struct Scsi_Host *shost = target->scsi_host;
2452 	struct ib_class_port_info *cpi;
2453 	int opcode;
2454 	u16 dlid;
2455 
2456 	switch (event->param.rej_rcvd.reason) {
2457 	case IB_CM_REJ_PORT_CM_REDIRECT:
2458 		cpi = event->param.rej_rcvd.ari;
2459 		dlid = be16_to_cpu(cpi->redirect_lid);
2460 		sa_path_set_dlid(&ch->ib_cm.path, dlid);
2461 		ch->ib_cm.path.pkey = cpi->redirect_pkey;
2462 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2463 		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2464 
2465 		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2466 		break;
2467 
2468 	case IB_CM_REJ_PORT_REDIRECT:
2469 		if (srp_target_is_topspin(target)) {
2470 			union ib_gid *dgid = &ch->ib_cm.path.dgid;
2471 
2472 			/*
2473 			 * Topspin/Cisco SRP gateways incorrectly send
2474 			 * reject reason code 25 when they mean 24
2475 			 * (port redirect).
2476 			 */
2477 			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2478 
2479 			shost_printk(KERN_DEBUG, shost,
2480 				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2481 				     be64_to_cpu(dgid->global.subnet_prefix),
2482 				     be64_to_cpu(dgid->global.interface_id));
2483 
2484 			ch->status = SRP_PORT_REDIRECT;
2485 		} else {
2486 			shost_printk(KERN_WARNING, shost,
2487 				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2488 			ch->status = -ECONNRESET;
2489 		}
2490 		break;
2491 
2492 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2493 		shost_printk(KERN_WARNING, shost,
2494 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2495 		ch->status = -ECONNRESET;
2496 		break;
2497 
2498 	case IB_CM_REJ_CONSUMER_DEFINED:
2499 		opcode = *(u8 *) event->private_data;
2500 		if (opcode == SRP_LOGIN_REJ) {
2501 			struct srp_login_rej *rej = event->private_data;
2502 			u32 reason = be32_to_cpu(rej->reason);
2503 
2504 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2505 				shost_printk(KERN_WARNING, shost,
2506 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2507 			else
2508 				shost_printk(KERN_WARNING, shost, PFX
2509 					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2510 					     target->sgid.raw,
2511 					     target->ib_cm.orig_dgid.raw,
2512 					     reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		}
2517 		ch->status = -ECONNRESET;
2518 		break;
2519 
2520 	case IB_CM_REJ_STALE_CONN:
2521 		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2522 		ch->status = SRP_STALE_CONN;
2523 		break;
2524 
2525 	default:
2526 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2527 			     event->param.rej_rcvd.reason);
2528 		ch->status = -ECONNRESET;
2529 	}
2530 }
2531 
2532 static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2533 			     const struct ib_cm_event *event)
2534 {
2535 	struct srp_rdma_ch *ch = cm_id->context;
2536 	struct srp_target_port *target = ch->target;
2537 	int comp = 0;
2538 
2539 	switch (event->event) {
2540 	case IB_CM_REQ_ERROR:
2541 		shost_printk(KERN_DEBUG, target->scsi_host,
2542 			     PFX "Sending CM REQ failed\n");
2543 		comp = 1;
2544 		ch->status = -ECONNRESET;
2545 		break;
2546 
2547 	case IB_CM_REP_RECEIVED:
2548 		comp = 1;
2549 		srp_cm_rep_handler(cm_id, event->private_data, ch);
2550 		break;
2551 
2552 	case IB_CM_REJ_RECEIVED:
2553 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2554 		comp = 1;
2555 
2556 		srp_ib_cm_rej_handler(cm_id, event, ch);
2557 		break;
2558 
2559 	case IB_CM_DREQ_RECEIVED:
2560 		shost_printk(KERN_WARNING, target->scsi_host,
2561 			     PFX "DREQ received - connection closed\n");
2562 		ch->connected = false;
2563 		if (ib_send_cm_drep(cm_id, NULL, 0))
2564 			shost_printk(KERN_ERR, target->scsi_host,
2565 				     PFX "Sending CM DREP failed\n");
2566 		queue_work(system_long_wq, &target->tl_err_work);
2567 		break;
2568 
2569 	case IB_CM_TIMEWAIT_EXIT:
2570 		shost_printk(KERN_ERR, target->scsi_host,
2571 			     PFX "connection closed\n");
2572 		comp = 1;
2573 
2574 		ch->status = 0;
2575 		break;
2576 
2577 	case IB_CM_MRA_RECEIVED:
2578 	case IB_CM_DREQ_ERROR:
2579 	case IB_CM_DREP_RECEIVED:
2580 		break;
2581 
2582 	default:
2583 		shost_printk(KERN_WARNING, target->scsi_host,
2584 			     PFX "Unhandled CM event %d\n", event->event);
2585 		break;
2586 	}
2587 
2588 	if (comp)
2589 		complete(&ch->done);
2590 
2591 	return 0;
2592 }
2593 
2594 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2595 				    struct rdma_cm_event *event)
2596 {
2597 	struct srp_target_port *target = ch->target;
2598 	struct Scsi_Host *shost = target->scsi_host;
2599 	int opcode;
2600 
2601 	switch (event->status) {
2602 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2603 		shost_printk(KERN_WARNING, shost,
2604 			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2605 		ch->status = -ECONNRESET;
2606 		break;
2607 
2608 	case IB_CM_REJ_CONSUMER_DEFINED:
2609 		opcode = *(u8 *) event->param.conn.private_data;
2610 		if (opcode == SRP_LOGIN_REJ) {
2611 			struct srp_login_rej *rej =
2612 				(struct srp_login_rej *)
2613 				event->param.conn.private_data;
2614 			u32 reason = be32_to_cpu(rej->reason);
2615 
2616 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2617 				shost_printk(KERN_WARNING, shost,
2618 					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2619 			else
2620 				shost_printk(KERN_WARNING, shost,
2621 					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2622 		} else {
2623 			shost_printk(KERN_WARNING, shost,
2624 				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2625 				     opcode);
2626 		}
2627 		ch->status = -ECONNRESET;
2628 		break;
2629 
2630 	case IB_CM_REJ_STALE_CONN:
2631 		shost_printk(KERN_WARNING, shost,
2632 			     "  REJ reason: stale connection\n");
2633 		ch->status = SRP_STALE_CONN;
2634 		break;
2635 
2636 	default:
2637 		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2638 			     event->status);
2639 		ch->status = -ECONNRESET;
2640 		break;
2641 	}
2642 }
2643 
2644 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2645 			       struct rdma_cm_event *event)
2646 {
2647 	struct srp_rdma_ch *ch = cm_id->context;
2648 	struct srp_target_port *target = ch->target;
2649 	int comp = 0;
2650 
2651 	switch (event->event) {
2652 	case RDMA_CM_EVENT_ADDR_RESOLVED:
2653 		ch->status = 0;
2654 		comp = 1;
2655 		break;
2656 
2657 	case RDMA_CM_EVENT_ADDR_ERROR:
2658 		ch->status = -ENXIO;
2659 		comp = 1;
2660 		break;
2661 
2662 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
2663 		ch->status = 0;
2664 		comp = 1;
2665 		break;
2666 
2667 	case RDMA_CM_EVENT_ROUTE_ERROR:
2668 	case RDMA_CM_EVENT_UNREACHABLE:
2669 		ch->status = -EHOSTUNREACH;
2670 		comp = 1;
2671 		break;
2672 
2673 	case RDMA_CM_EVENT_CONNECT_ERROR:
2674 		shost_printk(KERN_DEBUG, target->scsi_host,
2675 			     PFX "Sending CM REQ failed\n");
2676 		comp = 1;
2677 		ch->status = -ECONNRESET;
2678 		break;
2679 
2680 	case RDMA_CM_EVENT_ESTABLISHED:
2681 		comp = 1;
2682 		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2683 		break;
2684 
2685 	case RDMA_CM_EVENT_REJECTED:
2686 		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2687 		comp = 1;
2688 
2689 		srp_rdma_cm_rej_handler(ch, event);
2690 		break;
2691 
2692 	case RDMA_CM_EVENT_DISCONNECTED:
2693 		if (ch->connected) {
2694 			shost_printk(KERN_WARNING, target->scsi_host,
2695 				     PFX "received DREQ\n");
2696 			rdma_disconnect(ch->rdma_cm.cm_id);
2697 			comp = 1;
2698 			ch->status = 0;
2699 			queue_work(system_long_wq, &target->tl_err_work);
2700 		}
2701 		break;
2702 
2703 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2704 		shost_printk(KERN_ERR, target->scsi_host,
2705 			     PFX "connection closed\n");
2706 
2707 		comp = 1;
2708 		ch->status = 0;
2709 		break;
2710 
2711 	default:
2712 		shost_printk(KERN_WARNING, target->scsi_host,
2713 			     PFX "Unhandled CM event %d\n", event->event);
2714 		break;
2715 	}
2716 
2717 	if (comp)
2718 		complete(&ch->done);
2719 
2720 	return 0;
2721 }
2722 
2723 /**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns the new queue depth.
2729  */
2730 static int
2731 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2732 {
2733 	if (!sdev->tagged_supported)
2734 		qdepth = 1;
2735 	return scsi_change_queue_depth(sdev, qdepth);
2736 }
2737 
2738 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2739 			     u8 func, u8 *status)
2740 {
2741 	struct srp_target_port *target = ch->target;
2742 	struct srp_rport *rport = target->rport;
2743 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2744 	struct srp_iu *iu;
2745 	struct srp_tsk_mgmt *tsk_mgmt;
2746 	int res;
2747 
2748 	if (!ch->connected || target->qp_in_error)
2749 		return -1;
2750 
2751 	/*
2752 	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2753 	 * invoked while a task management function is being sent.
2754 	 */
2755 	mutex_lock(&rport->mutex);
2756 	spin_lock_irq(&ch->lock);
2757 	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2758 	spin_unlock_irq(&ch->lock);
2759 
2760 	if (!iu) {
2761 		mutex_unlock(&rport->mutex);
2762 
2763 		return -1;
2764 	}
2765 
2766 	iu->num_sge = 1;
2767 
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));
2772 
	tsk_mgmt->opcode	= SRP_TSK_MGMT;
2774 	int_to_scsilun(lun, &tsk_mgmt->lun);
2775 	tsk_mgmt->tsk_mgmt_func = func;
2776 	tsk_mgmt->task_tag	= req_tag;
2777 
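	/*
	 * Setting the SRP_TAG_TSK_MGMT bit allows srp_process_rsp() to
	 * distinguish task management responses from SCSI command responses.
	 */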
2778 	spin_lock_irq(&ch->lock);
2779 	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2780 	tsk_mgmt->tag = ch->tsk_mgmt_tag;
2781 	spin_unlock_irq(&ch->lock);
2782 
2783 	init_completion(&ch->tsk_mgmt_done);
2784 
	ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
				      DMA_TO_DEVICE);
2787 	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2788 		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2789 		mutex_unlock(&rport->mutex);
2790 
2791 		return -1;
2792 	}
2793 	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2794 					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2795 	if (res > 0 && status)
2796 		*status = ch->tsk_mgmt_status;
2797 	mutex_unlock(&rport->mutex);
2798 
2799 	WARN_ON_ONCE(res < 0);
2800 
2801 	return res > 0 ? 0 : -1;
2802 }
2803 
2804 static int srp_abort(struct scsi_cmnd *scmnd)
2805 {
2806 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2807 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2808 	u32 tag;
2809 	u16 ch_idx;
2810 	struct srp_rdma_ch *ch;
2811 	int ret;
2812 
2813 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2814 
2815 	if (!req)
2816 		return SUCCESS;
2817 	tag = blk_mq_unique_tag(scmnd->request);
2818 	ch_idx = blk_mq_unique_tag_to_hwq(tag);
2819 	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2820 		return SUCCESS;
2821 	ch = &target->ch[ch_idx];
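	/*
	 * If ownership of the request cannot be claimed, the command has
	 * already been completed through another path and there is nothing
	 * left to abort.
	 */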
2822 	if (!srp_claim_req(ch, req, NULL, scmnd))
2823 		return SUCCESS;
2824 	shost_printk(KERN_ERR, target->scsi_host,
2825 		     "Sending SRP abort for tag %#x\n", tag);
2826 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2827 			      SRP_TSK_ABORT_TASK, NULL) == 0)
2828 		ret = SUCCESS;
2829 	else if (target->rport->state == SRP_RPORT_LOST)
2830 		ret = FAST_IO_FAIL;
2831 	else
2832 		ret = FAILED;
2833 	if (ret == SUCCESS) {
2834 		srp_free_req(ch, req, scmnd, 0);
2835 		scmnd->result = DID_ABORT << 16;
2836 		scmnd->scsi_done(scmnd);
2837 	}
2838 
2839 	return ret;
2840 }
2841 
2842 static int srp_reset_device(struct scsi_cmnd *scmnd)
2843 {
2844 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2845 	struct srp_rdma_ch *ch;
2846 	u8 status;
2847 
2848 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2849 
2850 	ch = &target->ch[0];
2851 	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2852 			      SRP_TSK_LUN_RESET, &status))
2853 		return FAILED;
2854 	if (status)
2855 		return FAILED;
2856 
2857 	return SUCCESS;
2858 }
2859 
2860 static int srp_reset_host(struct scsi_cmnd *scmnd)
2861 {
2862 	struct srp_target_port *target = host_to_target(scmnd->device->host);
2863 
2864 	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2865 
2866 	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2867 }
2868 
2869 static int srp_target_alloc(struct scsi_target *starget)
2870 {
2871 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2872 	struct srp_target_port *target = host_to_target(shost);
2873 
2874 	if (target->target_can_queue)
2875 		starget->can_queue = target->target_can_queue;
2876 	return 0;
2877 }
2878 
2879 static int srp_slave_configure(struct scsi_device *sdev)
2880 {
2881 	struct Scsi_Host *shost = sdev->host;
2882 	struct srp_target_port *target = host_to_target(shost);
2883 	struct request_queue *q = sdev->request_queue;
2884 	unsigned long timeout;
2885 
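	/*
	 * Give disk requests at least 30 seconds, and no less than the
	 * worst-case transport layer error detection time, before they
	 * time out.
	 */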
2886 	if (sdev->type == TYPE_DISK) {
2887 		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2888 		blk_queue_rq_timeout(q, timeout);
2889 	}
2890 
2891 	return 0;
2892 }
2893 
2894 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2895 			   char *buf)
2896 {
2897 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2898 
2899 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2900 }
2901 
2902 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2903 			     char *buf)
2904 {
2905 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2906 
2907 	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2908 }
2909 
2910 static ssize_t show_service_id(struct device *dev,
2911 			       struct device_attribute *attr, char *buf)
2912 {
2913 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2914 
2915 	if (target->using_rdma_cm)
2916 		return -ENOENT;
2917 	return sprintf(buf, "0x%016llx\n",
2918 		       be64_to_cpu(target->ib_cm.service_id));
2919 }
2920 
2921 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2922 			 char *buf)
2923 {
2924 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2925 
2926 	if (target->using_rdma_cm)
2927 		return -ENOENT;
2928 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2929 }
2930 
2931 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2932 			 char *buf)
2933 {
2934 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2935 
2936 	return sprintf(buf, "%pI6\n", target->sgid.raw);
2937 }
2938 
2939 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2940 			 char *buf)
2941 {
2942 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2943 	struct srp_rdma_ch *ch = &target->ch[0];
2944 
2945 	if (target->using_rdma_cm)
2946 		return -ENOENT;
2947 	return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2948 }
2949 
2950 static ssize_t show_orig_dgid(struct device *dev,
2951 			      struct device_attribute *attr, char *buf)
2952 {
2953 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2954 
2955 	if (target->using_rdma_cm)
2956 		return -ENOENT;
2957 	return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2958 }
2959 
2960 static ssize_t show_req_lim(struct device *dev,
2961 			    struct device_attribute *attr, char *buf)
2962 {
2963 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2964 	struct srp_rdma_ch *ch;
2965 	int i, req_lim = INT_MAX;
2966 
2967 	for (i = 0; i < target->ch_count; i++) {
2968 		ch = &target->ch[i];
2969 		req_lim = min(req_lim, ch->req_lim);
2970 	}
2971 	return sprintf(buf, "%d\n", req_lim);
2972 }
2973 
2974 static ssize_t show_zero_req_lim(struct device *dev,
2975 				 struct device_attribute *attr, char *buf)
2976 {
2977 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2978 
2979 	return sprintf(buf, "%d\n", target->zero_req_lim);
2980 }
2981 
2982 static ssize_t show_local_ib_port(struct device *dev,
2983 				  struct device_attribute *attr, char *buf)
2984 {
2985 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2986 
2987 	return sprintf(buf, "%d\n", target->srp_host->port);
2988 }
2989 
2990 static ssize_t show_local_ib_device(struct device *dev,
2991 				    struct device_attribute *attr, char *buf)
2992 {
2993 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
2994 
2995 	return sprintf(buf, "%s\n",
2996 		       dev_name(&target->srp_host->srp_dev->dev->dev));
2997 }
2998 
2999 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
3000 			     char *buf)
3001 {
3002 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3003 
3004 	return sprintf(buf, "%d\n", target->ch_count);
3005 }
3006 
3007 static ssize_t show_comp_vector(struct device *dev,
3008 				struct device_attribute *attr, char *buf)
3009 {
3010 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3011 
3012 	return sprintf(buf, "%d\n", target->comp_vector);
3013 }
3014 
3015 static ssize_t show_tl_retry_count(struct device *dev,
3016 				   struct device_attribute *attr, char *buf)
3017 {
3018 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3019 
3020 	return sprintf(buf, "%d\n", target->tl_retry_count);
3021 }
3022 
3023 static ssize_t show_cmd_sg_entries(struct device *dev,
3024 				   struct device_attribute *attr, char *buf)
3025 {
3026 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3027 
3028 	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
3029 }
3030 
3031 static ssize_t show_allow_ext_sg(struct device *dev,
3032 				 struct device_attribute *attr, char *buf)
3033 {
3034 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
3035 
3036 	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3037 }
3038 
3039 static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
3040 static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
3041 static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
3042 static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
3043 static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
3044 static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
3045 static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
3046 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
3047 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
3048 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
3049 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
3050 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
3051 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
3052 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
3053 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
3054 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
3055 
3056 static struct device_attribute *srp_host_attrs[] = {
3057 	&dev_attr_id_ext,
3058 	&dev_attr_ioc_guid,
3059 	&dev_attr_service_id,
3060 	&dev_attr_pkey,
3061 	&dev_attr_sgid,
3062 	&dev_attr_dgid,
3063 	&dev_attr_orig_dgid,
3064 	&dev_attr_req_lim,
3065 	&dev_attr_zero_req_lim,
3066 	&dev_attr_local_ib_port,
3067 	&dev_attr_local_ib_device,
3068 	&dev_attr_ch_count,
3069 	&dev_attr_comp_vector,
3070 	&dev_attr_tl_retry_count,
3071 	&dev_attr_cmd_sg_entries,
3072 	&dev_attr_allow_ext_sg,
3073 	NULL
3074 };
3075 
3076 static struct scsi_host_template srp_template = {
3077 	.module				= THIS_MODULE,
3078 	.name				= "InfiniBand SRP initiator",
3079 	.proc_name			= DRV_NAME,
3080 	.target_alloc			= srp_target_alloc,
3081 	.slave_configure		= srp_slave_configure,
3082 	.info				= srp_target_info,
3083 	.queuecommand			= srp_queuecommand,
3084 	.change_queue_depth             = srp_change_queue_depth,
3085 	.eh_timed_out			= srp_timed_out,
3086 	.eh_abort_handler		= srp_abort,
3087 	.eh_device_reset_handler	= srp_reset_device,
3088 	.eh_host_reset_handler		= srp_reset_host,
3089 	.skip_settle_delay		= true,
3090 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
3091 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
3092 	.this_id			= -1,
3093 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
3094 	.shost_attrs			= srp_host_attrs,
3095 	.track_queue_depth		= 1,
3096 };
3097 
3098 static int srp_sdev_count(struct Scsi_Host *host)
3099 {
3100 	struct scsi_device *sdev;
3101 	int c = 0;
3102 
3103 	shost_for_each_device(sdev, host)
3104 		c++;
3105 
3106 	return c;
3107 }
3108 
3109 /*
3110  * Return values:
3111  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3112  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3113  *    removal has been scheduled.
3114  * 0 and target->state != SRP_TARGET_REMOVED upon success.
3115  */
3116 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3117 {
3118 	struct srp_rport_identifiers ids;
3119 	struct srp_rport *rport;
3120 
3121 	target->state = SRP_TARGET_SCANNING;
3122 	sprintf(target->target_name, "SRP.T10:%016llX",
3123 		be64_to_cpu(target->id_ext));
3124 
3125 	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3126 		return -ENODEV;
3127 
3128 	memcpy(ids.port_id, &target->id_ext, 8);
3129 	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3130 	ids.roles = SRP_RPORT_ROLE_TARGET;
3131 	rport = srp_rport_add(target->scsi_host, &ids);
3132 	if (IS_ERR(rport)) {
3133 		scsi_remove_host(target->scsi_host);
3134 		return PTR_ERR(rport);
3135 	}
3136 
3137 	rport->lld_data = target;
3138 	target->rport = rport;
3139 
3140 	spin_lock(&host->target_lock);
3141 	list_add_tail(&target->list, &host->target_list);
3142 	spin_unlock(&host->target_lock);
3143 
3144 	scsi_scan_target(&target->scsi_host->shost_gendev,
3145 			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3146 
3147 	if (srp_connected_ch(target) < target->ch_count ||
3148 	    target->qp_in_error) {
3149 		shost_printk(KERN_INFO, target->scsi_host,
3150 			     PFX "SCSI scan failed - removing SCSI host\n");
3151 		srp_queue_remove_work(target);
3152 		goto out;
3153 	}
3154 
3155 	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3156 		 dev_name(&target->scsi_host->shost_gendev),
3157 		 srp_sdev_count(target->scsi_host));
3158 
3159 	spin_lock_irq(&target->lock);
3160 	if (target->state == SRP_TARGET_SCANNING)
3161 		target->state = SRP_TARGET_LIVE;
3162 	spin_unlock_irq(&target->lock);
3163 
3164 out:
3165 	return 0;
3166 }
3167 
3168 static void srp_release_dev(struct device *dev)
3169 {
3170 	struct srp_host *host =
3171 		container_of(dev, struct srp_host, dev);
3172 
3173 	complete(&host->released);
3174 }
3175 
3176 static struct class srp_class = {
3177 	.name    = "infiniband_srp",
3178 	.dev_release = srp_release_dev
3179 };
3180 
3181 /**
3182  * srp_conn_unique() - check whether the connection to a target is unique
3183  * @host:   SRP host.
3184  * @target: SRP target port.
3185  */
3186 static bool srp_conn_unique(struct srp_host *host,
3187 			    struct srp_target_port *target)
3188 {
3189 	struct srp_target_port *t;
3190 	bool ret = false;
3191 
3192 	if (target->state == SRP_TARGET_REMOVED)
3193 		goto out;
3194 
3195 	ret = true;
3196 
3197 	spin_lock(&host->target_lock);
3198 	list_for_each_entry(t, &host->target_list, list) {
3199 		if (t != target &&
3200 		    target->id_ext == t->id_ext &&
3201 		    target->ioc_guid == t->ioc_guid &&
3202 		    target->initiator_ext == t->initiator_ext) {
3203 			ret = false;
3204 			break;
3205 		}
3206 	}
3207 	spin_unlock(&host->target_lock);
3208 
3209 out:
3210 	return ret;
3211 }
3212 
3213 /*
3214  * Target ports are added by writing
3215  *
3216  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3217  *     pkey=<P_Key>,service_id=<service ID>
3218  * or
3219  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
3220  *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3221  *
3222  * to the add_target sysfs attribute.
3223  */
3224 enum {
3225 	SRP_OPT_ERR		= 0,
3226 	SRP_OPT_ID_EXT		= 1 << 0,
3227 	SRP_OPT_IOC_GUID	= 1 << 1,
3228 	SRP_OPT_DGID		= 1 << 2,
3229 	SRP_OPT_PKEY		= 1 << 3,
3230 	SRP_OPT_SERVICE_ID	= 1 << 4,
3231 	SRP_OPT_MAX_SECT	= 1 << 5,
3232 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
3233 	SRP_OPT_IO_CLASS	= 1 << 7,
3234 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
3235 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
3236 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
3237 	SRP_OPT_SG_TABLESIZE	= 1 << 11,
3238 	SRP_OPT_COMP_VECTOR	= 1 << 12,
3239 	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
3240 	SRP_OPT_QUEUE_SIZE	= 1 << 14,
3241 	SRP_OPT_IP_SRC		= 1 << 15,
3242 	SRP_OPT_IP_DEST		= 1 << 16,
	SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
3244 	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
3245 	SRP_OPT_CH_COUNT	= 1 << 19,
3246 };
3247 
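/*
 * A target creation request must contain all options from one of these two
 * sets: the first set applies to connections through the IB CM and the
 * second set to connections through the RDMA CM.
 */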
3248 static unsigned int srp_opt_mandatory[] = {
3249 	SRP_OPT_ID_EXT		|
3250 	SRP_OPT_IOC_GUID	|
3251 	SRP_OPT_DGID		|
3252 	SRP_OPT_PKEY		|
3253 	SRP_OPT_SERVICE_ID,
3254 	SRP_OPT_ID_EXT		|
3255 	SRP_OPT_IOC_GUID	|
3256 	SRP_OPT_IP_DEST,
3257 };
3258 
3259 static const match_table_t srp_opt_tokens = {
3260 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
3261 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
3262 	{ SRP_OPT_DGID,			"dgid=%s" 		},
3263 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
3264 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
3265 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
3266 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
3267 	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
3268 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
3269 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
3270 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
3271 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
3272 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
3273 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
3274 	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
3275 	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
3276 	{ SRP_OPT_IP_SRC,		"src=%s"		},
3277 	{ SRP_OPT_IP_DEST,		"dest=%s"		},
3278 	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
	{ SRP_OPT_CH_COUNT,		"ch_count=%u"		},
3280 	{ SRP_OPT_ERR,			NULL 			}
3281 };
3282 
3283 /**
3284  * srp_parse_in - parse an IP address and port number combination
3285  * @net:	   [in]  Network namespace.
3286  * @sa:		   [out] Address family, IP address and port number.
3287  * @addr_port_str: [in]  IP address and port number.
3288  * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3289  *
3290  * Parse the following address formats:
3291  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3292  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3293  */
3294 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3295 			const char *addr_port_str, bool *has_port)
3296 {
3297 	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3298 	char *port_str;
3299 	int ret;
3300 
3301 	if (!addr)
3302 		return -ENOMEM;
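	/*
	 * If the last ':' is followed by a ']' it is part of a bracketed
	 * IPv6 address rather than a port number separator.
	 */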
3303 	port_str = strrchr(addr, ':');
3304 	if (port_str && strchr(port_str, ']'))
3305 		port_str = NULL;
3306 	if (port_str)
3307 		*port_str++ = '\0';
3308 	if (has_port)
3309 		*has_port = port_str != NULL;
3310 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3311 	if (ret && addr[0]) {
3312 		addr_end = addr + strlen(addr) - 1;
3313 		if (addr[0] == '[' && *addr_end == ']') {
3314 			*addr_end = '\0';
3315 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3316 						   port_str, sa);
3317 		}
3318 	}
3319 	kfree(addr);
3320 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3321 	return ret;
3322 }
3323 
3324 static int srp_parse_options(struct net *net, const char *buf,
3325 			     struct srp_target_port *target)
3326 {
3327 	char *options, *sep_opt;
3328 	char *p;
3329 	substring_t args[MAX_OPT_ARGS];
3330 	unsigned long long ull;
3331 	bool has_port;
3332 	int opt_mask = 0;
3333 	int token;
3334 	int ret = -EINVAL;
3335 	int i;
3336 
3337 	options = kstrdup(buf, GFP_KERNEL);
3338 	if (!options)
3339 		return -ENOMEM;
3340 
3341 	sep_opt = options;
3342 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3343 		if (!*p)
3344 			continue;
3345 
3346 		token = match_token(p, srp_opt_tokens, args);
3347 		opt_mask |= token;
3348 
3349 		switch (token) {
3350 		case SRP_OPT_ID_EXT:
3351 			p = match_strdup(args);
3352 			if (!p) {
3353 				ret = -ENOMEM;
3354 				goto out;
3355 			}
3356 			ret = kstrtoull(p, 16, &ull);
3357 			if (ret) {
3358 				pr_warn("invalid id_ext parameter '%s'\n", p);
3359 				kfree(p);
3360 				goto out;
3361 			}
3362 			target->id_ext = cpu_to_be64(ull);
3363 			kfree(p);
3364 			break;
3365 
3366 		case SRP_OPT_IOC_GUID:
3367 			p = match_strdup(args);
3368 			if (!p) {
3369 				ret = -ENOMEM;
3370 				goto out;
3371 			}
3372 			ret = kstrtoull(p, 16, &ull);
3373 			if (ret) {
3374 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3375 				kfree(p);
3376 				goto out;
3377 			}
3378 			target->ioc_guid = cpu_to_be64(ull);
3379 			kfree(p);
3380 			break;
3381 
3382 		case SRP_OPT_DGID:
3383 			p = match_strdup(args);
3384 			if (!p) {
3385 				ret = -ENOMEM;
3386 				goto out;
3387 			}
3388 			if (strlen(p) != 32) {
3389 				pr_warn("bad dest GID parameter '%s'\n", p);
3390 				kfree(p);
3391 				goto out;
3392 			}
3393 
3394 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3395 			kfree(p);
3396 			if (ret < 0)
3397 				goto out;
3398 			break;
3399 
3400 		case SRP_OPT_PKEY:
3401 			if (match_hex(args, &token)) {
3402 				pr_warn("bad P_Key parameter '%s'\n", p);
3403 				goto out;
3404 			}
3405 			target->ib_cm.pkey = cpu_to_be16(token);
3406 			break;
3407 
3408 		case SRP_OPT_SERVICE_ID:
3409 			p = match_strdup(args);
3410 			if (!p) {
3411 				ret = -ENOMEM;
3412 				goto out;
3413 			}
3414 			ret = kstrtoull(p, 16, &ull);
3415 			if (ret) {
3416 				pr_warn("bad service_id parameter '%s'\n", p);
3417 				kfree(p);
3418 				goto out;
3419 			}
3420 			target->ib_cm.service_id = cpu_to_be64(ull);
3421 			kfree(p);
3422 			break;
3423 
3424 		case SRP_OPT_IP_SRC:
3425 			p = match_strdup(args);
3426 			if (!p) {
3427 				ret = -ENOMEM;
3428 				goto out;
3429 			}
3430 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3431 					   NULL);
3432 			if (ret < 0) {
3433 				pr_warn("bad source parameter '%s'\n", p);
3434 				kfree(p);
3435 				goto out;
3436 			}
3437 			target->rdma_cm.src_specified = true;
3438 			kfree(p);
3439 			break;
3440 
3441 		case SRP_OPT_IP_DEST:
3442 			p = match_strdup(args);
3443 			if (!p) {
3444 				ret = -ENOMEM;
3445 				goto out;
3446 			}
3447 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3448 					   &has_port);
3449 			if (!has_port)
3450 				ret = -EINVAL;
3451 			if (ret < 0) {
3452 				pr_warn("bad dest parameter '%s'\n", p);
3453 				kfree(p);
3454 				goto out;
3455 			}
3456 			target->using_rdma_cm = true;
3457 			kfree(p);
3458 			break;
3459 
3460 		case SRP_OPT_MAX_SECT:
3461 			if (match_int(args, &token)) {
3462 				pr_warn("bad max sect parameter '%s'\n", p);
3463 				goto out;
3464 			}
3465 			target->scsi_host->max_sectors = token;
3466 			break;
3467 
3468 		case SRP_OPT_QUEUE_SIZE:
3469 			if (match_int(args, &token) || token < 1) {
3470 				pr_warn("bad queue_size parameter '%s'\n", p);
3471 				goto out;
3472 			}
3473 			target->scsi_host->can_queue = token;
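			/* Reserve ring space for SRP responses and task management. */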
3474 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3475 					     SRP_TSK_MGMT_SQ_SIZE;
3476 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3477 				target->scsi_host->cmd_per_lun = token;
3478 			break;
3479 
3480 		case SRP_OPT_MAX_CMD_PER_LUN:
3481 			if (match_int(args, &token) || token < 1) {
3482 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3483 					p);
3484 				goto out;
3485 			}
3486 			target->scsi_host->cmd_per_lun = token;
3487 			break;
3488 
3489 		case SRP_OPT_TARGET_CAN_QUEUE:
3490 			if (match_int(args, &token) || token < 1) {
3491 				pr_warn("bad max target_can_queue parameter '%s'\n",
3492 					p);
3493 				goto out;
3494 			}
3495 			target->target_can_queue = token;
3496 			break;
3497 
3498 		case SRP_OPT_IO_CLASS:
3499 			if (match_hex(args, &token)) {
3500 				pr_warn("bad IO class parameter '%s'\n", p);
3501 				goto out;
3502 			}
3503 			if (token != SRP_REV10_IB_IO_CLASS &&
3504 			    token != SRP_REV16A_IB_IO_CLASS) {
3505 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3506 					token, SRP_REV10_IB_IO_CLASS,
3507 					SRP_REV16A_IB_IO_CLASS);
3508 				goto out;
3509 			}
3510 			target->io_class = token;
3511 			break;
3512 
3513 		case SRP_OPT_INITIATOR_EXT:
3514 			p = match_strdup(args);
3515 			if (!p) {
3516 				ret = -ENOMEM;
3517 				goto out;
3518 			}
3519 			ret = kstrtoull(p, 16, &ull);
3520 			if (ret) {
3521 				pr_warn("bad initiator_ext value '%s'\n", p);
3522 				kfree(p);
3523 				goto out;
3524 			}
3525 			target->initiator_ext = cpu_to_be64(ull);
3526 			kfree(p);
3527 			break;
3528 
3529 		case SRP_OPT_CMD_SG_ENTRIES:
3530 			if (match_int(args, &token) || token < 1 || token > 255) {
3531 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3532 					p);
3533 				goto out;
3534 			}
3535 			target->cmd_sg_cnt = token;
3536 			break;
3537 
3538 		case SRP_OPT_ALLOW_EXT_SG:
3539 			if (match_int(args, &token)) {
3540 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3541 				goto out;
3542 			}
3543 			target->allow_ext_sg = !!token;
3544 			break;
3545 
3546 		case SRP_OPT_SG_TABLESIZE:
3547 			if (match_int(args, &token) || token < 1 ||
3548 					token > SG_MAX_SEGMENTS) {
3549 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3550 					p);
3551 				goto out;
3552 			}
3553 			target->sg_tablesize = token;
3554 			break;
3555 
3556 		case SRP_OPT_COMP_VECTOR:
3557 			if (match_int(args, &token) || token < 0) {
3558 				pr_warn("bad comp_vector parameter '%s'\n", p);
3559 				goto out;
3560 			}
3561 			target->comp_vector = token;
3562 			break;
3563 
3564 		case SRP_OPT_TL_RETRY_COUNT:
3565 			if (match_int(args, &token) || token < 2 || token > 7) {
3566 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3567 					p);
3568 				goto out;
3569 			}
3570 			target->tl_retry_count = token;
3571 			break;
3572 
3573 		case SRP_OPT_MAX_IT_IU_SIZE:
3574 			if (match_int(args, &token) || token < 0) {
3575 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3576 				goto out;
3577 			}
3578 			target->max_it_iu_size = token;
3579 			break;
3580 
3581 		case SRP_OPT_CH_COUNT:
3582 			if (match_int(args, &token) || token < 1) {
3583 				pr_warn("bad channel count %s\n", p);
3584 				goto out;
3585 			}
3586 			target->ch_count = token;
3587 			break;
3588 
3589 		default:
3590 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3591 				p);
3592 			goto out;
3593 		}
3594 	}
3595 
3596 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3597 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3598 			ret = 0;
3599 			break;
3600 		}
3601 	}
3602 	if (ret)
3603 		pr_warn("target creation request is missing one or more parameters\n");
3604 
	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue &&
	    (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3607 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3608 			target->scsi_host->cmd_per_lun,
3609 			target->scsi_host->can_queue);
3610 
3611 out:
3612 	kfree(options);
3613 	return ret;
3614 }
3615 
3616 static ssize_t srp_create_target(struct device *dev,
3617 				 struct device_attribute *attr,
3618 				 const char *buf, size_t count)
3619 {
3620 	struct srp_host *host =
3621 		container_of(dev, struct srp_host, dev);
3622 	struct Scsi_Host *target_host;
3623 	struct srp_target_port *target;
3624 	struct srp_rdma_ch *ch;
3625 	struct srp_device *srp_dev = host->srp_dev;
3626 	struct ib_device *ibdev = srp_dev->dev;
3627 	int ret, node_idx, node, cpu, i;
3628 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3629 	bool multich = false;
3630 	uint32_t max_iu_len;
3631 
3632 	target_host = scsi_host_alloc(&srp_template,
				      sizeof(struct srp_target_port));
3634 	if (!target_host)
3635 		return -ENOMEM;
3636 
3637 	target_host->transportt  = ib_srp_transport_template;
3638 	target_host->max_channel = 0;
3639 	target_host->max_id      = 1;
3640 	target_host->max_lun     = -1LL;
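	/*
	 * sizeof() does not evaluate its operand, so dereferencing a NULL
	 * struct srp_cmd pointer below only yields the size of the cdb[]
	 * member at compile time (the same result as sizeof_field()).
	 */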
3641 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3642 	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3643 
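	/*
	 * If the HCA cannot register SG lists containing gaps, ask the
	 * block layer to only submit SG lists whose elements start and end
	 * on mr_page_size boundaries (first and last element excepted), so
	 * that every element can be mapped with whole MR pages.
	 */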
3644 	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3645 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3646 
3647 	target = host_to_target(target_host);
3648 
3649 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3650 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3651 	target->scsi_host	= target_host;
3652 	target->srp_host	= host;
3653 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3654 	target->global_rkey	= host->srp_dev->global_rkey;
3655 	target->cmd_sg_cnt	= cmd_sg_entries;
3656 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3657 	target->allow_ext_sg	= allow_ext_sg;
3658 	target->tl_retry_count	= 7;
3659 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3660 
3661 	/*
3662 	 * Prevent the SCSI host from being removed by srp_remove_target()
3663 	 * before this function returns.
3664 	 */
3665 	scsi_host_get(target->scsi_host);
3666 
3667 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3668 	if (ret < 0)
3669 		goto put;
3670 
3671 	ret = srp_parse_options(target->net, buf, target);
3672 	if (ret)
3673 		goto out;
3674 
3675 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3676 
3677 	if (!srp_conn_unique(target->srp_host, target)) {
3678 		if (target->using_rdma_cm) {
3679 			shost_printk(KERN_INFO, target->scsi_host,
3680 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3681 				     be64_to_cpu(target->id_ext),
3682 				     be64_to_cpu(target->ioc_guid),
3683 				     &target->rdma_cm.dst);
3684 		} else {
3685 			shost_printk(KERN_INFO, target->scsi_host,
3686 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3687 				     be64_to_cpu(target->id_ext),
3688 				     be64_to_cpu(target->ioc_guid),
3689 				     be64_to_cpu(target->initiator_ext));
3690 		}
3691 		ret = -EEXIST;
3692 		goto out;
3693 	}
3694 
3695 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3696 	    target->cmd_sg_cnt < target->sg_tablesize) {
3697 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3698 		target->sg_tablesize = target->cmd_sg_cnt;
3699 	}
3700 
3701 	if (srp_dev->use_fast_reg) {
3702 		bool gaps_reg = (ibdev->attrs.device_cap_flags &
3703 				 IB_DEVICE_SG_GAPS_REG);
3704 
3705 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3706 				  (ilog2(srp_dev->mr_page_size) - 9);
3707 		if (!gaps_reg) {
3708 			/*
3709 			 * FR can only map one HCA page per entry. If the start
3710 			 * address is not aligned on an HCA page boundary, two
3711 			 * entries will be used for the head and the tail
3712 			 * although these two entries combined contain at most
3713 			 * one HCA page of data. Hence the "+ 1" in the
3714 			 * calculation below.
3715 			 *
3716 			 * The indirect data buffer descriptor is contiguous
3717 			 * so the memory for that buffer will only be
3718 			 * registered if register_always is true. Hence add
3719 			 * one to mr_per_cmd if register_always has been set.
3720 			 */
3721 			mr_per_cmd = register_always +
3722 				(target->scsi_host->max_sectors + 1 +
3723 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3724 		} else {
3725 			mr_per_cmd = register_always +
3726 				(target->sg_tablesize +
3727 				 srp_dev->max_pages_per_mr - 1) /
3728 				srp_dev->max_pages_per_mr;
3729 		}
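		/*
		 * Illustrative numbers (not taken from any particular HCA):
		 * with a 4 KiB mr_page_size each MR page covers eight
		 * 512-byte sectors, so max_pages_per_mr = 256 gives
		 * max_sectors_per_mr = 2048; with max_sectors = 1024 and
		 * register_always enabled this yields
		 * mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
		 */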
3730 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3731 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3732 			 max_sectors_per_mr, mr_per_cmd);
3733 	}
3734 
3735 	target_host->sg_tablesize = target->sg_tablesize;
3736 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3737 	target->mr_per_cmd = mr_per_cmd;
3738 	target->indirect_size = target->sg_tablesize *
3739 				sizeof (struct srp_direct_buf);
3740 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3741 				       srp_use_imm_data,
3742 				       target->max_it_iu_size);
3743 
3744 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3745 	INIT_WORK(&target->remove_work, srp_remove_work);
3746 	spin_lock_init(&target->lock);
3747 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3748 	if (ret)
3749 		goto out;
3750 
3751 	ret = -ENOMEM;
3752 	if (target->ch_count == 0)
3753 		target->ch_count =
3754 			max_t(unsigned int, num_online_nodes(),
3755 			      min(ch_count ?:
3756 					  min(4 * num_online_nodes(),
3757 					      ibdev->num_comp_vectors),
3758 				  num_online_cpus()));
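	/*
	 * E.g. (illustrative): two online NUMA nodes, 16 online CPUs and
	 * an HCA with 8 completion vectors, with the ch_count module
	 * parameter left at 0, give min(min(4 * 2, 8), 16) = 8 and hence
	 * max(2, 8) = 8 channels.
	 */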
3759 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3760 			     GFP_KERNEL);
3761 	if (!target->ch)
3762 		goto out;
3763 
3764 	node_idx = 0;
3765 	for_each_online_node(node) {
3766 		const int ch_start = (node_idx * target->ch_count /
3767 				      num_online_nodes());
3768 		const int ch_end = ((node_idx + 1) * target->ch_count /
3769 				    num_online_nodes());
3770 		const int cv_start = node_idx * ibdev->num_comp_vectors /
3771 				     num_online_nodes();
3772 		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3773 				   num_online_nodes();
3774 		int cpu_idx = 0;
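		/*
		 * E.g. (illustrative): with two online nodes and
		 * target->ch_count = 8, node 0 owns channels [0, 4) and
		 * node 1 owns channels [4, 8); the completion vectors are
		 * partitioned over the nodes in the same way.
		 */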
3775 
3776 		for_each_online_cpu(cpu) {
3777 			if (cpu_to_node(cpu) != node)
3778 				continue;
3779 			if (ch_start + cpu_idx >= ch_end)
3780 				continue;
3781 			ch = &target->ch[ch_start + cpu_idx];
3782 			ch->target = target;
3783 			ch->comp_vector = cv_start == cv_end ? cv_start :
3784 				cv_start + cpu_idx % (cv_end - cv_start);
3785 			spin_lock_init(&ch->lock);
3786 			INIT_LIST_HEAD(&ch->free_tx);
3787 			ret = srp_new_cm_id(ch);
3788 			if (ret)
3789 				goto err_disconnect;
3790 
3791 			ret = srp_create_ch_ib(ch);
3792 			if (ret)
3793 				goto err_disconnect;
3794 
3795 			ret = srp_alloc_req_data(ch);
3796 			if (ret)
3797 				goto err_disconnect;
3798 
3799 			ret = srp_connect_ch(ch, max_iu_len, multich);
3800 			if (ret) {
3801 				char dst[64];
3802 
3803 				if (target->using_rdma_cm)
3804 					snprintf(dst, sizeof(dst), "%pIS",
3805 						 &target->rdma_cm.dst);
3806 				else
3807 					snprintf(dst, sizeof(dst), "%pI6",
3808 						 target->ib_cm.orig_dgid.raw);
3809 				shost_printk(KERN_ERR, target->scsi_host,
3810 					     PFX "Connection %d/%d to %s failed\n",
3811 					     ch_start + cpu_idx,
3812 					     target->ch_count, dst);
3813 				if (node_idx == 0 && cpu_idx == 0) {
3814 					goto free_ch;
3815 				} else {
3816 					srp_free_ch_ib(target, ch);
3817 					srp_free_req_data(target, ch);
3818 					target->ch_count = ch - target->ch;
3819 					goto connected;
3820 				}
3821 			}
3822 
3823 			multich = true;
3824 			cpu_idx++;
3825 		}
3826 		node_idx++;
3827 	}
3828 
3829 connected:
3830 	target->scsi_host->nr_hw_queues = target->ch_count;
3831 
3832 	ret = srp_add_target(host, target);
3833 	if (ret)
3834 		goto err_disconnect;
3835 
3836 	if (target->state != SRP_TARGET_REMOVED) {
3837 		if (target->using_rdma_cm) {
3838 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3839 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3840 				     be64_to_cpu(target->id_ext),
3841 				     be64_to_cpu(target->ioc_guid),
3842 				     target->sgid.raw, &target->rdma_cm.dst);
3843 		} else {
3844 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3845 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3846 				     be64_to_cpu(target->id_ext),
3847 				     be64_to_cpu(target->ioc_guid),
3848 				     be16_to_cpu(target->ib_cm.pkey),
3849 				     be64_to_cpu(target->ib_cm.service_id),
3850 				     target->sgid.raw,
3851 				     target->ib_cm.orig_dgid.raw);
3852 		}
3853 	}
3854 
3855 	ret = count;
3856 
3857 out:
3858 	mutex_unlock(&host->add_target_mutex);
3859 
3860 put:
3861 	scsi_host_put(target->scsi_host);
3862 	if (ret < 0) {
3863 		/*
3864 		 * If a call to srp_remove_target() has not been scheduled,
3865 		 * drop the network namespace reference that was obtained
3866 		 * earlier in this function.
3867 		 */
3868 		if (target->state != SRP_TARGET_REMOVED)
3869 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3870 		scsi_host_put(target->scsi_host);
3871 	}
3872 
3873 	return ret;
3874 
3875 err_disconnect:
3876 	srp_disconnect_target(target);
3877 
3878 free_ch:
3879 	for (i = 0; i < target->ch_count; i++) {
3880 		ch = &target->ch[i];
3881 		srp_free_ch_ib(target, ch);
3882 		srp_free_req_data(target, ch);
3883 	}
3884 
3885 	kfree(target->ch);
3886 	goto out;
3887 }
3888 
3889 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
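
/*
 * A target port is created by writing a parameter string to this sysfs
 * attribute, e.g. (illustrative identifiers and device name):
 *
 *   echo "id_ext=200100A0B8CA1234,ioc_guid=0002c90300a0b123,\
 *         dgid=fe800000000000000002c90300a0b124,pkey=ffff,\
 *         service_id=0002c90300a0b123" \
 *       > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
 */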
3890 
3891 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3892 			  char *buf)
3893 {
3894 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3895 
3896 	return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3897 }
3898 
3899 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3900 
3901 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3902 			 char *buf)
3903 {
3904 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3905 
3906 	return sprintf(buf, "%d\n", host->port);
3907 }
3908 
3909 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3910 
3911 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3912 {
3913 	struct srp_host *host;
3914 
3915 	host = kzalloc(sizeof *host, GFP_KERNEL);
3916 	if (!host)
3917 		return NULL;
3918 
3919 	INIT_LIST_HEAD(&host->target_list);
3920 	spin_lock_init(&host->target_lock);
3921 	init_completion(&host->released);
3922 	mutex_init(&host->add_target_mutex);
3923 	host->srp_dev = device;
3924 	host->port = port;
3925 
3926 	host->dev.class = &srp_class;
3927 	host->dev.parent = device->dev->dev.parent;
3928 	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3929 		     port);
3930 
3931 	if (device_register(&host->dev))
3932 		goto free_host;
3933 	if (device_create_file(&host->dev, &dev_attr_add_target))
3934 		goto err_class;
3935 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3936 		goto err_class;
3937 	if (device_create_file(&host->dev, &dev_attr_port))
3938 		goto err_class;
3939 
3940 	return host;
3941 
3942 err_class:
3943 	device_unregister(&host->dev);
3944 
3945 free_host:
3946 	kfree(host);
3947 
3948 	return NULL;
3949 }
3950 
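/*
 * srp_add_port() gives each (HCA, port) pair its own device named
 * srp-<ibdev>-<port> in the infiniband_srp class, carrying the
 * add_target, ibdev and port attributes defined above.
 */
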
3951 static void srp_rename_dev(struct ib_device *device, void *client_data)
3952 {
3953 	struct srp_device *srp_dev = client_data;
3954 	struct srp_host *host, *tmp_host;
3955 
3956 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3957 		char name[IB_DEVICE_NAME_MAX + 8];
3958 
3959 		snprintf(name, sizeof(name), "srp-%s-%d",
3960 			 dev_name(&device->dev), host->port);
3961 		device_rename(&host->dev, name);
3962 	}
3963 }
3964 
3965 static int srp_add_one(struct ib_device *device)
3966 {
3967 	struct srp_device *srp_dev;
3968 	struct ib_device_attr *attr = &device->attrs;
3969 	struct srp_host *host;
3970 	int mr_page_shift;
3971 	unsigned int p;
3972 	u64 max_pages_per_mr;
3973 	unsigned int flags = 0;
3974 
3975 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3976 	if (!srp_dev)
3977 		return -ENOMEM;
3978 
3979 	/*
3980 	 * Use the smallest page size supported by the HCA, down to a
3981 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3982 	 * out of smaller entries.
3983 	 */
3984 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
3985 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3986 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3987 	max_pages_per_mr	= attr->max_mr_size;
3988 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3989 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3990 		 attr->max_mr_size, srp_dev->mr_page_size,
3991 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3992 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3993 					  max_pages_per_mr);
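	/*
	 * E.g. (illustrative): page_size_cap = 0xfffff000 yields
	 * mr_page_shift = max(12, 13 - 1) = 12, i.e. 4 KiB MR pages, and
	 * max_mr_size = 4 GiB then allows 1048576 pages per MR before the
	 * clamp to SRP_MAX_PAGES_PER_MR.
	 */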
3994 
3995 	srp_dev->has_fr = (attr->device_cap_flags &
3996 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
3997 	if (!never_register && !srp_dev->has_fr)
3998 		dev_warn(&device->dev, "FR is not supported\n");
3999 	else if (!never_register &&
4000 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4001 		srp_dev->use_fast_reg = srp_dev->has_fr;
4002 
4003 	if (never_register || !register_always || !srp_dev->has_fr)
4004 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
4005 
4006 	if (srp_dev->use_fast_reg) {
4007 		srp_dev->max_pages_per_mr =
4008 			min_t(u32, srp_dev->max_pages_per_mr,
4009 			      attr->max_fast_reg_page_list_len);
4010 	}
4011 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
4012 				   srp_dev->max_pages_per_mr;
4013 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4014 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
4015 		 attr->max_fast_reg_page_list_len,
4016 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4017 
4018 	INIT_LIST_HEAD(&srp_dev->dev_list);
4019 
4020 	srp_dev->dev = device;
4021 	srp_dev->pd  = ib_alloc_pd(device, flags);
4022 	if (IS_ERR(srp_dev->pd)) {
4023 		int ret = PTR_ERR(srp_dev->pd);
4024 
4025 		kfree(srp_dev);
4026 		return ret;
4027 	}
4028 
4029 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4030 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4031 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4032 	}
4033 
4034 	rdma_for_each_port (device, p) {
4035 		host = srp_add_port(srp_dev, p);
4036 		if (host)
4037 			list_add_tail(&host->list, &srp_dev->dev_list);
4038 	}
4039 
4040 	ib_set_client_data(device, &srp_client, srp_dev);
4041 	return 0;
4042 }
4043 
4044 static void srp_remove_one(struct ib_device *device, void *client_data)
4045 {
4046 	struct srp_device *srp_dev;
4047 	struct srp_host *host, *tmp_host;
4048 	struct srp_target_port *target;
4049 
4050 	srp_dev = client_data;
4051 
4052 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4053 		device_unregister(&host->dev);
4054 		/*
4055 		 * Wait for the sysfs entry to go away, so that no new
4056 		 * target ports can be created.
4057 		 */
4058 		wait_for_completion(&host->released);
4059 
4060 		/*
4061 		 * Remove all target ports.
4062 		 */
4063 		spin_lock(&host->target_lock);
4064 		list_for_each_entry(target, &host->target_list, list)
4065 			srp_queue_remove_work(target);
4066 		spin_unlock(&host->target_lock);
4067 
4068 		/*
4069 		 * Wait for tl_err and target port removal tasks.
4070 		 */
4071 		flush_workqueue(system_long_wq);
4072 		flush_workqueue(srp_remove_wq);
4073 
4074 		kfree(host);
4075 	}
4076 
4077 	ib_dealloc_pd(srp_dev->pd);
4078 
4079 	kfree(srp_dev);
4080 }
4081 
4082 static struct srp_function_template ib_srp_transport_functions = {
4083 	.has_rport_state	 = true,
4084 	.reset_timer_if_blocked	 = true,
4085 	.reconnect_delay	 = &srp_reconnect_delay,
4086 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4087 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4088 	.reconnect		 = srp_rport_reconnect,
4089 	.rport_delete		 = srp_rport_delete,
4090 	.terminate_rport_io	 = srp_terminate_io,
4091 };
4092 
4093 static int __init srp_init_module(void)
4094 {
4095 	int ret;
4096 
4097 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4098 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4099 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4100 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4101 
4102 	if (srp_sg_tablesize) {
4103 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4104 		if (!cmd_sg_entries)
4105 			cmd_sg_entries = srp_sg_tablesize;
4106 	}
4107 
4108 	if (!cmd_sg_entries)
4109 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4110 
4111 	if (cmd_sg_entries > 255) {
4112 		pr_warn("Clamping cmd_sg_entries to 255\n");
4113 		cmd_sg_entries = 255;
4114 	}
4115 
4116 	if (!indirect_sg_entries)
4117 		indirect_sg_entries = cmd_sg_entries;
4118 	else if (indirect_sg_entries < cmd_sg_entries) {
4119 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4120 			cmd_sg_entries);
4121 		indirect_sg_entries = cmd_sg_entries;
4122 	}
4123 
4124 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4125 		pr_warn("Clamping indirect_sg_entries to %u\n",
4126 			SG_MAX_SEGMENTS);
4127 		indirect_sg_entries = SG_MAX_SEGMENTS;
4128 	}
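	/*
	 * E.g. (illustrative): loading the module with cmd_sg_entries=300
	 * and indirect_sg_entries unset results in cmd_sg_entries = 255
	 * and indirect_sg_entries = 255 after the normalization above.
	 */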
4129 
4130 	srp_remove_wq = create_workqueue("srp_remove");
4131 	if (!srp_remove_wq) {
4132 		ret = -ENOMEM;
4133 		goto out;
4134 	}
4135 
4136 	ret = -ENOMEM;
4137 	ib_srp_transport_template =
4138 		srp_attach_transport(&ib_srp_transport_functions);
4139 	if (!ib_srp_transport_template)
4140 		goto destroy_wq;
4141 
4142 	ret = class_register(&srp_class);
4143 	if (ret) {
4144 		pr_err("couldn't register class infiniband_srp\n");
4145 		goto release_tr;
4146 	}
4147 
4148 	ib_sa_register_client(&srp_sa_client);
4149 
4150 	ret = ib_register_client(&srp_client);
4151 	if (ret) {
4152 		pr_err("couldn't register IB client\n");
4153 		goto unreg_sa;
4154 	}
4155 
4156 out:
4157 	return ret;
4158 
4159 unreg_sa:
4160 	ib_sa_unregister_client(&srp_sa_client);
4161 	class_unregister(&srp_class);
4162 
4163 release_tr:
4164 	srp_release_transport(ib_srp_transport_template);
4165 
4166 destroy_wq:
4167 	destroy_workqueue(srp_remove_wq);
4168 	goto out;
4169 }
4170 
4171 static void __exit srp_cleanup_module(void)
4172 {
4173 	ib_unregister_client(&srp_client);
4174 	ib_sa_unregister_client(&srp_sa_client);
4175 	class_unregister(&srp_class);
4176 	srp_release_transport(ib_srp_transport_template);
4177 	destroy_workqueue(srp_remove_wq);
4178 }
4179 
4180 module_init(srp_init_module);
4181 module_exit(srp_cleanup_module);
4182