xref: /linux/drivers/infiniband/ulp/srp/ib_srp.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
33  */
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 
44 #include <asm/atomic.h>
45 
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_device.h>
48 #include <scsi/scsi_dbg.h>
49 #include <scsi/srp.h>
50 
51 #include <rdma/ib_cache.h>
52 
53 #include "ib_srp.h"
54 
55 #define DRV_NAME	"ib_srp"
56 #define PFX		DRV_NAME ": "
57 #define DRV_VERSION	"0.2"
58 #define DRV_RELDATE	"November 1, 2005"
59 
60 MODULE_AUTHOR("Roland Dreier");
61 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
62 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
63 MODULE_LICENSE("Dual BSD/GPL");
64 
65 static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
66 static int srp_max_iu_len;
67 
68 module_param(srp_sg_tablesize, int, 0444);
69 MODULE_PARM_DESC(srp_sg_tablesize,
70 		 "Max number of gather/scatter entries per I/O (default is 12)");
71 
72 static int topspin_workarounds = 1;
73 
74 module_param(topspin_workarounds, int, 0444);
75 MODULE_PARM_DESC(topspin_workarounds,
76 		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
77 
78 static int mellanox_workarounds = 1;
79 
80 module_param(mellanox_workarounds, int, 0444);
81 MODULE_PARM_DESC(mellanox_workarounds,
82 		 "Enable workarounds for Mellanox SRP target bugs if != 0");
83 
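
/*
 * Example (illustrative, not part of this file): all three parameters
 * above use mode 0444, so they cannot be changed at runtime and are set
 * at module load time, e.g.
 *
 *     modprobe ib_srp srp_sg_tablesize=32 topspin_workarounds=0
 */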
84 static void srp_add_one(struct ib_device *device);
85 static void srp_remove_one(struct ib_device *device);
86 static void srp_completion(struct ib_cq *cq, void *target_ptr);
87 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
88 
89 static struct ib_client srp_client = {
90 	.name   = "srp",
91 	.add    = srp_add_one,
92 	.remove = srp_remove_one
93 };
94 
95 static struct ib_sa_client srp_sa_client;
96 
97 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
98 {
99 	return (struct srp_target_port *) host->hostdata;
100 }
101 
102 static const char *srp_target_info(struct Scsi_Host *host)
103 {
104 	return host_to_target(host)->target_name;
105 }
106 
107 static int srp_target_is_topspin(struct srp_target_port *target)
108 {
109 	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
110 	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };
111 
112 	return topspin_workarounds &&
113 		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
114 		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
115 }
116 
117 static int srp_target_is_mellanox(struct srp_target_port *target)
118 {
119 	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
120 
121 	return mellanox_workarounds &&
122 		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
123 }
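
/*
 * Both helpers above match the first three bytes of the (big-endian) IOC
 * GUID against the vendor's IEEE OUI to identify the target's manufacturer.
 */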
124 
125 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
126 				   gfp_t gfp_mask,
127 				   enum dma_data_direction direction)
128 {
129 	struct srp_iu *iu;
130 
131 	iu = kmalloc(sizeof *iu, gfp_mask);
132 	if (!iu)
133 		goto out;
134 
135 	iu->buf = kzalloc(size, gfp_mask);
136 	if (!iu->buf)
137 		goto out_free_iu;
138 
139 	iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
140 	if (ib_dma_mapping_error(host->dev->dev, iu->dma))
141 		goto out_free_buf;
142 
143 	iu->size      = size;
144 	iu->direction = direction;
145 
146 	return iu;
147 
148 out_free_buf:
149 	kfree(iu->buf);
150 out_free_iu:
151 	kfree(iu);
152 out:
153 	return NULL;
154 }
155 
156 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
157 {
158 	if (!iu)
159 		return;
160 
161 	ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
162 	kfree(iu->buf);
163 	kfree(iu);
164 }
165 
166 static void srp_qp_event(struct ib_event *event, void *context)
167 {
168 	printk(KERN_ERR PFX "QP event %d\n", event->event);
169 }
170 
171 static int srp_init_qp(struct srp_target_port *target,
172 		       struct ib_qp *qp)
173 {
174 	struct ib_qp_attr *attr;
175 	int ret;
176 
177 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
178 	if (!attr)
179 		return -ENOMEM;
180 
181 	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
182 				  target->srp_host->port,
183 				  be16_to_cpu(target->path.pkey),
184 				  &attr->pkey_index);
185 	if (ret)
186 		goto out;
187 
188 	attr->qp_state        = IB_QPS_INIT;
189 	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
190 				    IB_ACCESS_REMOTE_WRITE);
191 	attr->port_num        = target->srp_host->port;
192 
193 	ret = ib_modify_qp(qp, attr,
194 			   IB_QP_STATE		|
195 			   IB_QP_PKEY_INDEX	|
196 			   IB_QP_ACCESS_FLAGS	|
197 			   IB_QP_PORT);
198 
199 out:
200 	kfree(attr);
201 	return ret;
202 }
203 
204 static int srp_create_target_ib(struct srp_target_port *target)
205 {
206 	struct ib_qp_init_attr *init_attr;
207 	int ret;
208 
209 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
210 	if (!init_attr)
211 		return -ENOMEM;
212 
213 	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
214 				  NULL, target, SRP_CQ_SIZE, 0);
215 	if (IS_ERR(target->cq)) {
216 		ret = PTR_ERR(target->cq);
217 		goto out;
218 	}
219 
220 	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
221 
222 	init_attr->event_handler       = srp_qp_event;
223 	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
224 	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
225 	init_attr->cap.max_recv_sge    = 1;
226 	init_attr->cap.max_send_sge    = 1;
227 	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
228 	init_attr->qp_type             = IB_QPT_RC;
229 	init_attr->send_cq             = target->cq;
230 	init_attr->recv_cq             = target->cq;
231 
232 	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
233 	if (IS_ERR(target->qp)) {
234 		ret = PTR_ERR(target->qp);
235 		ib_destroy_cq(target->cq);
236 		goto out;
237 	}
238 
239 	ret = srp_init_qp(target, target->qp);
240 	if (ret) {
241 		ib_destroy_qp(target->qp);
242 		ib_destroy_cq(target->cq);
243 		goto out;
244 	}
245 
246 out:
247 	kfree(init_attr);
248 	return ret;
249 }
250 
251 static void srp_free_target_ib(struct srp_target_port *target)
252 {
253 	int i;
254 
255 	ib_destroy_qp(target->qp);
256 	ib_destroy_cq(target->cq);
257 
258 	for (i = 0; i < SRP_RQ_SIZE; ++i)
259 		srp_free_iu(target->srp_host, target->rx_ring[i]);
260 	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
261 		srp_free_iu(target->srp_host, target->tx_ring[i]);
262 }
263 
264 static void srp_path_rec_completion(int status,
265 				    struct ib_sa_path_rec *pathrec,
266 				    void *target_ptr)
267 {
268 	struct srp_target_port *target = target_ptr;
269 
270 	target->status = status;
271 	if (status)
272 		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
273 	else
274 		target->path = *pathrec;
275 	complete(&target->done);
276 }
277 
278 static int srp_lookup_path(struct srp_target_port *target)
279 {
280 	target->path.numb_path = 1;
281 
282 	init_completion(&target->done);
283 
284 	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
285 						   target->srp_host->dev->dev,
286 						   target->srp_host->port,
287 						   &target->path,
288 						   IB_SA_PATH_REC_SERVICE_ID	|
289 						   IB_SA_PATH_REC_DGID		|
290 						   IB_SA_PATH_REC_SGID		|
291 						   IB_SA_PATH_REC_NUMB_PATH	|
292 						   IB_SA_PATH_REC_PKEY,
293 						   SRP_PATH_REC_TIMEOUT_MS,
294 						   GFP_KERNEL,
295 						   srp_path_rec_completion,
296 						   target, &target->path_query);
297 	if (target->path_query_id < 0)
298 		return target->path_query_id;
299 
300 	wait_for_completion(&target->done);
301 
302 	if (target->status < 0)
303 		printk(KERN_WARNING PFX "Path record query failed\n");
304 
305 	return target->status;
306 }
307 
308 static int srp_send_req(struct srp_target_port *target)
309 {
310 	struct {
311 		struct ib_cm_req_param param;
312 		struct srp_login_req   priv;
313 	} *req = NULL;
314 	int status;
315 
316 	req = kzalloc(sizeof *req, GFP_KERNEL);
317 	if (!req)
318 		return -ENOMEM;
319 
320 	req->param.primary_path 	      = &target->path;
321 	req->param.alternate_path 	      = NULL;
322 	req->param.service_id 		      = target->service_id;
323 	req->param.qp_num 		      = target->qp->qp_num;
324 	req->param.qp_type 		      = target->qp->qp_type;
325 	req->param.private_data 	      = &req->priv;
326 	req->param.private_data_len 	      = sizeof req->priv;
327 	req->param.flow_control 	      = 1;
328 
329 	get_random_bytes(&req->param.starting_psn, 4);
330 	req->param.starting_psn 	     &= 0xffffff;
331 
332 	/*
333 	 * Pick some arbitrary defaults here; we could make these
334 	 * module parameters if anyone cared about setting them.
335 	 */
336 	req->param.responder_resources	      = 4;
337 	req->param.remote_cm_response_timeout = 20;
338 	req->param.local_cm_response_timeout  = 20;
339 	req->param.retry_count 		      = 7;
340 	req->param.rnr_retry_count 	      = 7;
341 	req->param.max_cm_retries 	      = 15;
342 
343 	req->priv.opcode     	= SRP_LOGIN_REQ;
344 	req->priv.tag        	= 0;
345 	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
346 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
347 					      SRP_BUF_FORMAT_INDIRECT);
348 	/*
349 	 * In the published SRP specification (draft rev. 16a), the
350 	 * port identifier format is 8 bytes of ID extension followed
351 	 * by 8 bytes of GUID.  Older drafts put the two halves in the
352 	 * opposite order, so that the GUID comes first.
353 	 *
354 	 * Targets conforming to these obsolete drafts can be
355 	 * recognized by the I/O Class they report.
356 	 */
357 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
358 		memcpy(req->priv.initiator_port_id,
359 		       &target->path.sgid.global.interface_id, 8);
360 		memcpy(req->priv.initiator_port_id + 8,
361 		       &target->initiator_ext, 8);
362 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
363 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
364 	} else {
365 		memcpy(req->priv.initiator_port_id,
366 		       &target->initiator_ext, 8);
367 		memcpy(req->priv.initiator_port_id + 8,
368 		       &target->path.sgid.global.interface_id, 8);
369 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
370 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
371 	}
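
	/*
	 * Resulting 16-byte port ID layouts (each half is 8 bytes):
	 *
	 *   rev 16a: initiator = initiator_ext | port GUID,
	 *            target    = id_ext        | ioc_guid
	 *   rev 10:  the two halves of each ID are swapped
	 */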
372 
373 	/*
374 	 * Topspin/Cisco SRP targets will reject our login unless we
375 	 * zero out the first 8 bytes of our initiator port ID and set
376 	 * the second 8 bytes to the local node GUID.
377 	 */
378 	if (srp_target_is_topspin(target)) {
379 		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
380 		       "activated for target GUID %016llx\n",
381 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
382 		memset(req->priv.initiator_port_id, 0, 8);
383 		memcpy(req->priv.initiator_port_id + 8,
384 		       &target->srp_host->dev->dev->node_guid, 8);
385 	}
386 
387 	status = ib_send_cm_req(target->cm_id, &req->param);
388 
389 	kfree(req);
390 
391 	return status;
392 }
393 
394 static void srp_disconnect_target(struct srp_target_port *target)
395 {
396 	/* XXX should send SRP_I_LOGOUT request */
397 
398 	init_completion(&target->done);
399 	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
400 		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
401 		return;
402 	}
403 	wait_for_completion(&target->done);
404 }
405 
406 static void srp_remove_work(struct work_struct *work)
407 {
408 	struct srp_target_port *target =
409 		container_of(work, struct srp_target_port, work);
410 
411 	spin_lock_irq(target->scsi_host->host_lock);
412 	if (target->state != SRP_TARGET_DEAD) {
413 		spin_unlock_irq(target->scsi_host->host_lock);
414 		return;
415 	}
416 	target->state = SRP_TARGET_REMOVED;
417 	spin_unlock_irq(target->scsi_host->host_lock);
418 
419 	spin_lock(&target->srp_host->target_lock);
420 	list_del(&target->list);
421 	spin_unlock(&target->srp_host->target_lock);
422 
423 	scsi_remove_host(target->scsi_host);
424 	ib_destroy_cm_id(target->cm_id);
425 	srp_free_target_ib(target);
426 	scsi_host_put(target->scsi_host);
427 }
428 
429 static int srp_connect_target(struct srp_target_port *target)
430 {
431 	int ret;
432 
433 	ret = srp_lookup_path(target);
434 	if (ret)
435 		return ret;
436 
437 	while (1) {
438 		init_completion(&target->done);
439 		ret = srp_send_req(target);
440 		if (ret)
441 			return ret;
442 		wait_for_completion(&target->done);
443 
444 		/*
445 		 * The CM event handling code will set status to
446 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
447 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
448 		 * redirect REJ back.
449 		 */
450 		switch (target->status) {
451 		case 0:
452 			return 0;
453 
454 		case SRP_PORT_REDIRECT:
455 			ret = srp_lookup_path(target);
456 			if (ret)
457 				return ret;
458 			break;
459 
460 		case SRP_DLID_REDIRECT:
461 			break;
462 
463 		default:
464 			return target->status;
465 		}
466 	}
467 }
468 
469 static void srp_unmap_data(struct scsi_cmnd *scmnd,
470 			   struct srp_target_port *target,
471 			   struct srp_request *req)
472 {
473 	if (!scsi_sglist(scmnd) ||
474 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
475 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
476 		return;
477 
478 	if (req->fmr) {
479 		ib_fmr_pool_unmap(req->fmr);
480 		req->fmr = NULL;
481 	}
482 
483 	ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
484 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
485 }
486 
487 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
488 {
489 	srp_unmap_data(req->scmnd, target, req);
490 	list_move_tail(&req->list, &target->free_reqs);
491 }
492 
493 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
494 {
495 	req->scmnd->result = DID_RESET << 16;
496 	req->scmnd->scsi_done(req->scmnd);
497 	srp_remove_req(target, req);
498 }
499 
500 static int srp_reconnect_target(struct srp_target_port *target)
501 {
502 	struct ib_cm_id *new_cm_id;
503 	struct ib_qp_attr qp_attr;
504 	struct srp_request *req, *tmp;
505 	struct ib_wc wc;
506 	int ret;
507 
508 	spin_lock_irq(target->scsi_host->host_lock);
509 	if (target->state != SRP_TARGET_LIVE) {
510 		spin_unlock_irq(target->scsi_host->host_lock);
511 		return -EAGAIN;
512 	}
513 	target->state = SRP_TARGET_CONNECTING;
514 	spin_unlock_irq(target->scsi_host->host_lock);
515 
516 	srp_disconnect_target(target);
517 	/*
518 	 * Now get a new local CM ID so that we avoid confusing the
519 	 * target in case things are really fouled up.
520 	 */
521 	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
522 				    srp_cm_handler, target);
523 	if (IS_ERR(new_cm_id)) {
524 		ret = PTR_ERR(new_cm_id);
525 		goto err;
526 	}
527 	ib_destroy_cm_id(target->cm_id);
528 	target->cm_id = new_cm_id;
529 
530 	qp_attr.qp_state = IB_QPS_RESET;
531 	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
532 	if (ret)
533 		goto err;
534 
535 	ret = srp_init_qp(target, target->qp);
536 	if (ret)
537 		goto err;
538 
539 	while (ib_poll_cq(target->cq, 1, &wc) > 0)
540 		; /* nothing */
541 
542 	spin_lock_irq(target->scsi_host->host_lock);
543 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
544 		srp_reset_req(target, req);
545 	spin_unlock_irq(target->scsi_host->host_lock);
546 
547 	target->rx_head	 = 0;
548 	target->tx_head	 = 0;
549 	target->tx_tail  = 0;
550 
551 	target->qp_in_error = 0;
552 	ret = srp_connect_target(target);
553 	if (ret)
554 		goto err;
555 
556 	spin_lock_irq(target->scsi_host->host_lock);
557 	if (target->state == SRP_TARGET_CONNECTING) {
558 		ret = 0;
559 		target->state = SRP_TARGET_LIVE;
560 	} else
561 		ret = -EAGAIN;
562 	spin_unlock_irq(target->scsi_host->host_lock);
563 
564 	return ret;
565 
566 err:
567 	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);
568 
569 	/*
570 	 * We couldn't reconnect, so kill our target port off.
571 	 * However, we have to defer the real removal because we might
572 	 * be in the context of the SCSI error handler now, which
573 	 * would deadlock if we call scsi_remove_host().
574 	 */
575 	spin_lock_irq(target->scsi_host->host_lock);
576 	if (target->state == SRP_TARGET_CONNECTING) {
577 		target->state = SRP_TARGET_DEAD;
578 		INIT_WORK(&target->work, srp_remove_work);
579 		schedule_work(&target->work);
580 	}
581 	spin_unlock_irq(target->scsi_host->host_lock);
582 
583 	return ret;
584 }
585 
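/*
 * Try to map the command's scatterlist through the FMR pool so that it
 * can be described to the target with a single direct descriptor.  This
 * only works if every s/g entry except the first starts on an FMR page
 * boundary and every entry except the last ends on one; if not (or if
 * no FMR pool is available) an error is returned and srp_map_data()
 * falls back to an indirect descriptor.
 */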
586 static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
587 		       int sg_cnt, struct srp_request *req,
588 		       struct srp_direct_buf *buf)
589 {
590 	u64 io_addr = 0;
591 	u64 *dma_pages;
592 	u32 len;
593 	int page_cnt;
594 	int i, j;
595 	int ret;
596 	struct srp_device *dev = target->srp_host->dev;
597 	struct ib_device *ibdev = dev->dev;
598 	struct scatterlist *sg;
599 
600 	if (!dev->fmr_pool)
601 		return -ENODEV;
602 
603 	if (srp_target_is_mellanox(target) &&
604 	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
605 		return -EINVAL;
606 
607 	len = page_cnt = 0;
608 	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
609 		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
610 
611 		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
612 			if (i > 0)
613 				return -EINVAL;
614 			else
615 				++page_cnt;
616 		}
617 		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
618 		    ~dev->fmr_page_mask) {
619 			if (i < sg_cnt - 1)
620 				return -EINVAL;
621 			else
622 				++page_cnt;
623 		}
624 
625 		len += dma_len;
626 	}
627 
628 	page_cnt += len >> dev->fmr_page_shift;
629 	if (page_cnt > SRP_FMR_SIZE)
630 		return -ENOMEM;
631 
632 	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
633 	if (!dma_pages)
634 		return -ENOMEM;
635 
636 	page_cnt = 0;
637 	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
638 		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
639 
640 		for (j = 0; j < dma_len; j += dev->fmr_page_size)
641 			dma_pages[page_cnt++] =
642 				(ib_sg_dma_address(ibdev, sg) &
643 				 dev->fmr_page_mask) + j;
644 	}
645 
646 	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
647 					dma_pages, page_cnt, io_addr);
648 	if (IS_ERR(req->fmr)) {
649 		ret = PTR_ERR(req->fmr);
650 		req->fmr = NULL;
651 		goto out;
652 	}
653 
654 	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
655 			       ~dev->fmr_page_mask);
656 	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
657 	buf->len = cpu_to_be32(len);
658 
659 	ret = 0;
660 
661 out:
662 	kfree(dma_pages);
663 
664 	return ret;
665 }
666 
667 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
668 			struct srp_request *req)
669 {
670 	struct scatterlist *scat;
671 	struct srp_cmd *cmd = req->cmd->buf;
672 	int len, nents, count;
673 	u8 fmt = SRP_DATA_DESC_DIRECT;
674 	struct srp_device *dev;
675 	struct ib_device *ibdev;
676 
677 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
678 		return sizeof (struct srp_cmd);
679 
680 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
681 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
682 		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
683 		       scmnd->sc_data_direction);
684 		return -EINVAL;
685 	}
686 
687 	nents = scsi_sg_count(scmnd);
688 	scat  = scsi_sglist(scmnd);
689 
690 	dev = target->srp_host->dev;
691 	ibdev = dev->dev;
692 
693 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
694 
695 	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
697 
698 	if (count == 1) {
699 		/*
700 		 * The midlayer only generated a single gather/scatter
701 		 * entry, or DMA mapping coalesced everything to a
702 		 * single entry.  So a direct descriptor along with
703 		 * the DMA MR suffices.
704 		 */
705 		struct srp_direct_buf *buf = (void *) cmd->add_data;
706 
707 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
708 		buf->key = cpu_to_be32(dev->mr->rkey);
709 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
710 	} else if (srp_map_fmr(target, scat, count, req,
711 			       (void *) cmd->add_data)) {
712 		/*
713 		 * FMR mapping failed, and the scatterlist has more
714 		 * than one entry.  Generate an indirect memory
715 		 * descriptor.
716 		 */
717 		struct srp_indirect_buf *buf = (void *) cmd->add_data;
718 		struct scatterlist *sg;
719 		u32 datalen = 0;
720 		int i;
721 
722 		fmt = SRP_DATA_DESC_INDIRECT;
723 		len = sizeof (struct srp_cmd) +
724 			sizeof (struct srp_indirect_buf) +
725 			count * sizeof (struct srp_direct_buf);
726 
727 		scsi_for_each_sg(scmnd, sg, count, i) {
728 			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
729 
730 			buf->desc_list[i].va  =
731 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
732 			buf->desc_list[i].key =
733 				cpu_to_be32(dev->mr->rkey);
734 			buf->desc_list[i].len = cpu_to_be32(dma_len);
735 			datalen += dma_len;
736 		}
737 
738 		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
739 			cmd->data_out_desc_cnt = count;
740 		else
741 			cmd->data_in_desc_cnt = count;
742 
743 		buf->table_desc.va  =
744 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
745 		buf->table_desc.key =
746 			cpu_to_be32(target->srp_host->dev->mr->rkey);
747 		buf->table_desc.len =
748 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
749 
750 		buf->len = cpu_to_be32(datalen);
751 	}
752 
753 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
754 		cmd->buf_fmt = fmt << 4;
755 	else
756 		cmd->buf_fmt = fmt;
757 
758 	return len;
759 }
760 
761 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
762 {
763 	struct srp_request *req;
764 	struct scsi_cmnd *scmnd;
765 	unsigned long flags;
766 	s32 delta;
767 
768 	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
769 
770 	spin_lock_irqsave(target->scsi_host->host_lock, flags);
771 
772 	target->req_lim += delta;
773 
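	/*
	 * The low bits of the tag index req_ring; srp_send_tsk_mgmt() ORs
	 * SRP_TAG_TSK_MGMT into the tag so that task management responses
	 * can be told apart from normal command responses below.
	 */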
774 	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
775 
776 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
777 		if (be32_to_cpu(rsp->resp_data_len) < 4)
778 			req->tsk_status = -1;
779 		else
780 			req->tsk_status = rsp->data[3];
781 		complete(&req->done);
782 	} else {
783 		scmnd = req->scmnd;
		scmnd = req->scmnd;
		if (!scmnd) {
			/* Should never happen; bail out rather than dereference NULL. */
			printk(KERN_ERR PFX "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			goto out;
		}

		scmnd->result = rsp->status;
789 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
790 			memcpy(scmnd->sense_buffer, rsp->data +
791 			       be32_to_cpu(rsp->resp_data_len),
792 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
793 				     SCSI_SENSE_BUFFERSIZE));
794 		}
795 
796 		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
797 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
798 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
799 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
800 
801 		if (!req->tsk_mgmt) {
802 			scmnd->host_scribble = (void *) -1L;
803 			scmnd->scsi_done(scmnd);
804 
805 			srp_remove_req(target, req);
806 		} else
807 			req->cmd_done = 1;
808 	}
809 
out:
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
811 }
812 
813 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
814 {
815 	struct ib_device *dev;
816 	struct srp_iu *iu;
817 	u8 opcode;
818 
819 	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
820 
821 	dev = target->srp_host->dev->dev;
822 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
823 				   DMA_FROM_DEVICE);
824 
825 	opcode = *(u8 *) iu->buf;
826 
	/* Flip this 0 to 1 to get a hex dump of every received IU for debugging. */
	if (0) {
828 		int i;
829 
830 		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);
831 
832 		for (i = 0; i < wc->byte_len; ++i) {
833 			if (i % 8 == 0)
834 				printk(KERN_ERR "  [%02x] ", i);
835 			printk(" %02x", ((u8 *) iu->buf)[i]);
836 			if ((i + 1) % 8 == 0)
837 				printk("\n");
838 		}
839 
840 		if (wc->byte_len % 8)
841 			printk("\n");
842 	}
843 
844 	switch (opcode) {
845 	case SRP_RSP:
846 		srp_process_rsp(target, iu->buf);
847 		break;
848 
849 	case SRP_T_LOGOUT:
850 		/* XXX Handle target logout */
851 		printk(KERN_WARNING PFX "Got target logout request\n");
852 		break;
853 
854 	default:
855 		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
856 		break;
857 	}
858 
859 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
860 				      DMA_FROM_DEVICE);
861 }
862 
863 static void srp_completion(struct ib_cq *cq, void *target_ptr)
864 {
865 	struct srp_target_port *target = target_ptr;
866 	struct ib_wc wc;
867 
868 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
869 	while (ib_poll_cq(cq, 1, &wc) > 0) {
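	/*
	 * Receive work requests carry SRP_OP_RECV in their wr_id (see
	 * __srp_post_recv()); any completion without that bit is a send,
	 * which only needs tx_tail advanced.
	 */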
870 		if (wc.status) {
871 			printk(KERN_ERR PFX "failed %s status %d\n",
872 			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
873 			       wc.status);
874 			target->qp_in_error = 1;
875 			break;
876 		}
877 
878 		if (wc.wr_id & SRP_OP_RECV)
879 			srp_handle_recv(target, &wc);
880 		else
881 			++target->tx_tail;
882 	}
883 }
884 
885 static int __srp_post_recv(struct srp_target_port *target)
886 {
887 	struct srp_iu *iu;
888 	struct ib_sge list;
889 	struct ib_recv_wr wr, *bad_wr;
890 	unsigned int next;
891 	int ret;
892 
893 	next 	 = target->rx_head & (SRP_RQ_SIZE - 1);
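	/* SRP_RQ_SIZE is a power of two, so the mask above just wraps the ring index. */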
894 	wr.wr_id = next | SRP_OP_RECV;
895 	iu 	 = target->rx_ring[next];
896 
897 	list.addr   = iu->dma;
898 	list.length = iu->size;
899 	list.lkey   = target->srp_host->dev->mr->lkey;
900 
901 	wr.next     = NULL;
902 	wr.sg_list  = &list;
903 	wr.num_sge  = 1;
904 
905 	ret = ib_post_recv(target->qp, &wr, &bad_wr);
906 	if (!ret)
907 		++target->rx_head;
908 
909 	return ret;
910 }
911 
912 static int srp_post_recv(struct srp_target_port *target)
913 {
914 	unsigned long flags;
915 	int ret;
916 
917 	spin_lock_irqsave(target->scsi_host->host_lock, flags);
918 	ret = __srp_post_recv(target);
919 	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
920 
921 	return ret;
922 }
923 
924 /*
925  * Must be called with target->scsi_host->host_lock held to protect
926  * req_lim and tx_head.  Lock cannot be dropped between call here and
927  * call to __srp_post_send().
928  */
929 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
930 {
931 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
932 		return NULL;
933 
934 	if (unlikely(target->req_lim < 1))
935 		++target->zero_req_lim;
936 
937 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
938 }
939 
940 /*
941  * Must be called with target->scsi_host->host_lock held to protect
942  * req_lim and tx_head.
943  */
944 static int __srp_post_send(struct srp_target_port *target,
945 			   struct srp_iu *iu, int len)
946 {
947 	struct ib_sge list;
948 	struct ib_send_wr wr, *bad_wr;
949 	int ret = 0;
950 
951 	list.addr   = iu->dma;
952 	list.length = len;
953 	list.lkey   = target->srp_host->dev->mr->lkey;
954 
955 	wr.next       = NULL;
956 	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
957 	wr.sg_list    = &list;
958 	wr.num_sge    = 1;
959 	wr.opcode     = IB_WR_SEND;
960 	wr.send_flags = IB_SEND_SIGNALED;
961 
962 	ret = ib_post_send(target->qp, &wr, &bad_wr);
963 
964 	if (!ret) {
965 		++target->tx_head;
966 		--target->req_lim;
967 	}
968 
969 	return ret;
970 }
971 
972 static int srp_queuecommand(struct scsi_cmnd *scmnd,
973 			    void (*done)(struct scsi_cmnd *))
974 {
975 	struct srp_target_port *target = host_to_target(scmnd->device->host);
976 	struct srp_request *req;
977 	struct srp_iu *iu;
978 	struct srp_cmd *cmd;
979 	struct ib_device *dev;
980 	int len;
981 
982 	if (target->state == SRP_TARGET_CONNECTING)
983 		goto err;
984 
985 	if (target->state == SRP_TARGET_DEAD ||
986 	    target->state == SRP_TARGET_REMOVED) {
987 		scmnd->result = DID_BAD_TARGET << 16;
988 		done(scmnd);
989 		return 0;
990 	}
991 
992 	iu = __srp_get_tx_iu(target);
993 	if (!iu)
994 		goto err;
995 
996 	dev = target->srp_host->dev->dev;
997 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
998 				   DMA_TO_DEVICE);
999 
1000 	req = list_entry(target->free_reqs.next, struct srp_request, list);
1001 
1002 	scmnd->scsi_done     = done;
1003 	scmnd->result        = 0;
1004 	scmnd->host_scribble = (void *) (long) req->index;
1005 
1006 	cmd = iu->buf;
1007 	memset(cmd, 0, sizeof *cmd);
1008 
1009 	cmd->opcode = SRP_CMD;
1010 	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
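	/*
	 * The shift by 48 puts the LUN number into the first two bytes of
	 * the 8-byte big-endian SCSI LUN structure.
	 */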
1011 	cmd->tag    = req->index;
1012 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
1013 
1014 	req->scmnd    = scmnd;
1015 	req->cmd      = iu;
1016 	req->cmd_done = 0;
1017 	req->tsk_mgmt = NULL;
1018 
1019 	len = srp_map_data(scmnd, target, req);
1020 	if (len < 0) {
1021 		printk(KERN_ERR PFX "Failed to map data\n");
1022 		goto err;
1023 	}
1024 
1025 	if (__srp_post_recv(target)) {
1026 		printk(KERN_ERR PFX "Recv failed\n");
1027 		goto err_unmap;
1028 	}
1029 
1030 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
1031 				      DMA_TO_DEVICE);
1032 
1033 	if (__srp_post_send(target, iu, len)) {
1034 		printk(KERN_ERR PFX "Send failed\n");
1035 		goto err_unmap;
1036 	}
1037 
1038 	list_move_tail(&req->list, &target->req_queue);
1039 
1040 	return 0;
1041 
1042 err_unmap:
1043 	srp_unmap_data(scmnd, target, req);
1044 
1045 err:
1046 	return SCSI_MLQUEUE_HOST_BUSY;
1047 }
1048 
1049 static int srp_alloc_iu_bufs(struct srp_target_port *target)
1050 {
1051 	int i;
1052 
1053 	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1054 		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
1055 						  target->max_ti_iu_len,
1056 						  GFP_KERNEL, DMA_FROM_DEVICE);
1057 		if (!target->rx_ring[i])
1058 			goto err;
1059 	}
1060 
1061 	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
1062 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
1063 						  srp_max_iu_len,
1064 						  GFP_KERNEL, DMA_TO_DEVICE);
1065 		if (!target->tx_ring[i])
1066 			goto err;
1067 	}
1068 
1069 	return 0;
1070 
1071 err:
1072 	for (i = 0; i < SRP_RQ_SIZE; ++i) {
1073 		srp_free_iu(target->srp_host, target->rx_ring[i]);
1074 		target->rx_ring[i] = NULL;
1075 	}
1076 
1077 	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
1078 		srp_free_iu(target->srp_host, target->tx_ring[i]);
1079 		target->tx_ring[i] = NULL;
1080 	}
1081 
1082 	return -ENOMEM;
1083 }
1084 
1085 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
1086 			       struct ib_cm_event *event,
1087 			       struct srp_target_port *target)
1088 {
1089 	struct ib_class_port_info *cpi;
1090 	int opcode;
1091 
1092 	switch (event->param.rej_rcvd.reason) {
1093 	case IB_CM_REJ_PORT_CM_REDIRECT:
1094 		cpi = event->param.rej_rcvd.ari;
1095 		target->path.dlid = cpi->redirect_lid;
1096 		target->path.pkey = cpi->redirect_pkey;
1097 		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
1098 		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);
1099 
1100 		target->status = target->path.dlid ?
1101 			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
1102 		break;
1103 
1104 	case IB_CM_REJ_PORT_REDIRECT:
1105 		if (srp_target_is_topspin(target)) {
1106 			/*
1107 			 * Topspin/Cisco SRP gateways incorrectly send
1108 			 * reject reason code 25 when they mean 24
1109 			 * (port redirect).
1110 			 */
1111 			memcpy(target->path.dgid.raw,
1112 			       event->param.rej_rcvd.ari, 16);
1113 
1114 			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1115 			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
1116 			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));
1117 
1118 			target->status = SRP_PORT_REDIRECT;
1119 		} else {
1120 			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1121 			target->status = -ECONNRESET;
1122 		}
1123 		break;
1124 
1125 	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
1126 		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1127 		target->status = -ECONNRESET;
1128 		break;
1129 
1130 	case IB_CM_REJ_CONSUMER_DEFINED:
1131 		opcode = *(u8 *) event->private_data;
1132 		if (opcode == SRP_LOGIN_REJ) {
1133 			struct srp_login_rej *rej = event->private_data;
1134 			u32 reason = be32_to_cpu(rej->reason);
1135 
1136 			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested IT IU length too large\n");
1139 			else
1140 				printk(KERN_WARNING PFX
1141 				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
1142 		} else
1143 			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1144 			       " opcode 0x%02x\n", opcode);
1145 		target->status = -ECONNRESET;
1146 		break;
1147 
1148 	default:
1149 		printk(KERN_WARNING "  REJ reason 0x%x\n",
1150 		       event->param.rej_rcvd.reason);
1151 		target->status = -ECONNRESET;
1152 	}
1153 }
1154 
1155 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1156 {
1157 	struct srp_target_port *target = cm_id->context;
1158 	struct ib_qp_attr *qp_attr = NULL;
1159 	int attr_mask = 0;
1160 	int comp = 0;
1161 	int opcode = 0;
1162 
1163 	switch (event->event) {
1164 	case IB_CM_REQ_ERROR:
1165 		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
1166 		comp = 1;
1167 		target->status = -ECONNRESET;
1168 		break;
1169 
1170 	case IB_CM_REP_RECEIVED:
1171 		comp = 1;
1172 		opcode = *(u8 *) event->private_data;
1173 
1174 		if (opcode == SRP_LOGIN_RSP) {
1175 			struct srp_login_rsp *rsp = event->private_data;
1176 
1177 			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
1178 			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
1179 
1180 			target->scsi_host->can_queue = min(target->req_lim,
1181 							   target->scsi_host->can_queue);
1182 		} else {
1183 			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
1184 			target->status = -ECONNRESET;
1185 			break;
1186 		}
1187 
1188 		if (!target->rx_ring[0]) {
1189 			target->status = srp_alloc_iu_bufs(target);
1190 			if (target->status)
1191 				break;
1192 		}
1193 
1194 		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
1195 		if (!qp_attr) {
1196 			target->status = -ENOMEM;
1197 			break;
1198 		}
1199 
1200 		qp_attr->qp_state = IB_QPS_RTR;
1201 		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1202 		if (target->status)
1203 			break;
1204 
1205 		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
1206 		if (target->status)
1207 			break;
1208 
1209 		target->status = srp_post_recv(target);
1210 		if (target->status)
1211 			break;
1212 
1213 		qp_attr->qp_state = IB_QPS_RTS;
1214 		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
1215 		if (target->status)
1216 			break;
1217 
1218 		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
1219 		if (target->status)
1220 			break;
1221 
1222 		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
1223 		if (target->status)
1224 			break;
1225 
1226 		break;
1227 
1228 	case IB_CM_REJ_RECEIVED:
1229 		printk(KERN_DEBUG PFX "REJ received\n");
1230 		comp = 1;
1231 
1232 		srp_cm_rej_handler(cm_id, event, target);
1233 		break;
1234 
1235 	case IB_CM_DREQ_RECEIVED:
1236 		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
1237 		if (ib_send_cm_drep(cm_id, NULL, 0))
1238 			printk(KERN_ERR PFX "Sending CM DREP failed\n");
1239 		break;
1240 
1241 	case IB_CM_TIMEWAIT_EXIT:
1242 		printk(KERN_ERR PFX "connection closed\n");
1243 
1244 		comp = 1;
1245 		target->status = 0;
1246 		break;
1247 
1248 	case IB_CM_MRA_RECEIVED:
1249 	case IB_CM_DREQ_ERROR:
1250 	case IB_CM_DREP_RECEIVED:
1251 		break;
1252 
1253 	default:
1254 		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
1255 		break;
1256 	}
1257 
1258 	if (comp)
1259 		complete(&target->done);
1260 
1261 	kfree(qp_attr);
1262 
1263 	return 0;
1264 }
1265 
1266 static int srp_send_tsk_mgmt(struct srp_target_port *target,
1267 			     struct srp_request *req, u8 func)
1268 {
1269 	struct srp_iu *iu;
1270 	struct srp_tsk_mgmt *tsk_mgmt;
1271 
1272 	spin_lock_irq(target->scsi_host->host_lock);
1273 
1274 	if (target->state == SRP_TARGET_DEAD ||
1275 	    target->state == SRP_TARGET_REMOVED) {
1276 		req->scmnd->result = DID_BAD_TARGET << 16;
1277 		goto out;
1278 	}
1279 
1280 	init_completion(&req->done);
1281 
1282 	iu = __srp_get_tx_iu(target);
1283 	if (!iu)
1284 		goto out;
1285 
1286 	tsk_mgmt = iu->buf;
1287 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1288 
1289 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
1290 	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
1291 	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
1292 	tsk_mgmt->tsk_mgmt_func = func;
1293 	tsk_mgmt->task_tag 	= req->index;
1294 
1295 	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
1296 		goto out;
1297 
1298 	req->tsk_mgmt = iu;
1299 
1300 	spin_unlock_irq(target->scsi_host->host_lock);
1301 
1302 	if (!wait_for_completion_timeout(&req->done,
1303 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1304 		return -1;
1305 
1306 	return 0;
1307 
1308 out:
1309 	spin_unlock_irq(target->scsi_host->host_lock);
1310 	return -1;
1311 }
1312 
1313 static int srp_find_req(struct srp_target_port *target,
1314 			struct scsi_cmnd *scmnd,
1315 			struct srp_request **req)
1316 {
1317 	if (scmnd->host_scribble == (void *) -1L)
1318 		return -1;
1319 
1320 	*req = &target->req_ring[(long) scmnd->host_scribble];
1321 
1322 	return 0;
1323 }
1324 
1325 static int srp_abort(struct scsi_cmnd *scmnd)
1326 {
1327 	struct srp_target_port *target = host_to_target(scmnd->device->host);
1328 	struct srp_request *req;
1329 	int ret = SUCCESS;
1330 
	printk(KERN_ERR PFX "SRP abort called\n");
1332 
1333 	if (target->qp_in_error)
1334 		return FAILED;
1335 	if (srp_find_req(target, scmnd, &req))
1336 		return FAILED;
1337 	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1338 		return FAILED;
1339 
1340 	spin_lock_irq(target->scsi_host->host_lock);
1341 
1342 	if (req->cmd_done) {
1343 		srp_remove_req(target, req);
1344 		scmnd->scsi_done(scmnd);
1345 	} else if (!req->tsk_status) {
1346 		srp_remove_req(target, req);
1347 		scmnd->result = DID_ABORT << 16;
1348 	} else
1349 		ret = FAILED;
1350 
1351 	spin_unlock_irq(target->scsi_host->host_lock);
1352 
1353 	return ret;
1354 }
1355 
1356 static int srp_reset_device(struct scsi_cmnd *scmnd)
1357 {
1358 	struct srp_target_port *target = host_to_target(scmnd->device->host);
1359 	struct srp_request *req, *tmp;
1360 
	printk(KERN_ERR PFX "SRP reset_device called\n");
1362 
1363 	if (target->qp_in_error)
1364 		return FAILED;
1365 	if (srp_find_req(target, scmnd, &req))
1366 		return FAILED;
1367 	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
1368 		return FAILED;
1369 	if (req->tsk_status)
1370 		return FAILED;
1371 
1372 	spin_lock_irq(target->scsi_host->host_lock);
1373 
1374 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
1375 		if (req->scmnd->device == scmnd->device)
1376 			srp_reset_req(target, req);
1377 
1378 	spin_unlock_irq(target->scsi_host->host_lock);
1379 
1380 	return SUCCESS;
1381 }
1382 
1383 static int srp_reset_host(struct scsi_cmnd *scmnd)
1384 {
1385 	struct srp_target_port *target = host_to_target(scmnd->device->host);
1386 	int ret = FAILED;
1387 
1388 	printk(KERN_ERR PFX "SRP reset_host called\n");
1389 
1390 	if (!srp_reconnect_target(target))
1391 		ret = SUCCESS;
1392 
1393 	return ret;
1394 }
1395 
1396 static ssize_t show_id_ext(struct class_device *cdev, char *buf)
1397 {
1398 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1399 
1400 	if (target->state == SRP_TARGET_DEAD ||
1401 	    target->state == SRP_TARGET_REMOVED)
1402 		return -ENODEV;
1403 
1404 	return sprintf(buf, "0x%016llx\n",
1405 		       (unsigned long long) be64_to_cpu(target->id_ext));
1406 }
1407 
1408 static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
1409 {
1410 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1411 
1412 	if (target->state == SRP_TARGET_DEAD ||
1413 	    target->state == SRP_TARGET_REMOVED)
1414 		return -ENODEV;
1415 
1416 	return sprintf(buf, "0x%016llx\n",
1417 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
1418 }
1419 
1420 static ssize_t show_service_id(struct class_device *cdev, char *buf)
1421 {
1422 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1423 
1424 	if (target->state == SRP_TARGET_DEAD ||
1425 	    target->state == SRP_TARGET_REMOVED)
1426 		return -ENODEV;
1427 
1428 	return sprintf(buf, "0x%016llx\n",
1429 		       (unsigned long long) be64_to_cpu(target->service_id));
1430 }
1431 
1432 static ssize_t show_pkey(struct class_device *cdev, char *buf)
1433 {
1434 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1435 
1436 	if (target->state == SRP_TARGET_DEAD ||
1437 	    target->state == SRP_TARGET_REMOVED)
1438 		return -ENODEV;
1439 
1440 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
1441 }
1442 
1443 static ssize_t show_dgid(struct class_device *cdev, char *buf)
1444 {
1445 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1446 
1447 	if (target->state == SRP_TARGET_DEAD ||
1448 	    target->state == SRP_TARGET_REMOVED)
1449 		return -ENODEV;
1450 
1451 	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
1452 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
1453 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
1454 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
1455 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
1456 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
1457 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
1458 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
1459 		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
1460 }
1461 
1462 static ssize_t show_orig_dgid(struct class_device *cdev, char *buf)
1463 {
1464 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1465 
1466 	if (target->state == SRP_TARGET_DEAD ||
1467 	    target->state == SRP_TARGET_REMOVED)
1468 		return -ENODEV;
1469 
1470 	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
1471 		       be16_to_cpu(target->orig_dgid[0]),
1472 		       be16_to_cpu(target->orig_dgid[1]),
1473 		       be16_to_cpu(target->orig_dgid[2]),
1474 		       be16_to_cpu(target->orig_dgid[3]),
1475 		       be16_to_cpu(target->orig_dgid[4]),
1476 		       be16_to_cpu(target->orig_dgid[5]),
1477 		       be16_to_cpu(target->orig_dgid[6]),
1478 		       be16_to_cpu(target->orig_dgid[7]));
1479 }
1480 
1481 static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
1482 {
1483 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1484 
1485 	if (target->state == SRP_TARGET_DEAD ||
1486 	    target->state == SRP_TARGET_REMOVED)
1487 		return -ENODEV;
1488 
1489 	return sprintf(buf, "%d\n", target->zero_req_lim);
1490 }
1491 
1492 static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
1493 {
1494 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1495 
1496 	return sprintf(buf, "%d\n", target->srp_host->port);
1497 }
1498 
1499 static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
1500 {
1501 	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1502 
1503 	return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
1504 }
1505 
1506 static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
1507 static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
1508 static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
1509 static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
1510 static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
1511 static CLASS_DEVICE_ATTR(orig_dgid,	  S_IRUGO, show_orig_dgid,	 NULL);
1512 static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
1513 static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
1514 static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1515 
1516 static struct class_device_attribute *srp_host_attrs[] = {
1517 	&class_device_attr_id_ext,
1518 	&class_device_attr_ioc_guid,
1519 	&class_device_attr_service_id,
1520 	&class_device_attr_pkey,
1521 	&class_device_attr_dgid,
1522 	&class_device_attr_orig_dgid,
1523 	&class_device_attr_zero_req_lim,
1524 	&class_device_attr_local_ib_port,
1525 	&class_device_attr_local_ib_device,
1526 	NULL
1527 };
1528 
1529 static struct scsi_host_template srp_template = {
1530 	.module				= THIS_MODULE,
1531 	.name				= "InfiniBand SRP initiator",
1532 	.proc_name			= DRV_NAME,
1533 	.info				= srp_target_info,
1534 	.queuecommand			= srp_queuecommand,
1535 	.eh_abort_handler		= srp_abort,
1536 	.eh_device_reset_handler	= srp_reset_device,
1537 	.eh_host_reset_handler		= srp_reset_host,
1538 	.can_queue			= SRP_SQ_SIZE,
1539 	.this_id			= -1,
1540 	.cmd_per_lun			= SRP_SQ_SIZE,
1541 	.use_clustering			= ENABLE_CLUSTERING,
1542 	.shost_attrs			= srp_host_attrs
1543 };
1544 
1545 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
1546 {
1547 	sprintf(target->target_name, "SRP.T10:%016llX",
1548 		 (unsigned long long) be64_to_cpu(target->id_ext));
1549 
1550 	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
1551 		return -ENODEV;
1552 
1553 	spin_lock(&host->target_lock);
1554 	list_add_tail(&target->list, &host->target_list);
1555 	spin_unlock(&host->target_lock);
1556 
1557 	target->state = SRP_TARGET_LIVE;
1558 
1559 	scsi_scan_target(&target->scsi_host->shost_gendev,
1560 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
1561 
1562 	return 0;
1563 }
1564 
1565 static void srp_release_class_dev(struct class_device *class_dev)
1566 {
1567 	struct srp_host *host =
1568 		container_of(class_dev, struct srp_host, class_dev);
1569 
1570 	complete(&host->released);
1571 }
1572 
1573 static struct class srp_class = {
1574 	.name    = "infiniband_srp",
1575 	.release = srp_release_class_dev
1576 };
1577 
1578 /*
1579  * Target ports are added by writing
1580  *
1581  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1582  *     pkey=<P_Key>,service_id=<service ID>
1583  *
1584  * to the add_target sysfs attribute.
1585  */
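/*
 * For example (all GUIDs, GIDs and the device name below are illustrative
 * values only; the option string must be a single comma-separated line):
 *
 *   echo "id_ext=200400a0b81146a1,ioc_guid=0002c903000f1366,dgid=fe800000000000000002c903000f1367,pkey=ffff,service_id=200400a0b81146a1" \
 *       > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */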
1586 enum {
1587 	SRP_OPT_ERR		= 0,
1588 	SRP_OPT_ID_EXT		= 1 << 0,
1589 	SRP_OPT_IOC_GUID	= 1 << 1,
1590 	SRP_OPT_DGID		= 1 << 2,
1591 	SRP_OPT_PKEY		= 1 << 3,
1592 	SRP_OPT_SERVICE_ID	= 1 << 4,
1593 	SRP_OPT_MAX_SECT	= 1 << 5,
1594 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
1595 	SRP_OPT_IO_CLASS	= 1 << 7,
1596 	SRP_OPT_INITIATOR_EXT	= 1 << 8,
1597 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
1598 				   SRP_OPT_IOC_GUID	|
1599 				   SRP_OPT_DGID		|
1600 				   SRP_OPT_PKEY		|
1601 				   SRP_OPT_SERVICE_ID),
1602 };
1603 
1604 static match_table_t srp_opt_tokens = {
1605 	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
1606 	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
1607 	{ SRP_OPT_DGID,			"dgid=%s" 		},
1608 	{ SRP_OPT_PKEY,			"pkey=%x" 		},
1609 	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
1610 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
1611 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
1612 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
1613 	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
1614 	{ SRP_OPT_ERR,			NULL 			}
1615 };
1616 
1617 static int srp_parse_options(const char *buf, struct srp_target_port *target)
1618 {
1619 	char *options, *sep_opt;
1620 	char *p;
1621 	char dgid[3];
1622 	substring_t args[MAX_OPT_ARGS];
1623 	int opt_mask = 0;
1624 	int token;
1625 	int ret = -EINVAL;
1626 	int i;
1627 
1628 	options = kstrdup(buf, GFP_KERNEL);
1629 	if (!options)
1630 		return -ENOMEM;
1631 
1632 	sep_opt = options;
1633 	while ((p = strsep(&sep_opt, ",")) != NULL) {
1634 		if (!*p)
1635 			continue;
1636 
1637 		token = match_token(p, srp_opt_tokens, args);
1638 		opt_mask |= token;
1639 
1640 		switch (token) {
1641 		case SRP_OPT_ID_EXT:
1642 			p = match_strdup(args);
1643 			if (!p) {
1644 				ret = -ENOMEM;
1645 				goto out;
1646 			}
1647 			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1648 			kfree(p);
1649 			break;
1650 
1651 		case SRP_OPT_IOC_GUID:
1652 			p = match_strdup(args);
1653 			if (!p) {
1654 				ret = -ENOMEM;
1655 				goto out;
1656 			}
1657 			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
1658 			kfree(p);
1659 			break;
1660 
1661 		case SRP_OPT_DGID:
1662 			p = match_strdup(args);
1663 			if (!p) {
1664 				ret = -ENOMEM;
1665 				goto out;
1666 			}
1667 			if (strlen(p) != 32) {
1668 				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
1669 				kfree(p);
1670 				goto out;
1671 			}
1672 
1673 			for (i = 0; i < 16; ++i) {
1674 				strlcpy(dgid, p + i * 2, 3);
1675 				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
1676 			}
1677 			kfree(p);
1678 			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
1679 			break;
1680 
1681 		case SRP_OPT_PKEY:
1682 			if (match_hex(args, &token)) {
1683 				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
1684 				goto out;
1685 			}
1686 			target->path.pkey = cpu_to_be16(token);
1687 			break;
1688 
1689 		case SRP_OPT_SERVICE_ID:
1690 			p = match_strdup(args);
1691 			if (!p) {
1692 				ret = -ENOMEM;
1693 				goto out;
1694 			}
1695 			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
1696 			target->path.service_id = target->service_id;
1697 			kfree(p);
1698 			break;
1699 
1700 		case SRP_OPT_MAX_SECT:
1701 			if (match_int(args, &token)) {
1702 				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
1703 				goto out;
1704 			}
1705 			target->scsi_host->max_sectors = token;
1706 			break;
1707 
1708 		case SRP_OPT_MAX_CMD_PER_LUN:
1709 			if (match_int(args, &token)) {
1710 				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
1711 				goto out;
1712 			}
1713 			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
1714 			break;
1715 
1716 		case SRP_OPT_IO_CLASS:
1717 			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
1719 				goto out;
1720 			}
1721 			if (token != SRP_REV10_IB_IO_CLASS &&
1722 			    token != SRP_REV16A_IB_IO_CLASS) {
1723 				printk(KERN_WARNING PFX "unknown IO class parameter value"
1724 				       " %x specified (use %x or %x).\n",
1725 				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
1726 				goto out;
1727 			}
1728 			target->io_class = token;
1729 			break;
1730 
1731 		case SRP_OPT_INITIATOR_EXT:
1732 			p = match_strdup(args);
1733 			if (!p) {
1734 				ret = -ENOMEM;
1735 				goto out;
1736 			}
1737 			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1738 			kfree(p);
1739 			break;
1740 
1741 		default:
1742 			printk(KERN_WARNING PFX "unknown parameter or missing value "
1743 			       "'%s' in target creation request\n", p);
1744 			goto out;
1745 		}
1746 	}
1747 
1748 	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
1749 		ret = 0;
1750 	else
1751 		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
1752 			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
1753 			    !(srp_opt_tokens[i].token & opt_mask))
1754 				printk(KERN_WARNING PFX "target creation request is "
1755 				       "missing parameter '%s'\n",
1756 				       srp_opt_tokens[i].pattern);
1757 
1758 out:
1759 	kfree(options);
1760 	return ret;
1761 }
1762 
1763 static ssize_t srp_create_target(struct class_device *class_dev,
1764 				 const char *buf, size_t count)
1765 {
1766 	struct srp_host *host =
1767 		container_of(class_dev, struct srp_host, class_dev);
1768 	struct Scsi_Host *target_host;
1769 	struct srp_target_port *target;
1770 	int ret;
1771 	int i;
1772 
1773 	target_host = scsi_host_alloc(&srp_template,
1774 				      sizeof (struct srp_target_port));
1775 	if (!target_host)
1776 		return -ENOMEM;
1777 
1778 	target_host->max_lun     = SRP_MAX_LUN;
1779 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
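	/*
	 * The pointer cast above is just a way to take sizeof of the cdb[]
	 * member (16 bytes) without needing an instance of struct srp_cmd.
	 */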
1780 
1781 	target = host_to_target(target_host);
1782 
1783 	target->io_class   = SRP_REV16A_IB_IO_CLASS;
1784 	target->scsi_host  = target_host;
1785 	target->srp_host   = host;
1786 
1787 	INIT_LIST_HEAD(&target->free_reqs);
1788 	INIT_LIST_HEAD(&target->req_queue);
1789 	for (i = 0; i < SRP_SQ_SIZE; ++i) {
1790 		target->req_ring[i].index = i;
1791 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
1792 	}
1793 
1794 	ret = srp_parse_options(buf, target);
1795 	if (ret)
1796 		goto err;
1797 
1798 	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);
1799 
1800 	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
1801 	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
1802 	       (unsigned long long) be64_to_cpu(target->id_ext),
1803 	       (unsigned long long) be64_to_cpu(target->ioc_guid),
1804 	       be16_to_cpu(target->path.pkey),
1805 	       (unsigned long long) be64_to_cpu(target->service_id),
1806 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
1807 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
1808 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
1809 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
1810 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
1811 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
1812 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
1813 	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));
1814 
1815 	ret = srp_create_target_ib(target);
1816 	if (ret)
1817 		goto err;
1818 
1819 	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
1820 	if (IS_ERR(target->cm_id)) {
1821 		ret = PTR_ERR(target->cm_id);
1822 		goto err_free;
1823 	}
1824 
1825 	target->qp_in_error = 0;
1826 	ret = srp_connect_target(target);
1827 	if (ret) {
1828 		printk(KERN_ERR PFX "Connection failed\n");
1829 		goto err_cm_id;
1830 	}
1831 
1832 	ret = srp_add_target(host, target);
1833 	if (ret)
1834 		goto err_disconnect;
1835 
1836 	return count;
1837 
1838 err_disconnect:
1839 	srp_disconnect_target(target);
1840 
1841 err_cm_id:
1842 	ib_destroy_cm_id(target->cm_id);
1843 
1844 err_free:
1845 	srp_free_target_ib(target);
1846 
1847 err:
1848 	scsi_host_put(target_host);
1849 
1850 	return ret;
1851 }
1852 
1853 static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
1854 
1855 static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
1856 {
1857 	struct srp_host *host =
1858 		container_of(class_dev, struct srp_host, class_dev);
1859 
1860 	return sprintf(buf, "%s\n", host->dev->dev->name);
1861 }
1862 
1863 static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1864 
1865 static ssize_t show_port(struct class_device *class_dev, char *buf)
1866 {
1867 	struct srp_host *host =
1868 		container_of(class_dev, struct srp_host, class_dev);
1869 
1870 	return sprintf(buf, "%d\n", host->port);
1871 }
1872 
1873 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
1874 
1875 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
1876 {
1877 	struct srp_host *host;
1878 
1879 	host = kzalloc(sizeof *host, GFP_KERNEL);
1880 	if (!host)
1881 		return NULL;
1882 
1883 	INIT_LIST_HEAD(&host->target_list);
1884 	spin_lock_init(&host->target_lock);
1885 	init_completion(&host->released);
1886 	host->dev  = device;
1887 	host->port = port;
1888 
1889 	host->class_dev.class = &srp_class;
1890 	host->class_dev.dev   = device->dev->dma_device;
1891 	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
1892 		 device->dev->name, port);
1893 
1894 	if (class_device_register(&host->class_dev))
1895 		goto free_host;
1896 	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
1897 		goto err_class;
1898 	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
1899 		goto err_class;
1900 	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
1901 		goto err_class;
1902 
1903 	return host;
1904 
1905 err_class:
1906 	class_device_unregister(&host->class_dev);
1907 
1908 free_host:
1909 	kfree(host);
1910 
1911 	return NULL;
1912 }
1913 
1914 static void srp_add_one(struct ib_device *device)
1915 {
1916 	struct srp_device *srp_dev;
1917 	struct ib_device_attr *dev_attr;
1918 	struct ib_fmr_pool_param fmr_param;
1919 	struct srp_host *host;
1920 	int s, e, p;
1921 
1922 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
1923 	if (!dev_attr)
1924 		return;
1925 
1926 	if (ib_query_device(device, dev_attr)) {
1927 		printk(KERN_WARNING PFX "Query device failed for %s\n",
1928 		       device->name);
1929 		goto free_attr;
1930 	}
1931 
1932 	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
1933 	if (!srp_dev)
1934 		goto free_attr;
1935 
1936 	/*
1937 	 * Use the smallest page size supported by the HCA, down to a
1938 	 * minimum of 512 bytes (which is the smallest sector that a
1939 	 * SCSI command will ever carry).
1940 	 */
1941 	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
1942 	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
1943 	srp_dev->fmr_page_mask  = ~((u64) srp_dev->fmr_page_size - 1);
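	/*
	 * For example, an HCA that only supports 4K pages reports
	 * page_size_cap = 0x1000, giving fmr_page_shift = 12,
	 * fmr_page_size = 4096 and fmr_page_mask = ~0xfffULL.
	 */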
1944 
1945 	INIT_LIST_HEAD(&srp_dev->dev_list);
1946 
1947 	srp_dev->dev = device;
1948 	srp_dev->pd  = ib_alloc_pd(device);
1949 	if (IS_ERR(srp_dev->pd))
1950 		goto free_dev;
1951 
1952 	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
1953 				    IB_ACCESS_LOCAL_WRITE |
1954 				    IB_ACCESS_REMOTE_READ |
1955 				    IB_ACCESS_REMOTE_WRITE);
1956 	if (IS_ERR(srp_dev->mr))
1957 		goto err_pd;
1958 
1959 	memset(&fmr_param, 0, sizeof fmr_param);
1960 	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
1961 	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
1962 	fmr_param.cache		    = 1;
1963 	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
1964 	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
1965 	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
1966 				       IB_ACCESS_REMOTE_WRITE |
1967 				       IB_ACCESS_REMOTE_READ);
1968 
1969 	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
1970 	if (IS_ERR(srp_dev->fmr_pool))
1971 		srp_dev->fmr_pool = NULL;
1972 
1973 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
1974 		s = 0;
1975 		e = 0;
1976 	} else {
1977 		s = 1;
1978 		e = device->phys_port_cnt;
1979 	}
1980 
1981 	for (p = s; p <= e; ++p) {
1982 		host = srp_add_port(srp_dev, p);
1983 		if (host)
1984 			list_add_tail(&host->list, &srp_dev->dev_list);
1985 	}
1986 
1987 	ib_set_client_data(device, &srp_client, srp_dev);
1988 
1989 	goto free_attr;
1990 
1991 err_pd:
1992 	ib_dealloc_pd(srp_dev->pd);
1993 
1994 free_dev:
1995 	kfree(srp_dev);
1996 
1997 free_attr:
1998 	kfree(dev_attr);
1999 }
2000 
2001 static void srp_remove_one(struct ib_device *device)
2002 {
2003 	struct srp_device *srp_dev;
2004 	struct srp_host *host, *tmp_host;
2005 	LIST_HEAD(target_list);
2006 	struct srp_target_port *target, *tmp_target;
2007 
2008 	srp_dev = ib_get_client_data(device, &srp_client);
2009 
2010 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
2011 		class_device_unregister(&host->class_dev);
2012 		/*
2013 		 * Wait for the sysfs entry to go away, so that no new
2014 		 * target ports can be created.
2015 		 */
2016 		wait_for_completion(&host->released);
2017 
2018 		/*
2019 		 * Mark all target ports as removed, so we stop queueing
2020 		 * commands and don't try to reconnect.
2021 		 */
2022 		spin_lock(&host->target_lock);
2023 		list_for_each_entry(target, &host->target_list, list) {
2024 			spin_lock_irq(target->scsi_host->host_lock);
2025 			target->state = SRP_TARGET_REMOVED;
2026 			spin_unlock_irq(target->scsi_host->host_lock);
2027 		}
2028 		spin_unlock(&host->target_lock);
2029 
2030 		/*
2031 		 * Wait for any reconnection tasks that may have
2032 		 * started before we marked our target ports as
2033 		 * removed, and any target port removal tasks.
2034 		 */
2035 		flush_scheduled_work();
2036 
2037 		list_for_each_entry_safe(target, tmp_target,
2038 					 &host->target_list, list) {
2039 			scsi_remove_host(target->scsi_host);
2040 			srp_disconnect_target(target);
2041 			ib_destroy_cm_id(target->cm_id);
2042 			srp_free_target_ib(target);
2043 			scsi_host_put(target->scsi_host);
2044 		}
2045 
2046 		kfree(host);
2047 	}
2048 
2049 	if (srp_dev->fmr_pool)
2050 		ib_destroy_fmr_pool(srp_dev->fmr_pool);
2051 	ib_dereg_mr(srp_dev->mr);
2052 	ib_dealloc_pd(srp_dev->pd);
2053 
2054 	kfree(srp_dev);
2055 }
2056 
2057 static int __init srp_init_module(void)
2058 {
2059 	int ret;
2060 
2061 	srp_template.sg_tablesize = srp_sg_tablesize;
2062 	srp_max_iu_len = (sizeof (struct srp_cmd) +
2063 			  sizeof (struct srp_indirect_buf) +
2064 			  srp_sg_tablesize * 16);
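	/*
	 * The 16 above is the size of one struct srp_direct_buf: the send
	 * IU must hold an SRP_CMD plus an indirect descriptor table with
	 * one entry per scatter/gather element.
	 */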
2065 
2066 	ret = class_register(&srp_class);
2067 	if (ret) {
2068 		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
2069 		return ret;
2070 	}
2071 
2072 	ib_sa_register_client(&srp_sa_client);
2073 
2074 	ret = ib_register_client(&srp_client);
2075 	if (ret) {
2076 		printk(KERN_ERR PFX "couldn't register IB client\n");
2077 		ib_sa_unregister_client(&srp_sa_client);
2078 		class_unregister(&srp_class);
2079 		return ret;
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 static void __exit srp_cleanup_module(void)
2086 {
2087 	ib_unregister_client(&srp_client);
2088 	ib_sa_unregister_client(&srp_sa_client);
2089 	class_unregister(&srp_class);
2090 }
2091 
2092 module_init(srp_init_module);
2093 module_exit(srp_cleanup_module);
2094