xref: /linux/drivers/infiniband/ulp/srpt/ib_srpt.c (revision 080edf75d337d35faa6fc3df99342b10d2848d16)
1 /*
2  * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
3  * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  */
34 
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/atomic.h>
44 #include <scsi/scsi_proto.h>
45 #include <scsi/scsi_tcq.h>
46 #include <target/target_core_base.h>
47 #include <target/target_core_fabric.h>
48 #include "ib_srpt.h"
49 
50 /* Name of this kernel module. */
51 #define DRV_NAME		"ib_srpt"
52 #define DRV_VERSION		"2.0.0"
53 #define DRV_RELDATE		"2011-02-14"
54 
55 #define SRPT_ID_STRING	"Linux SRP target"
56 
57 #undef pr_fmt
58 #define pr_fmt(fmt) DRV_NAME " " fmt
59 
60 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
61 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
62 		   "v" DRV_VERSION " (" DRV_RELDATE ")");
63 MODULE_LICENSE("Dual BSD/GPL");
64 
65 /*
66  * Global Variables
67  */
68 
69 static u64 srpt_service_guid;
70 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
71 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
72 
73 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
74 module_param(srp_max_req_size, int, 0444);
75 MODULE_PARM_DESC(srp_max_req_size,
76 		 "Maximum size of SRP request messages in bytes.");
77 
78 static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
79 module_param(srpt_srq_size, int, 0444);
80 MODULE_PARM_DESC(srpt_srq_size,
81 		 "Shared receive queue (SRQ) size.");
82 
83 static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
84 {
85 	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
86 }
87 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
88 		  0444);
89 MODULE_PARM_DESC(srpt_service_guid,
90 		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
91 		 " instead of using the node_guid of the first HCA.");
92 
93 static struct ib_client srpt_client;
94 static void srpt_release_cmd(struct se_cmd *se_cmd);
95 static void srpt_free_ch(struct kref *kref);
96 static int srpt_queue_status(struct se_cmd *cmd);
97 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
98 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
99 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
100 
101 /*
102  * The only allowed channel state changes are those that change the channel
103  * state into a state with a higher numerical value. Hence the new > prev test.
104  */
105 static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
106 {
107 	unsigned long flags;
108 	enum rdma_ch_state prev;
109 	bool changed = false;
110 
111 	spin_lock_irqsave(&ch->spinlock, flags);
112 	prev = ch->state;
113 	if (new > prev) {
114 		ch->state = new;
115 		changed = true;
116 	}
117 	spin_unlock_irqrestore(&ch->spinlock, flags);
118 
119 	return changed;
120 }
121 
122 /**
123  * srpt_event_handler() - Asynchronous IB event callback function.
124  *
125  * Callback function called by the InfiniBand core when an asynchronous IB
126  * event occurs. This callback may occur in interrupt context. See also
127  * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
128  * Architecture Specification.
129  */
130 static void srpt_event_handler(struct ib_event_handler *handler,
131 			       struct ib_event *event)
132 {
133 	struct srpt_device *sdev;
134 	struct srpt_port *sport;
135 
136 	sdev = ib_get_client_data(event->device, &srpt_client);
137 	if (!sdev || sdev->device != event->device)
138 		return;
139 
140 	pr_debug("ASYNC event= %d on device= %s\n", event->event,
141 		 sdev->device->name);
142 
143 	switch (event->event) {
144 	case IB_EVENT_PORT_ERR:
145 		if (event->element.port_num <= sdev->device->phys_port_cnt) {
146 			sport = &sdev->port[event->element.port_num - 1];
147 			sport->lid = 0;
148 			sport->sm_lid = 0;
149 		}
150 		break;
151 	case IB_EVENT_PORT_ACTIVE:
152 	case IB_EVENT_LID_CHANGE:
153 	case IB_EVENT_PKEY_CHANGE:
154 	case IB_EVENT_SM_CHANGE:
155 	case IB_EVENT_CLIENT_REREGISTER:
156 	case IB_EVENT_GID_CHANGE:
157 		/* Refresh port data asynchronously. */
158 		if (event->element.port_num <= sdev->device->phys_port_cnt) {
159 			sport = &sdev->port[event->element.port_num - 1];
160 			if (!sport->lid && !sport->sm_lid)
161 				schedule_work(&sport->work);
162 		}
163 		break;
164 	default:
165 		pr_err("received unrecognized IB event %d\n",
166 		       event->event);
167 		break;
168 	}
169 }
170 
171 /**
172  * srpt_srq_event() - SRQ event callback function.
173  */
174 static void srpt_srq_event(struct ib_event *event, void *ctx)
175 {
176 	pr_info("SRQ event %d\n", event->event);
177 }
178 
179 static const char *get_ch_state_name(enum rdma_ch_state s)
180 {
181 	switch (s) {
182 	case CH_CONNECTING:
183 		return "connecting";
184 	case CH_LIVE:
185 		return "live";
186 	case CH_DISCONNECTING:
187 		return "disconnecting";
188 	case CH_DRAINING:
189 		return "draining";
190 	case CH_DISCONNECTED:
191 		return "disconnected";
192 	}
193 	return "???";
194 }
195 
196 /**
197  * srpt_qp_event() - QP event callback function.
198  */
199 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
200 {
201 	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
202 		 event->event, ch->cm_id, ch->sess_name, ch->state);
203 
204 	switch (event->event) {
205 	case IB_EVENT_COMM_EST:
206 		ib_cm_notify(ch->cm_id, event->event);
207 		break;
208 	case IB_EVENT_QP_LAST_WQE_REACHED:
209 		pr_debug("%s-%d, state %s: received Last WQE event.\n",
210 			 ch->sess_name, ch->qp->qp_num,
211 			 get_ch_state_name(ch->state));
212 		break;
213 	default:
214 		pr_err("received unrecognized IB QP event %d\n", event->event);
215 		break;
216 	}
217 }
218 
219 /**
220  * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
221  *
222  * @slot: one-based slot number.
223  * @value: four-bit value.
224  *
225  * Copies the lowest four bits of value in element slot of the array of four
226  * bit elements called c_list (controller list). The index slot is one-based.
227  */
228 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
229 {
230 	u16 id;
231 	u8 tmp;
232 
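	/*
	 * Two 4-bit controller entries are packed into each byte of c_list:
	 * odd slots occupy the high nibble, even slots the low nibble.
	 */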
233 	id = (slot - 1) / 2;
234 	if (slot & 0x1) {
235 		tmp = c_list[id] & 0xf;
236 		c_list[id] = (value << 4) | tmp;
237 	} else {
238 		tmp = c_list[id] & 0xf0;
239 		c_list[id] = (value & 0xf) | tmp;
240 	}
241 }
242 
243 /**
244  * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
245  *
246  * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
247  * Specification.
248  */
249 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
250 {
251 	struct ib_class_port_info *cif;
252 
253 	cif = (struct ib_class_port_info *)mad->data;
254 	memset(cif, 0, sizeof(*cif));
255 	cif->base_version = 1;
256 	cif->class_version = 1;
257 	cif->resp_time_value = 20;
258 
259 	mad->mad_hdr.status = 0;
260 }
261 
262 /**
263  * srpt_get_iou() - Write IOUnitInfo to a management datagram.
264  *
265  * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
266  * Specification. See also section B.7, table B.6 in the SRP r16a document.
267  */
268 static void srpt_get_iou(struct ib_dm_mad *mad)
269 {
270 	struct ib_dm_iou_info *ioui;
271 	u8 slot;
272 	int i;
273 
274 	ioui = (struct ib_dm_iou_info *)mad->data;
275 	ioui->change_id = cpu_to_be16(1);
276 	ioui->max_controllers = 16;
277 
278 	/* set present for slot 1 and empty for the rest */
279 	srpt_set_ioc(ioui->controller_list, 1, 1);
280 	for (i = 1, slot = 2; i < 16; i++, slot++)
281 		srpt_set_ioc(ioui->controller_list, slot, 0);
282 
283 	mad->mad_hdr.status = 0;
284 }
285 
286 /**
287  * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
288  *
289  * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
290  * Architecture Specification. See also section B.7, table B.7 in the SRP
291  * r16a document.
292  */
293 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
294 			 struct ib_dm_mad *mad)
295 {
296 	struct srpt_device *sdev = sport->sdev;
297 	struct ib_dm_ioc_profile *iocp;
298 
299 	iocp = (struct ib_dm_ioc_profile *)mad->data;
300 
301 	if (!slot || slot > 16) {
302 		mad->mad_hdr.status
303 			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
304 		return;
305 	}
306 
307 	if (slot > 2) {
308 		mad->mad_hdr.status
309 			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
310 		return;
311 	}
312 
313 	memset(iocp, 0, sizeof(*iocp));
314 	strcpy(iocp->id_string, SRPT_ID_STRING);
315 	iocp->guid = cpu_to_be64(srpt_service_guid);
316 	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
317 	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
318 	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
319 	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
320 	iocp->subsys_device_id = 0x0;
321 	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
322 	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
323 	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
324 	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
325 	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
326 	iocp->rdma_read_depth = 4;
327 	iocp->send_size = cpu_to_be32(srp_max_req_size);
328 	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
329 					  1U << 24));
330 	iocp->num_svc_entries = 1;
331 	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
332 		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
333 
334 	mad->mad_hdr.status = 0;
335 }
336 
337 /**
338  * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
339  *
340  * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
341  * Specification. See also section B.7, table B.8 in the SRP r16a document.
342  */
343 static void srpt_get_svc_entries(u64 ioc_guid,
344 				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
345 {
346 	struct ib_dm_svc_entries *svc_entries;
347 
348 	WARN_ON(!ioc_guid);
349 
350 	if (!slot || slot > 16) {
351 		mad->mad_hdr.status
352 			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
353 		return;
354 	}
355 
356 	if (slot > 2 || lo > hi || hi > 1) {
357 		mad->mad_hdr.status
358 			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
359 		return;
360 	}
361 
362 	svc_entries = (struct ib_dm_svc_entries *)mad->data;
363 	memset(svc_entries, 0, sizeof(*svc_entries));
364 	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
365 	snprintf(svc_entries->service_entries[0].name,
366 		 sizeof(svc_entries->service_entries[0].name),
367 		 "%s%016llx",
368 		 SRP_SERVICE_NAME_PREFIX,
369 		 ioc_guid);
370 
371 	mad->mad_hdr.status = 0;
372 }
373 
374 /**
375  * srpt_mgmt_method_get() - Process a received management datagram.
376  * @sp:      source port through which the MAD has been received.
377  * @rq_mad:  received MAD.
378  * @rsp_mad: response MAD.
379  */
380 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
381 				 struct ib_dm_mad *rsp_mad)
382 {
383 	u16 attr_id;
384 	u32 slot;
385 	u8 hi, lo;
386 
387 	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
388 	switch (attr_id) {
389 	case DM_ATTR_CLASS_PORT_INFO:
390 		srpt_get_class_port_info(rsp_mad);
391 		break;
392 	case DM_ATTR_IOU_INFO:
393 		srpt_get_iou(rsp_mad);
394 		break;
395 	case DM_ATTR_IOC_PROFILE:
396 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
397 		srpt_get_ioc(sp, slot, rsp_mad);
398 		break;
399 	case DM_ATTR_SVC_ENTRIES:
400 		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
401 		hi = (u8) ((slot >> 8) & 0xff);
402 		lo = (u8) (slot & 0xff);
403 		slot = (u16) ((slot >> 16) & 0xffff);
404 		srpt_get_svc_entries(srpt_service_guid,
405 				     slot, hi, lo, rsp_mad);
406 		break;
407 	default:
408 		rsp_mad->mad_hdr.status =
409 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
410 		break;
411 	}
412 }
413 
414 /**
415  * srpt_mad_send_handler() - MAD send completion callback function.
416  */
417 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
418 				  struct ib_mad_send_wc *mad_wc)
419 {
420 	ib_destroy_ah(mad_wc->send_buf->ah);
421 	ib_free_send_mad(mad_wc->send_buf);
422 }
423 
424 /**
425  * srpt_mad_recv_handler() - MAD reception callback function.
426  */
427 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
428 				  struct ib_mad_send_buf *send_buf,
429 				  struct ib_mad_recv_wc *mad_wc)
430 {
431 	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
432 	struct ib_ah *ah;
433 	struct ib_mad_send_buf *rsp;
434 	struct ib_dm_mad *dm_mad;
435 
436 	if (!mad_wc || !mad_wc->recv_buf.mad)
437 		return;
438 
439 	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
440 				  mad_wc->recv_buf.grh, mad_agent->port_num);
441 	if (IS_ERR(ah))
442 		goto err;
443 
444 	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
445 
446 	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
447 				 mad_wc->wc->pkey_index, 0,
448 				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
449 				 GFP_KERNEL,
450 				 IB_MGMT_BASE_VERSION);
451 	if (IS_ERR(rsp))
452 		goto err_rsp;
453 
454 	rsp->ah = ah;
455 
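	/*
	 * Start from a copy of the received MAD and turn it into a GetResp;
	 * the method handlers below fill in the attribute payload.
	 */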
456 	dm_mad = rsp->mad;
457 	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
458 	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
459 	dm_mad->mad_hdr.status = 0;
460 
461 	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
462 	case IB_MGMT_METHOD_GET:
463 		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
464 		break;
465 	case IB_MGMT_METHOD_SET:
466 		dm_mad->mad_hdr.status =
467 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
468 		break;
469 	default:
470 		dm_mad->mad_hdr.status =
471 		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
472 		break;
473 	}
474 
475 	if (!ib_post_send_mad(rsp, NULL)) {
476 		ib_free_recv_mad(mad_wc);
477 		/* will destroy_ah & free_send_mad in send completion */
478 		return;
479 	}
480 
481 	ib_free_send_mad(rsp);
482 
483 err_rsp:
484 	ib_destroy_ah(ah);
485 err:
486 	ib_free_recv_mad(mad_wc);
487 }
488 
489 /**
490  * srpt_refresh_port() - Configure an HCA port.
491  *
492  * Enable InfiniBand management datagram processing, update the cached sm_lid,
493  * lid and gid values, and register a callback function for processing MADs
494  * on the specified port.
495  *
496  * Note: It is safe to call this function more than once for the same port.
497  */
498 static int srpt_refresh_port(struct srpt_port *sport)
499 {
500 	struct ib_mad_reg_req reg_req;
501 	struct ib_port_modify port_modify;
502 	struct ib_port_attr port_attr;
503 	int ret;
504 
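	/*
	 * Advertise device management support in the port capability mask so
	 * that initiators can query this target via device management MADs.
	 */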
505 	memset(&port_modify, 0, sizeof(port_modify));
506 	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
507 	port_modify.clr_port_cap_mask = 0;
508 
509 	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
510 	if (ret)
511 		goto err_mod_port;
512 
513 	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
514 	if (ret)
515 		goto err_query_port;
516 
517 	sport->sm_lid = port_attr.sm_lid;
518 	sport->lid = port_attr.lid;
519 
520 	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
521 			   NULL);
522 	if (ret)
523 		goto err_query_port;
524 
525 	if (!sport->mad_agent) {
526 		memset(&reg_req, 0, sizeof(reg_req));
527 		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
528 		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
529 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
530 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
531 
532 		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
533 							 sport->port,
534 							 IB_QPT_GSI,
535 							 &reg_req, 0,
536 							 srpt_mad_send_handler,
537 							 srpt_mad_recv_handler,
538 							 sport, 0);
539 		if (IS_ERR(sport->mad_agent)) {
540 			ret = PTR_ERR(sport->mad_agent);
541 			sport->mad_agent = NULL;
542 			goto err_query_port;
543 		}
544 	}
545 
546 	return 0;
547 
548 err_query_port:
549 
550 	port_modify.set_port_cap_mask = 0;
551 	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
552 	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
553 
554 err_mod_port:
555 
556 	return ret;
557 }
558 
559 /**
560  * srpt_unregister_mad_agent() - Unregister MAD callback functions.
561  *
562  * Note: It is safe to call this function more than once for the same device.
563  */
564 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
565 {
566 	struct ib_port_modify port_modify = {
567 		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
568 	};
569 	struct srpt_port *sport;
570 	int i;
571 
572 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
573 		sport = &sdev->port[i - 1];
574 		WARN_ON(sport->port != i);
575 		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
576 			pr_err("disabling MAD processing failed.\n");
577 		if (sport->mad_agent) {
578 			ib_unregister_mad_agent(sport->mad_agent);
579 			sport->mad_agent = NULL;
580 		}
581 	}
582 }
583 
584 /**
585  * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
586  */
587 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
588 					   int ioctx_size, int dma_size,
589 					   enum dma_data_direction dir)
590 {
591 	struct srpt_ioctx *ioctx;
592 
593 	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
594 	if (!ioctx)
595 		goto err;
596 
597 	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
598 	if (!ioctx->buf)
599 		goto err_free_ioctx;
600 
601 	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
602 	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
603 		goto err_free_buf;
604 
605 	return ioctx;
606 
607 err_free_buf:
608 	kfree(ioctx->buf);
609 err_free_ioctx:
610 	kfree(ioctx);
611 err:
612 	return NULL;
613 }
614 
615 /**
616  * srpt_free_ioctx() - Free an SRPT I/O context structure.
617  */
618 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
619 			    int dma_size, enum dma_data_direction dir)
620 {
621 	if (!ioctx)
622 		return;
623 
624 	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
625 	kfree(ioctx->buf);
626 	kfree(ioctx);
627 }
628 
629 /**
630  * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
631  * @sdev:       Device to allocate the I/O context ring for.
632  * @ring_size:  Number of elements in the I/O context ring.
633  * @ioctx_size: I/O context size.
634  * @dma_size:   DMA buffer size.
635  * @dir:        DMA data direction.
636  */
637 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
638 				int ring_size, int ioctx_size,
639 				int dma_size, enum dma_data_direction dir)
640 {
641 	struct srpt_ioctx **ring;
642 	int i;
643 
644 	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
645 		&& ioctx_size != sizeof(struct srpt_send_ioctx));
646 
647 	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
648 	if (!ring)
649 		goto out;
650 	for (i = 0; i < ring_size; ++i) {
651 		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
652 		if (!ring[i])
653 			goto err;
654 		ring[i]->index = i;
655 	}
656 	goto out;
657 
658 err:
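	/* Unwind: free the I/O contexts allocated so far, in reverse order. */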
659 	while (--i >= 0)
660 		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
661 	kfree(ring);
662 	ring = NULL;
663 out:
664 	return ring;
665 }
666 
667 /**
668  * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
669  */
670 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
671 				 struct srpt_device *sdev, int ring_size,
672 				 int dma_size, enum dma_data_direction dir)
673 {
674 	int i;
675 
676 	for (i = 0; i < ring_size; ++i)
677 		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
678 	kfree(ioctx_ring);
679 }
680 
681 /**
682  * srpt_get_cmd_state() - Get the state of a SCSI command.
683  */
684 static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
685 {
686 	enum srpt_command_state state;
687 	unsigned long flags;
688 
689 	BUG_ON(!ioctx);
690 
691 	spin_lock_irqsave(&ioctx->spinlock, flags);
692 	state = ioctx->state;
693 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
694 	return state;
695 }
696 
697 /**
698  * srpt_set_cmd_state() - Set the state of a SCSI command.
699  *
700  * Does not modify the state of commands that have already reached the DONE
701  * state. Returns the previous command state.
702  */
703 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
704 						  enum srpt_command_state new)
705 {
706 	enum srpt_command_state previous;
707 	unsigned long flags;
708 
709 	BUG_ON(!ioctx);
710 
711 	spin_lock_irqsave(&ioctx->spinlock, flags);
712 	previous = ioctx->state;
713 	if (previous != SRPT_STATE_DONE)
714 		ioctx->state = new;
715 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
716 
717 	return previous;
718 }
719 
720 /**
721  * srpt_test_and_set_cmd_state() - Test and set the state of a command.
722  *
723  * Returns true if and only if the previous command state was equal to 'old'.
724  */
725 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
726 					enum srpt_command_state old,
727 					enum srpt_command_state new)
728 {
729 	enum srpt_command_state previous;
730 	unsigned long flags;
731 
732 	WARN_ON(!ioctx);
733 	WARN_ON(old == SRPT_STATE_DONE);
734 	WARN_ON(new == SRPT_STATE_NEW);
735 
736 	spin_lock_irqsave(&ioctx->spinlock, flags);
737 	previous = ioctx->state;
738 	if (previous == old)
739 		ioctx->state = new;
740 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
741 	return previous == old;
742 }
743 
744 /**
745  * srpt_post_recv() - Post an IB receive request.
746  */
747 static int srpt_post_recv(struct srpt_device *sdev,
748 			  struct srpt_recv_ioctx *ioctx)
749 {
750 	struct ib_sge list;
751 	struct ib_recv_wr wr, *bad_wr;
752 
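	/*
	 * Receive buffers are posted on the device-wide shared receive queue
	 * (SRQ) rather than on a per-channel receive queue.
	 */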
753 	BUG_ON(!sdev);
754 	list.addr = ioctx->ioctx.dma;
755 	list.length = srp_max_req_size;
756 	list.lkey = sdev->pd->local_dma_lkey;
757 
758 	ioctx->ioctx.cqe.done = srpt_recv_done;
759 	wr.wr_cqe = &ioctx->ioctx.cqe;
760 	wr.next = NULL;
761 	wr.sg_list = &list;
762 	wr.num_sge = 1;
763 
764 	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
765 }
766 
767 /**
768  * srpt_post_send() - Post an IB send request.
769  *
770  * Returns zero upon success and a non-zero value upon failure.
771  */
772 static int srpt_post_send(struct srpt_rdma_ch *ch,
773 			  struct srpt_send_ioctx *ioctx, int len)
774 {
775 	struct ib_sge list;
776 	struct ib_send_wr wr, *bad_wr;
777 	struct srpt_device *sdev = ch->sport->sdev;
778 	int ret;
779 
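	/*
	 * Account for the request-limit credit that this response returns to
	 * the initiator and reserve a send queue slot; both are rolled back
	 * below if posting the send fails.
	 */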
780 	atomic_inc(&ch->req_lim);
781 
782 	ret = -ENOMEM;
783 	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
784 		pr_warn("IB send queue full (needed 1)\n");
785 		goto out;
786 	}
787 
788 	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
789 				      DMA_TO_DEVICE);
790 
791 	list.addr = ioctx->ioctx.dma;
792 	list.length = len;
793 	list.lkey = sdev->pd->local_dma_lkey;
794 
795 	ioctx->ioctx.cqe.done = srpt_send_done;
796 	wr.next = NULL;
797 	wr.wr_cqe = &ioctx->ioctx.cqe;
798 	wr.sg_list = &list;
799 	wr.num_sge = 1;
800 	wr.opcode = IB_WR_SEND;
801 	wr.send_flags = IB_SEND_SIGNALED;
802 
803 	ret = ib_post_send(ch->qp, &wr, &bad_wr);
804 
805 out:
806 	if (ret < 0) {
807 		atomic_inc(&ch->sq_wr_avail);
808 		atomic_dec(&ch->req_lim);
809 	}
810 	return ret;
811 }
812 
813 /**
814  * srpt_zerolength_write() - Perform a zero-length RDMA write.
815  *
816  * A quote from the InfiniBand specification: C9-88: For an HCA responder
817  * using Reliable Connection service, for each zero-length RDMA READ or WRITE
818  * request, the R_Key shall not be validated, even if the request includes
819  * Immediate data.
820  */
821 static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
822 {
823 	struct ib_send_wr wr, *bad_wr;
824 
825 	memset(&wr, 0, sizeof(wr));
826 	wr.opcode = IB_WR_RDMA_WRITE;
827 	wr.wr_cqe = &ch->zw_cqe;
828 	wr.send_flags = IB_SEND_SIGNALED;
829 	return ib_post_send(ch->qp, &wr, &bad_wr);
830 }
831 
832 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
833 {
834 	struct srpt_rdma_ch *ch = cq->cq_context;
835 
836 	if (wc->status == IB_WC_SUCCESS) {
837 		srpt_process_wait_list(ch);
838 	} else {
839 		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
840 			schedule_work(&ch->release_work);
841 		else
842 			WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
843 	}
844 }
845 
846 /**
847  * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
848  * @ioctx: Pointer to the I/O context associated with the request.
849  * @srp_cmd: Pointer to the SRP_CMD request data.
850  * @dir: Pointer to the variable to which the transfer direction will be
851  *   written.
852  * @data_len: Pointer to the variable to which the total data length of all
853  *   descriptors in the SRP_CMD request will be written.
854  *
855  * This function initializes ioctx->n_rbuf and ioctx->rbufs.
856  *
857  * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
858  * -ENOMEM when memory allocation fails and zero upon success.
859  */
860 static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
861 			     struct srp_cmd *srp_cmd,
862 			     enum dma_data_direction *dir, u64 *data_len)
863 {
864 	struct srp_indirect_buf *idb;
865 	struct srp_direct_buf *db;
866 	unsigned add_cdb_offset;
867 	int ret;
868 
869 	/*
870 	 * The pointer computations below will only be compiled correctly
871 	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
872 	 * whether srp_cmd::add_data has been declared as a byte pointer.
873 	 */
874 	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
875 		     && !__same_type(srp_cmd->add_data[0], (u8)0));
876 
877 	BUG_ON(!dir);
878 	BUG_ON(!data_len);
879 
880 	ret = 0;
881 	*data_len = 0;
882 
883 	/*
884 	 * The lower four bits of the buffer format field contain the DATA-IN
885 	 * buffer descriptor format, and the highest four bits contain the
886 	 * DATA-OUT buffer descriptor format.
887 	 */
888 	*dir = DMA_NONE;
889 	if (srp_cmd->buf_fmt & 0xf)
890 		/* DATA-IN: transfer data from target to initiator (read). */
891 		*dir = DMA_FROM_DEVICE;
892 	else if (srp_cmd->buf_fmt >> 4)
893 		/* DATA-OUT: transfer data from initiator to target (write). */
894 		*dir = DMA_TO_DEVICE;
895 
896 	/*
897 	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
898 	 * CDB LENGTH' field are reserved and the size in bytes of this field
899 	 * is four times the value specified in bits 3..7. Hence the "& ~3".
900 	 */
901 	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
902 	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
903 	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
904 		ioctx->n_rbuf = 1;
905 		ioctx->rbufs = &ioctx->single_rbuf;
906 
907 		db = (struct srp_direct_buf *)(srp_cmd->add_data
908 					       + add_cdb_offset);
909 		memcpy(ioctx->rbufs, db, sizeof(*db));
910 		*data_len = be32_to_cpu(db->len);
911 	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
912 		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
913 		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
914 						  + add_cdb_offset);
915 
916 		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
917 
918 		if (ioctx->n_rbuf >
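		/*
		 * Only indirect tables that are embedded completely in the
		 * SRP_CMD IU are supported; fetching additional descriptors
		 * via RDMA is not implemented.
		 */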
919 		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
920 			pr_err("received unsupported SRP_CMD request"
921 			       " type (%u out + %u in != %u / %zu)\n",
922 			       srp_cmd->data_out_desc_cnt,
923 			       srp_cmd->data_in_desc_cnt,
924 			       be32_to_cpu(idb->table_desc.len),
925 			       sizeof(*db));
926 			ioctx->n_rbuf = 0;
927 			ret = -EINVAL;
928 			goto out;
929 		}
930 
931 		if (ioctx->n_rbuf == 1)
932 			ioctx->rbufs = &ioctx->single_rbuf;
933 		else {
934 			ioctx->rbufs =
935 				kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
936 			if (!ioctx->rbufs) {
937 				ioctx->n_rbuf = 0;
938 				ret = -ENOMEM;
939 				goto out;
940 			}
941 		}
942 
943 		db = idb->desc_list;
944 		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
945 		*data_len = be32_to_cpu(idb->len);
946 	}
947 out:
948 	return ret;
949 }
950 
951 /**
952  * srpt_init_ch_qp() - Initialize queue pair attributes.
953  *
954  * Initializes the attributes of queue pair 'qp' by allowing local write,
955  * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
956  */
957 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
958 {
959 	struct ib_qp_attr *attr;
960 	int ret;
961 
962 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
963 	if (!attr)
964 		return -ENOMEM;
965 
966 	attr->qp_state = IB_QPS_INIT;
967 	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
968 	    IB_ACCESS_REMOTE_WRITE;
969 	attr->port_num = ch->sport->port;
970 	attr->pkey_index = 0;
971 
972 	ret = ib_modify_qp(qp, attr,
973 			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
974 			   IB_QP_PKEY_INDEX);
975 
976 	kfree(attr);
977 	return ret;
978 }
979 
980 /**
981  * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
982  * @ch: channel of the queue pair.
983  * @qp: queue pair to change the state of.
984  *
985  * Returns zero upon success and a negative value upon failure.
986  *
987  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
988  * If this structure ever becomes larger, it might be necessary to allocate
989  * it dynamically instead of on the stack.
990  */
991 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
992 {
993 	struct ib_qp_attr qp_attr;
994 	int attr_mask;
995 	int ret;
996 
997 	qp_attr.qp_state = IB_QPS_RTR;
998 	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
999 	if (ret)
1000 		goto out;
1001 
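	/*
	 * Allow up to four outstanding RDMA reads with this QP as responder;
	 * this matches the rdma_read_depth advertised in the IOC profile.
	 */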
1002 	qp_attr.max_dest_rd_atomic = 4;
1003 
1004 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1005 
1006 out:
1007 	return ret;
1008 }
1009 
1010 /**
1011  * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
1012  * @ch: channel of the queue pair.
1013  * @qp: queue pair to change the state of.
1014  *
1015  * Returns zero upon success and a negative value upon failure.
1016  *
1017  * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1018  * If this structure ever becomes larger, it might be necessary to allocate
1019  * it dynamically instead of on the stack.
1020  */
1021 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1022 {
1023 	struct ib_qp_attr qp_attr;
1024 	int attr_mask;
1025 	int ret;
1026 
1027 	qp_attr.qp_state = IB_QPS_RTS;
1028 	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1029 	if (ret)
1030 		goto out;
1031 
1032 	qp_attr.max_rd_atomic = 4;
1033 
1034 	ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1035 
1036 out:
1037 	return ret;
1038 }
1039 
1040 /**
1041  * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
1042  */
1043 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1044 {
1045 	struct ib_qp_attr qp_attr;
1046 
1047 	qp_attr.qp_state = IB_QPS_ERR;
1048 	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1049 }
1050 
1051 /**
1052  * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
1053  */
1054 static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1055 				    struct srpt_send_ioctx *ioctx)
1056 {
1057 	struct scatterlist *sg;
1058 	enum dma_data_direction dir;
1059 
1060 	BUG_ON(!ch);
1061 	BUG_ON(!ioctx);
1062 	BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);
1063 
1064 	while (ioctx->n_rdma)
1065 		kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);
1066 
1067 	kfree(ioctx->rdma_wrs);
1068 	ioctx->rdma_wrs = NULL;
1069 
1070 	if (ioctx->mapped_sg_count) {
1071 		sg = ioctx->sg;
1072 		WARN_ON(!sg);
1073 		dir = ioctx->cmd.data_direction;
1074 		BUG_ON(dir == DMA_NONE);
1075 		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1076 				target_reverse_dma_direction(&ioctx->cmd));
1077 		ioctx->mapped_sg_count = 0;
1078 	}
1079 }
1080 
1081 /**
1082  * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
1083  */
1084 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1085 				 struct srpt_send_ioctx *ioctx)
1086 {
1087 	struct ib_device *dev = ch->sport->sdev->device;
1088 	struct se_cmd *cmd;
1089 	struct scatterlist *sg, *sg_orig;
1090 	int sg_cnt;
1091 	enum dma_data_direction dir;
1092 	struct ib_rdma_wr *riu;
1093 	struct srp_direct_buf *db;
1094 	dma_addr_t dma_addr;
1095 	struct ib_sge *sge;
1096 	u64 raddr;
1097 	u32 rsize;
1098 	u32 tsize;
1099 	u32 dma_len;
1100 	int count, nrdma;
1101 	int i, j, k;
1102 
1103 	BUG_ON(!ch);
1104 	BUG_ON(!ioctx);
1105 	cmd = &ioctx->cmd;
1106 	dir = cmd->data_direction;
1107 	BUG_ON(dir == DMA_NONE);
1108 
1109 	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
1110 	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
1111 
1112 	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1113 			      target_reverse_dma_direction(cmd));
1114 	if (unlikely(!count))
1115 		return -EAGAIN;
1116 
1117 	ioctx->mapped_sg_count = count;
1118 
1119 	if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
1120 		nrdma = ioctx->n_rdma_wrs;
1121 	else {
1122 		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
1123 			+ ioctx->n_rbuf;
1124 
1125 		ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
1126 				GFP_KERNEL);
1127 		if (!ioctx->rdma_wrs)
1128 			goto free_mem;
1129 
1130 		ioctx->n_rdma_wrs = nrdma;
1131 	}
1132 
1133 	db = ioctx->rbufs;
1134 	tsize = cmd->data_length;
1135 	dma_len = ib_sg_dma_len(dev, &sg[0]);
1136 	riu = ioctx->rdma_wrs;
1137 
1138 	/*
1139 	 * First pass: for each remote descriptor, work out how many ib_sge
1140 	 * entries are needed. If a descriptor needs at most SRPT_DEF_SG_PER_WQE
1141 	 * entries a single RDMA work request is used for it; otherwise
1142 	 * additional work requests are allocated to carry the remaining
1143 	 * ib_sge entries, and each request's sg_list array is sized to match.
1145 	 */
1146 	for (i = 0, j = 0;
1147 	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1148 		rsize = be32_to_cpu(db->len);
1149 		raddr = be64_to_cpu(db->va);
1150 		riu->remote_addr = raddr;
1151 		riu->rkey = be32_to_cpu(db->key);
1152 		riu->wr.num_sge = 0;
1153 
1154 		/* calculate how many sge required for this remote_buf */
1155 		while (rsize > 0 && tsize > 0) {
1156 
1157 			if (rsize >= dma_len) {
1158 				tsize -= dma_len;
1159 				rsize -= dma_len;
1160 				raddr += dma_len;
1161 
1162 				if (tsize > 0) {
1163 					++j;
1164 					if (j < count) {
1165 						sg = sg_next(sg);
1166 						dma_len = ib_sg_dma_len(
1167 								dev, sg);
1168 					}
1169 				}
1170 			} else {
1171 				tsize -= rsize;
1172 				dma_len -= rsize;
1173 				rsize = 0;
1174 			}
1175 
1176 			++riu->wr.num_sge;
1177 
1178 			if (rsize > 0 &&
1179 			    riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
1180 				++ioctx->n_rdma;
1181 				riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
1182 						sizeof(*riu->wr.sg_list),
1183 						GFP_KERNEL);
1184 				if (!riu->wr.sg_list)
1185 					goto free_mem;
1186 
1187 				++riu;
1188 				riu->wr.num_sge = 0;
1189 				riu->remote_addr = raddr;
1190 				riu->rkey = be32_to_cpu(db->key);
1191 			}
1192 		}
1193 
1194 		++ioctx->n_rdma;
1195 		riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
1196 					sizeof(*riu->wr.sg_list),
1197 					GFP_KERNEL);
1198 		if (!riu->wr.sg_list)
1199 			goto free_mem;
1200 	}
1201 
1202 	db = ioctx->rbufs;
1203 	tsize = cmd->data_length;
1204 	riu = ioctx->rdma_wrs;
1205 	sg = sg_orig;
1206 	dma_len = ib_sg_dma_len(dev, &sg[0]);
1207 	dma_addr = ib_sg_dma_address(dev, &sg[0]);
1208 
1209 	/* Second pass: fill in each work request's ib_sge entries from the mapped scatterlist. */
1210 	for (i = 0, j = 0;
1211 	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1212 		rsize = be32_to_cpu(db->len);
1213 		sge = riu->wr.sg_list;
1214 		k = 0;
1215 
1216 		while (rsize > 0 && tsize > 0) {
1217 			sge->addr = dma_addr;
1218 			sge->lkey = ch->sport->sdev->pd->local_dma_lkey;
1219 
1220 			if (rsize >= dma_len) {
1221 				sge->length =
1222 					(tsize < dma_len) ? tsize : dma_len;
1223 				tsize -= dma_len;
1224 				rsize -= dma_len;
1225 
1226 				if (tsize > 0) {
1227 					++j;
1228 					if (j < count) {
1229 						sg = sg_next(sg);
1230 						dma_len = ib_sg_dma_len(
1231 								dev, sg);
1232 						dma_addr = ib_sg_dma_address(
1233 								dev, sg);
1234 					}
1235 				}
1236 			} else {
1237 				sge->length = (tsize < rsize) ? tsize : rsize;
1238 				tsize -= rsize;
1239 				dma_len -= rsize;
1240 				dma_addr += rsize;
1241 				rsize = 0;
1242 			}
1243 
1244 			++k;
1245 			if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
1246 				++riu;
1247 				sge = riu->wr.sg_list;
1248 				k = 0;
1249 			} else if (rsize > 0 && tsize > 0)
1250 				++sge;
1251 		}
1252 	}
1253 
1254 	return 0;
1255 
1256 free_mem:
1257 	srpt_unmap_sg_to_ib_sge(ch, ioctx);
1258 
1259 	return -ENOMEM;
1260 }
1261 
1262 /**
1263  * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
1264  */
1265 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1266 {
1267 	struct se_session *se_sess;
1268 	struct srpt_send_ioctx *ioctx;
1269 	int tag;
1270 
1271 	BUG_ON(!ch);
1272 	se_sess = ch->sess;
1273 
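	/*
	 * Send I/O contexts are preallocated in the session's command map, so
	 * allocating a tag also reserves the corresponding srpt_send_ioctx.
	 */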
1274 	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
1275 	if (tag < 0) {
1276 		pr_err("Unable to obtain tag for srpt_send_ioctx\n");
1277 		return NULL;
1278 	}
1279 	ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
1280 	memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
1281 	ioctx->ch = ch;
1282 	spin_lock_init(&ioctx->spinlock);
1283 	ioctx->state = SRPT_STATE_NEW;
1284 	init_completion(&ioctx->tx_done);
1285 
1286 	ioctx->cmd.map_tag = tag;
1287 
1288 	return ioctx;
1289 }
1290 
1291 /**
1292  * srpt_abort_cmd() - Abort a SCSI command.
1293  * @ioctx:   I/O context associated with the SCSI command.
1295  */
1296 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1297 {
1298 	enum srpt_command_state state;
1299 	unsigned long flags;
1300 
1301 	BUG_ON(!ioctx);
1302 
1303 	/*
1304 	 * If the command is in a state where the target core is waiting for
1305 	 * the ib_srpt driver, change the state to the next state.
1306 	 */
1307 
1308 	spin_lock_irqsave(&ioctx->spinlock, flags);
1309 	state = ioctx->state;
1310 	switch (state) {
1311 	case SRPT_STATE_NEED_DATA:
1312 		ioctx->state = SRPT_STATE_DATA_IN;
1313 		break;
1314 	case SRPT_STATE_CMD_RSP_SENT:
1315 	case SRPT_STATE_MGMT_RSP_SENT:
1316 		ioctx->state = SRPT_STATE_DONE;
1317 		break;
1318 	default:
1319 		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
1320 			  __func__, state);
1321 		break;
1322 	}
1323 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
1324 
1325 	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
1326 		 ioctx->cmd.tag);
1327 
1328 	switch (state) {
1329 	case SRPT_STATE_NEW:
1330 	case SRPT_STATE_DATA_IN:
1331 	case SRPT_STATE_MGMT:
1332 	case SRPT_STATE_DONE:
1333 		/*
1334 		 * Do nothing - defer abort processing until
1335 		 * srpt_queue_response() is invoked.
1336 		 */
1337 		break;
1338 	case SRPT_STATE_NEED_DATA:
1339 		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1340 		transport_generic_request_failure(&ioctx->cmd,
1341 					TCM_CHECK_CONDITION_ABORT_CMD);
1342 		break;
1343 	case SRPT_STATE_CMD_RSP_SENT:
1344 		/*
1345 		 * SRP_RSP sending failed or the SRP_RSP send completion has
1346 		 * not been received in time.
1347 		 */
1348 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1349 		transport_generic_free_cmd(&ioctx->cmd, 0);
1350 		break;
1351 	case SRPT_STATE_MGMT_RSP_SENT:
1352 		transport_generic_free_cmd(&ioctx->cmd, 0);
1353 		break;
1354 	default:
1355 		WARN(1, "Unexpected command state (%d)", state);
1356 		break;
1357 	}
1358 
1359 	return state;
1360 }
1361 
1362 /**
1363  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
1364  * the data that has been transferred via IB RDMA had to be postponed until the
1365  * check_stop_free() callback.  None of this is necessary anymore and needs to
1366  * be cleaned up.
1367  */
1368 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1369 {
1370 	struct srpt_rdma_ch *ch = cq->cq_context;
1371 	struct srpt_send_ioctx *ioctx =
1372 		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
1373 
1374 	WARN_ON(ioctx->n_rdma <= 0);
1375 	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1376 
1377 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1378 		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1379 			ioctx, wc->status);
1380 		srpt_abort_cmd(ioctx);
1381 		return;
1382 	}
1383 
1384 	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1385 					SRPT_STATE_DATA_IN))
1386 		target_execute_cmd(&ioctx->cmd);
1387 	else
1388 		pr_err("%s[%d]: wrong state = %d\n", __func__,
1389 		       __LINE__, srpt_get_cmd_state(ioctx));
1390 }
1391 
1392 static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1393 {
1394 	struct srpt_send_ioctx *ioctx =
1395 		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
1396 
1397 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1398 		/*
1399 		 * Note: if an RDMA write error completion is received that
1400 		 * means that a SEND also has been posted. Defer further
1401 		 * processing of the associated command until the send error
1402 		 * completion has been received.
1403 		 */
1404 		pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
1405 			ioctx, wc->status);
1406 	}
1407 }
1408 
1409 /**
1410  * srpt_build_cmd_rsp() - Build an SRP_RSP response.
1411  * @ch: RDMA channel through which the request has been received.
1412  * @ioctx: I/O context associated with the SRP_CMD request. The response will
1413  *   be built in the buffer ioctx->buf points at and hence this function will
1414  *   overwrite the request data.
1415  * @tag: tag of the request for which this response is being generated.
1416  * @status: value for the STATUS field of the SRP_RSP information unit.
1417  *
1418  * Returns the size in bytes of the SRP_RSP response.
1419  *
1420  * An SRP_RSP response contains a SCSI status or service response. See also
1421  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1422  * response. See also SPC-2 for more information about sense data.
1423  */
1424 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1425 			      struct srpt_send_ioctx *ioctx, u64 tag,
1426 			      int status)
1427 {
1428 	struct srp_rsp *srp_rsp;
1429 	const u8 *sense_data;
1430 	int sense_data_len, max_sense_len;
1431 
1432 	/*
1433 	 * The lowest bit of all SAM-3 status codes is zero (see also
1434 	 * paragraph 5.3 in SAM-3).
1435 	 */
1436 	WARN_ON(status & 1);
1437 
1438 	srp_rsp = ioctx->ioctx.buf;
1439 	BUG_ON(!srp_rsp);
1440 
1441 	sense_data = ioctx->sense_data;
1442 	sense_data_len = ioctx->cmd.scsi_sense_length;
1443 	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1444 
1445 	memset(srp_rsp, 0, sizeof(*srp_rsp));
1446 	srp_rsp->opcode = SRP_RSP;
1447 	srp_rsp->req_lim_delta =
1448 		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1449 	srp_rsp->tag = tag;
1450 	srp_rsp->status = status;
1451 
1452 	if (sense_data_len) {
1453 		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1454 		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1455 		if (sense_data_len > max_sense_len) {
1456 			pr_warn("truncated sense data from %d to %d"
1457 				" bytes\n", sense_data_len, max_sense_len);
1458 			sense_data_len = max_sense_len;
1459 		}
1460 
1461 		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1462 		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1463 		memcpy(srp_rsp + 1, sense_data, sense_data_len);
1464 	}
1465 
1466 	return sizeof(*srp_rsp) + sense_data_len;
1467 }
1468 
1469 /**
1470  * srpt_build_tskmgmt_rsp() - Build a task management response.
1471  * @ch:       RDMA channel through which the request has been received.
1472  * @ioctx:    I/O context in which the SRP_RSP response will be built.
1473  * @rsp_code: RSP_CODE that will be stored in the response.
1474  * @tag:      Tag of the request for which this response is being generated.
1475  *
1476  * Returns the size in bytes of the SRP_RSP response.
1477  *
1478  * An SRP_RSP response contains a SCSI status or service response. See also
1479  * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1480  * response.
1481  */
1482 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1483 				  struct srpt_send_ioctx *ioctx,
1484 				  u8 rsp_code, u64 tag)
1485 {
1486 	struct srp_rsp *srp_rsp;
1487 	int resp_data_len;
1488 	int resp_len;
1489 
1490 	resp_data_len = 4;
1491 	resp_len = sizeof(*srp_rsp) + resp_data_len;
1492 
1493 	srp_rsp = ioctx->ioctx.buf;
1494 	BUG_ON(!srp_rsp);
1495 	memset(srp_rsp, 0, sizeof(*srp_rsp));
1496 
1497 	srp_rsp->opcode = SRP_RSP;
1498 	srp_rsp->req_lim_delta =
1499 		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1500 	srp_rsp->tag = tag;
1501 
1502 	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1503 	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1504 	srp_rsp->data[3] = rsp_code;
1505 
1506 	return resp_len;
1507 }
1508 
1509 static int srpt_check_stop_free(struct se_cmd *cmd)
1510 {
1511 	struct srpt_send_ioctx *ioctx = container_of(cmd,
1512 				struct srpt_send_ioctx, cmd);
1513 
1514 	return target_put_sess_cmd(&ioctx->cmd);
1515 }
1516 
1517 /**
1518  * srpt_handle_cmd() - Process SRP_CMD.
1519  */
1520 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1521 			    struct srpt_recv_ioctx *recv_ioctx,
1522 			    struct srpt_send_ioctx *send_ioctx)
1523 {
1524 	struct se_cmd *cmd;
1525 	struct srp_cmd *srp_cmd;
1526 	u64 data_len;
1527 	enum dma_data_direction dir;
1528 	int rc;
1529 
1530 	BUG_ON(!send_ioctx);
1531 
1532 	srp_cmd = recv_ioctx->ioctx.buf;
1533 	cmd = &send_ioctx->cmd;
1534 	cmd->tag = srp_cmd->tag;
1535 
1536 	switch (srp_cmd->task_attr) {
1537 	case SRP_CMD_SIMPLE_Q:
1538 		cmd->sam_task_attr = TCM_SIMPLE_TAG;
1539 		break;
1540 	case SRP_CMD_ORDERED_Q:
1541 	default:
1542 		cmd->sam_task_attr = TCM_ORDERED_TAG;
1543 		break;
1544 	case SRP_CMD_HEAD_OF_Q:
1545 		cmd->sam_task_attr = TCM_HEAD_TAG;
1546 		break;
1547 	case SRP_CMD_ACA:
1548 		cmd->sam_task_attr = TCM_ACA_TAG;
1549 		break;
1550 	}
1551 
1552 	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
1553 		pr_err("0x%llx: parsing SRP descriptor table failed.\n",
1554 		       srp_cmd->tag);
1555 		goto release_ioctx;
1556 	}
1557 
1558 	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
1559 			       &send_ioctx->sense_data[0],
1560 			       scsilun_to_int(&srp_cmd->lun), data_len,
1561 			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
1562 	if (rc != 0) {
1563 		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
1564 			 srp_cmd->tag);
1565 		goto release_ioctx;
1566 	}
1567 	return;
1568 
1569 release_ioctx:
1570 	send_ioctx->state = SRPT_STATE_DONE;
1571 	srpt_release_cmd(cmd);
1572 }
1573 
1574 static int srp_tmr_to_tcm(int fn)
1575 {
1576 	switch (fn) {
1577 	case SRP_TSK_ABORT_TASK:
1578 		return TMR_ABORT_TASK;
1579 	case SRP_TSK_ABORT_TASK_SET:
1580 		return TMR_ABORT_TASK_SET;
1581 	case SRP_TSK_CLEAR_TASK_SET:
1582 		return TMR_CLEAR_TASK_SET;
1583 	case SRP_TSK_LUN_RESET:
1584 		return TMR_LUN_RESET;
1585 	case SRP_TSK_CLEAR_ACA:
1586 		return TMR_CLEAR_ACA;
1587 	default:
1588 		return -1;
1589 	}
1590 }
1591 
1592 /**
1593  * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
1594  *
1595  * Hands the task management request off to the target core via target_submit_tmr().
1596  *
1597  * For more information about SRP_TSK_MGMT information units, see also section
1598  * 6.7 in the SRP r16a document.
1599  */
1600 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1601 				 struct srpt_recv_ioctx *recv_ioctx,
1602 				 struct srpt_send_ioctx *send_ioctx)
1603 {
1604 	struct srp_tsk_mgmt *srp_tsk;
1605 	struct se_cmd *cmd;
1606 	struct se_session *sess = ch->sess;
1607 	int tcm_tmr;
1608 	int rc;
1609 
1610 	BUG_ON(!send_ioctx);
1611 
1612 	srp_tsk = recv_ioctx->ioctx.buf;
1613 	cmd = &send_ioctx->cmd;
1614 
1615 	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1616 		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1617 		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1618 
1619 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1620 	send_ioctx->cmd.tag = srp_tsk->tag;
1621 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1622 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
1623 			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
1624 			       GFP_KERNEL, srp_tsk->task_tag,
1625 			       TARGET_SCF_ACK_KREF);
1626 	if (rc != 0) {
1627 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1628 		goto fail;
1629 	}
1630 	return;
1631 fail:
1632 	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
1633 }
1634 
1635 /**
1636  * srpt_handle_new_iu() - Process a newly received information unit.
1637  * @ch:    RDMA channel through which the information unit has been received.
1638  * @recv_ioctx: Receive I/O context associated with the information unit.
 * @send_ioctx: Send I/O context to use, or NULL to allocate one when needed.
1639  */
1640 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1641 			       struct srpt_recv_ioctx *recv_ioctx,
1642 			       struct srpt_send_ioctx *send_ioctx)
1643 {
1644 	struct srp_cmd *srp_cmd;
1645 
1646 	BUG_ON(!ch);
1647 	BUG_ON(!recv_ioctx);
1648 
1649 	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1650 				   recv_ioctx->ioctx.dma, srp_max_req_size,
1651 				   DMA_FROM_DEVICE);
1652 
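	/*
	 * IUs that arrive before the channel is live are queued on
	 * cmd_wait_list and handled later by srpt_process_wait_list().
	 */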
1653 	if (unlikely(ch->state == CH_CONNECTING)) {
1654 		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1655 		goto out;
1656 	}
1657 
1658 	if (unlikely(ch->state != CH_LIVE))
1659 		goto out;
1660 
1661 	srp_cmd = recv_ioctx->ioctx.buf;
1662 	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1663 		if (!send_ioctx)
1664 			send_ioctx = srpt_get_send_ioctx(ch);
1665 		if (unlikely(!send_ioctx)) {
1666 			list_add_tail(&recv_ioctx->wait_list,
1667 				      &ch->cmd_wait_list);
1668 			goto out;
1669 		}
1670 	}
1671 
1672 	switch (srp_cmd->opcode) {
1673 	case SRP_CMD:
1674 		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1675 		break;
1676 	case SRP_TSK_MGMT:
1677 		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1678 		break;
1679 	case SRP_I_LOGOUT:
1680 		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
1681 		break;
1682 	case SRP_CRED_RSP:
1683 		pr_debug("received SRP_CRED_RSP\n");
1684 		break;
1685 	case SRP_AER_RSP:
1686 		pr_debug("received SRP_AER_RSP\n");
1687 		break;
1688 	case SRP_RSP:
1689 		pr_err("Received SRP_RSP\n");
1690 		break;
1691 	default:
1692 		pr_err("received IU with unknown opcode 0x%x\n",
1693 		       srp_cmd->opcode);
1694 		break;
1695 	}
1696 
1697 	srpt_post_recv(ch->sport->sdev, recv_ioctx);
1698 out:
1699 	return;
1700 }
1701 
1702 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1703 {
1704 	struct srpt_rdma_ch *ch = cq->cq_context;
1705 	struct srpt_recv_ioctx *ioctx =
1706 		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1707 
1708 	if (wc->status == IB_WC_SUCCESS) {
1709 		int req_lim;
1710 
1711 		req_lim = atomic_dec_return(&ch->req_lim);
1712 		if (unlikely(req_lim < 0))
1713 			pr_err("req_lim = %d < 0\n", req_lim);
1714 		srpt_handle_new_iu(ch, ioctx, NULL);
1715 	} else {
1716 		pr_info("receiving failed for ioctx %p with status %d\n",
1717 			ioctx, wc->status);
1718 	}
1719 }
1720 
1721 /*
1722  * This function must be called from the context in which RDMA completions are
1723  * processed because it accesses the wait list without protection against
1724  * access from other threads.
1725  */
1726 static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1727 {
1728 	struct srpt_send_ioctx *ioctx;
1729 
1730 	while (!list_empty(&ch->cmd_wait_list) &&
1731 	       ch->state >= CH_LIVE &&
1732 	       (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
1733 		struct srpt_recv_ioctx *recv_ioctx;
1734 
1735 		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
1736 					      struct srpt_recv_ioctx,
1737 					      wait_list);
1738 		list_del(&recv_ioctx->wait_list);
1739 		srpt_handle_new_iu(ch, recv_ioctx, ioctx);
1740 	}
1741 }
1742 
1743 /**
1744  * Note: Although this has not yet been observed during tests, at least in
1745  * theory it is possible that the srpt_get_send_ioctx() call invoked by
1746  * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
1747  * value in each response is set to one, and it is possible that this response
1748  * makes the initiator send a new request before the send completion for that
1749  * response has been processed. This could e.g. happen if the call to
1750  * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
1751  * if IB retransmission causes generation of the send completion to be
1752  * delayed. Incoming information units for which srpt_get_send_ioctx() fails
1753  * are queued on cmd_wait_list. The code below processes these delayed
1754  * requests one at a time.
1755  */
1756 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
1757 {
1758 	struct srpt_rdma_ch *ch = cq->cq_context;
1759 	struct srpt_send_ioctx *ioctx =
1760 		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1761 	enum srpt_command_state state;
1762 
1763 	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1764 
1765 	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1766 		state != SRPT_STATE_MGMT_RSP_SENT);
1767 
1768 	atomic_inc(&ch->sq_wr_avail);
1769 
1770 	if (wc->status != IB_WC_SUCCESS)
1771 		pr_info("sending response for ioctx 0x%p failed"
1772 			" with status %d\n", ioctx, wc->status);
1773 
1774 	if (state != SRPT_STATE_DONE) {
1775 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
1776 		transport_generic_free_cmd(&ioctx->cmd, 0);
1777 	} else {
1778 		pr_err("IB completion has been received too late for"
1779 		       " wr_id = %u.\n", ioctx->ioctx.index);
1780 	}
1781 
1782 	srpt_process_wait_list(ch);
1783 }
1784 
1785 /**
1786  * srpt_create_ch_ib() - Create a completion queue and a queue pair for a channel.
1787  */
1788 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1789 {
1790 	struct ib_qp_init_attr *qp_init;
1791 	struct srpt_port *sport = ch->sport;
1792 	struct srpt_device *sdev = sport->sdev;
1793 	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
1794 	int ret;
1795 
1796 	WARN_ON(ch->rq_size < 1);
1797 
1798 	ret = -ENOMEM;
1799 	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
1800 	if (!qp_init)
1801 		goto out;
1802 
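	/*
	 * If creating the QP fails with -ENOMEM the send queue size is halved
	 * and CQ/QP creation is retried until MIN_SRPT_SQ_SIZE is reached.
	 */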
1803 retry:
1804 	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
1805 			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
1806 	if (IS_ERR(ch->cq)) {
1807 		ret = PTR_ERR(ch->cq);
1808 		pr_err("failed to create CQ cqe= %d ret= %d\n",
1809 		       ch->rq_size + srp_sq_size, ret);
1810 		goto out;
1811 	}
1812 
1813 	qp_init->qp_context = (void *)ch;
1814 	qp_init->event_handler
1815 		= (void(*)(struct ib_event *, void*))srpt_qp_event;
1816 	qp_init->send_cq = ch->cq;
1817 	qp_init->recv_cq = ch->cq;
1818 	qp_init->srq = sdev->srq;
1819 	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1820 	qp_init->qp_type = IB_QPT_RC;
1821 	qp_init->cap.max_send_wr = srp_sq_size;
1822 	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
1823 
1824 	ch->qp = ib_create_qp(sdev->pd, qp_init);
1825 	if (IS_ERR(ch->qp)) {
1826 		ret = PTR_ERR(ch->qp);
1827 		if (ret == -ENOMEM) {
1828 			srp_sq_size /= 2;
1829 			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
1830 				ib_free_cq(ch->cq);
1831 				goto retry;
1832 			}
1833 		}
1834 		pr_err("failed to create_qp ret= %d\n", ret);
1835 		goto err_destroy_cq;
1836 	}
1837 
1838 	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1839 
1840 	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
1841 		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1842 		 qp_init->cap.max_send_wr, ch->cm_id);
1843 
1844 	ret = srpt_init_ch_qp(ch, ch->qp);
1845 	if (ret)
1846 		goto err_destroy_qp;
1847 
1848 out:
1849 	kfree(qp_init);
1850 	return ret;
1851 
1852 err_destroy_qp:
1853 	ib_destroy_qp(ch->qp);
1854 err_destroy_cq:
1855 	ib_free_cq(ch->cq);
1856 	goto out;
1857 }
1858 
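/**
 * srpt_destroy_ch_ib() - Release the QP and CQ of an RDMA channel.
 */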
1859 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1860 {
1861 	ib_destroy_qp(ch->qp);
1862 	ib_free_cq(ch->cq);
1863 }
1864 
1865 /**
1866  * srpt_close_ch() - Close an RDMA channel.
1867  *
1868  * Make sure all resources associated with the channel will be deallocated at
1869  * an appropriate time.
1870  *
1871  * Returns true if and only if the channel state has been modified into
1872  * CH_DRAINING.
1873  */
1874 static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1875 {
1876 	int ret;
1877 
1878 	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1879 		pr_debug("%s-%d: already closed\n", ch->sess_name,
1880 			 ch->qp->qp_num);
1881 		return false;
1882 	}
1883 
1884 	kref_get(&ch->kref);
1885 
1886 	ret = srpt_ch_qp_err(ch);
1887 	if (ret < 0)
1888 		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
1889 		       ch->sess_name, ch->qp->qp_num, ret);
1890 
1891 	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
1892 		 ch->qp->qp_num);
1893 	ret = srpt_zerolength_write(ch);
1894 	if (ret < 0) {
1895 		pr_err("%s-%d: queuing zero-length write failed: %d\n",
1896 		       ch->sess_name, ch->qp->qp_num, ret);
1897 		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1898 			schedule_work(&ch->release_work);
1899 		else
1900 			WARN_ON_ONCE(true);
1901 	}
1902 
1903 	kref_put(&ch->kref, srpt_free_ch);
1904 
1905 	return true;
1906 }
1907 
1908 /*
1909  * Change the channel state into CH_DISCONNECTING. If a channel has not yet
1910  * reached the connected state, close it. If a channel is in the connected
1911  * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
1912  * the responsibility of the caller to ensure that this function is not
1913  * invoked concurrently with the code that accepts a connection. This means
1914  * that this function must either be invoked from inside a CM callback
1915  * function or that it must be invoked with the srpt_port.mutex held.
1916  */
1917 static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1918 {
1919 	int ret;
1920 
1921 	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1922 		return -ENOTCONN;
1923 
1924 	ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
1925 	if (ret < 0)
1926 		ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
1927 
1928 	if (ret < 0 && srpt_close_ch(ch))
1929 		ret = 0;
1930 
1931 	return ret;
1932 }
1933 
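/**
 * __srpt_close_all_ch() - Disconnect and close all channels of a target device.
 *
 * The caller must hold sdev->mutex.
 */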
1934 static void __srpt_close_all_ch(struct srpt_device *sdev)
1935 {
1936 	struct srpt_rdma_ch *ch;
1937 
1938 	lockdep_assert_held(&sdev->mutex);
1939 
1940 	list_for_each_entry(ch, &sdev->rch_list, list) {
1941 		if (srpt_disconnect_ch(ch) >= 0)
1942 			pr_info("Closing channel %s-%d because target %s has been disabled\n",
1943 				ch->sess_name, ch->qp->qp_num,
1944 				sdev->device->name);
1945 		srpt_close_ch(ch);
1946 	}
1947 }
1948 
1949 /**
1950  * srpt_shutdown_session() - Whether or not a session may be shut down.
1951  */
1952 static int srpt_shutdown_session(struct se_session *se_sess)
1953 {
1954 	return 1;
1955 }
1956 
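/**
 * srpt_free_ch() - kref release callback that frees an RDMA channel structure.
 */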
1957 static void srpt_free_ch(struct kref *kref)
1958 {
1959 	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
1960 
1961 	kfree(ch);
1962 }
1963 
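/**
 * srpt_release_channel_work() - Release the resources of an RDMA channel.
 *
 * Waits until all outstanding SCSI commands have finished, deregisters the
 * target session, destroys the CM ID and the IB queue pair and completion
 * queue, frees the I/O context ring and removes the channel from the
 * rch_list of its device.
 */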
1964 static void srpt_release_channel_work(struct work_struct *w)
1965 {
1966 	struct srpt_rdma_ch *ch;
1967 	struct srpt_device *sdev;
1968 	struct se_session *se_sess;
1969 
1970 	ch = container_of(w, struct srpt_rdma_ch, release_work);
1971 	pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
1972 		 ch->qp->qp_num, ch->release_done);
1973 
1974 	sdev = ch->sport->sdev;
1975 	BUG_ON(!sdev);
1976 
1977 	se_sess = ch->sess;
1978 	BUG_ON(!se_sess);
1979 
1980 	target_sess_cmd_list_set_waiting(se_sess);
1981 	target_wait_for_sess_cmds(se_sess);
1982 
1983 	transport_deregister_session_configfs(se_sess);
1984 	transport_deregister_session(se_sess);
1985 	ch->sess = NULL;
1986 
1987 	ib_destroy_cm_id(ch->cm_id);
1988 
1989 	srpt_destroy_ch_ib(ch);
1990 
1991 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
1992 			     ch->sport->sdev, ch->rq_size,
1993 			     ch->rsp_size, DMA_TO_DEVICE);
1994 
1995 	mutex_lock(&sdev->mutex);
1996 	list_del_init(&ch->list);
1997 	if (ch->release_done)
1998 		complete(ch->release_done);
1999 	mutex_unlock(&sdev->mutex);
2000 
2001 	wake_up(&sdev->ch_releaseQ);
2002 
2003 	kref_put(&ch->kref, srpt_free_ch);
2004 }
2005 
2006 /**
2007  * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2008  *
2009  * Ownership of the cm_id is transferred to the target session if this
2010  * function returns zero. Otherwise the caller remains the owner of cm_id.
2011  */
2012 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2013 			    struct ib_cm_req_event_param *param,
2014 			    void *private_data)
2015 {
2016 	struct srpt_device *sdev = cm_id->context;
2017 	struct srpt_port *sport = &sdev->port[param->port - 1];
2018 	struct srp_login_req *req;
2019 	struct srp_login_rsp *rsp;
2020 	struct srp_login_rej *rej;
2021 	struct ib_cm_rep_param *rep_param;
2022 	struct srpt_rdma_ch *ch, *tmp_ch;
2023 	u32 it_iu_len;
2024 	int ret = 0;
2025 	unsigned char *p;
2026 
2027 	WARN_ON_ONCE(irqs_disabled());
2028 
2029 	if (WARN_ON(!sdev || !private_data))
2030 		return -EINVAL;
2031 
2032 	req = (struct srp_login_req *)private_data;
2033 
2034 	it_iu_len = be32_to_cpu(req->req_it_iu_len);
2035 
2036 	pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2037 		" t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2038 		" (guid=0x%llx:0x%llx)\n",
2039 		be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2040 		be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2041 		be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2042 		be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2043 		it_iu_len,
2044 		param->port,
2045 		be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2046 		be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2047 
2048 	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2049 	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2050 	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
2051 
2052 	if (!rsp || !rej || !rep_param) {
2053 		ret = -ENOMEM;
2054 		goto out;
2055 	}
2056 
2057 	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2058 		rej->reason = cpu_to_be32(
2059 			      SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2060 		ret = -EINVAL;
2061 		pr_err("rejected SRP_LOGIN_REQ because its"
2062 		       " length (%d bytes) is out of range (%d .. %d)\n",
2063 		       it_iu_len, 64, srp_max_req_size);
2064 		goto reject;
2065 	}
2066 
2067 	if (!sport->enabled) {
2068 		rej->reason = cpu_to_be32(
2069 			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2070 		ret = -EINVAL;
2071 		pr_err("rejected SRP_LOGIN_REQ because the target port"
2072 		       " has not yet been enabled\n");
2073 		goto reject;
2074 	}
2075 
2076 	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2077 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2078 
2079 		mutex_lock(&sdev->mutex);
2080 
2081 		list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2082 			if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2083 			    && !memcmp(ch->t_port_id, req->target_port_id, 16)
2084 			    && param->port == ch->sport->port
2085 			    && param->listen_id == ch->sport->sdev->cm_id
2086 			    && ch->cm_id) {
2087 				if (srpt_disconnect_ch(ch) < 0)
2088 					continue;
2089 				pr_info("Relogin - closed existing channel %s\n",
2090 					ch->sess_name);
2091 				rsp->rsp_flags =
2092 					SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2093 			}
2094 		}
2095 
2096 		mutex_unlock(&sdev->mutex);
2097 
2098 	} else
2099 		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2100 
2101 	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2102 	    || *(__be64 *)(req->target_port_id + 8) !=
2103 	       cpu_to_be64(srpt_service_guid)) {
2104 		rej->reason = cpu_to_be32(
2105 			      SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2106 		ret = -ENOMEM;
2107 		pr_err("rejected SRP_LOGIN_REQ because it"
2108 		       " has an invalid target port identifier.\n");
2109 		goto reject;
2110 	}
2111 
2112 	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2113 	if (!ch) {
2114 		rej->reason = cpu_to_be32(
2115 			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2116 		pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2117 		ret = -ENOMEM;
2118 		goto reject;
2119 	}
2120 
2121 	kref_init(&ch->kref);
2122 	ch->zw_cqe.done = srpt_zerolength_write_done;
2123 	INIT_WORK(&ch->release_work, srpt_release_channel_work);
2124 	memcpy(ch->i_port_id, req->initiator_port_id, 16);
2125 	memcpy(ch->t_port_id, req->target_port_id, 16);
2126 	ch->sport = &sdev->port[param->port - 1];
2127 	ch->cm_id = cm_id;
2128 	cm_id->context = ch;
2129 	/*
2130 	 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2131 	 * for the SRP protocol to the command queue size.
2132 	 */
2133 	ch->rq_size = SRPT_RQ_SIZE;
2134 	spin_lock_init(&ch->spinlock);
2135 	ch->state = CH_CONNECTING;
2136 	INIT_LIST_HEAD(&ch->cmd_wait_list);
2137 	ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2138 
2139 	ch->ioctx_ring = (struct srpt_send_ioctx **)
2140 		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2141 				      sizeof(*ch->ioctx_ring[0]),
2142 				      ch->rsp_size, DMA_TO_DEVICE);
2143 	if (!ch->ioctx_ring)
2144 		goto free_ch;
2145 
2146 	ret = srpt_create_ch_ib(ch);
2147 	if (ret) {
2148 		rej->reason = cpu_to_be32(
2149 			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2150 		pr_err("rejected SRP_LOGIN_REQ because creating"
2151 		       " a new RDMA channel failed.\n");
2152 		goto free_ring;
2153 	}
2154 
2155 	ret = srpt_ch_qp_rtr(ch, ch->qp);
2156 	if (ret) {
2157 		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2158 		pr_err("rejected SRP_LOGIN_REQ because enabling"
2159 		       " RTR failed (error code = %d)\n", ret);
2160 		goto destroy_ib;
2161 	}
2162 
2163 	/*
2164 	 * Use the initiator port identifier as the session name. When
2165 	 * checking against se_node_acl->initiatorname[], this can be
2166 	 * with or without a preceding '0x'.
2167 	 */
2168 	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2169 			be64_to_cpu(*(__be64 *)ch->i_port_id),
2170 			be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2171 
2172 	pr_debug("registering session %s\n", ch->sess_name);
2173 	p = &ch->sess_name[0];
2174 
2175 try_again:
2176 	ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
2177 					sizeof(struct srpt_send_ioctx),
2178 					TARGET_PROT_NORMAL, p, ch, NULL);
2179 	if (IS_ERR(ch->sess)) {
2180 		pr_info("Rejected login because no ACL has been"
2181 			" configured yet for initiator %s.\n", p);
2182 		/*
2183 		 * XXX: Hack to retry with ch->i_port_id without the leading '0x'
2184 		 */
2185 		if (p == &ch->sess_name[0]) {
2186 			p += 2;
2187 			goto try_again;
2188 		}
2189 		rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
2190 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2191 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2192 		goto destroy_ib;
2193 	}
2194 
2195 	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2196 		 ch->sess_name, ch->cm_id);
2197 
2198 	/* create srp_login_response */
2199 	rsp->opcode = SRP_LOGIN_RSP;
2200 	rsp->tag = req->tag;
2201 	rsp->max_it_iu_len = req->req_it_iu_len;
2202 	rsp->max_ti_iu_len = req->req_it_iu_len;
2203 	ch->max_ti_iu_len = it_iu_len;
2204 	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2205 				   | SRP_BUF_FORMAT_INDIRECT);
2206 	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2207 	atomic_set(&ch->req_lim, ch->rq_size);
2208 	atomic_set(&ch->req_lim_delta, 0);
2209 
2210 	/* create cm reply */
2211 	rep_param->qp_num = ch->qp->qp_num;
2212 	rep_param->private_data = (void *)rsp;
2213 	rep_param->private_data_len = sizeof(*rsp);
2214 	rep_param->rnr_retry_count = 7;
2215 	rep_param->flow_control = 1;
2216 	rep_param->failover_accepted = 0;
2217 	rep_param->srq = 1;
2218 	rep_param->responder_resources = 4;
2219 	rep_param->initiator_depth = 4;
2220 
2221 	ret = ib_send_cm_rep(cm_id, rep_param);
2222 	if (ret) {
2223 		pr_err("sending SRP_LOGIN_REQ response failed"
2224 		       " (error code = %d)\n", ret);
2225 		goto release_channel;
2226 	}
2227 
2228 	mutex_lock(&sdev->mutex);
2229 	list_add_tail(&ch->list, &sdev->rch_list);
2230 	mutex_unlock(&sdev->mutex);
2231 
2232 	goto out;
2233 
2234 release_channel:
2235 	srpt_disconnect_ch(ch);
2236 	transport_deregister_session_configfs(ch->sess);
2237 	transport_deregister_session(ch->sess);
2238 	ch->sess = NULL;
2239 
2240 destroy_ib:
2241 	srpt_destroy_ch_ib(ch);
2242 
2243 free_ring:
2244 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2245 			     ch->sport->sdev, ch->rq_size,
2246 			     ch->rsp_size, DMA_TO_DEVICE);
2247 free_ch:
2248 	kfree(ch);
2249 
2250 reject:
2251 	rej->opcode = SRP_LOGIN_REJ;
2252 	rej->tag = req->tag;
2253 	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2254 				   | SRP_BUF_FORMAT_INDIRECT);
2255 
2256 	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2257 			     (void *)rej, sizeof(*rej));
2258 
2259 out:
2260 	kfree(rep_param);
2261 	kfree(rsp);
2262 	kfree(rej);
2263 
2264 	return ret;
2265 }
2266 
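/**
 * srpt_cm_rej_recv() - Process an IB_CM_REJ_RECEIVED event.
 *
 * Logs the reject reason and, if present, a hex dump of the private data.
 */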
2267 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2268 			     enum ib_cm_rej_reason reason,
2269 			     const u8 *private_data,
2270 			     u8 private_data_len)
2271 {
2272 	char *priv = NULL;
2273 	int i;
2274 
2275 	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2276 						GFP_KERNEL))) {
2277 		for (i = 0; i < private_data_len; i++)
2278 			sprintf(priv + 3 * i, " %02x", private_data[i]);
2279 	}
2280 	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2281 		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2282 		"; private data" : "", priv ? priv : " (?)");
2283 	kfree(priv);
2284 }
2285 
2286 /**
2287  * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2288  *
2289  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2290  * and that the recipient may begin transmitting (RTU = ready to use).
2291  */
2292 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2293 {
2294 	int ret;
2295 
2296 	if (srpt_set_ch_state(ch, CH_LIVE)) {
2297 		ret = srpt_ch_qp_rts(ch, ch->qp);
2298 
2299 		if (ret == 0) {
2300 			/* Trigger wait list processing. */
2301 			ret = srpt_zerolength_write(ch);
2302 			WARN_ONCE(ret < 0, "%d\n", ret);
2303 		} else {
2304 			srpt_close_ch(ch);
2305 		}
2306 	}
2307 }
2308 
2309 /**
2310  * srpt_cm_handler() - IB connection manager callback function.
2311  *
2312  * A non-zero return value will cause the caller to destroy the CM ID.
2313  *
2314  * Note: srpt_cm_handler() must only return a non-zero value when srpt_cm_req_recv()
2315  * has failed to transfer ownership of the cm_id to a channel. Returning
2316  * a non-zero value in any other case will trigger a race with the
2317  * ib_destroy_cm_id() call in srpt_release_channel().
2318  */
2319 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2320 {
2321 	struct srpt_rdma_ch *ch = cm_id->context;
2322 	int ret;
2323 
2324 	ret = 0;
2325 	switch (event->event) {
2326 	case IB_CM_REQ_RECEIVED:
2327 		ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2328 				       event->private_data);
2329 		break;
2330 	case IB_CM_REJ_RECEIVED:
2331 		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2332 				 event->private_data,
2333 				 IB_CM_REJ_PRIVATE_DATA_SIZE);
2334 		break;
2335 	case IB_CM_RTU_RECEIVED:
2336 	case IB_CM_USER_ESTABLISHED:
2337 		srpt_cm_rtu_recv(ch);
2338 		break;
2339 	case IB_CM_DREQ_RECEIVED:
2340 		srpt_disconnect_ch(ch);
2341 		break;
2342 	case IB_CM_DREP_RECEIVED:
2343 		pr_info("Received CM DREP message for ch %s-%d.\n",
2344 			ch->sess_name, ch->qp->qp_num);
2345 		srpt_close_ch(ch);
2346 		break;
2347 	case IB_CM_TIMEWAIT_EXIT:
2348 		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2349 			ch->sess_name, ch->qp->qp_num);
2350 		srpt_close_ch(ch);
2351 		break;
2352 	case IB_CM_REP_ERROR:
2353 		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2354 			ch->qp->qp_num);
2355 		break;
2356 	case IB_CM_DREQ_ERROR:
2357 		pr_info("Received CM DREQ ERROR event.\n");
2358 		break;
2359 	case IB_CM_MRA_RECEIVED:
2360 		pr_info("Received CM MRA event\n");
2361 		break;
2362 	default:
2363 		pr_err("received unrecognized CM event %d\n", event->event);
2364 		break;
2365 	}
2366 
2367 	return ret;
2368 }
2369 
2370 /**
2371  * srpt_perform_rdmas() - Perform IB RDMA.
2372  *
2373  * Returns zero upon success or a negative number upon failure.
2374  */
2375 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2376 			      struct srpt_send_ioctx *ioctx)
2377 {
2378 	struct ib_send_wr *bad_wr;
2379 	int sq_wr_avail, ret, i;
2380 	enum dma_data_direction dir;
2381 	const int n_rdma = ioctx->n_rdma;
2382 
2383 	dir = ioctx->cmd.data_direction;
2384 	if (dir == DMA_TO_DEVICE) {
2385 		/* write */
2386 		ret = -ENOMEM;
2387 		sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2388 		if (sq_wr_avail < 0) {
2389 			pr_warn("IB send queue full (needed %d)\n",
2390 				n_rdma);
2391 			goto out;
2392 		}
2393 	}
2394 
2395 	for (i = 0; i < n_rdma; i++) {
2396 		struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
2397 
2398 		wr->opcode = (dir == DMA_FROM_DEVICE) ?
2399 				IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
2400 
2401 		if (i == n_rdma - 1) {
2402 			/* only get completion event for the last rdma read */
2403 			if (dir == DMA_TO_DEVICE) {
2404 				wr->send_flags = IB_SEND_SIGNALED;
2405 				ioctx->rdma_cqe.done = srpt_rdma_read_done;
2406 			} else {
2407 				ioctx->rdma_cqe.done = srpt_rdma_write_done;
2408 			}
2409 			wr->wr_cqe = &ioctx->rdma_cqe;
2410 			wr->next = NULL;
2411 		} else {
2412 			wr->wr_cqe = NULL;
2413 			wr->next = &ioctx->rdma_wrs[i + 1].wr;
2414 		}
2415 	}
2416 
2417 	ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
2418 	if (ret)
2419 		pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
2420 				 __func__, __LINE__, ret, i, n_rdma);
2421 out:
2422 	if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2423 		atomic_add(n_rdma, &ch->sq_wr_avail);
2424 	return ret;
2425 }
2426 
2427 /**
2428  * srpt_xfer_data() - Start RDMA data transfer between initiator and target.
2429  */
2430 static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2431 			  struct srpt_send_ioctx *ioctx)
2432 {
2433 	int ret;
2434 
2435 	ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2436 	if (ret) {
2437 		pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
2438 		goto out;
2439 	}
2440 
2441 	ret = srpt_perform_rdmas(ch, ioctx);
2442 	if (ret) {
2443 		if (ret == -EAGAIN || ret == -ENOMEM)
2444 			pr_info("%s[%d] queue full -- ret=%d\n",
2445 				__func__, __LINE__, ret);
2446 		else
2447 			pr_err("%s[%d] fatal error -- ret=%d\n",
2448 			       __func__, __LINE__, ret);
2449 		goto out_unmap;
2450 	}
2451 
2452 out:
2453 	return ret;
2454 out_unmap:
2455 	srpt_unmap_sg_to_ib_sge(ch, ioctx);
2456 	goto out;
2457 }
2458 
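/**
 * srpt_write_pending_status() - Report whether a command still needs data.
 *
 * Returns 1 if and only if the command is waiting for write data from the
 * initiator (SRPT_STATE_NEED_DATA).
 */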
2459 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2460 {
2461 	struct srpt_send_ioctx *ioctx;
2462 
2463 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2464 	return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2465 }
2466 
2467 /*
2468  * srpt_write_pending() - Start data transfer from initiator to target (write).
2469  */
2470 static int srpt_write_pending(struct se_cmd *se_cmd)
2471 {
2472 	struct srpt_send_ioctx *ioctx =
2473 		container_of(se_cmd, struct srpt_send_ioctx, cmd);
2474 	struct srpt_rdma_ch *ch = ioctx->ch;
2475 	enum srpt_command_state new_state;
2476 
2477 	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2478 	WARN_ON(new_state == SRPT_STATE_DONE);
2479 	return srpt_xfer_data(ch, ioctx);
2480 }
2481 
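/**
 * tcm_to_srp_tsk_mgmt_status() - Translate a TCM task management status into
 * the corresponding SRP task management response code.
 */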
2482 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2483 {
2484 	switch (tcm_mgmt_status) {
2485 	case TMR_FUNCTION_COMPLETE:
2486 		return SRP_TSK_MGMT_SUCCESS;
2487 	case TMR_FUNCTION_REJECTED:
2488 		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2489 	}
2490 	return SRP_TSK_MGMT_FAILED;
2491 }
2492 
2493 /**
2494  * srpt_queue_response() - Transmits the response to a SCSI command.
2495  *
2496  * Callback function called by the TCM core. Must not block since it can be
2497  * invoked in the context of the IB completion handler.
2498  */
2499 static void srpt_queue_response(struct se_cmd *cmd)
2500 {
2501 	struct srpt_rdma_ch *ch;
2502 	struct srpt_send_ioctx *ioctx;
2503 	enum srpt_command_state state;
2504 	unsigned long flags;
2505 	int ret;
2506 	enum dma_data_direction dir;
2507 	int resp_len;
2508 	u8 srp_tm_status;
2509 
2510 	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2511 	ch = ioctx->ch;
2512 	BUG_ON(!ch);
2513 
2514 	spin_lock_irqsave(&ioctx->spinlock, flags);
2515 	state = ioctx->state;
2516 	switch (state) {
2517 	case SRPT_STATE_NEW:
2518 	case SRPT_STATE_DATA_IN:
2519 		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2520 		break;
2521 	case SRPT_STATE_MGMT:
2522 		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2523 		break;
2524 	default:
2525 		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2526 			ch, ioctx->ioctx.index, ioctx->state);
2527 		break;
2528 	}
2529 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
2530 
2531 	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
2532 		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
2533 		atomic_inc(&ch->req_lim_delta);
2534 		srpt_abort_cmd(ioctx);
2535 		return;
2536 	}
2537 
2538 	dir = ioctx->cmd.data_direction;
2539 
2540 	/* For read commands, transfer the data to the initiator. */
2541 	if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
2542 	    !ioctx->queue_status_only) {
2543 		ret = srpt_xfer_data(ch, ioctx);
2544 		if (ret) {
2545 			pr_err("xfer_data failed for tag %llu\n",
2546 			       ioctx->cmd.tag);
2547 			return;
2548 		}
2549 	}
2550 
2551 	if (state != SRPT_STATE_MGMT)
2552 		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2553 					      cmd->scsi_status);
2554 	else {
2555 		srp_tm_status
2556 			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2557 		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2558 						 ioctx->cmd.tag);
2559 	}
2560 	ret = srpt_post_send(ch, ioctx, resp_len);
2561 	if (ret) {
2562 		pr_err("sending cmd response failed for tag %llu\n",
2563 		       ioctx->cmd.tag);
2564 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
2565 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2566 		target_put_sess_cmd(&ioctx->cmd);
2567 	}
2568 }
2569 
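/**
 * srpt_queue_data_in() - TCM callback for queueing read data.
 *
 * Transfers the read data to the initiator and sends the SCSI response.
 */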
2570 static int srpt_queue_data_in(struct se_cmd *cmd)
2571 {
2572 	srpt_queue_response(cmd);
2573 	return 0;
2574 }
2575 
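/**
 * srpt_queue_tm_rsp() - TCM callback that sends a task management response.
 */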
2576 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2577 {
2578 	srpt_queue_response(cmd);
2579 }
2580 
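/**
 * srpt_aborted_task() - TCM callback invoked for an aborted command.
 *
 * Unmaps the scatterlist that had been mapped onto IB SGEs for the command.
 */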
2581 static void srpt_aborted_task(struct se_cmd *cmd)
2582 {
2583 	struct srpt_send_ioctx *ioctx = container_of(cmd,
2584 				struct srpt_send_ioctx, cmd);
2585 
2586 	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
2587 }
2588 
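/**
 * srpt_queue_status() - TCM callback for queueing a SCSI status response.
 */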
2589 static int srpt_queue_status(struct se_cmd *cmd)
2590 {
2591 	struct srpt_send_ioctx *ioctx;
2592 
2593 	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2594 	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2595 	if (cmd->se_cmd_flags &
2596 	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2597 		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2598 	ioctx->queue_status_only = true;
2599 	srpt_queue_response(cmd);
2600 	return 0;
2601 }
2602 
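/**
 * srpt_refresh_port_work() - Work item wrapper around srpt_refresh_port().
 */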
2603 static void srpt_refresh_port_work(struct work_struct *work)
2604 {
2605 	struct srpt_port *sport = container_of(work, struct srpt_port, work);
2606 
2607 	srpt_refresh_port(sport);
2608 }
2609 
2610 /**
2611  * srpt_release_sdev() - Free the channel resources associated with a target.
2612  */
2613 static int srpt_release_sdev(struct srpt_device *sdev)
2614 {
2615 	int i, res;
2616 
2617 	WARN_ON_ONCE(irqs_disabled());
2618 
2619 	BUG_ON(!sdev);
2620 
2621 	mutex_lock(&sdev->mutex);
2622 	for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
2623 		sdev->port[i].enabled = false;
2624 	__srpt_close_all_ch(sdev);
2625 	mutex_unlock(&sdev->mutex);
2626 
2627 	res = wait_event_interruptible(sdev->ch_releaseQ,
2628 				       list_empty_careful(&sdev->rch_list));
2629 	if (res)
2630 		pr_err("%s: interrupted.\n", __func__);
2631 
2632 	return 0;
2633 }
2634 
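/**
 * __srpt_lookup_port() - Look up a target port by its port GUID string.
 *
 * The caller must hold srpt_dev_lock; srpt_lookup_port() below is the
 * locked wrapper.
 */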
2635 static struct srpt_port *__srpt_lookup_port(const char *name)
2636 {
2637 	struct ib_device *dev;
2638 	struct srpt_device *sdev;
2639 	struct srpt_port *sport;
2640 	int i;
2641 
2642 	list_for_each_entry(sdev, &srpt_dev_list, list) {
2643 		dev = sdev->device;
2644 		if (!dev)
2645 			continue;
2646 
2647 		for (i = 0; i < dev->phys_port_cnt; i++) {
2648 			sport = &sdev->port[i];
2649 
2650 			if (!strcmp(sport->port_guid, name))
2651 				return sport;
2652 		}
2653 	}
2654 
2655 	return NULL;
2656 }
2657 
2658 static struct srpt_port *srpt_lookup_port(const char *name)
2659 {
2660 	struct srpt_port *sport;
2661 
2662 	spin_lock(&srpt_dev_lock);
2663 	sport = __srpt_lookup_port(name);
2664 	spin_unlock(&srpt_dev_lock);
2665 
2666 	return sport;
2667 }
2668 
2669 /**
2670  * srpt_add_one() - InfiniBand device addition callback function.
2671  */
2672 static void srpt_add_one(struct ib_device *device)
2673 {
2674 	struct srpt_device *sdev;
2675 	struct srpt_port *sport;
2676 	struct ib_srq_init_attr srq_attr;
2677 	int i;
2678 
2679 	pr_debug("device = %p, device->dma_ops = %p\n", device,
2680 		 device->dma_ops);
2681 
2682 	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
2683 	if (!sdev)
2684 		goto err;
2685 
2686 	sdev->device = device;
2687 	INIT_LIST_HEAD(&sdev->rch_list);
2688 	init_waitqueue_head(&sdev->ch_releaseQ);
2689 	mutex_init(&sdev->mutex);
2690 
2691 	sdev->pd = ib_alloc_pd(device);
2692 	if (IS_ERR(sdev->pd))
2693 		goto free_dev;
2694 
2695 	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
2696 
2697 	srq_attr.event_handler = srpt_srq_event;
2698 	srq_attr.srq_context = (void *)sdev;
2699 	srq_attr.attr.max_wr = sdev->srq_size;
2700 	srq_attr.attr.max_sge = 1;
2701 	srq_attr.attr.srq_limit = 0;
2702 	srq_attr.srq_type = IB_SRQT_BASIC;
2703 
2704 	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
2705 	if (IS_ERR(sdev->srq))
2706 		goto err_pd;
2707 
2708 	pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
2709 		 __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
2710 		 device->name);
2711 
2712 	if (!srpt_service_guid)
2713 		srpt_service_guid = be64_to_cpu(device->node_guid);
2714 
2715 	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
2716 	if (IS_ERR(sdev->cm_id))
2717 		goto err_srq;
2718 
2719 	/* print out target login information */
2720 	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
2721 		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
2722 		 srpt_service_guid, srpt_service_guid);
2723 
2724 	/*
2725 	 * We do not have a consistent service_id (i.e. also id_ext of target_id)
2726 	 * to identify this target. We currently use the GUID of the first HCA
2727 	 * in the system as service_id; therefore, the target_id will change
2728 	 * if this HCA goes bad and is replaced by a different HCA.
2729 	 */
2730 	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
2731 		goto err_cm;
2732 
2733 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
2734 			      srpt_event_handler);
2735 	if (ib_register_event_handler(&sdev->event_handler))
2736 		goto err_cm;
2737 
2738 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
2739 		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
2740 				      sizeof(*sdev->ioctx_ring[0]),
2741 				      srp_max_req_size, DMA_FROM_DEVICE);
2742 	if (!sdev->ioctx_ring)
2743 		goto err_event;
2744 
2745 	for (i = 0; i < sdev->srq_size; ++i)
2746 		srpt_post_recv(sdev, sdev->ioctx_ring[i]);
2747 
2748 	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
2749 
2750 	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
2751 		sport = &sdev->port[i - 1];
2752 		sport->sdev = sdev;
2753 		sport->port = i;
2754 		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
2755 		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
2756 		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
2757 		INIT_WORK(&sport->work, srpt_refresh_port_work);
2758 
2759 		if (srpt_refresh_port(sport)) {
2760 			pr_err("MAD registration failed for %s-%d.\n",
2761 			       sdev->device->name, i);
2762 			goto err_ring;
2763 		}
2764 		snprintf(sport->port_guid, sizeof(sport->port_guid),
2765 			"0x%016llx%016llx",
2766 			be64_to_cpu(sport->gid.global.subnet_prefix),
2767 			be64_to_cpu(sport->gid.global.interface_id));
2768 	}
2769 
2770 	spin_lock(&srpt_dev_lock);
2771 	list_add_tail(&sdev->list, &srpt_dev_list);
2772 	spin_unlock(&srpt_dev_lock);
2773 
2774 out:
2775 	ib_set_client_data(device, &srpt_client, sdev);
2776 	pr_debug("added %s.\n", device->name);
2777 	return;
2778 
2779 err_ring:
2780 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2781 			     sdev->srq_size, srp_max_req_size,
2782 			     DMA_FROM_DEVICE);
2783 err_event:
2784 	ib_unregister_event_handler(&sdev->event_handler);
2785 err_cm:
2786 	ib_destroy_cm_id(sdev->cm_id);
2787 err_srq:
2788 	ib_destroy_srq(sdev->srq);
2789 err_pd:
2790 	ib_dealloc_pd(sdev->pd);
2791 free_dev:
2792 	kfree(sdev);
2793 err:
2794 	sdev = NULL;
2795 	pr_info("%s(%s) failed.\n", __func__, device->name);
2796 	goto out;
2797 }
2798 
2799 /**
2800  * srpt_remove_one() - InfiniBand device removal callback function.
2801  */
2802 static void srpt_remove_one(struct ib_device *device, void *client_data)
2803 {
2804 	struct srpt_device *sdev = client_data;
2805 	int i;
2806 
2807 	if (!sdev) {
2808 		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
2809 		return;
2810 	}
2811 
2812 	srpt_unregister_mad_agent(sdev);
2813 
2814 	ib_unregister_event_handler(&sdev->event_handler);
2815 
2816 	/* Cancel any work queued by the just unregistered IB event handler. */
2817 	for (i = 0; i < sdev->device->phys_port_cnt; i++)
2818 		cancel_work_sync(&sdev->port[i].work);
2819 
2820 	ib_destroy_cm_id(sdev->cm_id);
2821 
2822 	/*
2823 	 * Unregistering a target must happen after destroying sdev->cm_id
2824 	 * such that no new SRP_LOGIN_REQ information units can arrive while
2825 	 * destroying the target.
2826 	 */
2827 	spin_lock(&srpt_dev_lock);
2828 	list_del(&sdev->list);
2829 	spin_unlock(&srpt_dev_lock);
2830 	srpt_release_sdev(sdev);
2831 
2832 	ib_destroy_srq(sdev->srq);
2833 	ib_dealloc_pd(sdev->pd);
2834 
2835 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2836 			     sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
2837 	sdev->ioctx_ring = NULL;
2838 	kfree(sdev);
2839 }
2840 
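/* IB client; srpt_add_one() / srpt_remove_one() run once per RDMA device. */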
2841 static struct ib_client srpt_client = {
2842 	.name = DRV_NAME,
2843 	.add = srpt_add_one,
2844 	.remove = srpt_remove_one
2845 };
2846 
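/* TPG check callbacks that unconditionally return true (1) or false (0). */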
2847 static int srpt_check_true(struct se_portal_group *se_tpg)
2848 {
2849 	return 1;
2850 }
2851 
2852 static int srpt_check_false(struct se_portal_group *se_tpg)
2853 {
2854 	return 0;
2855 }
2856 
2857 static char *srpt_get_fabric_name(void)
2858 {
2859 	return "srpt";
2860 }
2861 
2862 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
2863 {
2864 	struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
2865 
2866 	return sport->port_guid;
2867 }
2868 
2869 static u16 srpt_get_tag(struct se_portal_group *tpg)
2870 {
2871 	return 1;
2872 }
2873 
2874 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
2875 {
2876 	return 1;
2877 }
2878 
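/**
 * srpt_release_cmd() - Release a SCSI command.
 *
 * Frees the indirect data buffer descriptors, if any, and returns the command
 * tag to the tag pool of the session.
 */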
2879 static void srpt_release_cmd(struct se_cmd *se_cmd)
2880 {
2881 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2882 				struct srpt_send_ioctx, cmd);
2883 	struct srpt_rdma_ch *ch = ioctx->ch;
2884 	struct se_session *se_sess = ch->sess;
2885 
2886 	WARN_ON(ioctx->state != SRPT_STATE_DONE);
2887 	WARN_ON(ioctx->mapped_sg_count != 0);
2888 
2889 	if (ioctx->n_rbuf > 1) {
2890 		kfree(ioctx->rbufs);
2891 		ioctx->rbufs = NULL;
2892 		ioctx->n_rbuf = 0;
2893 	}
2894 
2895 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
2896 }
2897 
2898 /**
2899  * srpt_close_session() - Forcibly close a session.
2900  *
2901  * Callback function invoked by the TCM core to clean up sessions associated
2902  * with a node ACL when the user invokes
2903  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
2904  */
2905 static void srpt_close_session(struct se_session *se_sess)
2906 {
2907 	DECLARE_COMPLETION_ONSTACK(release_done);
2908 	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
2909 	struct srpt_device *sdev = ch->sport->sdev;
2910 	bool wait;
2911 
2912 	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
2913 		 ch->state);
2914 
2915 	mutex_lock(&sdev->mutex);
2916 	BUG_ON(ch->release_done);
2917 	ch->release_done = &release_done;
2918 	wait = !list_empty(&ch->list);
2919 	srpt_disconnect_ch(ch);
2920 	mutex_unlock(&sdev->mutex);
2921 
2922 	if (!wait)
2923 		return;
2924 
2925 	while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
2926 		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
2927 			ch->sess_name, ch->qp->qp_num, ch->state);
2928 }
2929 
2930 /**
2931  * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
2932  *
2933  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
2934  * This object represents an arbitrary integer used to uniquely identify a
2935  * particular attached remote initiator port to a particular SCSI target port
2936  * within a particular SCSI target device within a particular SCSI instance.
2937  */
2938 static u32 srpt_sess_get_index(struct se_session *se_sess)
2939 {
2940 	return 0;
2941 }
2942 
2943 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
2944 {
2945 }
2946 
2947 /* Note: only used from inside debug printk's by the TCM core. */
2948 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
2949 {
2950 	struct srpt_send_ioctx *ioctx;
2951 
2952 	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2953 	return srpt_get_cmd_state(ioctx);
2954 }
2955 
2956 /**
2957  * srpt_parse_i_port_id() - Parse an initiator port ID.
2958  * @name: ASCII representation of a 128-bit initiator port ID.
2959  * @i_port_id: Binary 128-bit port ID.
2960  */
2961 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
2962 {
2963 	const char *p;
2964 	unsigned len, count, leading_zero_bytes;
2965 	int ret, rc;
2966 
2967 	p = name;
2968 	if (strncasecmp(p, "0x", 2) == 0)
2969 		p += 2;
2970 	ret = -EINVAL;
2971 	len = strlen(p);
2972 	if (len % 2)
2973 		goto out;
2974 	count = min(len / 2, 16U);
2975 	leading_zero_bytes = 16 - count;
2976 	memset(i_port_id, 0, leading_zero_bytes);
2977 	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
2978 	if (rc < 0)
2979 		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
2980 	ret = 0;
2981 out:
2982 	return ret;
2983 }
2984 
2985 /*
2986  * configfs callback function invoked for
2987  * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
2988  */
2989 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
2990 {
2991 	u8 i_port_id[16];
2992 
2993 	if (srpt_parse_i_port_id(i_port_id, name) < 0) {
2994 		pr_err("invalid initiator port ID %s\n", name);
2995 		return -EINVAL;
2996 	}
2997 	return 0;
2998 }
2999 
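/*
 * configfs show/store handlers for the srp_max_rdma_size, srp_max_rsp_size
 * and srp_sq_size target port attributes declared below via CONFIGFS_ATTR().
 */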
3000 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3001 		char *page)
3002 {
3003 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3004 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3005 
3006 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3007 }
3008 
3009 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3010 		const char *page, size_t count)
3011 {
3012 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3013 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3014 	unsigned long val;
3015 	int ret;
3016 
3017 	ret = kstrtoul(page, 0, &val);
3018 	if (ret < 0) {
3019 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3020 		return -EINVAL;
3021 	}
3022 	if (val > MAX_SRPT_RDMA_SIZE) {
3023 		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3024 			MAX_SRPT_RDMA_SIZE);
3025 		return -EINVAL;
3026 	}
3027 	if (val < DEFAULT_MAX_RDMA_SIZE) {
3028 		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3029 			val, DEFAULT_MAX_RDMA_SIZE);
3030 		return -EINVAL;
3031 	}
3032 	sport->port_attrib.srp_max_rdma_size = val;
3033 
3034 	return count;
3035 }
3036 
3037 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3038 		char *page)
3039 {
3040 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3041 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3042 
3043 	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3044 }
3045 
3046 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3047 		const char *page, size_t count)
3048 {
3049 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3050 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3051 	unsigned long val;
3052 	int ret;
3053 
3054 	ret = kstrtoul(page, 0, &val);
3055 	if (ret < 0) {
3056 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3057 		return -EINVAL;
3058 	}
3059 	if (val > MAX_SRPT_RSP_SIZE) {
3060 		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3061 			MAX_SRPT_RSP_SIZE);
3062 		return -EINVAL;
3063 	}
3064 	if (val < MIN_MAX_RSP_SIZE) {
3065 		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3066 			MIN_MAX_RSP_SIZE);
3067 		return -EINVAL;
3068 	}
3069 	sport->port_attrib.srp_max_rsp_size = val;
3070 
3071 	return count;
3072 }
3073 
3074 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3075 		char *page)
3076 {
3077 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3078 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3079 
3080 	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3081 }
3082 
3083 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3084 		const char *page, size_t count)
3085 {
3086 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
3087 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3088 	unsigned long val;
3089 	int ret;
3090 
3091 	ret = kstrtoul(page, 0, &val);
3092 	if (ret < 0) {
3093 		pr_err("kstrtoul() failed with ret: %d\n", ret);
3094 		return -EINVAL;
3095 	}
3096 	if (val > MAX_SRPT_SRQ_SIZE) {
3097 		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3098 			MAX_SRPT_SRQ_SIZE);
3099 		return -EINVAL;
3100 	}
3101 	if (val < MIN_SRPT_SRQ_SIZE) {
3102 		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3103 			MIN_SRPT_SRQ_SIZE);
3104 		return -EINVAL;
3105 	}
3106 	sport->port_attrib.srp_sq_size = val;
3107 
3108 	return count;
3109 }
3110 
3111 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rdma_size);
3112 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_max_rsp_size);
3113 CONFIGFS_ATTR(srpt_tpg_attrib_,  srp_sq_size);
3114 
3115 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3116 	&srpt_tpg_attrib_attr_srp_max_rdma_size,
3117 	&srpt_tpg_attrib_attr_srp_max_rsp_size,
3118 	&srpt_tpg_attrib_attr_srp_sq_size,
3119 	NULL,
3120 };
3121 
3122 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
3123 {
3124 	struct se_portal_group *se_tpg = to_tpg(item);
3125 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3126 
3127 	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
3128 }
3129 
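/**
 * srpt_tpg_enable_store() - Enable or disable a target port.
 *
 * Disabling a port disconnects and closes all channels associated with it.
 */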
3130 static ssize_t srpt_tpg_enable_store(struct config_item *item,
3131 		const char *page, size_t count)
3132 {
3133 	struct se_portal_group *se_tpg = to_tpg(item);
3134 	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3135 	struct srpt_device *sdev = sport->sdev;
3136 	struct srpt_rdma_ch *ch;
3137 	unsigned long tmp;
3138 	int ret;
3139 
3140 	ret = kstrtoul(page, 0, &tmp);
3141 	if (ret < 0) {
3142 		pr_err("Unable to extract srpt_tpg_store_enable\n");
3143 		return -EINVAL;
3144 	}
3145 
3146 	if ((tmp != 0) && (tmp != 1)) {
3147 		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3148 		return -EINVAL;
3149 	}
3150 	if (sport->enabled == tmp)
3151 		goto out;
3152 	sport->enabled = tmp;
3153 	if (sport->enabled)
3154 		goto out;
3155 
3156 	mutex_lock(&sdev->mutex);
3157 	list_for_each_entry(ch, &sdev->rch_list, list) {
3158 		if (ch->sport == sport) {
3159 			pr_debug("%s: ch %p %s-%d\n", __func__, ch,
3160 				 ch->sess_name, ch->qp->qp_num);
3161 			srpt_disconnect_ch(ch);
3162 			srpt_close_ch(ch);
3163 		}
3164 	}
3165 	mutex_unlock(&sdev->mutex);
3166 
3167 out:
3168 	return count;
3169 }
3170 
3171 CONFIGFS_ATTR(srpt_tpg_, enable);
3172 
3173 static struct configfs_attribute *srpt_tpg_attrs[] = {
3174 	&srpt_tpg_attr_enable,
3175 	NULL,
3176 };
3177 
3178 /**
3179  * configfs callback invoked for
3180  * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3181  */
3182 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3183 					     struct config_group *group,
3184 					     const char *name)
3185 {
3186 	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3187 	int res;
3188 
3189 	/* Initialize sport->port_wwn and sport->port_tpg_1 */
3190 	res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
3191 	if (res)
3192 		return ERR_PTR(res);
3193 
3194 	return &sport->port_tpg_1;
3195 }
3196 
3197 /**
3198  * configfs callback invoked for
3199  * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3200  */
3201 static void srpt_drop_tpg(struct se_portal_group *tpg)
3202 {
3203 	struct srpt_port *sport = container_of(tpg,
3204 				struct srpt_port, port_tpg_1);
3205 
3206 	sport->enabled = false;
3207 	core_tpg_deregister(&sport->port_tpg_1);
3208 }
3209 
3210 /**
3211  * configfs callback invoked for
3212  * mkdir /sys/kernel/config/target/$driver/$port
3213  */
3214 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3215 				      struct config_group *group,
3216 				      const char *name)
3217 {
3218 	struct srpt_port *sport;
3219 	int ret;
3220 
3221 	sport = srpt_lookup_port(name);
3222 	pr_debug("make_tport(%s)\n", name);
3223 	ret = -EINVAL;
3224 	if (!sport)
3225 		goto err;
3226 
3227 	return &sport->port_wwn;
3228 
3229 err:
3230 	return ERR_PTR(ret);
3231 }
3232 
3233 /**
3234  * configfs callback invoked for
3235  * rmdir /sys/kernel/config/target/$driver/$port
3236  */
3237 static void srpt_drop_tport(struct se_wwn *wwn)
3238 {
3239 	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3240 
3241 	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3242 }
3243 
3244 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3245 {
3246 	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3247 }
3248 
3249 CONFIGFS_ATTR_RO(srpt_wwn_, version);
3250 
3251 static struct configfs_attribute *srpt_wwn_attrs[] = {
3252 	&srpt_wwn_attr_version,
3253 	NULL,
3254 };
3255 
3256 static const struct target_core_fabric_ops srpt_template = {
3257 	.module				= THIS_MODULE,
3258 	.name				= "srpt",
3259 	.get_fabric_name		= srpt_get_fabric_name,
3260 	.tpg_get_wwn			= srpt_get_fabric_wwn,
3261 	.tpg_get_tag			= srpt_get_tag,
3262 	.tpg_check_demo_mode		= srpt_check_false,
3263 	.tpg_check_demo_mode_cache	= srpt_check_true,
3264 	.tpg_check_demo_mode_write_protect = srpt_check_true,
3265 	.tpg_check_prod_mode_write_protect = srpt_check_false,
3266 	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
3267 	.release_cmd			= srpt_release_cmd,
3268 	.check_stop_free		= srpt_check_stop_free,
3269 	.shutdown_session		= srpt_shutdown_session,
3270 	.close_session			= srpt_close_session,
3271 	.sess_get_index			= srpt_sess_get_index,
3272 	.sess_get_initiator_sid		= NULL,
3273 	.write_pending			= srpt_write_pending,
3274 	.write_pending_status		= srpt_write_pending_status,
3275 	.set_default_node_attributes	= srpt_set_default_node_attrs,
3276 	.get_cmd_state			= srpt_get_tcm_cmd_state,
3277 	.queue_data_in			= srpt_queue_data_in,
3278 	.queue_status			= srpt_queue_status,
3279 	.queue_tm_rsp			= srpt_queue_tm_rsp,
3280 	.aborted_task			= srpt_aborted_task,
3281 	/*
3282 	 * Setup function pointers for generic logic in
3283 	 * target_core_fabric_configfs.c
3284 	 */
3285 	.fabric_make_wwn		= srpt_make_tport,
3286 	.fabric_drop_wwn		= srpt_drop_tport,
3287 	.fabric_make_tpg		= srpt_make_tpg,
3288 	.fabric_drop_tpg		= srpt_drop_tpg,
3289 	.fabric_init_nodeacl		= srpt_init_nodeacl,
3290 
3291 	.tfc_wwn_attrs			= srpt_wwn_attrs,
3292 	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
3293 	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
3294 };
3295 
3296 /**
3297  * srpt_init_module() - Kernel module initialization.
3298  *
3299  * Note: Since ib_register_client() registers callback functions, and since at
3300  * least one of these callback functions (srpt_add_one()) calls target core
3301  * functions, this driver must be registered with the target core before
3302  * ib_register_client() is called.
3303  */
3304 static int __init srpt_init_module(void)
3305 {
3306 	int ret;
3307 
3308 	ret = -EINVAL;
3309 	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3310 		pr_err("invalid value %d for kernel module parameter"
3311 		       " srp_max_req_size -- must be at least %d.\n",
3312 		       srp_max_req_size, MIN_MAX_REQ_SIZE);
3313 		goto out;
3314 	}
3315 
3316 	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3317 	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3318 		pr_err("invalid value %d for kernel module parameter"
3319 		       " srpt_srq_size -- must be in the range [%d..%d].\n",
3320 		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3321 		goto out;
3322 	}
3323 
3324 	ret = target_register_template(&srpt_template);
3325 	if (ret)
3326 		goto out;
3327 
3328 	ret = ib_register_client(&srpt_client);
3329 	if (ret) {
3330 		pr_err("couldn't register IB client\n");
3331 		goto out_unregister_target;
3332 	}
3333 
3334 	return 0;
3335 
3336 out_unregister_target:
3337 	target_unregister_template(&srpt_template);
3338 out:
3339 	return ret;
3340 }
3341 
3342 static void __exit srpt_cleanup_module(void)
3343 {
3344 	ib_unregister_client(&srpt_client);
3345 	target_unregister_template(&srpt_template);
3346 }
3347 
3348 module_init(srpt_init_module);
3349 module_exit(srpt_cleanup_module);
3350