// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains iSCSI Extensions for RDMA (iSER) verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static int isert_sg_tablesize_set(const char *val,
                                  const struct kernel_param *kp);
static const struct kernel_param_ops sg_tablesize_ops = {
        .set = isert_sg_tablesize_set,
        .get = param_get_int,
};

static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
41 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_login_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);

static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
{
        int n = 0, ret;

        ret = kstrtoint(val, 10, &n);
        if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
            n > ISCSI_ISER_MAX_SG_TABLESIZE)
                return -EINVAL;

        return param_set_int(val, kp);
}

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
        return (conn->pi_support &&
                cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = context;

        isert_err("%s (%d): conn %p\n",
                  ib_event_msg(e->event), e->event, isert_conn);

        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                break;
        }
}

static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
                struct rdma_cm_id *cma_id)
{
        u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
        struct isert_device *device = isert_conn->device;
        struct ib_device *ib_dev = device->ib_device;
        struct ib_qp_init_attr attr;
        int ret, factor;

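        /*
         * A single CQ drawn from the shared pool services both send and
         * receive completions for this QP; cq_size above covers the maximum
         * outstanding send and receive DTOs (the extra two entries are
         * presumably headroom for the login descriptors).
         */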
        isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
        if (IS_ERR(isert_conn->cq)) {
                isert_err("Unable to allocate cq\n");
                ret = PTR_ERR(isert_conn->cq);
                return ERR_PTR(ret);
        }
        isert_conn->cq_size = cq_size;

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = isert_conn->cq;
        attr.recv_cq = isert_conn->cq;
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
        factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
                                   isert_sg_tablesize);
        attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
        attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        if (device->pi_capable)
                attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

        ret = rdma_create_qp(cma_id, device->pd, &attr);
        if (ret) {
                isert_err("rdma_create_qp failed for cma_id %d\n", ret);
                ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);

                return ERR_PTR(ret);
        }

        return cma_id->qp;
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct isert_device *device = isert_conn->device;
        struct ib_device *ib_dev = device->ib_device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->rx_descs = kzalloc_objs(struct iser_rx_desc,
                                            ISERT_QP_MAX_RECV_DTOS);
        if (!isert_conn->rx_descs)
                return -ENOMEM;

        rx_desc = isert_conn->rx_descs;

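        /*
         * DMA-map each fixed-size RX descriptor and point its single SGE
         * at the header offset inside the mapped buffer, where the HCA
         * deposits the iSER/iSCSI headers followed by any payload.
         */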
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
                                             ISER_RX_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->pd->local_dma_lkey;
                rx_desc->rx_cqe.done = isert_recv_done;
        }

        return 0;

dma_map_fail:
        rx_desc = isert_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->rx_descs);
        isert_conn->rx_descs = NULL;
        isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->device->ib_device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->rx_descs)
                return;

        rx_desc = isert_conn->rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->rx_descs);
        isert_conn->rx_descs = NULL;
}

static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        int ret;

        isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
                  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
        isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

        device->pd = ib_alloc_pd(ib_dev, 0);
        if (IS_ERR(device->pd)) {
                ret = PTR_ERR(device->pd);
                isert_err("failed to allocate pd, device %p, ret=%d\n",
                          device, ret);
                return ret;
        }

        /* Check signature cap */
        if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
                device->pi_capable = true;
        else
                device->pi_capable = false;

        return 0;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        isert_info("device %p\n", device);

        ib_dealloc_pd(device->pd);
}

static void
isert_device_put(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        isert_info("device %p refcount %d\n", device, device->refcount);
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

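        /* Reuse an existing isert_device for the same HCA, matched by node GUID. */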
        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        isert_info("Found iser device %p refcount %d\n",
                                   device, device->refcount);
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc_obj(struct isert_device);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        isert_info("Created a new iser device %p refcount %d\n",
                   device, device->refcount);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->node);
        init_completion(&isert_conn->login_comp);
        init_completion(&isert_conn->login_req_comp);
        init_waitqueue_head(&isert_conn->rem_wait);
        kref_init(&isert_conn->kref);
        mutex_init(&isert_conn->mutex);
        INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->device->ib_device;

        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
        kfree(isert_conn->login_rsp_buf);

        ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
                            ISER_RX_SIZE, DMA_FROM_DEVICE);
        kfree(isert_conn->login_desc);
}

static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
                      struct ib_device *ib_dev)
{
        int ret;

        isert_conn->login_desc = kzalloc_obj(*isert_conn->login_desc);
        if (!isert_conn->login_desc)
                return -ENOMEM;

        isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
                        isert_conn->login_desc->buf,
                        ISER_RX_SIZE, DMA_FROM_DEVICE);
        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
        if (ret) {
                isert_err("login_desc dma mapping error: %d\n", ret);
                isert_conn->login_desc->dma_addr = 0;
                goto out_free_login_desc;
        }

        isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
        if (!isert_conn->login_rsp_buf) {
                ret = -ENOMEM;
                goto out_unmap_login_desc;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                        isert_conn->login_rsp_buf,
                        ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                isert_err("login_rsp_dma mapping error: %d\n", ret);
                isert_conn->login_rsp_dma = 0;
                goto out_free_login_rsp_buf;
        }

        return 0;

out_free_login_rsp_buf:
        kfree(isert_conn->login_rsp_buf);
out_unmap_login_desc:
        ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
                            ISER_RX_SIZE, DMA_FROM_DEVICE);
out_free_login_desc:
        kfree(isert_conn->login_desc);
        return ret;
}

static void
isert_set_nego_params(struct isert_conn *isert_conn,
                      struct rdma_conn_param *param)
{
        struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

        /* Set max inflight RDMA READ requests */
        isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
                                            attr->max_qp_init_rd_atom);
        isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

        if (param->private_data) {
                u8 flags = *(u8 *)param->private_data;

                /*
                 * Use remote invalidation only if both the initiator
                 * and the HCA support it.
                 */
                isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
                                        (attr->device_cap_flags &
                                         IB_DEVICE_MEM_MGT_EXTENSIONS);
                if (isert_conn->snd_w_inv)
                        isert_info("Using remote invalidation\n");
        }
}

static void
isert_destroy_qp(struct isert_conn *isert_conn)
{
        ib_destroy_qp(isert_conn->qp);
        ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct isert_np *isert_np = cma_id->context;
        struct iscsi_np *np = isert_np->np;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        int ret = 0;

        spin_lock_bh(&np->np_thread_lock);
        if (!np->enabled) {
                spin_unlock_bh(&np->np_thread_lock);
                isert_dbg("iscsi_np is not enabled, reject connect request\n");
                return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
        }
        spin_unlock_bh(&np->np_thread_lock);

        isert_dbg("cma_id: %p, portal: %p\n",
                  cma_id, cma_id->context);

        isert_conn = kzalloc_obj(struct isert_conn);
        if (!isert_conn)
                return -ENOMEM;

        isert_init_conn(isert_conn);
        isert_conn->cm_id = cma_id;

        device = isert_device_get(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out;
        }
        isert_conn->device = device;

        ret = isert_alloc_login_buf(isert_conn, cma_id->device);
        if (ret)
                goto out_conn_dev;

        isert_set_nego_params(isert_conn, &event->param.conn);

        isert_conn->qp = isert_create_qp(isert_conn, cma_id);
        if (IS_ERR(isert_conn->qp)) {
                ret = PTR_ERR(isert_conn->qp);
                goto out_rsp_dma_map;
        }

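        /*
         * Post the login receive before accepting, so the initiator's
         * first login request can never arrive while no receive buffer
         * is posted.
         */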
        ret = isert_login_post_recv(isert_conn);
        if (ret)
                goto out_destroy_qp;

        ret = isert_rdma_accept(isert_conn);
        if (ret)
                goto out_destroy_qp;

        mutex_lock(&isert_np->mutex);
        list_add_tail(&isert_conn->node, &isert_np->accepted);
        mutex_unlock(&isert_np->mutex);

        return 0;

out_destroy_qp:
        isert_destroy_qp(isert_conn);
out_rsp_dma_map:
        isert_free_login_buf(isert_conn);
out_conn_dev:
        isert_device_put(device);
out:
        kfree(isert_conn);
        rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct isert_device *device = isert_conn->device;

        isert_dbg("conn %p\n", isert_conn);

        BUG_ON(!device);

        isert_free_rx_descriptors(isert_conn);
        if (isert_conn->cm_id &&
            !isert_conn->dev_removed)
                rdma_destroy_id(isert_conn->cm_id);

        if (isert_conn->qp)
                isert_destroy_qp(isert_conn);

        if (isert_conn->login_desc)
                isert_free_login_buf(isert_conn);

        isert_device_put(device);

        if (isert_conn->dev_removed)
                wake_up_interruptible(&isert_conn->rem_wait);
        else
                kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = cma_id->qp->qp_context;
        struct isert_np *isert_np = cma_id->context;

        isert_info("conn %p\n", isert_conn);

        mutex_lock(&isert_conn->mutex);
        isert_conn->state = ISER_CONN_UP;
        kref_get(&isert_conn->kref);
        mutex_unlock(&isert_conn->mutex);

        mutex_lock(&isert_np->mutex);
        list_move_tail(&isert_conn->node, &isert_np->pending);
        mutex_unlock(&isert_np->mutex);

        isert_info("np %p: Allow accept_np to continue\n", isert_np);
        up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, kref);

        isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
                   current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->kref, isert_release_kref);
}

static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
        struct isert_np *isert_np = isert_conn->cm_id->context;

        mutex_lock(&isert_np->mutex);
        if (!list_empty(&isert_conn->node)) {
                /*
                 * iscsit does not know about this connection yet,
                 * so schedule the cleanup ourselves.
                 */
                list_del_init(&isert_conn->node);
                isert_put_conn(isert_conn);
                queue_work(isert_release_wq, &isert_conn->release_work);
        }
        mutex_unlock(&isert_np->mutex);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with the connection mutex held.
 * Thus it is safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
        int err;

        if (isert_conn->state >= ISER_CONN_TERMINATING)
                return;

        isert_info("Terminating conn %p state %d\n",
                   isert_conn, isert_conn->state);
        isert_conn->state = ISER_CONN_TERMINATING;
        err = rdma_disconnect(isert_conn->cm_id);
        if (err)
                isert_warn("Failed rdma_disconnect isert_conn %p\n",
                           isert_conn);
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
                     enum rdma_cm_event_type event)
{
        isert_dbg("%s (%d): isert np %p\n",
                  rdma_event_msg(event), event, isert_np);

        switch (event) {
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                isert_np->cm_id = NULL;
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
                isert_np->cm_id = isert_setup_id(isert_np);
                if (IS_ERR(isert_np->cm_id)) {
                        isert_err("isert np %p setup id failed: %ld\n",
                                  isert_np, PTR_ERR(isert_np->cm_id));
                        isert_np->cm_id = NULL;
                }
                break;
        default:
                isert_err("isert np %p Unexpected event %d\n",
                          isert_np, event);
        }

        return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
                           enum rdma_cm_event_type event)
{
        struct isert_conn *isert_conn = cma_id->qp->qp_context;

        mutex_lock(&isert_conn->mutex);
        switch (isert_conn->state) {
        case ISER_CONN_TERMINATING:
                break;
        case ISER_CONN_UP:
                isert_conn_terminate(isert_conn);
                ib_drain_qp(isert_conn->qp);
                isert_handle_unbound_conn(isert_conn);
                break;
        case ISER_CONN_BOUND:
        case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
                iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                break;
        default:
                isert_warn("conn %p terminating in state %d\n",
                           isert_conn, isert_conn->state);
        }
        mutex_unlock(&isert_conn->mutex);

        return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = cma_id->qp->qp_context;
        struct isert_np *isert_np = cma_id->context;

        ib_drain_qp(isert_conn->qp);

        mutex_lock(&isert_np->mutex);
        list_del_init(&isert_conn->node);
        mutex_unlock(&isert_np->mutex);
        isert_conn->cm_id = NULL;
        isert_put_conn(isert_conn);

        return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;
        int ret = 0;

        isert_info("%s (%d): status %d id %p np %p\n",
                   rdma_event_msg(event->event), event->event,
                   event->status, cma_id, cma_id->context);

        if (isert_np->cm_id == cma_id)
                return isert_np_cma_handler(cma_id->context, event->event);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                ret = isert_connect_request(cma_id, event);
                if (ret)
                        isert_err("failed handle connect request %d\n", ret);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
                ret = isert_disconnected_handler(cma_id, event->event);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                isert_conn = cma_id->qp->qp_context;
                isert_conn->dev_removed = true;
                isert_disconnected_handler(cma_id, event->event);
                wait_event_interruptible(isert_conn->rem_wait,
                                         isert_conn->state == ISER_CONN_DOWN);
                kfree(isert_conn);
                /*
                 * return non-zero from the callback to destroy
                 * the rdma cm id
                 */
                return 1;
        case RDMA_CM_EVENT_REJECTED:
                isert_info("Connection rejected: %s\n",
                           rdma_reject_msg(cma_id, event->status));
                fallthrough;
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ret = isert_connect_error(cma_id);
                break;
        default:
                isert_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
        }

        return ret;
}

static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr;
        int i, ret;
        struct iser_rx_desc *rx_desc;

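        /* Chain all @count receive WRs and post them with a single verbs call. */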
        for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc = &isert_conn->rx_descs[i];

                rx_wr->wr_cqe = &rx_desc->rx_cqe;
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
                rx_desc->in_use = false;
        }
        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
        if (ret)
                isert_err("ib_post_recv() failed with ret: %d\n", ret);

        return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
        struct ib_recv_wr rx_wr;
        int ret;

        if (!rx_desc->in_use) {
                /*
                 * If the descriptor is not in use, it has already been
                 * reposted for receive, so silently return.
                 */
                return 0;
        }

        rx_desc->in_use = false;
        rx_wr.wr_cqe = &rx_desc->rx_cqe;
        rx_wr.sg_list = &rx_desc->rx_sg;
        rx_wr.num_sge = 1;
        rx_wr.next = NULL;

        ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
        if (ret)
                isert_err("ib_post_recv() failed with ret: %d\n", ret);

        return ret;
}

static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->cm_id->device;
        struct ib_send_wr send_wr;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        tx_desc->tx_cqe.done = isert_login_send_done;

        send_wr.next = NULL;
        send_wr.wr_cqe = &tx_desc->tx_cqe;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
        if (ret)
                isert_err("ib_post_send() failed, ret: %d\n", ret);

        return ret;
}

static void
__isert_create_send_desc(struct isert_device *device,
                         struct iser_tx_desc *tx_desc)
{

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
        tx_desc->iser_header.flags = ISCSI_CTRL;

        tx_desc->num_sge = 1;

        if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
                tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
                isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct isert_device *device = isert_conn->device;
        struct ib_device *ib_dev = device->ib_device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        __isert_create_send_desc(device, tx_desc);
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct isert_device *device = isert_conn->device;
        struct ib_device *ib_dev = device->ib_device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                isert_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

        isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
                  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
                  tx_desc->tx_sg[0].lkey);

        return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        tx_desc->tx_cqe.done = isert_send_done;
        send_wr->wr_cqe = &tx_desc->tx_cqe;

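        /*
         * If remote invalidation was negotiated, piggy-back invalidation
         * of the initiator's rkey on the SEND instead of leaving it to a
         * separate local invalidate on the initiator side.
         */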
        if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
                send_wr->opcode = IB_WR_SEND_WITH_INV;
                send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
        } else {
                send_wr->opcode = IB_WR_SEND;
        }

        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_desc->dma_addr +
                isert_get_hdr_offset(isert_conn->login_desc);
        sge.length = ISER_RX_PAYLOAD_SIZE;
        sge.lkey = isert_conn->device->pd->local_dma_lkey;

        isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
                  sge.addr, sge.length, sge.lkey);

        isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
        if (ret)
                isert_err("ib_post_recv() failed: %d\n", ret);

        return ret;
}

static int
isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct isert_device *device = isert_conn->device;
        struct ib_device *ib_dev = device->ib_device;
        struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
        int ret;

        __isert_create_send_desc(device, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

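        /*
         * A non-empty login response payload rides in a second SGE
         * pointing at the separately mapped login_rsp_buf.
         */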
        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr = isert_conn->login_rsp_dma;
                tx_dsg->length = length;
                tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recvm(isert_conn,
                                               ISERT_QP_MAX_RECV_DTOS);
                        if (ret)
                                return ret;

                        /* Now we are in FULL_FEATURE phase */
                        mutex_lock(&isert_conn->mutex);
                        isert_conn->state = ISER_CONN_FULL_FEATURE;
                        mutex_unlock(&isert_conn->mutex);
                        goto post_send;
                }

                ret = isert_login_post_recv(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_login_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
        struct iser_rx_desc *rx_desc = isert_conn->login_desc;
        int rx_buflen = isert_conn->login_req_len;
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        isert_info("conn %p\n", isert_conn);

        WARN_ON_ONCE(!login);

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
                                login_req->flags);
                login->version_min = login_req->min_version;
                login->version_max = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid = be16_to_cpu(login_req->cid);
                login->tsih = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        isert_dbg("Using login payload size: %d, rx_buflen: %d "
                  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
                  MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, isert_get_data(rx_desc), size);

        if (login->first_request) {
                complete(&isert_conn->login_comp);
                return;
        }
        queue_delayed_work(isert_login_wq, &conn->login_work, 0);
}

static struct iscsit_cmd *
isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
{
        struct isert_conn *isert_conn = conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsit_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
                isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsit_cmd = cmd;
        isert_cmd->rx_desc = rx_desc;

        return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;
        unsigned int data_len;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;
        data_len = cmd->se_cmd.data_length;

        if (imm_data && imm_data_len == data_len)
                cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

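        /*
         * If the immediate data is only part of the payload, copy it into
         * the pre-allocated SGL; if it covers the whole payload, hand the
         * RX buffer to the command directly as a single-entry SGL (the
         * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC case flagged above).
         */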
        if (imm_data_len != data_len) {
                sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
                sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
                                    isert_get_data(rx_desc), imm_data_len);
                isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
                          sg_nents, imm_data_len);
        } else {
                sg_init_table(&isert_cmd->sg, 1);
                cmd->se_cmd.t_data_sg = &isert_cmd->sg;
                cmd->se_cmd.t_data_nents = 1;
                sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
                           imm_data_len);
                isert_dbg("Transfer Immediate imm_data_len: %d\n",
                          imm_data_len);
        }

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && !dump_payload && unsol_data)
                iscsit_set_unsolicited_dataout(cmd);
        else if (dump_payload && imm_data)
                target_put_sess_cmd(&cmd->se_cmd);

        return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsit_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                isert_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
                  "write_data_done: %u, data_length: %u\n",
                  unsol_data_len, cmd->write_data_done,
                  cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                isert_err("unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
                  "sg_nents: %u from %p %u\n", sg_start, sg_off,
                  sg_nents, isert_get_data(rx_desc), unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        /*
         * Multiple Data-Out PDUs for the same command can arrive,
         * so repost the receive buffer beforehand.
         */
        return isert_post_recv(isert_conn, rx_desc);
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;

        rc = iscsit_setup_nop_out(conn, cmd, hdr);
        if (rc < 0)
                return rc;
        /*
         * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
         */

        return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
        struct iscsit_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in = NULL;

        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;

        if (payload_length) {
                text_in = kzalloc(payload_length, GFP_KERNEL);
                if (!text_in)
                        return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;

        memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);

        return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsit_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        if (conn->sess->sess_ops->SessionType &&
            (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
                isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
                          " ignoring\n", opcode);
                return 0;
        }

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = isert_allocate_cmd(conn, rx_desc);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;
                isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = isert_allocate_cmd(conn, rx_desc);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                           rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                 (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = isert_allocate_cmd(conn, rx_desc);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                 (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = isert_allocate_cmd(conn, rx_desc);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_TEXT:
                if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
                        cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
                else
                        cmd = isert_allocate_cmd(conn, rx_desc);

                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
                isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                isert_err("%s failure: %s (%d) vend_err %x\n", type,
                          ib_wc_status_msg(wc->status), wc->status,
                          wc->vendor_err);
        else
                isert_dbg("%s failure: %s (%d)\n", type,
                          ib_wc_status_msg(wc->status), wc->status);
}

static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct ib_device *ib_dev = isert_conn->cm_id->device;
        struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
        struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
        struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "recv");
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                return;
        }

        rx_desc->in_use = true;

        ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
                                   ISER_RX_SIZE, DMA_FROM_DEVICE);

        isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                  rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
                  (int)(wc->byte_len - ISER_HEADERS_LEN));

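        /*
         * Decode the iSER header: the RSV/WSV bits advertise the remote
         * stags and virtual addresses to use for RDMA READ/WRITE of this
         * command's data.
         */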
        switch (iser_ctrl->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_ctrl->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_ctrl->read_stag);
                        read_va = be64_to_cpu(iser_ctrl->read_va);
                        isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
                                  read_stag, (unsigned long long)read_va);
                }
                if (iser_ctrl->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_ctrl->write_stag);
                        write_va = be64_to_cpu(iser_ctrl->write_va);
                        isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
                                  write_stag, (unsigned long long)write_va);
                }

                isert_dbg("ISER ISCSI_CTRL PDU\n");
                break;
        case ISER_HELLO:
                isert_err("iSER Hello message\n");
                break;
        default:
                isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
                break;
        }

        isert_rx_opcode(isert_conn, rx_desc,
                        read_stag, read_va, write_stag, write_va);

        ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
                                      ISER_RX_SIZE, DMA_FROM_DEVICE);
}

static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct ib_device *ib_dev = isert_conn->device->ib_device;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "login recv");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
                                   ISER_RX_SIZE, DMA_FROM_DEVICE);

        isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

        if (isert_conn->conn) {
                struct iscsi_login *login = isert_conn->conn->conn_login;

                if (login && !login->first_request)
                        isert_rx_login_req(isert_conn);
        }

        mutex_lock(&isert_conn->mutex);
        complete(&isert_conn->login_req_comp);
        mutex_unlock(&isert_conn->mutex);

        ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
                                      ISER_RX_SIZE, DMA_FROM_DEVICE);
}

static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
        struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
        enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

        if (!cmd->rw.nr_ops)
                return;

        if (isert_prot_cmd(conn, se_cmd)) {
                rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
                                conn->cm_id->port_num, se_cmd->t_data_sg,
                                se_cmd->t_data_nents, se_cmd->t_prot_sg,
                                se_cmd->t_prot_nents, dir);
        } else {
                rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
                                se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
        }

        cmd->rw.nr_ops = 0;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
        struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_text_rsp *hdr;

        isert_dbg("Cmd %p\n", isert_cmd);

        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                if (cmd->data_direction == DMA_TO_DEVICE) {
                        iscsit_stop_dataout_timer(cmd);
                        /*
                         * Check for special case during comp_err where
                         * WRITE_PENDING has been handed off from core,
                         * but requires an extra target_put_sess_cmd()
                         * before transport_generic_free_cmd() below.
                         */
                        if (comp_err &&
                            cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
                                struct se_cmd *se_cmd = &cmd->se_cmd;

                                target_put_sess_cmd(se_cmd);
                        }
                }

                isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
        case ISCSI_OP_TEXT:
                hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
                /* If the continue bit is on, keep the command alive */
                if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
                        break;

                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                /*
                 * Handle special case for REJECT when iscsi_add_reject*() has
                 * overwritten the original iscsi_opcode assignment, and the
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
                        isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
                                  cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
                }
                fallthrough;
        default:
                iscsit_release_cmd(cmd);
                break;
        }
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
        if (tx_desc->dma_addr != 0) {
                isert_dbg("unmap single for tx_desc->dma_addr\n");
                ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
                                    ISER_HEADERS_LEN, DMA_TO_DEVICE);
                tx_desc->dma_addr = 0;
        }
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
                     struct ib_device *ib_dev, bool comp_err)
{
        if (isert_cmd->pdu_buf_dma != 0) {
                isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
                ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
                                    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
                isert_cmd->pdu_buf_dma = 0;
        }

        isert_unmap_tx_desc(tx_desc, ib_dev);
        isert_put_cmd(isert_cmd, comp_err);
}

static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
        struct ib_mr_status mr_status;
        int ret;

        ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret) {
                isert_err("ib_check_mr_status failed, ret %d\n", ret);
                goto fail_mr_status;
        }

        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                u64 sec_offset_err;
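                /* Each logical block carries an extra 8-byte T10 PI tuple. */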
                u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

                switch (mr_status.sig_err.err_type) {
                case IB_SIG_BAD_GUARD:
                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
                        break;
                case IB_SIG_BAD_REFTAG:
                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
                        break;
                case IB_SIG_BAD_APPTAG:
                        se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
                        break;
                }
                sec_offset_err = mr_status.sig_err.sig_err_offset;
                do_div(sec_offset_err, block_size);
                se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;

                isert_err("PI error found type %d at sector 0x%llx "
                          "expected 0x%x vs actual 0x%x\n",
                          mr_status.sig_err.err_type,
                          (unsigned long long)se_cmd->sense_info,
                          mr_status.sig_err.expected,
                          mr_status.sig_err.actual);
                ret = 1;
        }

fail_mr_status:
        return ret;
}

static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct isert_device *device = isert_conn->device;
        struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
        struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
        struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
        int ret = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "rdma write");
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                isert_completion_put(desc, isert_cmd, device->ib_device, true);
                return;
        }

        isert_dbg("Cmd %p\n", isert_cmd);

        ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
        isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

        if (ret) {
                /*
                 * transport_generic_request_failure() expects two extra
                 * references to handle queue-full, so re-add one here,
                 * as target-core will have already dropped one after the
                 * first isert_put_datain() callback.
                 */
                kref_get(&cmd->cmd_kref);
                transport_generic_request_failure(cmd, cmd->pi_err);
        } else {
                /*
                 * XXX: isert_put_response() failure is not retried.
                 */
                ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
                if (ret)
                        pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
        }
}

static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct isert_device *device = isert_conn->device;
        struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
        struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
        struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        int ret = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "rdma read");
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                isert_completion_put(desc, isert_cmd, device->ib_device, true);
                return;
        }

        isert_dbg("Cmd %p\n", isert_cmd);

        iscsit_stop_dataout_timer(cmd);

        if (isert_prot_cmd(isert_conn, se_cmd))
                ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
        isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
        cmd->write_data_done = 0;

        isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
        spin_lock_bh(&cmd->istate_lock);
        cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);

        /*
         * transport_generic_request_failure() will drop the extra
         * se_cmd->cmd_kref reference after T10-PI error, and handle
         * any non-zero ->queue_status() callback error retries.
         */
        if (ret)
                transport_generic_request_failure(se_cmd, se_cmd->pi_err);
        else
                target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
        struct isert_cmd *isert_cmd = container_of(work,
                        struct isert_cmd, comp_work);
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct ib_device *ib_dev = isert_conn->cm_id->device;
        struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;

        isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

        switch (cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
                iscsit_tmr_post_handler(cmd, cmd->conn);
                fallthrough;
        case ISTATE_SEND_REJECT:
        case ISTATE_SEND_TEXTRSP:
                cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
                                     ib_dev, false);
                break;
        case ISTATE_SEND_LOGOUTRSP:
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
        default:
                isert_err("Unknown i_state %d\n", cmd->i_state);
                dump_stack();
                break;
        }
}

static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct ib_device *ib_dev = isert_conn->cm_id->device;
        struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "login send");
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
        }

        isert_unmap_tx_desc(tx_desc, ib_dev);
}

static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct isert_conn *isert_conn = wc->qp->qp_context;
        struct ib_device *ib_dev = isert_conn->cm_id->device;
        struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
        struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                isert_print_wc(wc, "send");
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
                return;
        }

        isert_dbg("Cmd %p\n", isert_cmd);

        switch (isert_cmd->iscsit_cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
        case ISTATE_SEND_LOGOUTRSP:
        case ISTATE_SEND_REJECT:
        case ISTATE_SEND_TEXTRSP:
                isert_unmap_tx_desc(tx_desc, ib_dev);

                INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
                queue_work(isert_comp_wq, &isert_cmd->comp_work);
                return;
        default:
                isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
                break;
        }
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
        int ret;

        ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
        if (ret)
                return ret;

        ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
        if (ret) {
                isert_err("ib_post_send failed with %d\n", ret);
                return ret;
        }
        return ret;
}

static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
        struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
                                &isert_cmd->tx_desc.iscsi_header;

        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
        iscsit_build_rsp_pdu(cmd, conn, true, hdr);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
        /*
         * Attach SENSE DATA payload to iSCSI Response PDU
         */
        if (cmd->se_cmd.sense_buffer &&
            ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
             (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
                struct isert_device *device = isert_conn->device;
                struct ib_device *ib_dev = device->ib_device;
                struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
                u32 padding, pdu_len;
1779 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1780 cmd->sense_buffer);
1781 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1782
1783 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1784 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1785 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1786
1787 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1788 (void *)cmd->sense_buffer, pdu_len,
1789 DMA_TO_DEVICE);
1790 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1791 return -ENOMEM;
1792
1793 isert_cmd->pdu_buf_len = pdu_len;
1794 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1795 tx_dsg->length = pdu_len;
1796 tx_dsg->lkey = device->pd->local_dma_lkey;
1797 isert_cmd->tx_desc.num_sge = 2;
1798 }
1799
1800 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1801
1802 isert_dbg("Posting SCSI Response\n");
1803
1804 return isert_post_response(isert_conn, isert_cmd);
1805 }
1806
static void
isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi && device->pi_capable) {
		isert_info("conn %p PI offload enabled\n", isert_conn);
		isert_conn->pi_support = true;
		return TARGET_PROT_ALL;
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

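/*
 * Describe one side (wire or memory) of a T10-DIF signature handover
 * for the HCA. Only the block size, reference tag seed and remap
 * policy come from the se_cmd; the remaining settings are hard-coded
 * below.
 */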
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * These are hard-coded for now; if the target core ever wants
	 * to control them, take the values from se_cmd instead.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;

	return 0;
}

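/*
 * Initialize (once per command) and post the rdma_rw context for a
 * data transfer. ctx_init_done lets DataOut recovery re-post the
 * same context without re-initializing it. PI commands are set up
 * through the signature MR variant instead.
 */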
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsit_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}

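/*
 * Push Data-In to the initiator via RDMA_WRITE. In the normal case
 * the SCSI response send WR is chained directly behind the write;
 * with PI offload the response is instead sent from
 * isert_rdma_write_done() once the signature handover has completed.
 */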
static int
isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build the tx_desc for the iSCSI response PDU and chain
		 * it behind the RDMA_WRITE work requests.
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc)
			return rc;

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
		  isert_cmd, rc);
	return rc;
}

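/*
 * Pull outstanding Data-Out from the initiator via RDMA_READ;
 * isert_rdma_read_done() resumes command processing when the
 * transfer completes.
 */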
static int
isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
				     &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
		  isert_cmd, ret);
	return ret;
}

static int
isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context when se_cmd execution failed early.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

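/*
 * Create, bind and listen on an RDMA CM ID for a network portal.
 * rdma_set_afonly() lets separate IPv4 and IPv6 listeners share the
 * same port number.
 */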
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(id, 1);
	if (ret) {
		isert_err("rdma_set_afonly() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np)
		return -ENOMEM;

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from
	 * the iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

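/*
 * Accept an incoming RDMA CM connect request. The private data
 * returned to the initiator advertises that zero-based virtual
 * addressing is not used and, unless negotiated, that send-with-
 * invalidate will not be used either.
 */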
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	struct iser_cm_hdr rsp_hdr;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags |= ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick queue_delayed_work(isert_login_wq, &conn->login_work) as
	 * the packet is received, which turns this callback from
	 * iscsi_target_do_login_rx() into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

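/*
 * Block until a pending connection is queued and isert_np->sem is
 * upped, then bind the first pending isert_conn to the iscsi_target
 * login thread's iscsit_conn. Bails out with -ENODEV once the np
 * thread is being reset or shut down.
 */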
static int
isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;
	LIST_HEAD(drop_conn_list);

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: There is no good way to ensure that no connections are
	 * left hanging here after completing RDMA establishment without
	 * having started the iscsi login process. Work around this by
	 * cleaning up whatever has piled up on the accepted and pending
	 * lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}
	mutex_unlock(&isert_np->mutex);

	list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
		list_del_init(&isert_conn->node);
		isert_connect_release(isert_conn);
	}

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsit_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsit_conn *conn)
{
	isert_info("iscsit_conn %p\n", conn);

	if (conn->sess) {
		target_stop_cmd_counter(conn->cmd_cnt);
		target_wait_for_cmds(conn->cmd_cnt);
	}
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 * unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
{
	struct iscsit_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

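/*
 * Connection teardown: terminate the CM connection, drain the QP so
 * all posted work requests have completed, drop commands still
 * waiting for unsolicited dataout, wait for outstanding commands and
 * a posted logout response, then hand the final release off to
 * isert_release_wq.
 */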
static void isert_wait_conn(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

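/*
 * Receive processing is driven entirely by CQ completions
 * (isert_recv_done()), so there is no PDU for the iscsi_target rx
 * thread to fetch here; park it on a completion that is never
 * signalled, presumably until the thread is interrupted during
 * connection teardown.
 */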
static void isert_get_rx_pdu(struct iscsit_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}

static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.rdma_shutdown = true,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_wait_conn = isert_wait_conn,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
	.iscsit_aborted_task = isert_aborted_task,
	.iscsit_get_rx_pdu = isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

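/*
 * Module init: set up the three workqueues (login handling, deferred
 * send completions, connection release) and register the transport
 * with the iSCSI target core.
 */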
static int __init isert_init(void)
{
	isert_login_wq = alloc_workqueue("isert_login_wq", WQ_PERCPU, 0);
	if (!isert_login_wq) {
		isert_err("Unable to allocate isert_login_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		goto destroy_login_wq;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_login_wq:
	destroy_workqueue(isert_login_wq);

	return -ENOMEM;
}

static void __exit isert_exit(void)
{
	flush_workqueue(isert_login_wq);
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
	destroy_workqueue(isert_login_wq);
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);