/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len, Protection size
 * is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;

        err = iser_dma_map_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                goto out_err;
        }
        mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

        hdr->flags |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(mem_reg->rkey);
        hdr->read_va = cpu_to_be64(mem_reg->sge.addr);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, mem_reg->rkey,
                 (unsigned long long)mem_reg->sge.addr);

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
        return err;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len, Protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len
 */
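/* As called from iser_send_command(): imm_sz is the immediate data carried
 * in the command PDU itself, unsol_sz covers immediate plus unsolicited
 * Data-Out data, and edtl is the expected data transfer length. The RDMA
 * write STag/VA are advertised only when the target still has data to
 * solicit (unsol_sz < edtl) and the buffer extends past the immediate data.
 */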
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
                                  unsigned int unsol_sz, unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_ctrl *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
        struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

        err = iser_dma_map_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
        if (err)
                return err;

        err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
                                   buf_out->data_len == imm_sz);
        if (err) {
                iser_err("Failed to register write cmd RDMA mem\n");
                goto out_err;
        }

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

        if (unsol_sz < edtl) {
                hdr->flags |= ISER_WSV;
                if (buf_out->data_len > imm_sz) {
                        hdr->write_stag = cpu_to_be32(mem_reg->rkey);
                        hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
                }

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
                         task->itt, mem_reg->rkey,
                         (unsigned long long)mem_reg->sge.addr, unsol_sz);
        }

        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                tx_dsg->addr = mem_reg->sge.addr;
                tx_dsg->length = imm_sz;
                tx_dsg->lkey = mem_reg->sge.lkey;
                iser_task->desc.num_sge = 2;
        }

        return 0;

out_err:
        iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
        return err;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
                                  struct iser_tx_desc *tx_desc,
                                  enum iser_desc_type type,
                                  void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
        struct iser_device *device = iser_conn->ib_conn.device;

        tx_desc->type = type;
        tx_desc->cqe.done = done;

        ib_dma_sync_single_for_cpu(device->ib_device,
                                   tx_desc->dma_addr, ISER_HEADERS_LEN,
                                   DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
        tx_desc->iser_header.flags = ISER_VER;
        tx_desc->num_sge = 1;
}

static void iser_free_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        if (!desc->req)
                return;

        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        kfree(desc->req);
        kfree(desc->rsp);

        /* make sure we never redo any unmapping */
        desc->req = NULL;
        desc->rsp = NULL;
}

static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        struct iser_login_desc *desc = &iser_conn->login_desc;

        desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
        if (!desc->req)
                return -ENOMEM;

        desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
                                          ISCSI_DEF_MAX_RECV_SEG_LEN,
                                          DMA_TO_DEVICE);
        if (ib_dma_mapping_error(device->ib_device, desc->req_dma))
                goto free_req;

        desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!desc->rsp)
                goto unmap_req;

        desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
                                          ISER_RX_LOGIN_SIZE,
                                          DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(device->ib_device, desc->rsp_dma))
                goto free_rsp;

        return 0;

free_rsp:
        kfree(desc->rsp);
unmap_req:
        ib_dma_unmap_single(device->ib_device, desc->req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN,
                            DMA_TO_DEVICE);
free_req:
        kfree(desc->req);

        return -ENOMEM;
}

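/**
 * iser_alloc_rx_descriptors - allocate per-connection RX resources
 * @iser_conn: iser connection context
 * @session:   matching iscsi session
 *
 * Allocates the fast registration pool, the login request/response
 * buffers and an array of cmds_max receive descriptors, DMA-mapping
 * each descriptor for the receive path.
 */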
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
                              struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_conn->qp_max_recv_dtos = session->cmds_max;

        if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
                                    iser_conn->pages_per_mr))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(iser_conn))
                goto alloc_login_buf_fail;

        iser_conn->num_rx_descs = session->cmds_max;
        iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
                                            sizeof(struct iser_rx_desc),
                                            GFP_KERNEL);
        if (!iser_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = iser_conn->rx_descs;

        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                             ISER_RX_PAYLOAD_SIZE,
                                             DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;
                rx_desc->cqe.done = iser_task_rsp;
                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = device->pd->local_dma_lkey;
        }

        return 0;

rx_desc_dma_map_failed:
        rx_desc = iser_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
        iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}

void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
        int i;
        struct iser_rx_desc *rx_desc;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_free_fastreg_pool(ib_conn);

        rx_desc = iser_conn->rx_descs;
        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;

        iser_free_login_buf(iser_conn);
}

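/* Called while the final login PDU is being sent: once the connection
 * moves to full feature phase, post the initial batch of receive buffers
 * (all but the one still posted for the login response).
 */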
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_session *session = conn->session;
        int err = 0;
        int i;

        iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
        /* check if this is the last login - going to full feature phase */
        if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
                goto out;

        if (session->discovery_sess) {
                iser_info("Discovery session, re-using login RX buffer\n");
                goto out;
        }

        iser_info("Normal session, posting batch of RX %d buffers\n",
                  iser_conn->qp_max_recv_dtos - 1);

        /*
         * Initial post receive buffers.
         * There is one already posted recv buffer (for the last login
         * response). Therefore, the first recv buffer is skipped here.
         */
        for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
                err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
                if (err)
                        goto out;
        }
out:
        return err;
}

/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        unsigned long edtl;
        int err;
        struct iser_data_buf *data_buf, *prot_buf;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        struct scsi_cmnd *sc = task->sc;
        struct iser_tx_desc *tx_desc = &iser_task->desc;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
                              iser_cmd_comp);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                data_buf = &iser_task->data[ISER_DIR_IN];
                prot_buf = &iser_task->prot[ISER_DIR_IN];
        } else {
                data_buf = &iser_task->data[ISER_DIR_OUT];
                prot_buf = &iser_task->prot[ISER_DIR_OUT];
        }

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->sg = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }
        data_buf->data_len = scsi_bufflen(sc);

        if (scsi_prot_sg_count(sc)) {
                prot_buf->sg = scsi_prot_sglist(sc);
                prot_buf->size = scsi_prot_sg_count(sc);
                prot_buf->data_len = (data_buf->data_len >>
                                      ilog2(sc->device->sector_size)) * 8;
        }

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_command_error:
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *tx_desc;
        struct iser_mem_reg *mem_reg;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err;
        struct ib_sge *tx_dsg;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
        if (!tx_desc)
                return -ENOMEM;

        tx_desc->type = ISCSI_TX_DATAOUT;
        tx_desc->cqe.done = iser_dataout_comp;
        tx_desc->iser_header.flags = ISER_VER;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc */
        err = iser_initialize_task_headers(task, tx_desc);
        if (err)
                goto send_data_out_error;

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
        tx_dsg = &tx_desc->tx_sg[1];
        tx_dsg->addr = mem_reg->sge.addr + buf_offset;
        tx_dsg->length = data_seg_len;
        tx_dsg->lkey = mem_reg->sge.lkey;
        tx_desc->num_sge = 2;

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(&iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_data_out_error:
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

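/**
 * iser_send_control - send control PDU
 * @conn: link to matching iscsi connection
 * @task: iscsi management task
 *
 * For the login task, the payload is staged in the dedicated login
 * request buffer, and the login response plus the initial RX buffers
 * are posted before the PDU goes out.
 */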
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *mdesc = &iser_task->desc;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_device *device;

        /* build the tx desc regd header and add it to the tx desc dto */
        iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
                              iser_ctrl_comp);

        device = iser_conn->ib_conn.device;

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                struct iser_login_desc *desc = &iser_conn->login_desc;
                struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

                if (task != conn->login_task) {
                        iser_err("data present on non login task!!!\n");
                        err = -EINVAL;
                        goto send_control_error;
                }

                ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
                                           task->data_count, DMA_TO_DEVICE);

                memcpy(desc->req, task->data, task->data_count);

                ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
                                              task->data_count, DMA_TO_DEVICE);

                tx_dsg->addr = desc->req_dma;
                tx_dsg->length = task->data_count;
                tx_dsg->lkey = device->pd->local_dma_lkey;
                mdesc->num_sge = 2;
        }

        if (task == conn->login_task) {
                iser_dbg("op %x dsl %lx, posting login rx buffer\n",
                         task->hdr->opcode, data_seg_len);
                err = iser_post_recvl(iser_conn);
                if (err)
                        goto send_control_error;
                err = iser_post_rx_bufs(conn, task->hdr);
                if (err)
                        goto send_control_error;
        }

        err = iser_post_send(&iser_conn->ib_conn, mdesc);
        if (!err)
                return 0;

send_control_error:
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

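/* Completion handler for the login response RX buffer: hand the PDU to
 * libiscsi and, when a normal session transitions to full feature phase,
 * post the first RX descriptor that iser_post_rx_bufs() skipped.
 */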
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_login_desc *desc = iser_login(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        char *data;
        int length;
        bool full_feature_phase;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "login_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = desc->rsp + sizeof(struct iser_ctrl);
        data = desc->rsp + ISER_HEADERS_LEN;
        length = wc->byte_len - ISER_HEADERS_LEN;
        full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
                              ISCSI_FULL_FEATURE_PHASE) &&
                             (hdr->flags & ISCSI_FLAG_CMD_FINAL);

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                      DMA_FROM_DEVICE);

        if (!full_feature_phase ||
            iser_conn->iscsi_conn->session->discovery_sess)
                return;

        /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
        iser_post_recvm(iser_conn, iser_conn->rx_descs);
}

static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
        if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
                     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
                iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
                return -EINVAL;
        }

        if (desc->sig_protected)
                desc->rsc.sig_mr->need_inval = false;
        else
                desc->rsc.mr->need_inval = false;

        return 0;
}

static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
                                 struct iscsi_hdr *hdr)
{
        if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
                struct iscsi_task *task;
                u32 rkey = wc->ex.invalidate_rkey;

                iser_dbg("conn %p: remote invalidation for rkey %#x\n",
                         iser_conn, rkey);

                if (unlikely(!iser_conn->snd_w_inv)) {
                        iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
                                 iser_conn);
                        return -EPROTO;
                }

                task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
                if (likely(task)) {
                        struct iscsi_iser_task *iser_task = task->dd_data;
                        struct iser_fr_desc *desc;

                        if (iser_task->dir[ISER_DIR_IN]) {
                                desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }

                        if (iser_task->dir[ISER_DIR_OUT]) {
                                desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
                                if (unlikely(iser_inv_desc(desc, rkey)))
                                        return -EINVAL;
                        }
                } else {
                        iser_err("failed to get task for itt=%d\n", hdr->itt);
                        return -EINVAL;
                }
        }

        return 0;
}

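/* Completion handler for task response RX buffers: validate any remote
 * invalidation, pass the PDU to libiscsi and repost the receive buffer.
 */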
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_conn *iser_conn = to_iser_conn(ib_conn);
        struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
        struct iscsi_hdr *hdr;
        int length, err;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "task_rsp");
                return;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
                                   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                   DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        length = wc->byte_len - ISER_HEADERS_LEN;

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, length);

        if (iser_check_remote_inv(iser_conn, wc, hdr)) {
                iscsi_conn_failure(iser_conn->iscsi_conn,
                                   ISCSI_ERR_CONN_FAILED);
                return;
        }

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
                                      DMA_FROM_DEVICE);

        err = iser_post_recvm(iser_conn, desc);
        if (err)
                iser_err("posting rx buffer err %d\n", err);
}

void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "command");
}

void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct iscsi_task *task;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                iser_err_comp(wc, "control");
                return;
        }

        /* this arithmetic is legal by libiscsi dd_data allocation */
        task = (void *)desc - sizeof(struct iscsi_task);
        if (task->hdr->itt == RESERVED_ITT)
                iscsi_put_task(task);
}

void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
        struct ib_conn *ib_conn = wc->qp->qp_context;
        struct iser_device *device = ib_conn->device;

        if (unlikely(wc->status != IB_WC_SUCCESS))
                iser_err_comp(wc, "dataout");

        ib_dma_unmap_single(device->ib_device, desc->dma_addr,
                            ISER_HEADERS_LEN, DMA_TO_DEVICE);
        kmem_cache_free(ig.desc_cache, desc);
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].data_len = 0;
        iser_task->prot[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].dma_nents = 0;
        iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

        memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
               sizeof(struct iser_mem_reg));
        memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
               sizeof(struct iser_mem_reg));
}

void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        if (iser_task->dir[ISER_DIR_IN]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
                                         DMA_FROM_DEVICE);
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
                iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
                                         DMA_TO_DEVICE);
        }
}