/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* constant PDU length calculations */
#define ISER_TOTAL_HEADERS_LEN	(sizeof (struct iser_hdr) + \
				 sizeof (struct iscsi_hdr))

/* iser_dto_add_regd_buff - increments the reference count for
 * the registered buffer and adds it to the DTO object */
static void iser_dto_add_regd_buff(struct iser_dto *dto,
				   struct iser_regd_buf *regd_buf,
				   unsigned long use_offset,
				   unsigned long use_size)
{
	int add_idx;

	atomic_inc(&regd_buf->ref_count);

	add_idx = dto->regd_vector_len;
	dto->regd[add_idx] = regd_buf;
	dto->used_sz[add_idx] = use_size;
	dto->offset[add_idx] = use_offset;

	dto->regd_vector_len++;
}

/* DMA-maps the task's scatterlist in the given direction and records
 * the direction so iser_dma_unmap_task_data() can undo it later */
static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
				  struct iser_data_buf *data,
				  enum iser_data_dir iser_dir,
				  enum dma_data_direction dma_dir)
{
	struct device *dma_device;

	iser_ctask->dir[iser_dir] = 1;
	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct device *dma_device;
	struct iser_data_buf *data;

	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	if (iser_ctask->dir[ISER_DIR_IN]) {
		data = &iser_ctask->data[ISER_DIR_IN];
		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		data = &iser_ctask->data[ISER_DIR_OUT];
		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
	}
}
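
/* Note on DTO buffer accounting: every registered buffer placed on a DTO
 * via iser_dto_add_regd_buff() takes one reference (the atomic_inc above),
 * and iser_dto_buffs_release() at the bottom of this file drops one
 * reference per vector entry. A minimal sketch of the pairing, assuming a
 * DTO that carries only its header buffer:
 *
 *	iser_dto_add_regd_buff(dto, regd_hdr, 0, 0);	ref_count: n -> n+1
 *	...the DTO is posted and completes...
 *	iser_dto_buffs_release(dto);			ref_count: n+1 -> n
 *
 * Callers in this file pass offset/size of 0 when the whole registered
 * buffer is meant, or an explicit offset and size when only a slice is
 * sent (e.g. immediate data, Data-Out segments).
 */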
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * iser_ctask->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
				 unsigned int edtl)
{
	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_ctask,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
		iser_err("Total data length: %ld, less than EDTL: "
			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
			 iser_ctask->data[ISER_DIR_IN].data_len, edtl,
			 ctask->itt, iser_ctask->iser_conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_ctask, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 ctask->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * ctask->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_dto *send_dto = &iser_ctask->desc.dto;
	struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];

	err = iser_dma_map_task_data(iser_ctask,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
		iser_err("Total data length: %ld, less than EDTL: %d, "
			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
			 iser_ctask->data[ISER_DIR_OUT].data_len,
			 edtl, ctask->itt, ctask->conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_ctask, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 ctask->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 ctask->itt, imm_sz);
		iser_dto_add_regd_buff(send_dto,
				       regd_buf,
				       0,
				       imm_sz);
	}

	return 0;
}
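
/* The two prepare routines above implement the iSER passive data path:
 * rather than streaming payload through iSCSI Data-In/Data-Out PDUs, the
 * initiator advertises a remotely accessible buffer in the iSER header and
 * lets the target move the data with RDMA. A rough sketch of what lands in
 * struct iser_hdr (values illustrative only):
 *
 *	READ:	flags	  |= ISER_RSV;		target RDMA-Writes the
 *		read_stag  = rkey;		whole transfer into
 *		read_va	   = va;		[va, va + edtl)
 *
 *	WRITE:	flags	  |= ISER_WSV;		target RDMA-Reads only the
 *		write_stag = rkey;		solicited part, starting
 *		write_va   = va + unsol_sz;	past the unsolicited bytes
 *
 * The write tags are set only when unsol_sz < edtl, i.e. when some data is
 * solicited; immediate data (imm_sz) rides on the command DTO itself.
 */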
/**
 * iser_post_receive_control - allocates, initializes and posts receive DTO.
 */
static int iser_post_receive_control(struct iscsi_conn *conn)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iser_desc     *rx_desc;
	struct iser_regd_buf *regd_hdr;
	struct iser_regd_buf *regd_data;
	struct iser_dto      *recv_dto = NULL;
	struct iser_device   *device = iser_conn->ib_conn->device;
	int rx_data_size, err = 0;

	rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
	if (rx_desc == NULL) {
		iser_err("Failed to alloc desc for post recv\n");
		return -ENOMEM;
	}
	rx_desc->type = ISCSI_RX;

	/* for the login sequence we must support rx of up to 8K; login is done
	 * after conn create/bind (connect) and conn stop/bind (reconnect);
	 * what's common to both schemes is that the connection is not started
	 */
	if (conn->c_stage != ISCSI_CONN_STARTED)
		rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
	else /* FIXME till user space sets conn->max_recv_dlength correctly */
		rx_data_size = 128;

	rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
	if (rx_desc->data == NULL) {
		iser_err("Failed to alloc data buf for post recv\n");
		err = -ENOMEM;
		goto post_rx_kmalloc_failure;
	}

	recv_dto = &rx_desc->dto;
	recv_dto->conn = iser_conn;
	recv_dto->regd_vector_len = 0;

	regd_hdr = &rx_desc->hdr_regd_buf;
	memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
	regd_hdr->device = device;
	regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
	regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;

	iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);

	iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);

	regd_data = &rx_desc->data_regd_buf;
	memset(regd_data, 0, sizeof(struct iser_regd_buf));
	regd_data->device = device;
	regd_data->virt_addr = rx_desc->data;
	regd_data->data_size = rx_data_size;

	iser_reg_single(device, regd_data, DMA_FROM_DEVICE);

	iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);

	err = iser_post_recv(rx_desc);
	if (!err)
		return 0;

	/* iser_post_recv failed */
	iser_dto_buffs_release(recv_dto);
	kfree(rx_desc->data);
post_rx_kmalloc_failure:
	kmem_cache_free(ig.desc_cache, rx_desc);
	return err;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
				  struct iser_desc *tx_desc)
{
	struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
	struct iser_dto      *send_dto = &tx_desc->dto;

	memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
	regd_hdr->device = iser_conn->ib_conn->device;
	regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
	regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;

	send_dto->conn = iser_conn;
	send_dto->notify_enable = 1;
	send_dto->regd_vector_len = 0;

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
}
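
/* Every receive DTO built above carries exactly two registered buffers:
 * regd[0] maps the iSER + iSCSI headers (ISER_TOTAL_HEADERS_LEN bytes) and
 * regd[1] maps the data segment. iser_rcv_completion() below relies on this
 * layout when it locates the received payload as
 *
 *	rx_data = dto->regd[1]->virt_addr + dto->offset[1];
 *
 * so the order of the two iser_dto_add_regd_buff() calls must be preserved.
 */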
/**
 * iser_conn_set_full_featured_mode - (iSER API) posts the initial
 * receive buffers once the login phase is done
 */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	int i;
	/* no need to keep it in a var, we are after login so if this should
	 * be negotiated, by now the result should be available here */
	int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;

	iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);

	/* Check that there are no posted recv or send buffers left - */
	/* they must be consumed during the login phase */
	BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
	BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

	/* Initial post receive buffers */
	for (i = 0; i < initial_post_recv_bufs_num; i++) {
		if (iser_post_receive_control(conn) != 0) {
			iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
				 i, conn);
			return -ENOMEM;
		}
	}
	iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
	return 0;
}

static int
iser_check_xmit(struct iscsi_conn *conn, void *task)
{
	int rc = 0;
	struct iscsi_iser_conn *iser_conn = conn->dd_data;

	write_lock_bh(conn->recv_lock);
	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
	    ISER_QP_MAX_REQ_DTOS) {
		iser_dbg("%ld can't xmit task %p, suspending tx\n",
			 jiffies, task);
		set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
		rc = -EAGAIN;
	}
	write_unlock_bh(conn->recv_lock);
	return rc;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_cmd_task *ctask)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
	struct iser_dto *send_dto = NULL;
	unsigned long edtl;
	int err = 0;
	struct iser_data_buf *data_buf;

	struct iscsi_cmd *hdr = ctask->hdr;
	struct scsi_cmnd *sc  = ctask->sc;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n",
			 iser_conn->ib_conn);
		return -EPERM;
	}
	if (iser_check_xmit(conn, ctask))
		return -EAGAIN;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
	send_dto = &iser_ctask->desc.dto;
	send_dto->ctask = iser_ctask;
	iser_create_send_desc(iser_conn, &iser_ctask->desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_ctask->data[ISER_DIR_IN];
	else
		data_buf = &iser_ctask->data[ISER_DIR_OUT];

	if (sc->use_sg) { /* using a scatter list */
		data_buf->buf  = sc->request_buffer;
		data_buf->size = sc->use_sg;
	} else if (sc->request_bufflen) {
		/* using a single buffer - convert it into one entry SG */
		sg_init_one(&data_buf->sg_single,
			    sc->request_buffer, sc->request_bufflen);
		data_buf->buf  = &data_buf->sg_single;
		data_buf->size = 1;
	}

	data_buf->data_len = sc->request_bufflen;

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(ctask, edtl);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(ctask,
					     ctask->imm_count,
					     ctask->imm_count +
					     ctask->unsol_count,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_reg_single(iser_conn->ib_conn->device,
			send_dto->regd[0], DMA_TO_DEVICE);

	if (iser_post_receive_control(conn) != 0) {
		iser_err("post_recv failed!\n");
		err = -ENOMEM;
		goto send_command_error;
	}

	iser_ctask->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_ctask->desc);
	if (!err)
		return 0;

send_command_error:
	iser_dto_buffs_release(send_dto);
	iser_err("conn %p failed ctask->itt %d err %d\n", conn, ctask->itt, err);
	return err;
}
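
/* Note the credit scheme in the send paths: iser_send_command() (and
 * iser_send_control() below) posts a fresh receive via
 * iser_post_receive_control() just before iser_post_send(), so the target's
 * response PDU always finds a posted receive buffer waiting.
 * iser_check_xmit() provides the other half of the flow control: once
 * post_send_buf_count reaches ISER_QP_MAX_REQ_DTOS, tx is suspended and
 * iser_snd_completion() resumes it when a send slot frees up.
 */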
/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_cmd_task *ctask,
		       struct iscsi_data *hdr)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
	struct iser_desc *tx_desc = NULL;
	struct iser_dto *send_dto = NULL;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	unsigned int itt;
	int err = 0;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n",
			 iser_conn->ib_conn);
		return -EPERM;
	}

	if (iser_check_xmit(conn, ctask))
		return -EAGAIN;

	itt = ntohl(hdr->itt);
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc regd header and add it to the tx desc dto */
	send_dto = &tx_desc->dto;
	send_dto->ctask = iser_ctask;
	iser_create_send_desc(iser_conn, tx_desc);

	iser_reg_single(iser_conn->ib_conn->device,
			send_dto->regd[0], DMA_TO_DEVICE);

	/* all data was registered for RDMA, we can use the lkey */
	iser_dto_add_regd_buff(send_dto,
			       &iser_ctask->rdma_regd[ISER_DIR_OUT],
			       buf_offset,
			       data_seg_len);

	if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_ctask->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	iser_dto_buffs_release(send_dto);
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
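
/* Data-Out PDUs reuse the task's ISER_DIR_OUT registration: the PDU's
 * buffer offset and data segment length simply select a slice of the
 * already-registered buffer, so no per-PDU registration is needed. As a
 * worked example with illustrative numbers only: for a 64K write with 8K
 * of unsolicited data, R2T-driven Data-Outs might carry
 * (offset=8K, dsl=8K), (offset=16K, dsl=8K), ..., and each slice must
 * satisfy offset + dsl <= data_len (64K), which is exactly the
 * consistency check above.
 */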
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_mgmt_task *mtask)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iser_desc *mdesc = mtask->dd_data;
	struct iser_dto *send_dto = NULL;
	unsigned int itt;
	unsigned long data_seg_len;
	int err = 0;
	unsigned char opcode;
	struct iser_regd_buf *regd_buf;
	struct iser_device *device;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n",
			 iser_conn->ib_conn);
		return -EPERM;
	}

	if (iser_check_xmit(conn, mtask))
		return -EAGAIN;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	send_dto = &mdesc->dto;
	send_dto->ctask = NULL;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn->device;

	iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);

	itt = ntohl(mtask->hdr->itt);
	opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
	data_seg_len = ntoh24(mtask->hdr->dlength);

	if (data_seg_len > 0) {
		regd_buf = &mdesc->data_regd_buf;
		memset(regd_buf, 0, sizeof(struct iser_regd_buf));
		regd_buf->device = device;
		regd_buf->virt_addr = mtask->data;
		regd_buf->data_size = mtask->data_count;

		iser_reg_single(device, regd_buf,
				DMA_TO_DEVICE);

		iser_dto_add_regd_buff(send_dto, regd_buf,
				       0,
				       data_seg_len);
	}

	if (iser_post_receive_control(conn) != 0) {
		iser_err("post_rcv_buff failed!\n");
		err = -ENOMEM;
		goto send_control_error;
	}

	err = iser_post_send(mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_dto_buffs_release(send_dto);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_desc *rx_desc,
			 unsigned long dto_xfer_len)
{
	struct iser_dto *dto = &rx_desc->dto;
	struct iscsi_iser_conn *conn = dto->conn;
	struct iscsi_session *session = conn->iscsi_conn->session;
	struct iscsi_cmd_task *ctask;
	struct iscsi_iser_cmd_task *iser_ctask;
	struct iscsi_hdr *hdr;
	char *rx_data = NULL;
	int rx_data_len = 0;
	unsigned int itt;
	unsigned char opcode;

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode, hdr->itt);

	if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
		rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
		rx_data     = dto->regd[1]->virt_addr;
		rx_data    += dto->offset[1];
	}

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
		itt = hdr->itt & ISCSI_ITT_MASK; /* mask out cid and age bits */
		if (itt < session->cmds_max) {
			/* use the mapping given with the cmds array indexed by itt */
			ctask = (struct iscsi_cmd_task *)session->cmds[itt];
			iser_ctask = ctask->dd_data;
			iser_dbg("itt %d ctask %p\n", itt, ctask);
			iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
			iser_ctask_rdma_finalize(iser_ctask);
		} else
			iser_err("itt can't be matched to task!!! "
				 "conn %p opcode %d cmds_max %d itt %d\n",
				 conn->iscsi_conn, opcode,
				 session->cmds_max, itt);
	}

	iser_dto_buffs_release(dto);

	iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);

	kfree(rx_desc->data);
	kmem_cache_free(ig.desc_cache, rx_desc);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything   */
	atomic_dec(&conn->ib_conn->post_recv_buf_count);
}
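
/* The itt-to-task lookup above leans on libiscsi's allocation scheme:
 * initiator task tags are handed out as indices into session->cmds[], so
 * after masking out the cid and age bits a valid itt indexes the array
 * directly; anything >= session->cmds_max cannot belong to this session
 * and is rejected as unmatched rather than dereferenced.
 */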
void iser_snd_completion(struct iser_desc *tx_desc)
{
	struct iser_dto *dto = &tx_desc->dto;
	struct iscsi_iser_conn *iser_conn = dto->conn;
	struct iscsi_conn *conn = iser_conn->iscsi_conn;
	struct iscsi_mgmt_task *mtask;

	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);

	iser_dto_buffs_release(dto);

	if (tx_desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, tx_desc);

	atomic_dec(&iser_conn->ib_conn->post_send_buf_count);

	write_lock(conn->recv_lock);
	if (conn->suspend_tx) {
		iser_dbg("%ld resuming tx\n", jiffies);
		clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
		scsi_queue_work(conn->session->host, &conn->xmitwork);
	}
	write_unlock(conn->recv_lock);

	if (tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		mtask = (void *) ((long)(void *)tx_desc -
				  sizeof(struct iscsi_mgmt_task));

		if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
			struct iscsi_session *session = conn->session;

			spin_lock(&session->lock);
			list_del(&mtask->running);
			__kfifo_put(session->mgmtpool.queue, (void*)&mtask,
				    sizeof(void*));
			spin_unlock(&session->lock);
		}
	}
}

void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
{
	iser_ctask->status = ISER_TASK_STATUS_INIT;

	iser_ctask->dir[ISER_DIR_IN] = 0;
	iser_ctask->dir[ISER_DIR_OUT] = 0;

	iser_ctask->data[ISER_DIR_IN].data_len  = 0;
	iser_ctask->data[ISER_DIR_OUT].data_len = 0;

	memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}

void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
{
	int deferred;

	/* if we were reading, copy back to the unaligned sglist;
	 * in any case dma_unmap and free the copy
	 */
	if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL)
		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
	if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL)
		iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);

	if (iser_ctask->dir[ISER_DIR_IN]) {
		deferred = iser_regd_buff_release
			(&iser_ctask->rdma_regd[ISER_DIR_IN]);
		if (deferred) {
			iser_err("References remain for BUF-IN rdma reg\n");
			BUG();
		}
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		deferred = iser_regd_buff_release
			(&iser_ctask->rdma_regd[ISER_DIR_OUT]);
		if (deferred) {
			iser_err("References remain for BUF-OUT rdma reg\n");
			BUG();
		}
	}

	iser_dma_unmap_task_data(iser_ctask);
}

void iser_dto_buffs_release(struct iser_dto *dto)
{
	int i;

	for (i = 0; i < dto->regd_vector_len; i++)
		iser_regd_buff_release(dto->regd[i]);
}
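
/* Task RDMA lifecycle as wired up in this file (a summary sketch; the
 * registration internals live in iser_memory.c):
 *
 *	iser_ctask_rdma_init()		clear dirs, lengths, regd bufs
 *	iser_send_command()
 *	  iser_prepare_read/write_cmd()
 *	    iser_dma_map_task_data()	dma_map_sg()
 *	    iser_reg_rdma_mem()		obtain rkey/va for the iSER header
 *	...target performs RDMA, sends the response...
 *	iser_rcv_completion()
 *	  iser_ctask_rdma_finalize()	drop regd buf references,
 *	    iser_dma_unmap_task_data()	dma_unmap_sg()
 */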