/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}
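/*
 * Sizing note: the two completion queues created below are shared by all
 * connections on a device, so each is sized for the worst case; e.g. the
 * RX CQ must be able to absorb up to ISER_QP_MAX_RECV_DTOS receive
 * completions from each of the ISCSI_ISER_MAX_CONN (8) connections.
 */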
/**
 * iser_create_device_ib_res - creates a Protection Domain (PD), Completion
 * Queues (CQs) and a DMA Memory Region (DMA MR) with the device associated
 * with the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	device->rx_cq = ib_create_cq(device->ib_device,
				     iser_cq_callback,
				     iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	device->tx_cq = ib_create_cq(device->ib_device,
				     NULL, iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_TX_CQ_LEN, 0);

	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	return 0;

dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQs and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	BUG_ON(device->mr == NULL);

	tasklet_kill(&device->cq_tasklet);

	(void)ib_dereg_mr(device->mr);
	(void)ib_destroy_cq(device->tx_cq);
	(void)ib_destroy_cq(device->rx_cq);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->tx_cq = NULL;
	device->rx_cq = NULL;
	device->pd = NULL;
}
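/*
 * Per-connection resources, set up below once address and route are
 * resolved: a login buffer mapped for DMA, a page vector feeding the FMR
 * pool, the FMR pool itself, and finally the QP created on the CMA ID.
 */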
/**
 * iser_create_ib_conn_res - creates the FMR pool and Queue-Pair (QP)
 *
 * returns 0 on success, -errno on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	struct ib_fmr_pool_param params;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
				DMA_FROM_DEVICE);

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	ib_conn->page_vec->pages = (u64 *)(ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element is not start/end *
	 * page aligned, the map would be of N+1 pages     */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = ISCSI_DEF_XMIT_CMDS_MAX * 2;
	params.dirty_watermark	 = ISCSI_DEF_XMIT_CMDS_MAX;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(ib_conn->fmr_pool)) {
		ret = PTR_ERR(ib_conn->fmr_pool);
		goto fmr_pool_err;
	}

	memset(&init_attr, 0, sizeof init_attr);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq;
	init_attr.recv_cq	= device->rx_cq;
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto qp_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

qp_err:
	(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
	kfree(ib_conn->page_vec);
	kfree(ib_conn->login_buf);
alloc_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the FMR pool, QP and CMA ID objects; returns 0 on success
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	if (ib_conn->cma_id != NULL)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}
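/*
 * iser devices are shared: every connection whose CMA ID resolves to an
 * IB device with the same node GUID reuses one iser_device (and with it
 * one PD, one pair of CQs and one DMA MR), refcounted under
 * ig.device_list_mutex.
 */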
/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device for this IB device. If there is no such device,
 * create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_err("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	ret = (ib_conn->state == comp);
	if (ret)
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn)
{
	struct iser_device *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	if (ib_conn->iser_conn)
		ib_conn->iser_conn->ib_conn = NULL;
	iscsi_destroy_endpoint(ib_conn->ep);
}

void iser_conn_get(struct iser_conn *ib_conn)
{
	atomic_inc(&ib_conn->refcount);
}

void iser_conn_put(struct iser_conn *ib_conn)
{
	if (atomic_dec_and_test(&ib_conn->refcount))
		iser_conn_release(ib_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */

	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);

	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_put(ib_conn);
}

static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
}
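/*
 * RDMA CM callbacks: establishment walks ADDR_RESOLVED -> ROUTE_RESOLVED ->
 * ESTABLISHED; any error event funnels into iser_connect_error(), which
 * forces the connection state to DOWN and wakes the waiter in iser_connect().
 */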
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
	}
}

static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->disc_evt_flag = 1;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_err("event %d conn %p id %p\n", event->event, cma_id->context,
		 cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_err("event: %d, error: %d\n", event->event, event->status);
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}
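/*
 * Connection refcounting: iser_conn_init() below starts the count at 1;
 * that initial reference is dropped by iser_conn_terminate() once the
 * connection reaches ISER_CONN_DOWN, so iser_conn_release() runs when the
 * last holder calls iser_conn_put().
 */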
void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1);
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target and, unless non_blocking
 * is set, sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int		     non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_err("connecting to: %pI4, port 0x%x\n",
		 &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)ib_conn,
					 RDMA_PS_TCP);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
connect_failure:
	iser_conn_release(ib_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		    io_addr;
	u64		   *page_list;
	int		    status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey	= mem->fmr->lkey;
	mem_reg->rkey	= mem->fmr->rkey;
	mem_reg->len	= page_vec->length * SIZE_4K;
	mem_reg->va	= io_addr;
	mem_reg->is_fmr = 1;
	mem_reg->mem_h	= (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}
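/*
 * Note: ib_fmr_pool_map_phys() maps whole 4K pages starting at
 * page_list[0], so iser_reg_page_vec() above first records the page-aligned
 * mapping and then advances mem_reg->va by page_vec->offset and trims
 * mem_reg->len to page_vec->data_size, describing only the actual payload.
 */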
/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, errno code on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}
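/*
 * Completion handling. Note that iser_post_recvm() above advances
 * rx_desc_head with a bitwise AND, which relies on ISER_QP_MAX_RECV_DTOS
 * being a power of two; the handlers below run from the per-device tasklet
 * and count the posted RX/TX buffers back down as completions drain.
 */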
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				   struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.                                             */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
					      ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* complete the termination process if the disconnect event was *
		 * delivered; note there are no more uncompleted posts to the QP */
		if (ib_conn->disc_evt_flag) {
			ib_conn->state = ISER_CONN_DOWN;
			wake_up_interruptible(&ib_conn->wait);
		}
	}
}

static int iser_drain_tx_cq(struct iser_device *device)
{
	struct ib_cq *cq = device->tx_cq;
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}


static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_device  *device = (struct iser_device *)data;
	struct ib_cq	    *cq = device->rx_cq;
	struct ib_wc	     wc;
	struct iser_rx_desc *desc;
	unsigned long	     xfer_len;
	struct iser_conn    *ib_conn;
	int completed_tx = 0, completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		/* drain the TX CQ every 64 RX completions so a long RX *
		 * burst cannot starve send completions                 */
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device);
	}
	/* it is assumed here that arming the CQ only once it is empty *
	 * would not cause interrupts to be missed                     */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	completed_tx += iser_drain_tx_cq(device);
	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_device *device = (struct iser_device *)cq_context;

	tasklet_schedule(&device->cq_tasklet);
}