1 /* bnx2fc_tgt.c: QLogic Linux FCoE offload driver. 2 * Handles operations such as session offload/upload etc, and manages 3 * session resources such as connection id and qp resources. 4 * 5 * Copyright (c) 2008-2013 Broadcom Corporation 6 * Copyright (c) 2014-2016 QLogic Corporation 7 * Copyright (c) 2016-2017 Cavium Inc. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation. 12 * 13 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) 14 */ 15 16 #include "bnx2fc.h" 17 static void bnx2fc_upld_timer(struct timer_list *t); 18 static void bnx2fc_ofld_timer(struct timer_list *t); 19 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, 20 struct fcoe_port *port, 21 struct fc_rport_priv *rdata); 22 static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, 23 struct bnx2fc_rport *tgt); 24 static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, 25 struct bnx2fc_rport *tgt); 26 static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, 27 struct bnx2fc_rport *tgt); 28 static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); 29 30 static void bnx2fc_upld_timer(struct timer_list *t) 31 { 32 33 struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer); 34 35 BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n"); 36 /* fake upload completion */ 37 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 38 clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); 39 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); 40 wake_up_interruptible(&tgt->upld_wait); 41 } 42 43 static void bnx2fc_ofld_timer(struct timer_list *t) 44 { 45 46 struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer); 47 48 BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n"); 49 /* NOTE: This function should never be called, as 50 * offload should never timeout 51 */ 52 /* 53 * If the timer has expired, this session is dead 54 * Clear offloaded flag and logout of this device. 55 * Since OFFLOADED flag is cleared, this case 56 * will be considered as offload error and the 57 * port will be logged off, and conn_id, session 58 * resources are freed up in bnx2fc_offload_session 59 */ 60 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 61 clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); 62 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 63 wake_up_interruptible(&tgt->ofld_wait); 64 } 65 66 static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt) 67 { 68 timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0); 69 mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); 70 71 wait_event_interruptible(tgt->ofld_wait, 72 (test_bit( 73 BNX2FC_FLAG_OFLD_REQ_CMPL, 74 &tgt->flags))); 75 if (signal_pending(current)) 76 flush_signals(current); 77 del_timer_sync(&tgt->ofld_timer); 78 } 79 80 static void bnx2fc_offload_session(struct fcoe_port *port, 81 struct bnx2fc_rport *tgt, 82 struct fc_rport_priv *rdata) 83 { 84 struct fc_rport *rport = rdata->rport; 85 struct bnx2fc_interface *interface = port->priv; 86 struct bnx2fc_hba *hba = interface->hba; 87 int rval; 88 int i = 0; 89 90 /* Initialize bnx2fc_rport */ 91 /* NOTE: tgt is already bzero'd */ 92 rval = bnx2fc_init_tgt(tgt, port, rdata); 93 if (rval) { 94 printk(KERN_ERR PFX "Failed to allocate conn id for " 95 "port_id (%6x)\n", rport->port_id); 96 goto tgt_init_err; 97 } 98 99 /* Allocate session resources */ 100 rval = bnx2fc_alloc_session_resc(hba, tgt); 101 if (rval) { 102 printk(KERN_ERR PFX "Failed to allocate resources\n"); 103 goto ofld_err; 104 } 105 106 /* 107 * Initialize FCoE session offload process. 108 * Upon completion of offload process add 109 * rport to list of rports 110 */ 111 retry_ofld: 112 clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 113 rval = bnx2fc_send_session_ofld_req(port, tgt); 114 if (rval) { 115 printk(KERN_ERR PFX "ofld_req failed\n"); 116 goto ofld_err; 117 } 118 119 /* 120 * wait for the session is offloaded and enabled. 3 Secs 121 * should be ample time for this process to complete. 122 */ 123 bnx2fc_ofld_wait(tgt); 124 125 if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { 126 if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, 127 &tgt->flags)) { 128 BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " 129 "retry ofld..%d\n", i++); 130 msleep_interruptible(1000); 131 if (i > 3) 132 goto ofld_err; 133 goto retry_ofld; 134 } 135 goto ofld_err; 136 } 137 if (bnx2fc_map_doorbell(tgt)) { 138 printk(KERN_ERR PFX "map doorbell failed - no mem\n"); 139 goto ofld_err; 140 } 141 clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 142 rval = bnx2fc_send_session_enable_req(port, tgt); 143 if (rval) { 144 pr_err(PFX "enable session failed\n"); 145 goto ofld_err; 146 } 147 bnx2fc_ofld_wait(tgt); 148 if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) 149 goto ofld_err; 150 return; 151 152 ofld_err: 153 /* couldn't offload the session. log off from this rport */ 154 BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); 155 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 156 /* Free session resources */ 157 bnx2fc_free_session_resc(hba, tgt); 158 tgt_init_err: 159 if (tgt->fcoe_conn_id != -1) 160 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); 161 fc_rport_logoff(rdata); 162 } 163 164 void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) 165 { 166 struct bnx2fc_cmd *io_req; 167 struct bnx2fc_cmd *tmp; 168 int rc; 169 int i = 0; 170 BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", 171 tgt->num_active_ios.counter); 172 173 spin_lock_bh(&tgt->tgt_lock); 174 tgt->flush_in_prog = 1; 175 176 list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { 177 i++; 178 list_del_init(&io_req->link); 179 io_req->on_active_queue = 0; 180 BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); 181 182 if (cancel_delayed_work(&io_req->timeout_work)) { 183 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 184 &io_req->req_flags)) { 185 /* Handle eh_abort timeout */ 186 BNX2FC_IO_DBG(io_req, "eh_abort for IO " 187 "cleaned up\n"); 188 complete(&io_req->abts_done); 189 } 190 kref_put(&io_req->refcount, 191 bnx2fc_cmd_release); /* drop timer hold */ 192 } 193 194 set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); 195 set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); 196 197 /* Do not issue cleanup when disable request failed */ 198 if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) 199 bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); 200 else { 201 rc = bnx2fc_initiate_cleanup(io_req); 202 BUG_ON(rc); 203 } 204 } 205 206 list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) { 207 i++; 208 list_del_init(&io_req->link); 209 io_req->on_tmf_queue = 0; 210 BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n"); 211 if (io_req->wait_for_abts_comp) 212 complete(&io_req->abts_done); 213 } 214 215 list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) { 216 i++; 217 list_del_init(&io_req->link); 218 io_req->on_active_queue = 0; 219 220 BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); 221 222 if (cancel_delayed_work(&io_req->timeout_work)) 223 kref_put(&io_req->refcount, 224 bnx2fc_cmd_release); /* drop timer hold */ 225 226 if ((io_req->cb_func) && (io_req->cb_arg)) { 227 io_req->cb_func(io_req->cb_arg); 228 io_req->cb_arg = NULL; 229 } 230 231 /* Do not issue cleanup when disable request failed */ 232 if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) 233 bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); 234 else { 235 rc = bnx2fc_initiate_cleanup(io_req); 236 BUG_ON(rc); 237 } 238 } 239 240 list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) { 241 i++; 242 list_del_init(&io_req->link); 243 244 BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); 245 246 if (cancel_delayed_work(&io_req->timeout_work)) { 247 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, 248 &io_req->req_flags)) { 249 /* Handle eh_abort timeout */ 250 BNX2FC_IO_DBG(io_req, "eh_abort for IO " 251 "in retire_q\n"); 252 if (io_req->wait_for_abts_comp) 253 complete(&io_req->abts_done); 254 } 255 kref_put(&io_req->refcount, bnx2fc_cmd_release); 256 } 257 258 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); 259 } 260 261 BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); 262 i = 0; 263 spin_unlock_bh(&tgt->tgt_lock); 264 /* wait for active_ios to go to 0 */ 265 while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) 266 msleep(25); 267 if (tgt->num_active_ios.counter != 0) 268 printk(KERN_ERR PFX "CLEANUP on port 0x%x:" 269 " active_ios = %d\n", 270 tgt->rdata->ids.port_id, tgt->num_active_ios.counter); 271 spin_lock_bh(&tgt->tgt_lock); 272 tgt->flush_in_prog = 0; 273 spin_unlock_bh(&tgt->tgt_lock); 274 } 275 276 static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt) 277 { 278 timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0); 279 mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); 280 wait_event_interruptible(tgt->upld_wait, 281 (test_bit( 282 BNX2FC_FLAG_UPLD_REQ_COMPL, 283 &tgt->flags))); 284 if (signal_pending(current)) 285 flush_signals(current); 286 del_timer_sync(&tgt->upld_timer); 287 } 288 289 static void bnx2fc_upload_session(struct fcoe_port *port, 290 struct bnx2fc_rport *tgt) 291 { 292 struct bnx2fc_interface *interface = port->priv; 293 struct bnx2fc_hba *hba = interface->hba; 294 295 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", 296 tgt->num_active_ios.counter); 297 298 /* 299 * Called with hba->hba_mutex held. 300 * This is a blocking call 301 */ 302 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); 303 bnx2fc_send_session_disable_req(port, tgt); 304 305 /* 306 * wait for upload to complete. 3 Secs 307 * should be sufficient time for this process to complete. 308 */ 309 BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n"); 310 bnx2fc_upld_wait(tgt); 311 312 /* 313 * traverse thru the active_q and tmf_q and cleanup 314 * IOs in these lists 315 */ 316 BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n", 317 tgt->flags); 318 bnx2fc_flush_active_ios(tgt); 319 320 /* Issue destroy KWQE */ 321 if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) { 322 BNX2FC_TGT_DBG(tgt, "send destroy req\n"); 323 clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); 324 bnx2fc_send_session_destroy_req(hba, tgt); 325 326 /* wait for destroy to complete */ 327 bnx2fc_upld_wait(tgt); 328 329 if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags))) 330 printk(KERN_ERR PFX "ERROR!! destroy timed out\n"); 331 332 BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n", 333 tgt->flags); 334 335 } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) { 336 printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy" 337 " not sent to FW\n"); 338 } else { 339 printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy" 340 " not sent to FW\n"); 341 } 342 343 /* Free session resources */ 344 bnx2fc_free_session_resc(hba, tgt); 345 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); 346 } 347 348 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, 349 struct fcoe_port *port, 350 struct fc_rport_priv *rdata) 351 { 352 353 struct fc_rport *rport = rdata->rport; 354 struct bnx2fc_interface *interface = port->priv; 355 struct bnx2fc_hba *hba = interface->hba; 356 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; 357 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; 358 359 tgt->rport = rport; 360 tgt->rdata = rdata; 361 tgt->port = port; 362 363 if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { 364 BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n"); 365 tgt->fcoe_conn_id = -1; 366 return -1; 367 } 368 369 tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); 370 if (tgt->fcoe_conn_id == -1) 371 return -1; 372 373 BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id); 374 375 tgt->max_sqes = BNX2FC_SQ_WQES_MAX; 376 tgt->max_rqes = BNX2FC_RQ_WQES_MAX; 377 tgt->max_cqes = BNX2FC_CQ_WQES_MAX; 378 atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); 379 380 /* Initialize the toggle bit */ 381 tgt->sq_curr_toggle_bit = 1; 382 tgt->cq_curr_toggle_bit = 1; 383 tgt->sq_prod_idx = 0; 384 tgt->cq_cons_idx = 0; 385 tgt->rq_prod_idx = 0x8000; 386 tgt->rq_cons_idx = 0; 387 atomic_set(&tgt->num_active_ios, 0); 388 tgt->retry_delay_timestamp = 0; 389 390 if (rdata->flags & FC_RP_FLAGS_RETRY && 391 rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && 392 !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { 393 tgt->dev_type = TYPE_TAPE; 394 tgt->io_timeout = 0; /* use default ULP timeout */ 395 } else { 396 tgt->dev_type = TYPE_DISK; 397 tgt->io_timeout = BNX2FC_IO_TIMEOUT; 398 } 399 400 /* initialize sq doorbell */ 401 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; 402 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << 403 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; 404 /* initialize rx doorbell */ 405 rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | 406 (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | 407 (B577XX_FCOE_CONNECTION_TYPE << 408 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); 409 rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | 410 (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); 411 412 spin_lock_init(&tgt->tgt_lock); 413 spin_lock_init(&tgt->cq_lock); 414 415 /* Initialize active_cmd_queue list */ 416 INIT_LIST_HEAD(&tgt->active_cmd_queue); 417 418 /* Initialize IO retire queue */ 419 INIT_LIST_HEAD(&tgt->io_retire_queue); 420 421 INIT_LIST_HEAD(&tgt->els_queue); 422 423 /* Initialize active_tm_queue list */ 424 INIT_LIST_HEAD(&tgt->active_tm_queue); 425 426 init_waitqueue_head(&tgt->ofld_wait); 427 init_waitqueue_head(&tgt->upld_wait); 428 429 return 0; 430 } 431 432 /* 433 * This event_callback is called after successful completion of libfc 434 * initiated target login. bnx2fc can proceed with initiating the session 435 * establishment. 436 */ 437 void bnx2fc_rport_event_handler(struct fc_lport *lport, 438 struct fc_rport_priv *rdata, 439 enum fc_rport_event event) 440 { 441 struct fcoe_port *port = lport_priv(lport); 442 struct bnx2fc_interface *interface = port->priv; 443 struct bnx2fc_hba *hba = interface->hba; 444 struct fc_rport *rport = rdata->rport; 445 struct fc_rport_libfc_priv *rp; 446 struct bnx2fc_rport *tgt; 447 u32 port_id; 448 449 BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n", 450 event, rdata->ids.port_id); 451 switch (event) { 452 case RPORT_EV_READY: 453 if (!rport) { 454 printk(KERN_ERR PFX "rport is NULL: ERROR!\n"); 455 break; 456 } 457 458 rp = rport->dd_data; 459 if (rport->port_id == FC_FID_DIR_SERV) { 460 /* 461 * bnx2fc_rport structure doesn't exist for 462 * directory server. 463 * We should not come here, as lport will 464 * take care of fabric login 465 */ 466 printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n", 467 rdata->ids.port_id); 468 break; 469 } 470 471 if (rdata->spp_type != FC_TYPE_FCP) { 472 BNX2FC_HBA_DBG(lport, "not FCP type target." 473 " not offloading\n"); 474 break; 475 } 476 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { 477 BNX2FC_HBA_DBG(lport, "not FCP_TARGET" 478 " not offloading\n"); 479 break; 480 } 481 482 /* 483 * Offload process is protected with hba mutex. 484 * Use the same mutex_lock for upload process too 485 */ 486 mutex_lock(&hba->hba_mutex); 487 tgt = (struct bnx2fc_rport *)&rp[1]; 488 489 /* This can happen when ADISC finds the same target */ 490 if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { 491 BNX2FC_TGT_DBG(tgt, "already offloaded\n"); 492 mutex_unlock(&hba->hba_mutex); 493 return; 494 } 495 496 /* 497 * Offload the session. This is a blocking call, and will 498 * wait until the session is offloaded. 499 */ 500 bnx2fc_offload_session(port, tgt, rdata); 501 502 BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n", 503 hba->num_ofld_sess); 504 505 if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { 506 /* Session is offloaded and enabled. */ 507 BNX2FC_TGT_DBG(tgt, "sess offloaded\n"); 508 /* This counter is protected with hba mutex */ 509 hba->num_ofld_sess++; 510 511 set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); 512 } else { 513 /* 514 * Offload or enable would have failed. 515 * In offload/enable completion path, the 516 * rport would have already been removed 517 */ 518 BNX2FC_TGT_DBG(tgt, "Port is being logged off as " 519 "offloaded flag not set\n"); 520 } 521 mutex_unlock(&hba->hba_mutex); 522 break; 523 case RPORT_EV_LOGO: 524 case RPORT_EV_FAILED: 525 case RPORT_EV_STOP: 526 port_id = rdata->ids.port_id; 527 if (port_id == FC_FID_DIR_SERV) 528 break; 529 530 if (!rport) { 531 printk(KERN_INFO PFX "%x - rport not created Yet!!\n", 532 port_id); 533 break; 534 } 535 rp = rport->dd_data; 536 mutex_lock(&hba->hba_mutex); 537 /* 538 * Perform session upload. Note that rdata->peers is already 539 * removed from disc->rports list before we get this event. 540 */ 541 tgt = (struct bnx2fc_rport *)&rp[1]; 542 543 if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) { 544 mutex_unlock(&hba->hba_mutex); 545 break; 546 } 547 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); 548 549 bnx2fc_upload_session(port, tgt); 550 hba->num_ofld_sess--; 551 BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n", 552 hba->num_ofld_sess); 553 /* 554 * Try to wake up the linkdown wait thread. If num_ofld_sess 555 * is 0, the waiting therad wakes up 556 */ 557 if ((hba->wait_for_link_down) && 558 (hba->num_ofld_sess == 0)) { 559 wake_up_interruptible(&hba->shutdown_wait); 560 } 561 mutex_unlock(&hba->hba_mutex); 562 563 break; 564 565 case RPORT_EV_NONE: 566 break; 567 } 568 } 569 570 /** 571 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id 572 * 573 * @port: fcoe_port struct to lookup the target port on 574 * @port_id: The remote port ID to look up 575 */ 576 struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, 577 u32 port_id) 578 { 579 struct bnx2fc_interface *interface = port->priv; 580 struct bnx2fc_hba *hba = interface->hba; 581 struct bnx2fc_rport *tgt; 582 struct fc_rport_priv *rdata; 583 int i; 584 585 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { 586 tgt = hba->tgt_ofld_list[i]; 587 if ((tgt) && (tgt->port == port)) { 588 rdata = tgt->rdata; 589 if (rdata->ids.port_id == port_id) { 590 if (rdata->rp_state != RPORT_ST_DELETE) { 591 BNX2FC_TGT_DBG(tgt, "rport " 592 "obtained\n"); 593 return tgt; 594 } else { 595 BNX2FC_TGT_DBG(tgt, "rport 0x%x " 596 "is in DELETED state\n", 597 rdata->ids.port_id); 598 return NULL; 599 } 600 } 601 } 602 } 603 return NULL; 604 } 605 606 607 /** 608 * bnx2fc_alloc_conn_id - allocates FCOE Connection id 609 * 610 * @hba: pointer to adapter structure 611 * @tgt: pointer to bnx2fc_rport structure 612 */ 613 static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, 614 struct bnx2fc_rport *tgt) 615 { 616 u32 conn_id, next; 617 618 /* called with hba mutex held */ 619 620 /* 621 * tgt_ofld_list access is synchronized using 622 * both hba mutex and hba lock. Atleast hba mutex or 623 * hba lock needs to be held for read access. 624 */ 625 626 spin_lock_bh(&hba->hba_lock); 627 next = hba->next_conn_id; 628 conn_id = hba->next_conn_id++; 629 if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) 630 hba->next_conn_id = 0; 631 632 while (hba->tgt_ofld_list[conn_id] != NULL) { 633 conn_id++; 634 if (conn_id == BNX2FC_NUM_MAX_SESS) 635 conn_id = 0; 636 637 if (conn_id == next) { 638 /* No free conn_ids are available */ 639 spin_unlock_bh(&hba->hba_lock); 640 return -1; 641 } 642 } 643 hba->tgt_ofld_list[conn_id] = tgt; 644 tgt->fcoe_conn_id = conn_id; 645 spin_unlock_bh(&hba->hba_lock); 646 return conn_id; 647 } 648 649 static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) 650 { 651 /* called with hba mutex held */ 652 spin_lock_bh(&hba->hba_lock); 653 hba->tgt_ofld_list[conn_id] = NULL; 654 spin_unlock_bh(&hba->hba_lock); 655 } 656 657 /* 658 * bnx2fc_alloc_session_resc - Allocate qp resources for the session 659 */ 660 static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, 661 struct bnx2fc_rport *tgt) 662 { 663 dma_addr_t page; 664 int num_pages; 665 u32 *pbl; 666 667 /* Allocate and map SQ */ 668 tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; 669 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 670 CNIC_PAGE_MASK; 671 672 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 673 &tgt->sq_dma, GFP_KERNEL); 674 if (!tgt->sq) { 675 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 676 tgt->sq_mem_size); 677 goto mem_alloc_failure; 678 } 679 680 /* Allocate and map CQ */ 681 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; 682 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 683 CNIC_PAGE_MASK; 684 685 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 686 &tgt->cq_dma, GFP_KERNEL); 687 if (!tgt->cq) { 688 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 689 tgt->cq_mem_size); 690 goto mem_alloc_failure; 691 } 692 693 /* Allocate and map RQ and RQ PBL */ 694 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; 695 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 696 CNIC_PAGE_MASK; 697 698 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 699 &tgt->rq_dma, GFP_KERNEL); 700 if (!tgt->rq) { 701 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 702 tgt->rq_mem_size); 703 goto mem_alloc_failure; 704 } 705 706 tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); 707 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 708 CNIC_PAGE_MASK; 709 710 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 711 &tgt->rq_pbl_dma, GFP_KERNEL); 712 if (!tgt->rq_pbl) { 713 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 714 tgt->rq_pbl_size); 715 goto mem_alloc_failure; 716 } 717 718 num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; 719 page = tgt->rq_dma; 720 pbl = (u32 *)tgt->rq_pbl; 721 722 while (num_pages--) { 723 *pbl = (u32)page; 724 pbl++; 725 *pbl = (u32)((u64)page >> 32); 726 pbl++; 727 page += CNIC_PAGE_SIZE; 728 } 729 730 /* Allocate and map XFERQ */ 731 tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; 732 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 733 CNIC_PAGE_MASK; 734 735 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, 736 tgt->xferq_mem_size, &tgt->xferq_dma, 737 GFP_KERNEL); 738 if (!tgt->xferq) { 739 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 740 tgt->xferq_mem_size); 741 goto mem_alloc_failure; 742 } 743 744 /* Allocate and map CONFQ & CONFQ PBL */ 745 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; 746 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 747 CNIC_PAGE_MASK; 748 749 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, 750 tgt->confq_mem_size, &tgt->confq_dma, 751 GFP_KERNEL); 752 if (!tgt->confq) { 753 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 754 tgt->confq_mem_size); 755 goto mem_alloc_failure; 756 } 757 758 tgt->confq_pbl_size = 759 (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); 760 tgt->confq_pbl_size = 761 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 762 763 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, 764 tgt->confq_pbl_size, 765 &tgt->confq_pbl_dma, GFP_KERNEL); 766 if (!tgt->confq_pbl) { 767 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 768 tgt->confq_pbl_size); 769 goto mem_alloc_failure; 770 } 771 772 num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; 773 page = tgt->confq_dma; 774 pbl = (u32 *)tgt->confq_pbl; 775 776 while (num_pages--) { 777 *pbl = (u32)page; 778 pbl++; 779 *pbl = (u32)((u64)page >> 32); 780 pbl++; 781 page += CNIC_PAGE_SIZE; 782 } 783 784 /* Allocate and map ConnDB */ 785 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 786 787 tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, 788 tgt->conn_db_mem_size, 789 &tgt->conn_db_dma, GFP_KERNEL); 790 if (!tgt->conn_db) { 791 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 792 tgt->conn_db_mem_size); 793 goto mem_alloc_failure; 794 } 795 796 797 /* Allocate and map LCQ */ 798 tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; 799 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 800 CNIC_PAGE_MASK; 801 802 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 803 &tgt->lcq_dma, GFP_KERNEL); 804 805 if (!tgt->lcq) { 806 printk(KERN_ERR PFX "unable to allocate lcq %d\n", 807 tgt->lcq_mem_size); 808 goto mem_alloc_failure; 809 } 810 811 tgt->conn_db->rq_prod = 0x8000; 812 813 return 0; 814 815 mem_alloc_failure: 816 return -ENOMEM; 817 } 818 819 /** 820 * bnx2fc_free_session_resc - free qp resources for the session 821 * 822 * @hba: adapter structure pointer 823 * @tgt: bnx2fc_rport structure pointer 824 * 825 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL 826 */ 827 static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, 828 struct bnx2fc_rport *tgt) 829 { 830 void __iomem *ctx_base_ptr; 831 832 BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); 833 834 ctx_base_ptr = tgt->ctx_base; 835 tgt->ctx_base = NULL; 836 837 /* Free LCQ */ 838 if (tgt->lcq) { 839 dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 840 tgt->lcq, tgt->lcq_dma); 841 tgt->lcq = NULL; 842 } 843 /* Free connDB */ 844 if (tgt->conn_db) { 845 dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, 846 tgt->conn_db, tgt->conn_db_dma); 847 tgt->conn_db = NULL; 848 } 849 /* Free confq and confq pbl */ 850 if (tgt->confq_pbl) { 851 dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, 852 tgt->confq_pbl, tgt->confq_pbl_dma); 853 tgt->confq_pbl = NULL; 854 } 855 if (tgt->confq) { 856 dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 857 tgt->confq, tgt->confq_dma); 858 tgt->confq = NULL; 859 } 860 /* Free XFERQ */ 861 if (tgt->xferq) { 862 dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 863 tgt->xferq, tgt->xferq_dma); 864 tgt->xferq = NULL; 865 } 866 /* Free RQ PBL and RQ */ 867 if (tgt->rq_pbl) { 868 dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 869 tgt->rq_pbl, tgt->rq_pbl_dma); 870 tgt->rq_pbl = NULL; 871 } 872 if (tgt->rq) { 873 dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 874 tgt->rq, tgt->rq_dma); 875 tgt->rq = NULL; 876 } 877 /* Free CQ */ 878 if (tgt->cq) { 879 dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 880 tgt->cq, tgt->cq_dma); 881 tgt->cq = NULL; 882 } 883 /* Free SQ */ 884 if (tgt->sq) { 885 dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 886 tgt->sq, tgt->sq_dma); 887 tgt->sq = NULL; 888 } 889 890 if (ctx_base_ptr) 891 iounmap(ctx_base_ptr); 892 } 893