/*
 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
 *
 * Copyright (c) 2006 - 2013 Broadcom Corporation
 * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
 */

#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"

struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/*
 * Global endpoint resource info
 */
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */

DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);

static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
	int retval = 0;

	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		retval = -EPERM;
	return retval;
}

/**
 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
 * @cmd: iscsi cmd struct pointer
 * @buf_off: absolute buffer offset
 * @start_bd_off: u32 pointer to return the offset within the BD
 *		indicated by 'start_bd_idx' on which 'buf_off' falls
 * @start_bd_idx: index of the BD on which 'buf_off' falls
 *
 * identifies & marks various bd info for scsi command's imm data,
 * unsolicited data and the first solicited data seq.
 */
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
				       u32 *start_bd_off, u32 *start_bd_idx)
{
	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
	u32 cur_offset = 0;
	u32 cur_bd_idx = 0;

	if (buf_off) {
		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
			cur_offset += bd_tbl->buffer_length;
			cur_bd_idx++;
			bd_tbl++;
		}
	}

	*start_bd_off = buf_off - cur_offset;
	*start_bd_idx = cur_bd_idx;
}

/**
 * bnx2i_setup_write_cmd_bd_info - sets up various BD information
 * @task: transport layer's cmd struct pointer
 *
 * identifies & marks various bd info for scsi command's immediate data,
 * unsolicited data and first solicited data seq which includes BD start
 * index & BD buf off. This function takes into account iSCSI parameters
 * such as immediate data and unsolicited data support on this connection.
 */
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & InitialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	if (iscsi_task_has_unsol_data(task)) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	if (buffer_offset != cmd_len) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i,
						  cmd->io_tbl.bd_tbl[i].buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}


/**
 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
 * @hba: adapter instance
 * @cmd: iscsi cmd struct pointer
 *
 * map SG list
 */
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}

/**
 * bnx2i_iscsi_map_sg_list - maps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * creates BD list table for the command
 */
static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
	int bd_count;

	bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
	if (!bd_count) {
		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
		bd[0].buffer_length = bd[0].flags = 0;
	}
	cmd->io_tbl.bd_valid = bd_count;
}


/**
 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * unmap IO buffers and invalidate the BD table
 */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (cmd->io_tbl.bd_valid && sc) {
		scsi_dma_unmap(sc);
		cmd->io_tbl.bd_valid = 0;
	}
}

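/*
 * Editor's note (illustrative, not driver logic): bnx2i_map_scsi_sg()
 * above turns the command's DMA-mapped scatterlist into the BD chain the
 * firmware walks. Assuming a hypothetical 3-element scatterlist, the
 * resulting table would look roughly like:
 *
 *   bd[0]: addr_lo/hi = sg0 DMA addr, len = sg0 len,
 *          flags = ISCSI_BD_FIRST_IN_BD_CHAIN
 *   bd[1]: addr_lo/hi = sg1 DMA addr, len = sg1 len, flags = 0
 *   bd[2]: addr_lo/hi = sg2 DMA addr, len = sg2 len,
 *          flags = ISCSI_BD_LAST_IN_BD_CHAIN
 *
 * Each 64-bit DMA address is split into buffer_addr_lo (bits 31:0) and
 * buffer_addr_hi (bits 63:32), the same split used for the BD table base
 * address in bnx2i_setup_cmd_wqe_template() below.
 */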

static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
	memset(&cmd->req, 0x00, sizeof(cmd->req));
	cmd->req.op_code = 0xFF;
	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
	cmd->req.bd_list_addr_hi =
		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);

}


/**
 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
 * @hba: pointer to adapter instance
 * @bnx2i_conn: pointer to iscsi connection
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 *
 * update iscsi cid table entry with connection pointer. This enables
 * driver to quickly get hold of connection structure pointer in
 * completion/interrupt thread using iscsi context ID
 */
static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn,
					u32 iscsi_cid)
{
	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "conn bind - entry #%d not free\n",
				  iscsi_cid);
		return -EBUSY;
	}

	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
	return 0;
}


/**
 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
					  u16 iscsi_cid)
{
	if (!hba->cid_que.conn_cid_tbl) {
		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
		return NULL;

	} else if (iscsi_cid >= hba->max_active_conns) {
		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
		return NULL;
	}
	return hba->cid_que.conn_cid_tbl[iscsi_cid];
}


/**
 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
 * @hba: pointer to adapter instance
 */
static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
{
	int idx;

	if (!hba->cid_que.cid_free_cnt)
		return -1;

	idx = hba->cid_que.cid_q_cons_idx;
	hba->cid_que.cid_q_cons_idx++;
	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_cons_idx = 0;

	hba->cid_que.cid_free_cnt--;
	return hba->cid_que.cid_que[idx];
}


/**
 * bnx2i_free_iscsi_cid - returns an iscsi_cid back to the free pool
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to free
 */
static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
{
	int idx;

	if (iscsi_cid == (u16) -1)
		return;

	hba->cid_que.cid_free_cnt++;

	idx = hba->cid_que.cid_q_prod_idx;
	hba->cid_que.cid_que[idx] = iscsi_cid;
	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
	hba->cid_que.cid_q_prod_idx++;
	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_prod_idx = 0;
}


/**
 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
 * @hba: pointer to adapter instance
 *
 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
 * and initializes table attributes
 */
static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
{
	int mem_size;
	int i;

	mem_size = hba->max_active_conns * sizeof(u32);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.cid_que_base)
		return -ENOMEM;

	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.conn_cid_tbl) {
		kfree(hba->cid_que.cid_que_base);
		hba->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

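	/*
	 * Editor's note: the two allocations above back a simple free-cid
	 * ring. cid_que[] starts out holding every cid
	 * (0 .. max_active_conns - 1); bnx2i_alloc_iscsi_cid() pops at
	 * cid_q_cons_idx and bnx2i_free_iscsi_cid() pushes at
	 * cid_q_prod_idx, with cid_free_cnt tracking availability.
	 * conn_cid_tbl[] is the cid -> bnx2i_conn lookup used from
	 * completion context.
	 */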
	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
	hba->cid_que.cid_q_prod_idx = 0;
	hba->cid_que.cid_q_cons_idx = 0;
	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
	hba->cid_que.cid_free_cnt = hba->max_active_conns;

	for (i = 0; i < hba->max_active_conns; i++) {
		hba->cid_que.cid_que[i] = i;
		hba->cid_que.conn_cid_tbl[i] = NULL;
	}
	return 0;
}


/**
 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
 * @hba: pointer to adapter instance
 */
static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
{
	kfree(hba->cid_que.cid_que_base);
	hba->cid_que.cid_que_base = NULL;

	kfree(hba->cid_que.conn_cid_tbl);
	hba->cid_que.conn_cid_tbl = NULL;
}


/**
 * bnx2i_alloc_ep - allocates ep structure from global pool
 * @hba: pointer to adapter instance
 *
 * routine allocates a free endpoint structure from global pool and
 * a tcp port to be used for this connection. Global resource lock,
 * 'bnx2i_resc_lock' is held while accessing shared global data structures
 */
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
	struct iscsi_endpoint *ep;
	struct bnx2i_endpoint *bnx2i_ep;
	u32 ec_div;

	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
	if (!ep) {
		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	bnx2i_ep->cls_ep = ep;
	INIT_LIST_HEAD(&bnx2i_ep->link);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->ep_iscsi_cid = (u16) -1;
	bnx2i_ep->hba = hba;
	bnx2i_ep->hba_age = hba->age;

	ec_div = event_coal_div;
	while (ec_div >>= 1)
		bnx2i_ep->ec_shift += 1;

	hba->ofld_conns_active++;
	init_waitqueue_head(&bnx2i_ep->ofld_wait);
	return ep;
}


/**
 * bnx2i_free_ep - free endpoint
 * @ep: pointer to iscsi endpoint structure
 */
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	unsigned long flags;

	spin_lock_irqsave(&bnx2i_resc_lock, flags);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba->ofld_conns_active--;

	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);

	if (bnx2i_ep->conn) {
		bnx2i_ep->conn->ep = NULL;
		bnx2i_ep->conn = NULL;
	}

	bnx2i_ep->hba = NULL;
	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
	iscsi_destroy_endpoint(ep);
}


/**
 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 * @cmd: iscsi command structure
 */
static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
			   struct bnx2i_cmd *cmd)
{
	struct io_bdt *io = &cmd->io_tbl;
	struct iscsi_bd *bd;

	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
					&io->bd_tbl_dma, GFP_KERNEL);
	if (!io->bd_tbl) {
		iscsi_session_printk(KERN_ERR, session, "Could not "
				     "allocate bdt.\n");
		return -ENOMEM;
	}
	io->bd_valid = 0;
	return 0;
}
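
/*
 * Editor's note: every iscsi_task in a session gets a fixed-size,
 * DMA-coherent BD table of ISCSI_MAX_BDS_PER_CMD entries from
 * bnx2i_alloc_bdt() above, so a session's BD-table footprint is roughly
 * cmds_max * ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd) bytes
 * (allocated per command in bnx2i_setup_cmd_pool() and released in
 * bnx2i_destroy_cmd_pool() below).
 */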

/**
 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
				   struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		if (cmd->io_tbl.bd_tbl)
			dma_free_coherent(&hba->pcidev->dev,
					  ISCSI_MAX_BDS_PER_CMD *
					  sizeof(struct iscsi_bd),
					  cmd->io_tbl.bd_tbl,
					  cmd->io_tbl.bd_tbl_dma);
	}

}


/**
 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
				struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		task->hdr = &cmd->hdr;
		task->hdr_max = sizeof(struct iscsi_hdr);

		if (bnx2i_alloc_bdt(hba, session, cmd))
			goto free_bdts;
	}

	return 0;

free_bdts:
	bnx2i_destroy_cmd_pool(hba, session);
	return -ENOMEM;
}


/**
 * bnx2i_setup_mp_bdt - allocate BD table resources
 * @hba: pointer to adapter structure
 *
 * Allocate memory for dummy buffer and associated BD
 * table to be used by middle path (MP) requests
 */
static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
{
	int rc = 0;
	struct iscsi_bd *mp_bdt;
	u64 addr;

	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					    &hba->mp_bd_dma, GFP_KERNEL);
	if (!hba->mp_bd_tbl) {
		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
		rc = -1;
		goto out;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->dummy_buf_dma, GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
		rc = -1;
		goto out;
	}

	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
	addr = (unsigned long) hba->dummy_buf_dma;
	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
	mp_bdt->buffer_addr_hi = addr >> 32;
	mp_bdt->buffer_length = PAGE_SIZE;
	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
	return rc;
}


/**
 * bnx2i_free_mp_bdt - releases middle path (MP) BD table resources
 * @hba: pointer to adapter instance
 *
 * free MP dummy buffer and associated BD table
 */
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
	if (hba->mp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}
	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
	return;
}
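
/*
 * Editor's note: the middle-path (MP) BD set up in bnx2i_setup_mp_bdt()
 * is a single descriptor, flagged FIRST|LAST, that points at a PAGE_SIZE
 * dummy buffer. It is shared per adapter and is also what
 * bnx2i_task_xmit() falls back to as the BD list for commands that carry
 * no data buffers (io_tbl.bd_valid == 0).
 */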

/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: pointer to iscsi cls session
 *
 * This notifies iscsid that there is an error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}

/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}


/**
 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}

/**
 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);

	return ep;
}

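/*
 * Editor's note: an endpoint normally moves through these per-adapter
 * lists, all protected by ep_rdwr_lock: ep_ofld_list while the offload
 * request is outstanding (bnx2i_ep_connect), ep_active_list once the TCP
 * connection is up, and ep_destroy_list while the on-chip context is
 * being torn down (bnx2i_tear_down_conn). The two find helpers above let
 * completion handling map an iscsi_cid back to its endpoint.
 */
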
/**
 * bnx2i_ep_active_list_add - add an entry to ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_active_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
}


/**
 * bnx2i_ep_active_list_del - deletes an entry from the ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
}


/**
 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
 * @hba: pointer to adapter instance
 * @shost: scsi host pointer
 *
 * Initializes 'can_queue' parameter based on how many outstanding commands
 * the device can handle. Each device (5708/5709/57710) has different
 * capabilities.
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
					struct Scsi_Host *shost)
{
	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
	else
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
}


/**
 * bnx2i_alloc_hba - allocate and init adapter instance
 * @cnic: cnic device pointer
 *
 * allocate & initialize adapter structure and call other
 * support routines to do per adapter initialization
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
	if (!shost)
		return NULL;
	shost->dma_boundary = cnic->pcidev->dma_mask;
	shost->transportt = bnx2i_scsi_xport_template;
	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = 512;
	shost->max_cmd_len = 16;

	hba = iscsi_host_priv(shost);
	hba->shost = shost;
	hba->netdev = cnic->netdev;
	/* Get PCI related information and update hba struct members */
	hba->pcidev = cnic->pcidev;
	pci_dev_get(hba->pcidev);
	hba->pci_did = hba->pcidev->device;
	hba->pci_vid = hba->pcidev->vendor;
	hba->pci_sdid = hba->pcidev->subsystem_device;
	hba->pci_svid = hba->pcidev->subsystem_vendor;
	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);

	bnx2i_identify_device(hba, cnic);
	bnx2i_setup_host_queue_size(hba, shost);

	hba->reg_base = pci_resource_start(hba->pcidev, 0);
	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
		if (!hba->regview)
			goto ioreg_map_err;
	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->regview = pci_iomap(hba->pcidev, 0, 4096);
		if (!hba->regview)
			goto ioreg_map_err;
	}

	if (bnx2i_setup_mp_bdt(hba))
		goto mp_bdt_mem_err;

	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	rwlock_init(&hba->ep_rdwr_lock);

	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;

	/* different values for 5708/5709/57710 */
	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;

	if (bnx2i_setup_free_cid_que(hba))
		goto cid_que_err;

	/* SQ/RQ/CQ size can be changed via sysfs interface */
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
	} else {	/* 5706/5708/5709 */
		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
	}

	hba->max_rqes = rq_size;
	hba->max_cqes = hba->max_sqes + rq_size;
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;

	hba->num_ccell = hba->max_sqes / 2;

	spin_lock_init(&hba->lock);
	mutex_init(&hba->net_dev_lock);
	init_waitqueue_head(&hba->eh_wait);
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->hba_shutdown_tmo = 30 * HZ;
		hba->conn_teardown_tmo = 20 * HZ;
		hba->conn_ctx_destroy_tmo = 6 * HZ;
	} else {	/* 5706/5708/5709 */
		hba->hba_shutdown_tmo = 20 * HZ;
		hba->conn_teardown_tmo = 10 * HZ;
		hba->conn_ctx_destroy_tmo = 2 * HZ;
	}

#ifdef CONFIG_32BIT
	spin_lock_init(&hba->stat_lock);
#endif
	memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));

	if (iscsi_host_add(shost, &hba->pcidev->dev))
		goto free_dump_mem;
	return hba;

free_dump_mem:
	bnx2i_release_free_cid_que(hba);
cid_que_err:
	bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
	}
ioreg_map_err:
	pci_dev_put(hba->pcidev);
	scsi_host_put(shost);
	return NULL;
}
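
/*
 * Editor's note: a rough sketch of how the queue sizes chosen above
 * relate, using the sq_size/rq_size tunables validated in
 * bnx2i_alloc_hba():
 *
 *   max_sqes  = sq_size (clamped to the 5706/5708/5709 or 57710 maximum,
 *               or the per-family default when sq_size is 0)
 *   max_rqes  = rq_size
 *   max_cqes  = max_sqes + rq_size, capped at the per-family CQ maximum
 *   num_ccell = max_sqes / 2
 */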

/**
 * bnx2i_free_hba - releases hba structure and resources held by the adapter
 * @hba: pointer to adapter instance
 *
 * free adapter structure and call various cleanup routines.
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
	struct Scsi_Host *shost = hba->shost;

	iscsi_host_remove(shost);
	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	pci_dev_put(hba->pcidev);

	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
	}
	bnx2i_free_mp_bdt(hba);
	bnx2i_release_free_cid_que(hba);
	iscsi_host_free(shost);
}

/**
 * bnx2i_conn_free_login_resources - free DMA resources used for login process
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Login related resources, mostly BDT & payload DMA memory is freed
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.resp_bd_tbl,
				  bnx2i_conn->gen_pdu.resp_bd_dma);
		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.req_bd_tbl,
				  bnx2i_conn->gen_pdu.req_bd_dma);
		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.resp_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.resp_buf,
				  bnx2i_conn->gen_pdu.resp_dma_addr);
		bnx2i_conn->gen_pdu.resp_buf = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.req_buf,
				  bnx2i_conn->gen_pdu.req_dma_addr);
		bnx2i_conn->gen_pdu.req_buf = NULL;
	}
}

/**
 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Mgmt task DMA resources are allocated in this routine.
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	/* Allocate memory for login request/response buffers */
	bnx2i_conn->gen_pdu.req_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.req_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_buf == NULL)
		goto login_req_buf_failure;

	bnx2i_conn->gen_pdu.req_buf_size = 0;
	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

	bnx2i_conn->gen_pdu.resp_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.resp_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
		goto login_resp_buf_failure;

	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

	bnx2i_conn->gen_pdu.req_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
		goto login_req_bd_tbl_failure;

	bnx2i_conn->gen_pdu.resp_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.resp_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
		goto login_resp_bd_tbl_failure;

	return 0;

login_resp_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  bnx2i_conn->gen_pdu.req_bd_tbl,
			  bnx2i_conn->gen_pdu.req_bd_dma);
	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.resp_buf,
			  bnx2i_conn->gen_pdu.resp_dma_addr);
	bnx2i_conn->gen_pdu.resp_buf = NULL;
login_resp_buf_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.req_buf,
			  bnx2i_conn->gen_pdu.req_dma_addr);
	bnx2i_conn->gen_pdu.req_buf = NULL;
login_req_buf_failure:
	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
			  "login resource alloc failed!!\n");
	return -ENOMEM;

}


/**
 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
 * @bnx2i_conn: iscsi connection pointer
 *
 * Prepares the request and response BD tables for generic PDUs built by
 * the 'iscsid' daemon, before the requests are shipped to cnic.
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_bd *bd_tbl;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

	bd_tbl->buffer_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
				bnx2i_conn->gen_pdu.req_buf;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
}
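
/*
 * Editor's note: for generic (login/nop/logout/text) PDUs, the request and
 * response BD tables prepared above each hold exactly one descriptor
 * flagged FIRST|LAST. The request BD length is derived from how much the
 * caller wrote into req_buf (req_wr_ptr - req_buf, set in
 * bnx2i_mtask_xmit()), while the response BD always exposes the full
 * ISCSI_DEF_MAX_RECV_SEG_LEN buffer.
 */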

/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task: transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
 * Nop-out and Logout requests flow through this path.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	struct bnx2i_conn *bnx2i_conn = cmd->conn;
	int rc = 0;
	char *buf;
	int data_len;

	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		bnx2i_send_iscsi_login(bnx2i_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		data_len = bnx2i_conn->gen_pdu.req_buf_size;
		buf = bnx2i_conn->gen_pdu.req_buf;
		if (data_len)
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     buf, data_len, 1);
		else
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
		break;
	case ISCSI_OP_TEXT:
		rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
		break;
	default:
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "send_gen: unsupported op 0x%x\n",
				  task->hdr->opcode);
	}
	return rc;
}


/**********************************************************************
 *		SCSI-ML Interface
 **********************************************************************/

/**
 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
 * @sc: SCSI-ML command pointer
 * @cmd: iscsi cmd pointer
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
	u32 dword;
	int lpcnt;
	u8 *srcp;
	u32 *dstp;
	u32 scsi_lun[2];

	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
	srcp = (u8 *) sc->cmnd;
	dstp = (u32 *) cmd->req.cdb;
	while (lpcnt--) {
		memcpy(&dword, (const void *) srcp, 4);
		*dstp = cpu_to_be32(dword);
		srcp += 4;
		dstp++;
	}
	if (sc->cmd_len & 0x3) {
		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
		*dstp = cpu_to_be32(dword);
	}
}

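/*
 * Editor's note (illustrative only): bnx2i_cpy_scsi_cdb() above copies the
 * CDB into the WQE as big-endian 32-bit words. For a hypothetical 6-byte
 * CDB the loop converts bytes 0-3 as one dword, and the trailing
 * "cmd_len & 0x3" case packs the remaining bytes 4-5 into the last dword.
 */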

static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		spin_unlock_bh(&conn->session->lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}

/**
 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
 * @conn: transport layer conn structure pointer
 * @task: transport layer command structure pointer
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct bnx2i_cmd *cmd = task->dd_data;

	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);

	bnx2i_setup_cmd_wqe_template(cmd);
	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;

	/* Tx PDU/data length count */
	ADD_STATS_64(hba, tx_pdus, 1);
	ADD_STATS_64(hba, tx_bytes, task->data_count);

	if (task->data_count) {
		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
		       task->data_count);
		bnx2i_conn->gen_pdu.req_wr_ptr =
			bnx2i_conn->gen_pdu.req_buf + task->data_count;
	}
	cmd->conn = conn->dd_data;
	cmd->scsi_cmd = NULL;
	return bnx2i_iscsi_send_generic_request(task);
}

/**
 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
 * @task: transport layer command structure pointer
 *
 * maps SG buffers and sends the request to the chip/firmware in the form
 * of an SQ WQE
 */
static int bnx2i_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct bnx2i_cmd *cmd = task->dd_data;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;

	if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
	    hba->max_sqes)
		return -ENOMEM;

	/*
	 * If there is no scsi_cmnd this must be a mgmt task
	 */
	if (!sc)
		return bnx2i_mtask_xmit(conn, task);

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = sc;
	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

	bnx2i_iscsi_map_sg_list(cmd);
	bnx2i_cpy_scsi_cdb(sc, cmd);

	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
		bnx2i_setup_write_cmd_bd_info(task);
	} else {
		if (scsi_bufflen(sc))
			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	}

	cmd->req.num_bds = cmd->io_tbl.bd_valid;
	if (!cmd->io_tbl.bd_valid) {
		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
		cmd->req.bd_list_addr_hi = (u32) ((u64)
					hba->mp_bd_dma >> 32);
		cmd->req.num_bds = 1;
	}

	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
	return 0;
}

/**
 * bnx2i_session_create - create a new iscsi session
 * @ep: pointer to iscsi endpoint
 * @cmds_max: max commands supported
 * @qdepth: scsi queue depth to support
 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
 *
 * Creates a new iSCSI session instance on given device.
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
		     uint16_t cmds_max, uint16_t qdepth,
		     uint32_t initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct bnx2i_hba *hba;
	struct bnx2i_endpoint *bnx2i_ep;

	if (!ep) {
		printk(KERN_ERR "bnx2i: missing ep.\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	shost = bnx2i_ep->hba->shost;
	hba = iscsi_host_priv(shost);
	if (bnx2i_adapter_ready(hba))
		return NULL;

	/*
	 * user can override hw limit as long as it is within
	 * the min/max.
	 */
	if (cmds_max > hba->max_sqes)
		cmds_max = hba->max_sqes;
	else if (cmds_max < BNX2I_SQ_WQES_MIN)
		cmds_max = BNX2I_SQ_WQES_MIN;

	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
					  cmds_max, 0, sizeof(struct bnx2i_cmd),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
		goto session_teardown;
	return cls_session;

session_teardown:
	iscsi_session_teardown(cls_session);
	return NULL;
}


/**
 * bnx2i_session_destroy - destroys iscsi session
 * @cls_session: pointer to iscsi cls session
 *
 * Destroys previously created iSCSI session instance and releases
 * all resources held by it
 */
static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_destroy_cmd_pool(hba, session);
	iscsi_session_teardown(cls_session);
}


/**
 * bnx2i_conn_create - create iscsi connection instance
 * @cls_session: pointer to iscsi cls session
 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
 *
 * Creates a new iSCSI connection instance for a given session
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
				    cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	bnx2i_conn = conn->dd_data;
	bnx2i_conn->cls_conn = cls_conn;
	bnx2i_conn->hba = hba;

	atomic_set(&bnx2i_conn->work_cnt, 0);

	/* 'ep' ptr will be assigned in bind() call */
	bnx2i_conn->ep = NULL;
	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_new: login resc alloc failed!!\n");
		goto free_conn;
	}

	return cls_conn;

free_conn:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

/**
 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: 64-bit EP handle
 * @is_leading: leading connection on this session?
 *
 * Binds together the iSCSI session instance, the iSCSI connection instance
 * and the TCP connection. This routine returns an error code if the TCP
 * connection does not belong to the device the iSCSI sess/conn is bound to.
 */
static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
			   struct iscsi_cls_conn *cls_conn,
			   uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_endpoint *bnx2i_ep;
	struct iscsi_endpoint *ep;
	int ret_code;

	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
	/*
	 * Forcefully terminate all in progress connection recovery at the
	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
	 */
	if (bnx2i_adapter_ready(hba))
		return -EIO;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
		/* Peer disconnect via FIN or RST */
		return -EINVAL;

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	if (bnx2i_ep->hba != hba) {
		/* Error - TCP connection does not belong to this device
		 */
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "conn bind, ep=0x%p (%s) does not",
				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "belong to hba (%s)\n",
				  hba->netdev->name);
		return -EEXIST;
	}
	bnx2i_ep->conn = bnx2i_conn;
	bnx2i_conn->ep = bnx2i_ep;
	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;

	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
						bnx2i_ep->ep_iscsi_cid);

	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
	 * driver needs to explicitly replenish RQ index during setup.
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		bnx2i_put_rq_buf(bnx2i_conn, 0);

	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
	return ret_code;
}


/**
 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
 * @cls_conn: pointer to iscsi cls conn
 *
 * Destroy an iSCSI connection instance and release memory resources held by
 * this connection
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;
	struct bnx2i_work *work, *tmp;
	unsigned cpu = 0;
	struct bnx2i_percpu_s *p;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	hba = iscsi_host_priv(shost);

	bnx2i_conn_free_login_resources(hba, bnx2i_conn);

	if (atomic_read(&bnx2i_conn->work_cnt)) {
		for_each_online_cpu(cpu) {
			p = &per_cpu(bnx2i_percpu, cpu);
			spin_lock_bh(&p->p_work_lock);
			list_for_each_entry_safe(work, tmp,
						 &p->work_list, list) {
				if (work->session == conn->session &&
				    work->bnx2i_conn == bnx2i_conn) {
					list_del_init(&work->list);
					kfree(work);
					if (!atomic_dec_and_test(
							&bnx2i_conn->work_cnt))
						break;
				}
			}
			spin_unlock_bh(&p->p_work_lock);
		}
	}

	iscsi_conn_teardown(cls_conn);
}


/**
 * bnx2i_ep_get_param - return iscsi ep parameter to caller
 * @ep: pointer to iscsi endpoint
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * returns iSCSI ep parameters
 */
static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
			      enum iscsi_param param, char *buf)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	struct bnx2i_hba *hba = bnx2i_ep->hba;
	int len = -ENOTCONN;

	if (!hba)
		return -ENOTCONN;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		mutex_lock(&hba->net_dev_lock);
		if (bnx2i_ep->cm_sk)
			len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
		mutex_unlock(&hba->net_dev_lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		mutex_lock(&hba->net_dev_lock);
		if (bnx2i_ep->cm_sk)
			len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
		mutex_unlock(&hba->net_dev_lock);
		break;
	default:
		return -ENOSYS;
	}

	return len;
}

/**
 * bnx2i_host_get_param - returns host (adapter) related parameters
 * @shost: scsi host pointer
 * @param: parameter type identifier
 * @buf: buffer pointer
 */
static int bnx2i_host_get_param(struct Scsi_Host *shost,
				enum iscsi_host_param param, char *buf)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	int len = 0;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", hba->netdev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS: {
		struct list_head *active_list = &hba->ep_active_list;

		read_lock_bh(&hba->ep_rdwr_lock);
		if (!list_empty(&hba->ep_active_list)) {
			struct bnx2i_endpoint *bnx2i_ep;
			struct cnic_sock *csk;

			bnx2i_ep = list_first_entry(active_list,
						    struct bnx2i_endpoint,
						    link);
			csk = bnx2i_ep->cm_sk;
			if (test_bit(SK_F_IPV6, &csk->flags))
				len = sprintf(buf, "%pI6\n", csk->src_ip);
			else
				len = sprintf(buf, "%pI4\n", csk->src_ip);
		}
		read_unlock_bh(&hba->ep_rdwr_lock);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}

/**
 * bnx2i_conn_start - completes iscsi connection migration to FFP
 * @cls_conn: pointer to iscsi cls conn
 *
 * last call in FFP migration to handover iscsi conn to the driver
 */
static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;

	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
	bnx2i_update_iscsi_conn(conn);

	/*
	 * this should normally not sleep for a long time so it should
	 * not disrupt the caller.
	 */
	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
	add_timer(&bnx2i_conn->ep->ofld_timer);
	/* update iSCSI context for this conn, wait for CNIC to complete */
	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_conn->ep->ofld_timer);

	iscsi_conn_start(cls_conn);
	return 0;
}


/**
 * bnx2i_conn_get_stats - returns iSCSI stats
 * @cls_conn: pointer to iscsi cls conn
 * @stats: pointer to iscsi statistic struct
 */
static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				 struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
	stats->custom_length = 1;
}


/**
 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
 * @dst_addr: target IP address
 *
 * check if route resolves to BNX2 device
 */
static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic = NULL;

	hba = get_adapter_list_head();
	if (hba && hba->cnic)
		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
	if (!cnic) {
		printk(KERN_ALERT "bnx2i: no route, "
		       "can't connect using cnic\n");
		goto no_nx2_route;
	}
	hba = bnx2i_find_hba_for_cnic(cnic);
	if (!hba)
		goto no_nx2_route;

	if (bnx2i_adapter_ready(hba)) {
		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
		goto no_nx2_route;
	}
	if (hba->netdev->mtu > hba->mtu_supported) {
		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
		       hba->netdev->name,
		       hba->netdev->mtu);
		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
		       hba->mtu_supported);
		goto no_nx2_route;
	}
	return hba;
no_nx2_route:
	return NULL;
}


/**
 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
 * @hba: pointer to adapter instance
 * @ep: endpoint (transport identifier) structure
 *
 * destroys cm_sock structure and on chip iscsi context
 */
static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
				struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
		hba->cnic->cm_destroy(ep->cm_sk);

	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
		if (ep->conn && ep->conn->cls_conn &&
		    ep->conn->cls_conn->dd_data) {
			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;

			/* Must suspend all rx queue activity for this ep */
			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
		}
		/* CONN_DISCONNECT timeout may or may not be an issue depending
		 * on what transpired in the TCP layer, different targets
		 * behave differently
		 */
		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
				  "please submit GRC Dump, NW/PCIe trace, "
				  "driver msgs to developers for analysis\n",
				  hba->netdev->name);
	}

	ep->state = EP_STATE_CLEANUP_START;
	init_timer(&ep->ofld_timer);
	ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	ep->ofld_timer.data = (unsigned long) ep;
	add_timer(&ep->ofld_timer);

	bnx2i_ep_destroy_list_add(hba, ep);

	/* destroy iSCSI context, wait for it to complete */
	if (bnx2i_send_conn_destroy(hba, ep))
		ep->state = EP_STATE_CLEANUP_CMPL;

	wait_event_interruptible(ep->ofld_wait,
				 (ep->state != EP_STATE_CLEANUP_START));

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&ep->ofld_timer);

	bnx2i_ep_destroy_list_del(hba, ep);

	if (ep->state != EP_STATE_CLEANUP_CMPL)
		/* should never happen */
		printk(KERN_ALERT "bnx2i - conn destroy failed\n");

	return 0;
}


/**
 * bnx2i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * this routine initiates the TCP/IP connection by invoking the Option-2 i/f
 * with l5_core and the CNIC. This is a multi-step process of resolving
 * the route to the target, creating an iscsi connection context,
 * handshaking with the CNIC module to create/initialize the socket struct
 * and finally sending down the option-2 request to complete the TCP 3-way
 * handshake
 */
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
					       struct sockaddr *dst_addr,
					       int non_blocking)
{
	u32 iscsi_cid = BNX2I_CID_RESERVED;
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct sockaddr_in6 *desti6;
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic;
	struct cnic_sockaddr saddr;
	struct iscsi_endpoint *ep;
	int rc = 0;

	if (shost) {
		/* driver is given scsi host to work with */
		hba = iscsi_host_priv(shost);
	} else
		/*
		 * check if the given destination can be reached through
		 * an iscsi capable NetXtreme2 device
		 */
		hba = bnx2i_check_route(dst_addr);

	if (!hba) {
		rc = -EINVAL;
		goto nohba;
	}
	mutex_lock(&hba->net_dev_lock);

	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
		rc = -EPERM;
		goto check_busy;
	}
	cnic = hba->cnic;
	ep = bnx2i_alloc_ep(hba);
	if (!ep) {
		rc = -ENOMEM;
		goto check_busy;
	}
	bnx2i_ep = ep->dd_data;

	atomic_set(&bnx2i_ep->num_active_cmds, 0);
	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
	if (iscsi_cid == -1) {
		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
			"iscsi cid\n", hba->netdev->name);
		rc = -ENOMEM;
		bnx2i_free_ep(ep);
		goto check_busy;
	}
	bnx2i_ep->hba_age = hba->age;

	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
	if (rc != 0) {
		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
			"\n", hba->netdev->name);
		rc = -ENOMEM;
		goto qp_resc_err;
	}

	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
	bnx2i_ep->state = EP_STATE_OFLD_START;
	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);

	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
			"\n", hba->netdev->name);
		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
		goto conn_failed;
	}

	/* Wait for CNIC hardware to setup conn context and return 'cid' */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_OFLD_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		goto conn_failed;
	}

	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
	if (rc) {
		rc = -EINVAL;
		/* Need to terminate and cleanup the connection */
		goto release_ep;
	}

	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

	memset(&saddr, 0, sizeof(saddr));
	if (dst_addr->sa_family == AF_INET) {
		desti = (struct sockaddr_in *) dst_addr;
		saddr.remote.v4 = *desti;
		saddr.local.v4.sin_family = desti->sin_family;
	} else if (dst_addr->sa_family == AF_INET6) {
		desti6 = (struct sockaddr_in6 *) dst_addr;
		saddr.remote.v6 = *desti6;
		saddr.local.v6.sin6_family = desti6->sin6_family;
	}

	bnx2i_ep->timestamp = jiffies;
	bnx2i_ep->state = EP_STATE_CONNECT_START;
	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = -EINVAL;
		goto conn_failed;
	} else
		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
	if (rc)
		goto release_ep;

	bnx2i_ep_active_list_add(hba, bnx2i_ep);

	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
		goto del_active_ep;

	mutex_unlock(&hba->net_dev_lock);
	return ep;

del_active_ep:
	bnx2i_ep_active_list_del(hba, bnx2i_ep);
release_ep:
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return ERR_PTR(rc);
	}
conn_failed:
	bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
	bnx2i_free_ep(ep);
check_busy:
	mutex_unlock(&hba->net_dev_lock);
nohba:
	return ERR_PTR(rc);
}


/**
 * bnx2i_ep_poll - polls for TCP connection establishment
 * @ep: TCP connection (endpoint) handle
 * @timeout_ms: timeout value in milliseconds
 *
 * polls for TCP connect request to complete
 */
static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct bnx2i_endpoint *bnx2i_ep;
	int rc = 0;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
		return -1;
	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
		return 1;

	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
					      ((bnx2i_ep->state ==
						EP_STATE_OFLD_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_COMPL)),
					      msecs_to_jiffies(timeout_ms));
	if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
		rc = -1;

	if (rc > 0)
		return 1;
	else if (!rc)
		return 0;	/* timeout */
	else
		return rc;
}


/**
 * bnx2i_ep_tcp_conn_active - check EP state transition
 * @bnx2i_ep: endpoint pointer
 *
 * check if underlying TCP connection is active
 */
static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
{
	int ret;
	int cnic_dev_10g = 0;

	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		cnic_dev_10g = 1;

	switch (bnx2i_ep->state) {
	case EP_STATE_CLEANUP_FAILED:
	case EP_STATE_OFLD_FAILED:
	case EP_STATE_DISCONN_TIMEDOUT:
		ret = 0;
		break;
	case EP_STATE_CONNECT_START:
	case EP_STATE_CONNECT_FAILED:
	case EP_STATE_CONNECT_COMPL:
	case EP_STATE_ULP_UPDATE_START:
	case EP_STATE_ULP_UPDATE_COMPL:
	case EP_STATE_TCP_FIN_RCVD:
	case EP_STATE_LOGOUT_SENT:
	case EP_STATE_LOGOUT_RESP_RCVD:
	case EP_STATE_ULP_UPDATE_FAILED:
		ret = 1;
		break;
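	/* A received TCP RST is handled per chip family below: 57710 (10G)
	 * devices report the offloaded connection as no longer active, while
	 * 1G devices still report it active so the normal close/abort
	 * teardown path runs.
	 */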
	case EP_STATE_TCP_RST_RCVD:
		if (cnic_dev_10g)
			ret = 0;
		else
			ret = 1;
		break;
	default:
		ret = 0;
	}

	return ret;
}


/**
 * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
 * @bnx2i_ep: TCP connection (bnx2i endpoint) handle
 *
 * executes TCP connection teardown process
 */
int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
{
	struct bnx2i_hba *hba = bnx2i_ep->hba;
	struct cnic_dev *cnic;
	struct iscsi_session *session = NULL;
	struct iscsi_conn *conn = NULL;
	int ret = 0;
	int close = 0;
	int close_ret = 0;

	if (!hba)
		return 0;

	cnic = hba->cnic;
	if (!cnic)
		return 0;

	if (bnx2i_ep->state == EP_STATE_IDLE ||
	    bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
		return 0;

	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
		goto destroy_conn;

	if (bnx2i_ep->conn) {
		conn = bnx2i_ep->conn->cls_conn->dd_data;
		session = conn->session;
	}

	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
		goto out;

	if (session) {
		spin_lock_bh(&session->lock);
		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
			if (session->state == ISCSI_STATE_LOGGING_OUT) {
				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
					/* Logout sent, but no resp */
					printk(KERN_ALERT "bnx2i (%s): WARNING"
						" logout response was not "
						"received!\n",
						bnx2i_ep->hba->netdev->name);
				} else if (bnx2i_ep->state ==
					   EP_STATE_LOGOUT_RESP_RCVD)
					close = 1;
			}
		} else
			close = 1;

		spin_unlock_bh(&session->lock);
	}

	bnx2i_ep->state = EP_STATE_DISCONN_START;

	if (close)
		close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
	else
		close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);

	if (close_ret)
		printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
			bnx2i_ep->hba->netdev->name, close, close_ret);
	else
		/* wait for option-2 conn teardown */
		wait_event_interruptible(bnx2i_ep->ofld_wait,
				bnx2i_ep->state != EP_STATE_DISCONN_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

destroy_conn:
	bnx2i_ep_active_list_del(hba, bnx2i_ep);
	if (bnx2i_tear_down_conn(hba, bnx2i_ep))
		return -EINVAL;
out:
	bnx2i_ep->state = EP_STATE_IDLE;
	return ret;
}


/**
 * bnx2i_ep_disconnect - executes TCP connection teardown process
 * @ep: TCP connection (iscsi endpoint) handle
 *
 * executes TCP connection teardown process
 */
static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_conn *bnx2i_conn = NULL;
	struct iscsi_conn *conn = NULL;
	struct bnx2i_hba *hba;

	bnx2i_ep = ep->dd_data;

	/* driver should not attempt connection cleanup until TCP_CONNECT
	 * completes either successfully or fails.
	 * Timeout is 9 seconds, so wait for it to complete.
	 */
	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
		msleep(250);

	if (bnx2i_ep->conn) {
		bnx2i_conn = bnx2i_ep->conn;
		conn = bnx2i_conn->cls_conn->dd_data;
		iscsi_suspend_queue(conn);
	}
	hba = bnx2i_ep->hba;

	mutex_lock(&hba->net_dev_lock);

	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
		goto out;

	if (bnx2i_ep->state == EP_STATE_IDLE)
		goto free_resc;

	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    (bnx2i_ep->hba_age != hba->age)) {
		bnx2i_ep_active_list_del(hba, bnx2i_ep);
		goto free_resc;
	}

	/* Do all chip cleanup here */
	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return;
	}
free_resc:
	bnx2i_free_qp_resc(hba, bnx2i_ep);

	if (bnx2i_conn)
		bnx2i_conn->ep = NULL;

	bnx2i_free_ep(ep);
out:
	mutex_unlock(&hba->net_dev_lock);

	wake_up_interruptible(&hba->eh_wait);
}


/**
 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
 * @shost: scsi host pointer
 * @params: pointer to buffer containing iscsi path message
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	char *buf = (char *) params;
	u16 len = sizeof(*params);

	/* handled by cnic driver */
	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
				     len);

	return 0;
}

static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/*
 * 'scsi_host_template' structure and 'iscsi_transport' structure template
 * used while registering with the SCSI midlayer and the iSCSI transport
 * module.
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "Broadcom Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.change_queue_depth	= iscsi_change_queue_depth,
	.target_alloc		= iscsi_target_alloc,
	.can_queue		= 2048,
	.max_sectors		= 127,
	.cmd_per_lun		= 128,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	.shost_attrs		= bnx2i_dev_attributes,
};

struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD |
				  CAP_TEXT_NEGO,
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.attr_is_visible	= bnx2i_attr_is_visible,
	.set_param		= iscsi_set_param,
	.get_conn_param		= iscsi_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect - disconnect - option-2 interface calls */
	.get_ep_param		= bnx2i_ep_get_param,
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};
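
/*
 * Illustrative sketch only (compiled out): how the templates above are
 * typically consumed.  In the upstream tree the registration is performed by
 * the module init code in bnx2i_init.c; the function name below is
 * hypothetical and is shown purely to clarify the relationship between
 * bnx2i_iscsi_transport and bnx2i_scsi_xport_template.
 */
#if 0
static int __init bnx2i_transport_register_sketch(void)
{
	/* Registering the iSCSI transport returns the scsi_transport
	 * template that hosts created by this driver are bound to.
	 */
	bnx2i_scsi_xport_template =
		iscsi_register_transport(&bnx2i_iscsi_transport);
	if (!bnx2i_scsi_xport_template) {
		printk(KERN_ERR "bnx2i: iscsi transport register failed\n");
		return -ENOMEM;
	}
	return 0;
}
#endif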