/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
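/* UIO release handler: only mark the device as no longer opened from
 * userspace.  The L2 rings are torn down and re-initialized on the next
 * open (see cnic_uio_open() above).
 */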
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
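/* Return completed slow-path queue (SPQ) credits to the bnx2x driver
 * through the drv_ctl interface; count is the number of credits being
 * handed back and cmd selects the credit type.
 */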
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
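/* Try to claim a socket for offload work.  Returns 1 only if this caller
 * wins the SK_F_OFFLD_SCHED bit while a connect is still in progress.
 * If the connect was already aborted, the bit is dropped again and 0 is
 * returned.
 */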
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
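/* Unlike cnic_register_driver(), which registers a ULP type globally,
 * cnic_register_device() and cnic_unregister_device() bind or unbind one
 * ULP instance to a specific cnic_dev and hold a reference on the device
 * while it is bound.
 */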
%d\n", __func__, ulp_type); 538 return -EINVAL; 539 } 540 mutex_lock(&cnic_lock); 541 if (cnic_ulp_tbl_prot(ulp_type) == NULL) { 542 pr_err("%s: Driver with type %d has not been registered\n", 543 __func__, ulp_type); 544 mutex_unlock(&cnic_lock); 545 return -EAGAIN; 546 } 547 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 548 pr_err("%s: Type %d has already been registered to this device\n", 549 __func__, ulp_type); 550 mutex_unlock(&cnic_lock); 551 return -EBUSY; 552 } 553 554 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 555 cp->ulp_handle[ulp_type] = ulp_ctx; 556 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 557 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 558 cnic_hold(dev); 559 560 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 561 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) 562 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); 563 564 mutex_unlock(&cnic_lock); 565 566 return 0; 567 568 } 569 EXPORT_SYMBOL(cnic_register_driver); 570 571 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) 572 { 573 struct cnic_local *cp = dev->cnic_priv; 574 int i = 0; 575 576 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 577 pr_err("%s: Bad type %d\n", __func__, ulp_type); 578 return -EINVAL; 579 } 580 mutex_lock(&cnic_lock); 581 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 582 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); 583 cnic_put(dev); 584 } else { 585 pr_err("%s: device not registered to this ulp type %d\n", 586 __func__, ulp_type); 587 mutex_unlock(&cnic_lock); 588 return -EINVAL; 589 } 590 mutex_unlock(&cnic_lock); 591 592 if (ulp_type == CNIC_ULP_ISCSI) 593 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 594 595 synchronize_rcu(); 596 597 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && 598 i < 20) { 599 msleep(100); 600 i++; 601 } 602 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) 603 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); 604 605 return 0; 606 } 607 EXPORT_SYMBOL(cnic_unregister_driver); 608 609 static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, 610 u32 next) 611 { 612 id_tbl->start = start_id; 613 id_tbl->max = size; 614 id_tbl->next = next; 615 spin_lock_init(&id_tbl->lock); 616 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); 617 if (!id_tbl->table) 618 return -ENOMEM; 619 620 return 0; 621 } 622 623 static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) 624 { 625 kfree(id_tbl->table); 626 id_tbl->table = NULL; 627 } 628 629 static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) 630 { 631 int ret = -1; 632 633 id -= id_tbl->start; 634 if (id >= id_tbl->max) 635 return ret; 636 637 spin_lock(&id_tbl->lock); 638 if (!test_bit(id, id_tbl->table)) { 639 set_bit(id, id_tbl->table); 640 ret = 0; 641 } 642 spin_unlock(&id_tbl->lock); 643 return ret; 644 } 645 646 /* Returns -1 if not successful */ 647 static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) 648 { 649 u32 id; 650 651 spin_lock(&id_tbl->lock); 652 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); 653 if (id >= id_tbl->max) { 654 id = -1; 655 if (id_tbl->next != 0) { 656 id = find_first_zero_bit(id_tbl->table, id_tbl->next); 657 if (id >= id_tbl->next) 658 id = -1; 659 } 660 } 661 662 if (id < id_tbl->max) { 663 set_bit(id, id_tbl->table); 664 id_tbl->next = (id + 1) & (id_tbl->max - 1); 665 id += id_tbl->start; 666 } 667 668 spin_unlock(&id_tbl->lock); 669 670 return id; 671 } 672 673 static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) 674 { 
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
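/* If a UIO device already exists for this PCI function, reuse it so that
 * an existing userspace mapping stays valid; otherwise allocate the L2
 * ring and receive buffer as coherent DMA and add the new udev to
 * cnic_udev_list.
 */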
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
 err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
 err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
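/* Allocate the per-device resources for a bnx2 device: the kernel work
 * queue (KWQ) with its page table, the KCQ, on-chip context memory for
 * the 5709, and the UIO rings.  Any failure unwinds everything through
 * cnic_free_resc().
 */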
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
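/* Handle the first iSCSI global INIT KWQE from the ULP: cache the sizing
 * parameters (tasks, ccells, R2T queue, HQ) and, if the device supports
 * iSCSI connections, program the per-function Tstorm/Ustorm/Xstorm/Cstorm
 * RAM with them.
 */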
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
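/* Return the virtual address of the context slot for cid and report its
 * DMA address through ctx_addr.  align_off compensates for context blocks
 * whose DMA mapping is not naturally aligned (only on 57710, where
 * cp->ctx_align is non-zero).
 */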
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

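/* Forward an iSCSI connection-update KWQE to the chip: the KWQE is copied
 * into the connection's pre-mapped KWQE-16 data buffer and a ramrod
 * carrying that buffer's DMA address is submitted.
 */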
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
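/* Fill the Xstorm and Tstorm connection buffers for a TCP connect request.
 * The IPv6-format pseudo-header checksum over the byte-swapped local and
 * remote addresses is precomputed here and stored for the firmware.
 */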
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
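/* Process an L4 TCP connect request from the ULP.  *work reports how many
 * KWQEs are consumed: 2 for IPv4 or 3 for IPv6, where the extra req2 KWQE
 * carries the upper words of the IPv6 addresses and req3 is always last.
 */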
else 2058 *work = 2; 2059 2060 if (num < *work) { 2061 *work = num; 2062 return -EINVAL; 2063 } 2064 2065 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 2066 netdev_err(dev->netdev, "conn_buf size too big\n"); 2067 return -ENOMEM; 2068 } 2069 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2070 if (!conn_buf) 2071 return -ENOMEM; 2072 2073 memset(conn_buf, 0, sizeof(*conn_buf)); 2074 2075 conn_addr = &conn_buf->conn_addr_buf; 2076 conn_addr->remote_addr_0 = csk->ha[0]; 2077 conn_addr->remote_addr_1 = csk->ha[1]; 2078 conn_addr->remote_addr_2 = csk->ha[2]; 2079 conn_addr->remote_addr_3 = csk->ha[3]; 2080 conn_addr->remote_addr_4 = csk->ha[4]; 2081 conn_addr->remote_addr_5 = csk->ha[5]; 2082 2083 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 2084 struct l4_kwq_connect_req2 *kwqe2 = 2085 (struct l4_kwq_connect_req2 *) wqes[1]; 2086 2087 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 2088 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 2089 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 2090 2091 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2092 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2093 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2094 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2095 } 2096 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 2097 2098 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2099 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2100 conn_addr->local_tcp_port = kwqe1->src_port; 2101 conn_addr->remote_tcp_port = kwqe1->dst_port; 2102 2103 conn_addr->pmtu = kwqe3->pmtu; 2104 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2105 2106 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2107 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2108 2109 cnic_bnx2x_set_tcp_timestamp(dev, 2110 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2111 2112 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2113 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2114 if (!ret) 2115 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2116 2117 return ret; 2118 } 2119 2120 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2121 { 2122 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2123 union l5cm_specific_data l5_data; 2124 int ret; 2125 2126 memset(&l5_data, 0, sizeof(l5_data)); 2127 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2128 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2129 return ret; 2130 } 2131 2132 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2133 { 2134 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2135 union l5cm_specific_data l5_data; 2136 int ret; 2137 2138 memset(&l5_data, 0, sizeof(l5_data)); 2139 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 2140 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2141 return ret; 2142 } 2143 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2144 { 2145 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2146 struct l4_kcq kcqe; 2147 struct kcqe *cqes[1]; 2148 2149 memset(&kcqe, 0, sizeof(kcqe)); 2150 kcqe.pg_host_opaque = req->host_opaque; 2151 kcqe.pg_cid = req->host_opaque; 2152 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2153 cqes[0] = (struct kcqe *) &kcqe; 2154 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2155 return 0; 2156 } 2157 2158 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2159 { 2160 struct l4_kwq_update_pg 
*req = (struct l4_kwq_update_pg *) kwqe; 2161 struct l4_kcq kcqe; 2162 struct kcqe *cqes[1]; 2163 2164 memset(&kcqe, 0, sizeof(kcqe)); 2165 kcqe.pg_host_opaque = req->pg_host_opaque; 2166 kcqe.pg_cid = req->pg_cid; 2167 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2168 cqes[0] = (struct kcqe *) &kcqe; 2169 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2170 return 0; 2171 } 2172 2173 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) 2174 { 2175 struct fcoe_kwqe_stat *req; 2176 struct fcoe_stat_ramrod_params *fcoe_stat; 2177 union l5cm_specific_data l5_data; 2178 struct cnic_local *cp = dev->cnic_priv; 2179 int ret; 2180 u32 cid; 2181 2182 req = (struct fcoe_kwqe_stat *) kwqe; 2183 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2184 2185 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2186 if (!fcoe_stat) 2187 return -ENOMEM; 2188 2189 memset(fcoe_stat, 0, sizeof(*fcoe_stat)); 2190 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); 2191 2192 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, 2193 FCOE_CONNECTION_TYPE, &l5_data); 2194 return ret; 2195 } 2196 2197 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], 2198 u32 num, int *work) 2199 { 2200 int ret; 2201 struct cnic_local *cp = dev->cnic_priv; 2202 u32 cid; 2203 struct fcoe_init_ramrod_params *fcoe_init; 2204 struct fcoe_kwqe_init1 *req1; 2205 struct fcoe_kwqe_init2 *req2; 2206 struct fcoe_kwqe_init3 *req3; 2207 union l5cm_specific_data l5_data; 2208 2209 if (num < 3) { 2210 *work = num; 2211 return -EINVAL; 2212 } 2213 req1 = (struct fcoe_kwqe_init1 *) wqes[0]; 2214 req2 = (struct fcoe_kwqe_init2 *) wqes[1]; 2215 req3 = (struct fcoe_kwqe_init3 *) wqes[2]; 2216 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { 2217 *work = 1; 2218 return -EINVAL; 2219 } 2220 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { 2221 *work = 2; 2222 return -EINVAL; 2223 } 2224 2225 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { 2226 netdev_err(dev->netdev, "fcoe_init size too big\n"); 2227 return -ENOMEM; 2228 } 2229 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2230 if (!fcoe_init) 2231 return -ENOMEM; 2232 2233 memset(fcoe_init, 0, sizeof(*fcoe_init)); 2234 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); 2235 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); 2236 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); 2237 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; 2238 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; 2239 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; 2240 2241 fcoe_init->sb_num = cp->status_blk_num; 2242 fcoe_init->eq_prod = MAX_KCQ_IDX; 2243 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2244 cp->kcq2.sw_prod_idx = 0; 2245 2246 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2247 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, 2248 FCOE_CONNECTION_TYPE, &l5_data); 2249 *work = 3; 2250 return ret; 2251 } 2252 2253 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 2254 u32 num, int *work) 2255 { 2256 int ret = 0; 2257 u32 cid = -1, l5_cid; 2258 struct cnic_local *cp = dev->cnic_priv; 2259 struct fcoe_kwqe_conn_offload1 *req1; 2260 struct fcoe_kwqe_conn_offload2 *req2; 2261 struct fcoe_kwqe_conn_offload3 *req3; 2262 struct fcoe_kwqe_conn_offload4 *req4; 2263 struct fcoe_conn_offload_ramrod_params *fcoe_offload; 2264 struct cnic_context *ctx; 2265 struct fcoe_context *fctx; 2266 struct regpair ctx_addr; 2267 union l5cm_specific_data l5_data; 
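/*
 * The offload path below consumes the four FCoE CONN_OFFLOAD KWQEs, reserves
 * a connection context, seeds the CDU reserved fields of that context, and
 * then submits an OFFLOAD_CONN ramrod.  On any failure the ULP is instead
 * given a reply KCQE carrying CTX_ALLOC_FAILURE status.
 */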
2268 struct fcoe_kcqe kcqe; 2269 struct kcqe *cqes[1]; 2270 2271 if (num < 4) { 2272 *work = num; 2273 return -EINVAL; 2274 } 2275 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; 2276 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; 2277 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; 2278 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; 2279 2280 *work = 4; 2281 2282 l5_cid = req1->fcoe_conn_id; 2283 if (l5_cid >= dev->max_fcoe_conn) 2284 goto err_reply; 2285 2286 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2287 2288 ctx = &cp->ctx_tbl[l5_cid]; 2289 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2290 goto err_reply; 2291 2292 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 2293 if (ret) { 2294 ret = 0; 2295 goto err_reply; 2296 } 2297 cid = ctx->cid; 2298 2299 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2300 if (fctx) { 2301 u32 hw_cid = BNX2X_HW_CID(cp, cid); 2302 u32 val; 2303 2304 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 2305 FCOE_CONNECTION_TYPE); 2306 fctx->xstorm_ag_context.cdu_reserved = val; 2307 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 2308 FCOE_CONNECTION_TYPE); 2309 fctx->ustorm_ag_context.cdu_usage = val; 2310 } 2311 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { 2312 netdev_err(dev->netdev, "fcoe_offload size too big\n"); 2313 goto err_reply; 2314 } 2315 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2316 if (!fcoe_offload) 2317 goto err_reply; 2318 2319 memset(fcoe_offload, 0, sizeof(*fcoe_offload)); 2320 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); 2321 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); 2322 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2323 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2324 2325 cid = BNX2X_HW_CID(cp, cid); 2326 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2327 FCOE_CONNECTION_TYPE, &l5_data); 2328 if (!ret) 2329 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2330 2331 return ret; 2332 2333 err_reply: 2334 if (cid != -1) 2335 cnic_free_bnx2x_conn_resc(dev, l5_cid); 2336 2337 memset(&kcqe, 0, sizeof(kcqe)); 2338 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; 2339 kcqe.fcoe_conn_id = req1->fcoe_conn_id; 2340 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 2341 2342 cqes[0] = (struct kcqe *) &kcqe; 2343 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2344 return ret; 2345 } 2346 2347 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) 2348 { 2349 struct fcoe_kwqe_conn_enable_disable *req; 2350 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; 2351 union l5cm_specific_data l5_data; 2352 int ret; 2353 u32 cid, l5_cid; 2354 struct cnic_local *cp = dev->cnic_priv; 2355 2356 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2357 cid = req->context_id; 2358 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; 2359 2360 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { 2361 netdev_err(dev->netdev, "fcoe_enable size too big\n"); 2362 return -ENOMEM; 2363 } 2364 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2365 if (!fcoe_enable) 2366 return -ENOMEM; 2367 2368 memset(fcoe_enable, 0, sizeof(*fcoe_enable)); 2369 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); 2370 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, 2371 FCOE_CONNECTION_TYPE, &l5_data); 2372 return ret; 2373 } 2374 2375 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) 2376 { 2377 struct fcoe_kwqe_conn_enable_disable 
*req; 2378 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; 2379 union l5cm_specific_data l5_data; 2380 int ret; 2381 u32 cid, l5_cid; 2382 struct cnic_local *cp = dev->cnic_priv; 2383 2384 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2385 cid = req->context_id; 2386 l5_cid = req->conn_id; 2387 if (l5_cid >= dev->max_fcoe_conn) 2388 return -EINVAL; 2389 2390 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2391 2392 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { 2393 netdev_err(dev->netdev, "fcoe_disable size too big\n"); 2394 return -ENOMEM; 2395 } 2396 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2397 if (!fcoe_disable) 2398 return -ENOMEM; 2399 2400 memset(fcoe_disable, 0, sizeof(*fcoe_disable)); 2401 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); 2402 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, 2403 FCOE_CONNECTION_TYPE, &l5_data); 2404 return ret; 2405 } 2406 2407 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2408 { 2409 struct fcoe_kwqe_conn_destroy *req; 2410 union l5cm_specific_data l5_data; 2411 int ret; 2412 u32 cid, l5_cid; 2413 struct cnic_local *cp = dev->cnic_priv; 2414 struct cnic_context *ctx; 2415 struct fcoe_kcqe kcqe; 2416 struct kcqe *cqes[1]; 2417 2418 req = (struct fcoe_kwqe_conn_destroy *) kwqe; 2419 cid = req->context_id; 2420 l5_cid = req->conn_id; 2421 if (l5_cid >= dev->max_fcoe_conn) 2422 return -EINVAL; 2423 2424 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2425 2426 ctx = &cp->ctx_tbl[l5_cid]; 2427 2428 init_waitqueue_head(&ctx->waitq); 2429 ctx->wait_cond = 0; 2430 2431 memset(&kcqe, 0, sizeof(kcqe)); 2432 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR; 2433 memset(&l5_data, 0, sizeof(l5_data)); 2434 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, 2435 FCOE_CONNECTION_TYPE, &l5_data); 2436 if (ret == 0) { 2437 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 2438 if (ctx->wait_cond) 2439 kcqe.completion_status = 0; 2440 } 2441 2442 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 2443 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); 2444 2445 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; 2446 kcqe.fcoe_conn_id = req->conn_id; 2447 kcqe.fcoe_conn_context_id = cid; 2448 2449 cqes[0] = (struct kcqe *) &kcqe; 2450 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2451 return ret; 2452 } 2453 2454 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) 2455 { 2456 struct cnic_local *cp = dev->cnic_priv; 2457 u32 i; 2458 2459 for (i = start_cid; i < cp->max_cid_space; i++) { 2460 struct cnic_context *ctx = &cp->ctx_tbl[i]; 2461 int j; 2462 2463 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 2464 msleep(10); 2465 2466 for (j = 0; j < 5; j++) { 2467 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2468 break; 2469 msleep(20); 2470 } 2471 2472 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2473 netdev_warn(dev->netdev, "CID %x not deleted\n", 2474 ctx->cid); 2475 } 2476 } 2477 2478 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2479 { 2480 struct fcoe_kwqe_destroy *req; 2481 union l5cm_specific_data l5_data; 2482 struct cnic_local *cp = dev->cnic_priv; 2483 int ret; 2484 u32 cid; 2485 2486 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); 2487 2488 req = (struct fcoe_kwqe_destroy *) kwqe; 2489 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2490 2491 memset(&l5_data, 0, sizeof(l5_data)); 2492 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, 
2493 FCOE_CONNECTION_TYPE, &l5_data); 2494 return ret; 2495 } 2496 2497 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, 2498 struct kwqe *wqes[], u32 num_wqes) 2499 { 2500 int i, work, ret; 2501 u32 opcode; 2502 struct kwqe *kwqe; 2503 2504 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2505 return -EAGAIN; /* bnx2 is down */ 2506 2507 for (i = 0; i < num_wqes; ) { 2508 kwqe = wqes[i]; 2509 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2510 work = 1; 2511 2512 switch (opcode) { 2513 case ISCSI_KWQE_OPCODE_INIT1: 2514 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2515 break; 2516 case ISCSI_KWQE_OPCODE_INIT2: 2517 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2518 break; 2519 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2520 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2521 num_wqes - i, &work); 2522 break; 2523 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2524 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2525 break; 2526 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2527 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2528 break; 2529 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2530 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2531 &work); 2532 break; 2533 case L4_KWQE_OPCODE_VALUE_CLOSE: 2534 ret = cnic_bnx2x_close(dev, kwqe); 2535 break; 2536 case L4_KWQE_OPCODE_VALUE_RESET: 2537 ret = cnic_bnx2x_reset(dev, kwqe); 2538 break; 2539 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2540 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2541 break; 2542 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2543 ret = cnic_bnx2x_update_pg(dev, kwqe); 2544 break; 2545 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2546 ret = 0; 2547 break; 2548 default: 2549 ret = 0; 2550 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2551 opcode); 2552 break; 2553 } 2554 if (ret < 0) 2555 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2556 opcode); 2557 i += work; 2558 } 2559 return 0; 2560 } 2561 2562 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2563 struct kwqe *wqes[], u32 num_wqes) 2564 { 2565 struct cnic_local *cp = dev->cnic_priv; 2566 int i, work, ret; 2567 u32 opcode; 2568 struct kwqe *kwqe; 2569 2570 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2571 return -EAGAIN; /* bnx2 is down */ 2572 2573 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 2574 return -EINVAL; 2575 2576 for (i = 0; i < num_wqes; ) { 2577 kwqe = wqes[i]; 2578 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2579 work = 1; 2580 2581 switch (opcode) { 2582 case FCOE_KWQE_OPCODE_INIT1: 2583 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], 2584 num_wqes - i, &work); 2585 break; 2586 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: 2587 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], 2588 num_wqes - i, &work); 2589 break; 2590 case FCOE_KWQE_OPCODE_ENABLE_CONN: 2591 ret = cnic_bnx2x_fcoe_enable(dev, kwqe); 2592 break; 2593 case FCOE_KWQE_OPCODE_DISABLE_CONN: 2594 ret = cnic_bnx2x_fcoe_disable(dev, kwqe); 2595 break; 2596 case FCOE_KWQE_OPCODE_DESTROY_CONN: 2597 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); 2598 break; 2599 case FCOE_KWQE_OPCODE_DESTROY: 2600 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); 2601 break; 2602 case FCOE_KWQE_OPCODE_STAT: 2603 ret = cnic_bnx2x_fcoe_stat(dev, kwqe); 2604 break; 2605 default: 2606 ret = 0; 2607 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2608 opcode); 2609 break; 2610 } 2611 if (ret < 0) 2612 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2613 opcode); 2614 i += work; 2615 } 2616 return 0; 2617 } 2618 2619 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2620 u32 num_wqes) 2621 { 2622 int ret = -EINVAL; 2623 u32 layer_code; 2624 2625 if 
(!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2626 return -EAGAIN; /* bnx2x is down */ 2627 2628 if (!num_wqes) 2629 return 0; 2630 2631 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; 2632 switch (layer_code) { 2633 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: 2634 case KWQE_FLAGS_LAYER_MASK_L4: 2635 case KWQE_FLAGS_LAYER_MASK_L2: 2636 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); 2637 break; 2638 2639 case KWQE_FLAGS_LAYER_MASK_L5_FCOE: 2640 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); 2641 break; 2642 } 2643 return ret; 2644 } 2645 2646 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) 2647 { 2648 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) 2649 return KCQE_FLAGS_LAYER_MASK_L4; 2650 2651 return opflag & KCQE_FLAGS_LAYER_MASK; 2652 } 2653 2654 static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2655 { 2656 struct cnic_local *cp = dev->cnic_priv; 2657 int i, j, comp = 0; 2658 2659 i = 0; 2660 j = 1; 2661 while (num_cqes) { 2662 struct cnic_ulp_ops *ulp_ops; 2663 int ulp_type; 2664 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2665 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); 2666 2667 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2668 comp++; 2669 2670 while (j < num_cqes) { 2671 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2672 2673 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) 2674 break; 2675 2676 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2677 comp++; 2678 j++; 2679 } 2680 2681 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2682 ulp_type = CNIC_ULP_RDMA; 2683 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2684 ulp_type = CNIC_ULP_ISCSI; 2685 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) 2686 ulp_type = CNIC_ULP_FCOE; 2687 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2688 ulp_type = CNIC_ULP_L4; 2689 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2690 goto end; 2691 else { 2692 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2693 kcqe_op_flag); 2694 goto end; 2695 } 2696 2697 rcu_read_lock(); 2698 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2699 if (likely(ulp_ops)) { 2700 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2701 cp->completed_kcq + i, j); 2702 } 2703 rcu_read_unlock(); 2704 end: 2705 num_cqes -= j; 2706 i += j; 2707 j = 1; 2708 } 2709 if (unlikely(comp)) 2710 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2711 } 2712 2713 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2714 { 2715 struct cnic_local *cp = dev->cnic_priv; 2716 u16 i, ri, hw_prod, last; 2717 struct kcqe *kcqe; 2718 int kcqe_cnt = 0, last_cnt = 0; 2719 2720 i = ri = last = info->sw_prod_idx; 2721 ri &= MAX_KCQ_IDX; 2722 hw_prod = *info->hw_prod_idx_ptr; 2723 hw_prod = info->hw_idx(hw_prod); 2724 2725 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2726 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2727 cp->completed_kcq[kcqe_cnt++] = kcqe; 2728 i = info->next_idx(i); 2729 ri = i & MAX_KCQ_IDX; 2730 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2731 last_cnt = kcqe_cnt; 2732 last = i; 2733 } 2734 } 2735 2736 info->sw_prod_idx = last; 2737 return last_cnt; 2738 } 2739 2740 static int cnic_l2_completion(struct cnic_local *cp) 2741 { 2742 u16 hw_cons, sw_cons; 2743 struct cnic_uio_dev *udev = cp->udev; 2744 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2745 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2746 u32 cmd; 2747 int comp = 0; 2748 2749 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2750 return 
0; 2751 2752 hw_cons = *cp->rx_cons_ptr; 2753 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2754 hw_cons++; 2755 2756 sw_cons = cp->rx_cons; 2757 while (sw_cons != hw_cons) { 2758 u8 cqe_fp_flags; 2759 2760 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2761 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2762 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2763 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2764 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2765 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2766 cmd == RAMROD_CMD_ID_ETH_HALT) 2767 comp++; 2768 } 2769 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2770 } 2771 return comp; 2772 } 2773 2774 static void cnic_chk_pkt_rings(struct cnic_local *cp) 2775 { 2776 u16 rx_cons, tx_cons; 2777 int comp = 0; 2778 2779 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2780 return; 2781 2782 rx_cons = *cp->rx_cons_ptr; 2783 tx_cons = *cp->tx_cons_ptr; 2784 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2785 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2786 comp = cnic_l2_completion(cp); 2787 2788 cp->tx_cons = tx_cons; 2789 cp->rx_cons = rx_cons; 2790 2791 if (cp->udev) 2792 uio_event_notify(&cp->udev->cnic_uinfo); 2793 } 2794 if (comp) 2795 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2796 } 2797 2798 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2799 { 2800 struct cnic_local *cp = dev->cnic_priv; 2801 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2802 int kcqe_cnt; 2803 2804 /* status block index must be read before reading other fields */ 2805 rmb(); 2806 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2807 2808 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2809 2810 service_kcqes(dev, kcqe_cnt); 2811 2812 /* Tell compiler that status_blk fields can change. 
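 * The chip updates the status block in host memory, so the status index and
 * the KWQ consumer index below must be re-read after the barrier instead of
 * being reused from a cached value.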
*/ 2813 barrier(); 2814 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2815 /* status block index must be read first */ 2816 rmb(); 2817 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2818 } 2819 2820 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2821 2822 cnic_chk_pkt_rings(cp); 2823 2824 return status_idx; 2825 } 2826 2827 static int cnic_service_bnx2(void *data, void *status_blk) 2828 { 2829 struct cnic_dev *dev = data; 2830 2831 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2832 struct status_block *sblk = status_blk; 2833 2834 return sblk->status_idx; 2835 } 2836 2837 return cnic_service_bnx2_queues(dev); 2838 } 2839 2840 static void cnic_service_bnx2_msix(unsigned long data) 2841 { 2842 struct cnic_dev *dev = (struct cnic_dev *) data; 2843 struct cnic_local *cp = dev->cnic_priv; 2844 2845 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2846 2847 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2848 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2849 } 2850 2851 static void cnic_doirq(struct cnic_dev *dev) 2852 { 2853 struct cnic_local *cp = dev->cnic_priv; 2854 2855 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2856 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2857 2858 prefetch(cp->status_blk.gen); 2859 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2860 2861 tasklet_schedule(&cp->cnic_irq_task); 2862 } 2863 } 2864 2865 static irqreturn_t cnic_irq(int irq, void *dev_instance) 2866 { 2867 struct cnic_dev *dev = dev_instance; 2868 struct cnic_local *cp = dev->cnic_priv; 2869 2870 if (cp->ack_int) 2871 cp->ack_int(dev); 2872 2873 cnic_doirq(dev); 2874 2875 return IRQ_HANDLED; 2876 } 2877 2878 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2879 u16 index, u8 op, u8 update) 2880 { 2881 struct cnic_local *cp = dev->cnic_priv; 2882 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2883 COMMAND_REG_INT_ACK); 2884 struct igu_ack_register igu_ack; 2885 2886 igu_ack.status_block_index = index; 2887 igu_ack.sb_id_and_flags = 2888 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 2889 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 2890 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 2891 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 2892 2893 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2894 } 2895 2896 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 2897 u16 index, u8 op, u8 update) 2898 { 2899 struct igu_regular cmd_data; 2900 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 2901 2902 cmd_data.sb_id_and_flags = 2903 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 2904 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 2905 (update << IGU_REGULAR_BUPDATE_SHIFT) | 2906 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 2907 2908 2909 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 2910 } 2911 2912 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2913 { 2914 struct cnic_local *cp = dev->cnic_priv; 2915 2916 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 2917 IGU_INT_DISABLE, 0); 2918 } 2919 2920 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 2921 { 2922 struct cnic_local *cp = dev->cnic_priv; 2923 2924 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 2925 IGU_INT_DISABLE, 0); 2926 } 2927 2928 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 2929 { 2930 u32 last_status = *info->status_idx_ptr; 2931 int kcqe_cnt; 2932 2933 /* status block index must be read before reading the KCQ */ 2934 rmb(); 2935 while 
((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2936 2937 service_kcqes(dev, kcqe_cnt); 2938 2939 /* Tell compiler that sblk fields can change. */ 2940 barrier(); 2941 2942 last_status = *info->status_idx_ptr; 2943 /* status block index must be read before reading the KCQ */ 2944 rmb(); 2945 } 2946 return last_status; 2947 } 2948 2949 static void cnic_service_bnx2x_bh(unsigned long data) 2950 { 2951 struct cnic_dev *dev = (struct cnic_dev *) data; 2952 struct cnic_local *cp = dev->cnic_priv; 2953 u32 status_idx, new_status_idx; 2954 2955 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2956 return; 2957 2958 while (1) { 2959 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2960 2961 CNIC_WR16(dev, cp->kcq1.io_addr, 2962 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2963 2964 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 2965 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2966 status_idx, IGU_INT_ENABLE, 1); 2967 break; 2968 } 2969 2970 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2971 2972 if (new_status_idx != status_idx) 2973 continue; 2974 2975 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2976 MAX_KCQ_IDX); 2977 2978 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2979 status_idx, IGU_INT_ENABLE, 1); 2980 2981 break; 2982 } 2983 } 2984 2985 static int cnic_service_bnx2x(void *data, void *status_blk) 2986 { 2987 struct cnic_dev *dev = data; 2988 struct cnic_local *cp = dev->cnic_priv; 2989 2990 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 2991 cnic_doirq(dev); 2992 2993 cnic_chk_pkt_rings(cp); 2994 2995 return 0; 2996 } 2997 2998 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) 2999 { 3000 struct cnic_ulp_ops *ulp_ops; 3001 3002 if (if_type == CNIC_ULP_ISCSI) 3003 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 3004 3005 mutex_lock(&cnic_lock); 3006 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3007 lockdep_is_held(&cnic_lock)); 3008 if (!ulp_ops) { 3009 mutex_unlock(&cnic_lock); 3010 return; 3011 } 3012 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3013 mutex_unlock(&cnic_lock); 3014 3015 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3016 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 3017 3018 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3019 } 3020 3021 static void cnic_ulp_stop(struct cnic_dev *dev) 3022 { 3023 struct cnic_local *cp = dev->cnic_priv; 3024 int if_type; 3025 3026 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) 3027 cnic_ulp_stop_one(cp, if_type); 3028 } 3029 3030 static void cnic_ulp_start(struct cnic_dev *dev) 3031 { 3032 struct cnic_local *cp = dev->cnic_priv; 3033 int if_type; 3034 3035 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 3036 struct cnic_ulp_ops *ulp_ops; 3037 3038 mutex_lock(&cnic_lock); 3039 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3040 lockdep_is_held(&cnic_lock)); 3041 if (!ulp_ops || !ulp_ops->cnic_start) { 3042 mutex_unlock(&cnic_lock); 3043 continue; 3044 } 3045 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3046 mutex_unlock(&cnic_lock); 3047 3048 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3049 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 3050 3051 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3052 } 3053 } 3054 3055 static int cnic_ctl(void *data, struct cnic_ctl_info *info) 3056 { 3057 struct cnic_dev *dev = data; 3058 3059 switch (info->cmd) { 3060 case CNIC_CTL_STOP_CMD: 3061 cnic_hold(dev); 3062 3063 cnic_ulp_stop(dev); 3064 cnic_stop_hw(dev); 3065 3066 
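/* Drop the reference taken at the start of the STOP command now that the ULPs and the hardware have been stopped. */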
cnic_put(dev); 3067 break; 3068 case CNIC_CTL_START_CMD: 3069 cnic_hold(dev); 3070 3071 if (!cnic_start_hw(dev)) 3072 cnic_ulp_start(dev); 3073 3074 cnic_put(dev); 3075 break; 3076 case CNIC_CTL_STOP_ISCSI_CMD: { 3077 struct cnic_local *cp = dev->cnic_priv; 3078 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); 3079 queue_delayed_work(cnic_wq, &cp->delete_task, 0); 3080 break; 3081 } 3082 case CNIC_CTL_COMPLETION_CMD: { 3083 struct cnic_ctl_completion *comp = &info->data.comp; 3084 u32 cid = BNX2X_SW_CID(comp->cid); 3085 u32 l5_cid; 3086 struct cnic_local *cp = dev->cnic_priv; 3087 3088 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3089 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3090 3091 if (unlikely(comp->error)) { 3092 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); 3093 netdev_err(dev->netdev, 3094 "CID %x CFC delete comp error %x\n", 3095 cid, comp->error); 3096 } 3097 3098 ctx->wait_cond = 1; 3099 wake_up(&ctx->waitq); 3100 } 3101 break; 3102 } 3103 default: 3104 return -EINVAL; 3105 } 3106 return 0; 3107 } 3108 3109 static void cnic_ulp_init(struct cnic_dev *dev) 3110 { 3111 int i; 3112 struct cnic_local *cp = dev->cnic_priv; 3113 3114 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3115 struct cnic_ulp_ops *ulp_ops; 3116 3117 mutex_lock(&cnic_lock); 3118 ulp_ops = cnic_ulp_tbl_prot(i); 3119 if (!ulp_ops || !ulp_ops->cnic_init) { 3120 mutex_unlock(&cnic_lock); 3121 continue; 3122 } 3123 ulp_get(ulp_ops); 3124 mutex_unlock(&cnic_lock); 3125 3126 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3127 ulp_ops->cnic_init(dev); 3128 3129 ulp_put(ulp_ops); 3130 } 3131 } 3132 3133 static void cnic_ulp_exit(struct cnic_dev *dev) 3134 { 3135 int i; 3136 struct cnic_local *cp = dev->cnic_priv; 3137 3138 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3139 struct cnic_ulp_ops *ulp_ops; 3140 3141 mutex_lock(&cnic_lock); 3142 ulp_ops = cnic_ulp_tbl_prot(i); 3143 if (!ulp_ops || !ulp_ops->cnic_exit) { 3144 mutex_unlock(&cnic_lock); 3145 continue; 3146 } 3147 ulp_get(ulp_ops); 3148 mutex_unlock(&cnic_lock); 3149 3150 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3151 ulp_ops->cnic_exit(dev); 3152 3153 ulp_put(ulp_ops); 3154 } 3155 } 3156 3157 static int cnic_cm_offload_pg(struct cnic_sock *csk) 3158 { 3159 struct cnic_dev *dev = csk->dev; 3160 struct l4_kwq_offload_pg *l4kwqe; 3161 struct kwqe *wqes[1]; 3162 3163 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 3164 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3165 wqes[0] = (struct kwqe *) l4kwqe; 3166 3167 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 3168 l4kwqe->flags = 3169 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 3170 l4kwqe->l2hdr_nbytes = ETH_HLEN; 3171 3172 l4kwqe->da0 = csk->ha[0]; 3173 l4kwqe->da1 = csk->ha[1]; 3174 l4kwqe->da2 = csk->ha[2]; 3175 l4kwqe->da3 = csk->ha[3]; 3176 l4kwqe->da4 = csk->ha[4]; 3177 l4kwqe->da5 = csk->ha[5]; 3178 3179 l4kwqe->sa0 = dev->mac_addr[0]; 3180 l4kwqe->sa1 = dev->mac_addr[1]; 3181 l4kwqe->sa2 = dev->mac_addr[2]; 3182 l4kwqe->sa3 = dev->mac_addr[3]; 3183 l4kwqe->sa4 = dev->mac_addr[4]; 3184 l4kwqe->sa5 = dev->mac_addr[5]; 3185 3186 l4kwqe->etype = ETH_P_IP; 3187 l4kwqe->ipid_start = DEF_IPID_START; 3188 l4kwqe->host_opaque = csk->l5_cid; 3189 3190 if (csk->vlan_id) { 3191 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 3192 l4kwqe->vlan_tag = csk->vlan_id; 3193 l4kwqe->l2hdr_nbytes += 4; 3194 } 3195 3196 return dev->submit_kwqes(dev, wqes, 1); 3197 } 3198 3199 static int cnic_cm_update_pg(struct cnic_sock *csk) 3200 { 3201 struct cnic_dev *dev = csk->dev; 3202 struct 
l4_kwq_update_pg *l4kwqe; 3203 struct kwqe *wqes[1]; 3204 3205 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 3206 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3207 wqes[0] = (struct kwqe *) l4kwqe; 3208 3209 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 3210 l4kwqe->flags = 3211 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 3212 l4kwqe->pg_cid = csk->pg_cid; 3213 3214 l4kwqe->da0 = csk->ha[0]; 3215 l4kwqe->da1 = csk->ha[1]; 3216 l4kwqe->da2 = csk->ha[2]; 3217 l4kwqe->da3 = csk->ha[3]; 3218 l4kwqe->da4 = csk->ha[4]; 3219 l4kwqe->da5 = csk->ha[5]; 3220 3221 l4kwqe->pg_host_opaque = csk->l5_cid; 3222 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 3223 3224 return dev->submit_kwqes(dev, wqes, 1); 3225 } 3226 3227 static int cnic_cm_upload_pg(struct cnic_sock *csk) 3228 { 3229 struct cnic_dev *dev = csk->dev; 3230 struct l4_kwq_upload *l4kwqe; 3231 struct kwqe *wqes[1]; 3232 3233 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 3234 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3235 wqes[0] = (struct kwqe *) l4kwqe; 3236 3237 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 3238 l4kwqe->flags = 3239 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 3240 l4kwqe->cid = csk->pg_cid; 3241 3242 return dev->submit_kwqes(dev, wqes, 1); 3243 } 3244 3245 static int cnic_cm_conn_req(struct cnic_sock *csk) 3246 { 3247 struct cnic_dev *dev = csk->dev; 3248 struct l4_kwq_connect_req1 *l4kwqe1; 3249 struct l4_kwq_connect_req2 *l4kwqe2; 3250 struct l4_kwq_connect_req3 *l4kwqe3; 3251 struct kwqe *wqes[3]; 3252 u8 tcp_flags = 0; 3253 int num_wqes = 2; 3254 3255 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 3256 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 3257 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 3258 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 3259 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 3260 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 3261 3262 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 3263 l4kwqe3->flags = 3264 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 3265 l4kwqe3->ka_timeout = csk->ka_timeout; 3266 l4kwqe3->ka_interval = csk->ka_interval; 3267 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 3268 l4kwqe3->tos = csk->tos; 3269 l4kwqe3->ttl = csk->ttl; 3270 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 3271 l4kwqe3->pmtu = csk->mtu; 3272 l4kwqe3->rcv_buf = csk->rcv_buf; 3273 l4kwqe3->snd_buf = csk->snd_buf; 3274 l4kwqe3->seed = csk->seed; 3275 3276 wqes[0] = (struct kwqe *) l4kwqe1; 3277 if (test_bit(SK_F_IPV6, &csk->flags)) { 3278 wqes[1] = (struct kwqe *) l4kwqe2; 3279 wqes[2] = (struct kwqe *) l4kwqe3; 3280 num_wqes = 3; 3281 3282 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 3283 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 3284 l4kwqe2->flags = 3285 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 3286 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 3287 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 3288 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 3289 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 3290 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 3291 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 3292 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 3293 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 3294 sizeof(struct tcphdr); 3295 } else { 3296 wqes[1] = (struct kwqe *) l4kwqe3; 3297 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 3298 sizeof(struct tcphdr); 3299 } 3300 3301 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 3302 l4kwqe1->flags = 3303 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 
3304 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 3305 l4kwqe1->cid = csk->cid; 3306 l4kwqe1->pg_cid = csk->pg_cid; 3307 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 3308 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 3309 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 3310 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 3311 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 3312 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 3313 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 3314 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 3315 if (csk->tcp_flags & SK_TCP_NAGLE) 3316 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 3317 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 3318 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 3319 if (csk->tcp_flags & SK_TCP_SACK) 3320 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 3321 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 3322 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 3323 3324 l4kwqe1->tcp_flags = tcp_flags; 3325 3326 return dev->submit_kwqes(dev, wqes, num_wqes); 3327 } 3328 3329 static int cnic_cm_close_req(struct cnic_sock *csk) 3330 { 3331 struct cnic_dev *dev = csk->dev; 3332 struct l4_kwq_close_req *l4kwqe; 3333 struct kwqe *wqes[1]; 3334 3335 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 3336 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3337 wqes[0] = (struct kwqe *) l4kwqe; 3338 3339 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 3340 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 3341 l4kwqe->cid = csk->cid; 3342 3343 return dev->submit_kwqes(dev, wqes, 1); 3344 } 3345 3346 static int cnic_cm_abort_req(struct cnic_sock *csk) 3347 { 3348 struct cnic_dev *dev = csk->dev; 3349 struct l4_kwq_reset_req *l4kwqe; 3350 struct kwqe *wqes[1]; 3351 3352 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 3353 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3354 wqes[0] = (struct kwqe *) l4kwqe; 3355 3356 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 3357 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 3358 l4kwqe->cid = csk->cid; 3359 3360 return dev->submit_kwqes(dev, wqes, 1); 3361 } 3362 3363 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 3364 u32 l5_cid, struct cnic_sock **csk, void *context) 3365 { 3366 struct cnic_local *cp = dev->cnic_priv; 3367 struct cnic_sock *csk1; 3368 3369 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3370 return -EINVAL; 3371 3372 if (cp->ctx_tbl) { 3373 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3374 3375 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3376 return -EAGAIN; 3377 } 3378 3379 csk1 = &cp->csk_tbl[l5_cid]; 3380 if (atomic_read(&csk1->ref_count)) 3381 return -EAGAIN; 3382 3383 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 3384 return -EBUSY; 3385 3386 csk1->dev = dev; 3387 csk1->cid = cid; 3388 csk1->l5_cid = l5_cid; 3389 csk1->ulp_type = ulp_type; 3390 csk1->context = context; 3391 3392 csk1->ka_timeout = DEF_KA_TIMEOUT; 3393 csk1->ka_interval = DEF_KA_INTERVAL; 3394 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 3395 csk1->tos = DEF_TOS; 3396 csk1->ttl = DEF_TTL; 3397 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 3398 csk1->rcv_buf = DEF_RCV_BUF; 3399 csk1->snd_buf = DEF_SND_BUF; 3400 csk1->seed = DEF_SEED; 3401 3402 *csk = csk1; 3403 return 0; 3404 } 3405 3406 static void cnic_cm_cleanup(struct cnic_sock *csk) 3407 { 3408 if (csk->src_port) { 3409 struct cnic_dev *dev = csk->dev; 3410 struct cnic_local *cp = dev->cnic_priv; 3411 3412 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); 3413 csk->src_port = 0; 3414 } 3415 } 3416 3417 static void cnic_close_conn(struct cnic_sock *csk) 3418 { 3419 if 
(test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 3420 cnic_cm_upload_pg(csk); 3421 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3422 } 3423 cnic_cm_cleanup(csk); 3424 } 3425 3426 static int cnic_cm_destroy(struct cnic_sock *csk) 3427 { 3428 if (!cnic_in_use(csk)) 3429 return -EINVAL; 3430 3431 csk_hold(csk); 3432 clear_bit(SK_F_INUSE, &csk->flags); 3433 smp_mb__after_clear_bit(); 3434 while (atomic_read(&csk->ref_count) != 1) 3435 msleep(1); 3436 cnic_cm_cleanup(csk); 3437 3438 csk->flags = 0; 3439 csk_put(csk); 3440 return 0; 3441 } 3442 3443 static inline u16 cnic_get_vlan(struct net_device *dev, 3444 struct net_device **vlan_dev) 3445 { 3446 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3447 *vlan_dev = vlan_dev_real_dev(dev); 3448 return vlan_dev_vlan_id(dev); 3449 } 3450 *vlan_dev = dev; 3451 return 0; 3452 } 3453 3454 static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 3455 struct dst_entry **dst) 3456 { 3457 #if defined(CONFIG_INET) 3458 struct rtable *rt; 3459 3460 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); 3461 if (!IS_ERR(rt)) { 3462 *dst = &rt->dst; 3463 return 0; 3464 } 3465 return PTR_ERR(rt); 3466 #else 3467 return -ENETUNREACH; 3468 #endif 3469 } 3470 3471 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3472 struct dst_entry **dst) 3473 { 3474 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3475 struct flowi6 fl6; 3476 3477 memset(&fl6, 0, sizeof(fl6)); 3478 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); 3479 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 3480 fl6.flowi6_oif = dst_addr->sin6_scope_id; 3481 3482 *dst = ip6_route_output(&init_net, NULL, &fl6); 3483 if (*dst) 3484 return 0; 3485 #endif 3486 3487 return -ENETUNREACH; 3488 } 3489 3490 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 3491 int ulp_type) 3492 { 3493 struct cnic_dev *dev = NULL; 3494 struct dst_entry *dst; 3495 struct net_device *netdev = NULL; 3496 int err = -ENETUNREACH; 3497 3498 if (dst_addr->sin_family == AF_INET) 3499 err = cnic_get_v4_route(dst_addr, &dst); 3500 else if (dst_addr->sin_family == AF_INET6) { 3501 struct sockaddr_in6 *dst_addr6 = 3502 (struct sockaddr_in6 *) dst_addr; 3503 3504 err = cnic_get_v6_route(dst_addr6, &dst); 3505 } else 3506 return NULL; 3507 3508 if (err) 3509 return NULL; 3510 3511 if (!dst->dev) 3512 goto done; 3513 3514 cnic_get_vlan(dst->dev, &netdev); 3515 3516 dev = cnic_from_netdev(netdev); 3517 3518 done: 3519 dst_release(dst); 3520 if (dev) 3521 cnic_put(dev); 3522 return dev; 3523 } 3524 3525 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3526 { 3527 struct cnic_dev *dev = csk->dev; 3528 struct cnic_local *cp = dev->cnic_priv; 3529 3530 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3531 } 3532 3533 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3534 { 3535 struct cnic_dev *dev = csk->dev; 3536 struct cnic_local *cp = dev->cnic_priv; 3537 int is_v6, rc = 0; 3538 struct dst_entry *dst = NULL; 3539 struct net_device *realdev; 3540 __be16 local_port; 3541 u32 port_id; 3542 3543 if (saddr->local.v6.sin6_family == AF_INET6 && 3544 saddr->remote.v6.sin6_family == AF_INET6) 3545 is_v6 = 1; 3546 else if (saddr->local.v4.sin_family == AF_INET && 3547 saddr->remote.v4.sin_family == AF_INET) 3548 is_v6 = 0; 3549 else 3550 return -EINVAL; 3551 3552 clear_bit(SK_F_IPV6, &csk->flags); 3553 3554 if (is_v6) { 3555 set_bit(SK_F_IPV6, &csk->flags); 3556 cnic_get_v6_route(&saddr->remote.v6, &dst); 
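/*
 * The route lookup is best effort: its return value is ignored and dst may
 * remain NULL, in which case the VLAN and MTU defaults set further down are
 * kept.
 */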
3557 3558 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3559 sizeof(struct in6_addr)); 3560 csk->dst_port = saddr->remote.v6.sin6_port; 3561 local_port = saddr->local.v6.sin6_port; 3562 3563 } else { 3564 cnic_get_v4_route(&saddr->remote.v4, &dst); 3565 3566 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3567 csk->dst_port = saddr->remote.v4.sin_port; 3568 local_port = saddr->local.v4.sin_port; 3569 } 3570 3571 csk->vlan_id = 0; 3572 csk->mtu = dev->netdev->mtu; 3573 if (dst && dst->dev) { 3574 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3575 if (realdev == dev->netdev) { 3576 csk->vlan_id = vlan; 3577 csk->mtu = dst_mtu(dst); 3578 } 3579 } 3580 3581 port_id = be16_to_cpu(local_port); 3582 if (port_id >= CNIC_LOCAL_PORT_MIN && 3583 port_id < CNIC_LOCAL_PORT_MAX) { 3584 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) 3585 port_id = 0; 3586 } else 3587 port_id = 0; 3588 3589 if (!port_id) { 3590 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); 3591 if (port_id == -1) { 3592 rc = -ENOMEM; 3593 goto err_out; 3594 } 3595 local_port = cpu_to_be16(port_id); 3596 } 3597 csk->src_port = local_port; 3598 3599 err_out: 3600 dst_release(dst); 3601 return rc; 3602 } 3603 3604 static void cnic_init_csk_state(struct cnic_sock *csk) 3605 { 3606 csk->state = 0; 3607 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3608 clear_bit(SK_F_CLOSING, &csk->flags); 3609 } 3610 3611 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3612 { 3613 struct cnic_local *cp = csk->dev->cnic_priv; 3614 int err = 0; 3615 3616 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) 3617 return -EOPNOTSUPP; 3618 3619 if (!cnic_in_use(csk)) 3620 return -EINVAL; 3621 3622 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3623 return -EINVAL; 3624 3625 cnic_init_csk_state(csk); 3626 3627 err = cnic_get_route(csk, saddr); 3628 if (err) 3629 goto err_out; 3630 3631 err = cnic_resolve_addr(csk, saddr); 3632 if (!err) 3633 return 0; 3634 3635 err_out: 3636 clear_bit(SK_F_CONNECT_START, &csk->flags); 3637 return err; 3638 } 3639 3640 static int cnic_cm_abort(struct cnic_sock *csk) 3641 { 3642 struct cnic_local *cp = csk->dev->cnic_priv; 3643 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3644 3645 if (!cnic_in_use(csk)) 3646 return -EINVAL; 3647 3648 if (cnic_abort_prep(csk)) 3649 return cnic_cm_abort_req(csk); 3650 3651 /* Getting here means that we haven't started connect, or 3652 * connect was not successful. 
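 * Complete the abort locally: close_conn() is invoked with RESET_COMP, and
 * -EALREADY is returned if the socket state no longer matches that opcode.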
3653 */ 3654 3655 cp->close_conn(csk, opcode); 3656 if (csk->state != opcode) 3657 return -EALREADY; 3658 3659 return 0; 3660 } 3661 3662 static int cnic_cm_close(struct cnic_sock *csk) 3663 { 3664 if (!cnic_in_use(csk)) 3665 return -EINVAL; 3666 3667 if (cnic_close_prep(csk)) { 3668 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3669 return cnic_cm_close_req(csk); 3670 } else { 3671 return -EALREADY; 3672 } 3673 return 0; 3674 } 3675 3676 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3677 u8 opcode) 3678 { 3679 struct cnic_ulp_ops *ulp_ops; 3680 int ulp_type = csk->ulp_type; 3681 3682 rcu_read_lock(); 3683 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3684 if (ulp_ops) { 3685 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3686 ulp_ops->cm_connect_complete(csk); 3687 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3688 ulp_ops->cm_close_complete(csk); 3689 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3690 ulp_ops->cm_remote_abort(csk); 3691 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3692 ulp_ops->cm_abort_complete(csk); 3693 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3694 ulp_ops->cm_remote_close(csk); 3695 } 3696 rcu_read_unlock(); 3697 } 3698 3699 static int cnic_cm_set_pg(struct cnic_sock *csk) 3700 { 3701 if (cnic_offld_prep(csk)) { 3702 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3703 cnic_cm_update_pg(csk); 3704 else 3705 cnic_cm_offload_pg(csk); 3706 } 3707 return 0; 3708 } 3709 3710 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3711 { 3712 struct cnic_local *cp = dev->cnic_priv; 3713 u32 l5_cid = kcqe->pg_host_opaque; 3714 u8 opcode = kcqe->op_code; 3715 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3716 3717 csk_hold(csk); 3718 if (!cnic_in_use(csk)) 3719 goto done; 3720 3721 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3722 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3723 goto done; 3724 } 3725 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3726 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3727 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3728 cnic_cm_upcall(cp, csk, 3729 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3730 goto done; 3731 } 3732 3733 csk->pg_cid = kcqe->pg_cid; 3734 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3735 cnic_cm_conn_req(csk); 3736 3737 done: 3738 csk_put(csk); 3739 } 3740 3741 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) 3742 { 3743 struct cnic_local *cp = dev->cnic_priv; 3744 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; 3745 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; 3746 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3747 3748 ctx->timestamp = jiffies; 3749 ctx->wait_cond = 1; 3750 wake_up(&ctx->waitq); 3751 } 3752 3753 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3754 { 3755 struct cnic_local *cp = dev->cnic_priv; 3756 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3757 u8 opcode = l4kcqe->op_code; 3758 u32 l5_cid; 3759 struct cnic_sock *csk; 3760 3761 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { 3762 cnic_process_fcoe_term_conn(dev, kcqe); 3763 return; 3764 } 3765 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3766 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3767 cnic_cm_process_offld_pg(dev, l4kcqe); 3768 return; 3769 } 3770 3771 l5_cid = l4kcqe->conn_id; 3772 if (opcode & 0x80) 3773 l5_cid = l4kcqe->cid; 3774 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3775 return; 3776 3777 csk = &cp->csk_tbl[l5_cid]; 3778 
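/*
 * Take a reference so the socket cannot be torn down or reused while this
 * event is handled; cnic_in_use() is re-checked under the hold.
 */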
csk_hold(csk); 3779 3780 if (!cnic_in_use(csk)) { 3781 csk_put(csk); 3782 return; 3783 } 3784 3785 switch (opcode) { 3786 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3787 if (l4kcqe->status != 0) { 3788 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3789 cnic_cm_upcall(cp, csk, 3790 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3791 } 3792 break; 3793 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3794 if (l4kcqe->status == 0) 3795 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3796 3797 smp_mb__before_clear_bit(); 3798 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3799 cnic_cm_upcall(cp, csk, opcode); 3800 break; 3801 3802 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3803 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3804 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3805 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3806 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3807 cp->close_conn(csk, opcode); 3808 break; 3809 3810 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3811 /* after we already sent CLOSE_REQ */ 3812 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) && 3813 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) && 3814 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3815 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); 3816 else 3817 cnic_cm_upcall(cp, csk, opcode); 3818 break; 3819 } 3820 csk_put(csk); 3821 } 3822 3823 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3824 { 3825 struct cnic_dev *dev = data; 3826 int i; 3827 3828 for (i = 0; i < num; i++) 3829 cnic_cm_process_kcqe(dev, kcqe[i]); 3830 } 3831 3832 static struct cnic_ulp_ops cm_ulp_ops = { 3833 .indicate_kcqes = cnic_cm_indicate_kcqe, 3834 }; 3835 3836 static void cnic_cm_free_mem(struct cnic_dev *dev) 3837 { 3838 struct cnic_local *cp = dev->cnic_priv; 3839 3840 kfree(cp->csk_tbl); 3841 cp->csk_tbl = NULL; 3842 cnic_free_id_tbl(&cp->csk_port_tbl); 3843 } 3844 3845 static int cnic_cm_alloc_mem(struct cnic_dev *dev) 3846 { 3847 struct cnic_local *cp = dev->cnic_priv; 3848 u32 port_id; 3849 3850 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 3851 GFP_KERNEL); 3852 if (!cp->csk_tbl) 3853 return -ENOMEM; 3854 3855 port_id = random32(); 3856 port_id %= CNIC_LOCAL_PORT_RANGE; 3857 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 3858 CNIC_LOCAL_PORT_MIN, port_id)) { 3859 cnic_cm_free_mem(dev); 3860 return -ENOMEM; 3861 } 3862 return 0; 3863 } 3864 3865 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3866 { 3867 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 3868 /* Unsolicited RESET_COMP or RESET_RECEIVED */ 3869 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3870 csk->state = opcode; 3871 } 3872 3873 /* 1. If event opcode matches the expected event in csk->state 3874 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any 3875 * event 3876 * 3. If the expected event is 0, meaning the connection was never 3877 * established, we accept the opcode from cm_abort.
3878 */ 3879 if (opcode == csk->state || csk->state == 0 || 3880 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP || 3881 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) { 3882 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3883 if (csk->state == 0) 3884 csk->state = opcode; 3885 return 1; 3886 } 3887 } 3888 return 0; 3889 } 3890 3891 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 3892 { 3893 struct cnic_dev *dev = csk->dev; 3894 struct cnic_local *cp = dev->cnic_priv; 3895 3896 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { 3897 cnic_cm_upcall(cp, csk, opcode); 3898 return; 3899 } 3900 3901 clear_bit(SK_F_CONNECT_START, &csk->flags); 3902 cnic_close_conn(csk); 3903 csk->state = opcode; 3904 cnic_cm_upcall(cp, csk, opcode); 3905 } 3906 3907 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3908 { 3909 } 3910 3911 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 3912 { 3913 u32 seed; 3914 3915 seed = random32(); 3916 cnic_ctx_wr(dev, 45, 0, seed); 3917 return 0; 3918 } 3919 3920 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 3921 { 3922 struct cnic_dev *dev = csk->dev; 3923 struct cnic_local *cp = dev->cnic_priv; 3924 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 3925 union l5cm_specific_data l5_data; 3926 u32 cmd = 0; 3927 int close_complete = 0; 3928 3929 switch (opcode) { 3930 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3931 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3932 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3933 if (cnic_ready_to_close(csk, opcode)) { 3934 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3935 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3936 else 3937 close_complete = 1; 3938 } 3939 break; 3940 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3941 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3942 break; 3943 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3944 close_complete = 1; 3945 break; 3946 } 3947 if (cmd) { 3948 memset(&l5_data, 0, sizeof(l5_data)); 3949 3950 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 3951 &l5_data); 3952 } else if (close_complete) { 3953 ctx->timestamp = jiffies; 3954 cnic_close_conn(csk); 3955 cnic_cm_upcall(cp, csk, csk->state); 3956 } 3957 } 3958 3959 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3960 { 3961 struct cnic_local *cp = dev->cnic_priv; 3962 3963 if (!cp->ctx_tbl) 3964 return; 3965 3966 if (!netif_running(dev->netdev)) 3967 return; 3968 3969 cnic_bnx2x_delete_wait(dev, 0); 3970 3971 cancel_delayed_work(&cp->delete_task); 3972 flush_workqueue(cnic_wq); 3973 3974 if (atomic_read(&cp->iscsi_conn) != 0) 3975 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", 3976 atomic_read(&cp->iscsi_conn)); 3977 } 3978 3979 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3980 { 3981 struct cnic_local *cp = dev->cnic_priv; 3982 u32 pfid = cp->pfid; 3983 u32 port = CNIC_PORT(cp); 3984 3985 cnic_init_bnx2x_mac(dev); 3986 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3987 3988 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3989 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 3990 3991 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3992 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); 3993 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3994 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), 3995 DEF_MAX_DA_COUNT); 3996 3997 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3998 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 3999 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4000 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 4001 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4002 
XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 4003 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4004 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 4005 4006 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 4007 DEF_MAX_CWND); 4008 return 0; 4009 } 4010 4011 static void cnic_delete_task(struct work_struct *work) 4012 { 4013 struct cnic_local *cp; 4014 struct cnic_dev *dev; 4015 u32 i; 4016 int need_resched = 0; 4017 4018 cp = container_of(work, struct cnic_local, delete_task.work); 4019 dev = cp->dev; 4020 4021 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { 4022 struct drv_ctl_info info; 4023 4024 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); 4025 4026 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; 4027 cp->ethdev->drv_ctl(dev->netdev, &info); 4028 } 4029 4030 for (i = 0; i < cp->max_cid_space; i++) { 4031 struct cnic_context *ctx = &cp->ctx_tbl[i]; 4032 int err; 4033 4034 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 4035 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4036 continue; 4037 4038 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 4039 need_resched = 1; 4040 continue; 4041 } 4042 4043 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4044 continue; 4045 4046 err = cnic_bnx2x_destroy_ramrod(dev, i); 4047 4048 cnic_free_bnx2x_conn_resc(dev, i); 4049 if (!err) { 4050 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 4051 atomic_dec(&cp->iscsi_conn); 4052 4053 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 4054 } 4055 } 4056 4057 if (need_resched) 4058 queue_delayed_work(cnic_wq, &cp->delete_task, 4059 msecs_to_jiffies(10)); 4060 4061 } 4062 4063 static int cnic_cm_open(struct cnic_dev *dev) 4064 { 4065 struct cnic_local *cp = dev->cnic_priv; 4066 int err; 4067 4068 err = cnic_cm_alloc_mem(dev); 4069 if (err) 4070 return err; 4071 4072 err = cp->start_cm(dev); 4073 4074 if (err) 4075 goto err_out; 4076 4077 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 4078 4079 dev->cm_create = cnic_cm_create; 4080 dev->cm_destroy = cnic_cm_destroy; 4081 dev->cm_connect = cnic_cm_connect; 4082 dev->cm_abort = cnic_cm_abort; 4083 dev->cm_close = cnic_cm_close; 4084 dev->cm_select_dev = cnic_cm_select_dev; 4085 4086 cp->ulp_handle[CNIC_ULP_L4] = dev; 4087 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 4088 return 0; 4089 4090 err_out: 4091 cnic_cm_free_mem(dev); 4092 return err; 4093 } 4094 4095 static int cnic_cm_shutdown(struct cnic_dev *dev) 4096 { 4097 struct cnic_local *cp = dev->cnic_priv; 4098 int i; 4099 4100 cp->stop_cm(dev); 4101 4102 if (!cp->csk_tbl) 4103 return 0; 4104 4105 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 4106 struct cnic_sock *csk = &cp->csk_tbl[i]; 4107 4108 clear_bit(SK_F_INUSE, &csk->flags); 4109 cnic_cm_cleanup(csk); 4110 } 4111 cnic_cm_free_mem(dev); 4112 4113 return 0; 4114 } 4115 4116 static void cnic_init_context(struct cnic_dev *dev, u32 cid) 4117 { 4118 u32 cid_addr; 4119 int i; 4120 4121 cid_addr = GET_CID_ADDR(cid); 4122 4123 for (i = 0; i < CTX_SIZE; i += 4) 4124 cnic_ctx_wr(dev, cid_addr, i, 0); 4125 } 4126 4127 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 4128 { 4129 struct cnic_local *cp = dev->cnic_priv; 4130 int ret = 0, i; 4131 u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4132 4133 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4134 return 0; 4135 4136 for (i = 0; i < cp->ctx_blks; i++) { 4137 int j; 4138 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4139 u32 val; 4140 4141 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 4142 4143 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4144 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4145 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 4146 (u64) cp->ctx_arr[i].mapping >> 32); 4147 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 4148 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4149 for (j = 0; j < 10; j++) { 4150 4151 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 4152 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 4153 break; 4154 udelay(5); 4155 } 4156 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 4157 ret = -EBUSY; 4158 break; 4159 } 4160 } 4161 return ret; 4162 } 4163 4164 static void cnic_free_irq(struct cnic_dev *dev) 4165 { 4166 struct cnic_local *cp = dev->cnic_priv; 4167 struct cnic_eth_dev *ethdev = cp->ethdev; 4168 4169 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4170 cp->disable_int_sync(dev); 4171 tasklet_kill(&cp->cnic_irq_task); 4172 free_irq(ethdev->irq_arr[0].vector, dev); 4173 } 4174 } 4175 4176 static int cnic_request_irq(struct cnic_dev *dev) 4177 { 4178 struct cnic_local *cp = dev->cnic_priv; 4179 struct cnic_eth_dev *ethdev = cp->ethdev; 4180 int err; 4181 4182 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 4183 if (err) 4184 tasklet_disable(&cp->cnic_irq_task); 4185 4186 return err; 4187 } 4188 4189 static int cnic_init_bnx2_irq(struct cnic_dev *dev) 4190 { 4191 struct cnic_local *cp = dev->cnic_priv; 4192 struct cnic_eth_dev *ethdev = cp->ethdev; 4193 4194 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4195 int err, i = 0; 4196 int sblk_num = cp->status_blk_num; 4197 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4198 BNX2_HC_SB_CONFIG_1; 4199 4200 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4201 4202 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 4203 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 4204 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 4205 4206 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 4207 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 4208 (unsigned long) dev); 4209 err = cnic_request_irq(dev); 4210 if (err) 4211 return err; 4212 4213 while (cp->status_blk.bnx2->status_completion_producer_index && 4214 i < 10) { 4215 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 4216 1 << (11 + sblk_num)); 4217 udelay(10); 4218 i++; 4219 barrier(); 4220 } 4221 if (cp->status_blk.bnx2->status_completion_producer_index) { 4222 cnic_free_irq(dev); 4223 goto failed; 4224 } 4225 4226 } else { 4227 struct status_block *sblk = cp->status_blk.gen; 4228 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 4229 int i = 0; 4230 4231 while (sblk->status_completion_producer_index && i < 10) { 4232 CNIC_WR(dev, BNX2_HC_COMMAND, 4233 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 4234 udelay(10); 4235 i++; 4236 barrier(); 4237 } 4238 if (sblk->status_completion_producer_index) 4239 goto failed; 4240 4241 } 4242 return 0; 4243 4244 failed: 4245 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 4246 return -EBUSY; 4247 } 4248 4249 static void cnic_enable_bnx2_int(struct cnic_dev *dev) 4250 { 4251 struct cnic_local *cp = dev->cnic_priv; 4252 struct cnic_eth_dev *ethdev = cp->ethdev; 4253 4254 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4255 return; 4256 4257 
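	/* Ack the status block up to the last index the driver has seen and
	 * leave the vector armed: cp->int_num selects the CNIC MSI-X vector
	 * and INDEX_VALID marks the index in this write as meaningful.  The
	 * non-MSI-X case has nothing to re-arm, hence the early return
	 * above. */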
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4258 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4259 } 4260 4261 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4262 { 4263 struct cnic_local *cp = dev->cnic_priv; 4264 struct cnic_eth_dev *ethdev = cp->ethdev; 4265 4266 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4267 return; 4268 4269 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4270 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4271 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 4272 synchronize_irq(ethdev->irq_arr[0].vector); 4273 } 4274 4275 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 4276 { 4277 struct cnic_local *cp = dev->cnic_priv; 4278 struct cnic_eth_dev *ethdev = cp->ethdev; 4279 struct cnic_uio_dev *udev = cp->udev; 4280 u32 cid_addr, tx_cid, sb_id; 4281 u32 val, offset0, offset1, offset2, offset3; 4282 int i; 4283 struct tx_bd *txbd; 4284 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4285 struct status_block *s_blk = cp->status_blk.gen; 4286 4287 sb_id = cp->status_blk_num; 4288 tx_cid = 20; 4289 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 4290 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4291 struct status_block_msix *sblk = cp->status_blk.bnx2; 4292 4293 tx_cid = TX_TSS_CID + sb_id - 1; 4294 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 4295 (TX_TSS_CID << 7)); 4296 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 4297 } 4298 cp->tx_cons = *cp->tx_cons_ptr; 4299 4300 cid_addr = GET_CID_ADDR(tx_cid); 4301 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 4302 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4303 4304 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4305 cnic_ctx_wr(dev, cid_addr2, i, 0); 4306 4307 offset0 = BNX2_L2CTX_TYPE_XI; 4308 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 4309 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 4310 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 4311 } else { 4312 cnic_init_context(dev, tx_cid); 4313 cnic_init_context(dev, tx_cid + 1); 4314 4315 offset0 = BNX2_L2CTX_TYPE; 4316 offset1 = BNX2_L2CTX_CMD_TYPE; 4317 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 4318 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 4319 } 4320 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 4321 cnic_ctx_wr(dev, cid_addr, offset0, val); 4322 4323 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4324 cnic_ctx_wr(dev, cid_addr, offset1, val); 4325 4326 txbd = udev->l2_ring; 4327 4328 buf_map = udev->l2_buf_map; 4329 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 4330 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4331 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4332 } 4333 val = (u64) ring_map >> 32; 4334 cnic_ctx_wr(dev, cid_addr, offset2, val); 4335 txbd->tx_bd_haddr_hi = val; 4336 4337 val = (u64) ring_map & 0xffffffff; 4338 cnic_ctx_wr(dev, cid_addr, offset3, val); 4339 txbd->tx_bd_haddr_lo = val; 4340 } 4341 4342 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 4343 { 4344 struct cnic_local *cp = dev->cnic_priv; 4345 struct cnic_eth_dev *ethdev = cp->ethdev; 4346 struct cnic_uio_dev *udev = cp->udev; 4347 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4348 int i; 4349 struct rx_bd *rxbd; 4350 struct status_block *s_blk = cp->status_blk.gen; 4351 dma_addr_t ring_map = udev->l2_ring_map; 4352 4353 sb_id = cp->status_blk_num; 4354 cnic_init_context(dev, 2); 4355 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 4356 coal_reg = BNX2_HC_COMMAND; 4357 coal_val = CNIC_RD(dev, coal_reg); 4358 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4359 struct status_block_msix *sblk = cp->status_blk.bnx2; 4360 4361 
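		/* With MSI-X the L2 RX consumer index lives in this vector's
		 * own status block, and the "coalesce now" nudge done in the
		 * loop below goes through BNX2_HC_COALESCE_NOW with a
		 * per-status-block bit (1 << (11 + sb_id)) instead of the
		 * default HC command register. */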
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 4362 coal_reg = BNX2_HC_COALESCE_NOW; 4363 coal_val = 1 << (11 + sb_id); 4364 } 4365 i = 0; 4366 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 4367 CNIC_WR(dev, coal_reg, coal_val); 4368 udelay(10); 4369 i++; 4370 barrier(); 4371 } 4372 cp->rx_cons = *cp->rx_cons_ptr; 4373 4374 cid_addr = GET_CID_ADDR(2); 4375 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4376 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4377 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 4378 4379 if (sb_id == 0) 4380 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 4381 else 4382 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4383 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4384 4385 rxbd = udev->l2_ring + BCM_PAGE_SIZE; 4386 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 4387 dma_addr_t buf_map; 4388 int n = (i % cp->l2_rx_ring_size) + 1; 4389 4390 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4391 rxbd->rx_bd_len = cp->l2_single_buf_size; 4392 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 4393 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4394 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4395 } 4396 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4397 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4398 rxbd->rx_bd_haddr_hi = val; 4399 4400 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4401 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4402 rxbd->rx_bd_haddr_lo = val; 4403 4404 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 4405 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 4406 } 4407 4408 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 4409 { 4410 struct kwqe *wqes[1], l2kwqe; 4411 4412 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4413 wqes[0] = &l2kwqe; 4414 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | 4415 (L2_KWQE_OPCODE_VALUE_FLUSH << 4416 KWQE_OPCODE_SHIFT) | 2; 4417 dev->submit_kwqes(dev, wqes, 1); 4418 } 4419 4420 static void cnic_set_bnx2_mac(struct cnic_dev *dev) 4421 { 4422 struct cnic_local *cp = dev->cnic_priv; 4423 u32 val; 4424 4425 val = cp->func << 2; 4426 4427 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 4428 4429 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4430 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 4431 dev->mac_addr[0] = (u8) (val >> 8); 4432 dev->mac_addr[1] = (u8) val; 4433 4434 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 4435 4436 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4437 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 4438 dev->mac_addr[2] = (u8) (val >> 24); 4439 dev->mac_addr[3] = (u8) (val >> 16); 4440 dev->mac_addr[4] = (u8) (val >> 8); 4441 dev->mac_addr[5] = (u8) val; 4442 4443 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4444 4445 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4446 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4447 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4448 4449 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4450 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 4451 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 4452 } 4453 4454 static int cnic_start_bnx2_hw(struct cnic_dev *dev) 4455 { 4456 struct cnic_local *cp = dev->cnic_priv; 4457 struct cnic_eth_dev *ethdev = cp->ethdev; 4458 struct status_block *sblk = cp->status_blk.gen; 4459 u32 val, kcq_cid_addr, kwq_cid_addr; 4460 int err; 4461 4462 cnic_set_bnx2_mac(dev); 4463 4464 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4465 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4466 if (BCM_PAGE_BITS > 12) 4467 val |= (12 - 8) << 4; 4468 else 4469 val |= (BCM_PAGE_BITS - 8) << 4; 4470 
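	/* The MQ kernel-bypass block size field holds BCM_PAGE_BITS - 8,
	 * shifted into place by << 4: with 4 KiB host pages BCM_PAGE_BITS is
	 * 12 and the field is programmed to 4, and larger page sizes are
	 * clamped to the same 4 KiB encoding by the branch above. */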
	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		(u16 *) &sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map =
udev->l2_ring_map; 4665 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4666 int i; 4667 u32 cli = cp->ethdev->iscsi_l2_client_id; 4668 u32 val; 4669 4670 memset(txbd, 0, BCM_PAGE_SIZE); 4671 4672 buf_map = udev->l2_buf_map; 4673 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4674 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4675 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4676 4677 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4678 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4679 reg_bd->addr_hi = start_bd->addr_hi; 4680 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 4681 start_bd->nbytes = cpu_to_le16(0x10); 4682 start_bd->nbd = cpu_to_le16(3); 4683 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4684 start_bd->general_data = (UNICAST_ADDRESS << 4685 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 4686 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4687 4688 } 4689 4690 val = (u64) ring_map >> 32; 4691 txbd->next_bd.addr_hi = cpu_to_le32(val); 4692 4693 data->tx.tx_bd_page_base.hi = cpu_to_le32(val); 4694 4695 val = (u64) ring_map & 0xffffffff; 4696 txbd->next_bd.addr_lo = cpu_to_le32(val); 4697 4698 data->tx.tx_bd_page_base.lo = cpu_to_le32(val); 4699 4700 /* Other ramrod params */ 4701 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; 4702 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; 4703 4704 /* reset xstorm per client statistics */ 4705 if (cli < MAX_STAT_COUNTER_ID) { 4706 data->general.statistics_zero_flg = 1; 4707 data->general.statistics_en_flg = 1; 4708 data->general.statistics_counter_id = cli; 4709 } 4710 4711 cp->tx_cons_ptr = 4712 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; 4713 } 4714 4715 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, 4716 struct client_init_ramrod_data *data) 4717 { 4718 struct cnic_local *cp = dev->cnic_priv; 4719 struct cnic_uio_dev *udev = cp->udev; 4720 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4721 BCM_PAGE_SIZE); 4722 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4723 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 4724 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4725 int i; 4726 u32 cli = cp->ethdev->iscsi_l2_client_id; 4727 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4728 u32 val; 4729 dma_addr_t ring_map = udev->l2_ring_map; 4730 4731 /* General data */ 4732 data->general.client_id = cli; 4733 data->general.activate_flg = 1; 4734 data->general.sp_client_id = cli; 4735 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4736 data->general.func_id = cp->pfid; 4737 4738 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4739 dma_addr_t buf_map; 4740 int n = (i % cp->l2_rx_ring_size) + 1; 4741 4742 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4743 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4744 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4745 } 4746 4747 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4748 rxbd->addr_hi = cpu_to_le32(val); 4749 data->rx.bd_page_base.hi = cpu_to_le32(val); 4750 4751 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4752 rxbd->addr_lo = cpu_to_le32(val); 4753 data->rx.bd_page_base.lo = cpu_to_le32(val); 4754 4755 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4756 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4757 rxcqe->addr_hi = cpu_to_le32(val); 4758 data->rx.cqe_page_base.hi = cpu_to_le32(val); 4759 4760 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4761 rxcqe->addr_lo = cpu_to_le32(val); 4762 
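	/* The last element of the single CQE page is a "next page" pointer
	 * that chains the page back to its own base address; the same base
	 * is reported to firmware just below through the client-init ramrod
	 * data. */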
data->rx.cqe_page_base.lo = cpu_to_le32(val); 4763 4764 /* Other ramrod params */ 4765 data->rx.client_qzone_id = cl_qzone_id; 4766 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; 4767 data->rx.status_block_id = BNX2X_DEF_SB_ID; 4768 4769 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; 4770 4771 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); 4772 data->rx.outer_vlan_removal_enable_flg = 1; 4773 data->rx.silent_vlan_removal_flg = 1; 4774 data->rx.silent_vlan_value = 0; 4775 data->rx.silent_vlan_mask = 0xffff; 4776 4777 cp->rx_cons_ptr = 4778 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 4779 cp->rx_cons = *cp->rx_cons_ptr; 4780 } 4781 4782 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4783 { 4784 struct cnic_local *cp = dev->cnic_priv; 4785 u32 pfid = cp->pfid; 4786 4787 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4788 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 4789 cp->kcq1.sw_prod_idx = 0; 4790 4791 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4792 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4793 4794 cp->kcq1.hw_prod_idx_ptr = 4795 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4796 cp->kcq1.status_idx_ptr = 4797 &sb->sb.running_index[SM_RX_ID]; 4798 } else { 4799 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; 4800 4801 cp->kcq1.hw_prod_idx_ptr = 4802 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4803 cp->kcq1.status_idx_ptr = 4804 &sb->sb.running_index[SM_RX_ID]; 4805 } 4806 4807 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4808 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4809 4810 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 4811 USTORM_FCOE_EQ_PROD_OFFSET(pfid); 4812 cp->kcq2.sw_prod_idx = 0; 4813 cp->kcq2.hw_prod_idx_ptr = 4814 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; 4815 cp->kcq2.status_idx_ptr = 4816 &sb->sb.running_index[SM_RX_ID]; 4817 } 4818 } 4819 4820 static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4821 { 4822 struct cnic_local *cp = dev->cnic_priv; 4823 struct cnic_eth_dev *ethdev = cp->ethdev; 4824 int func = CNIC_FUNC(cp), ret; 4825 u32 pfid; 4826 4827 cp->port_mode = CHIP_PORT_MODE_NONE; 4828 4829 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4830 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 4831 4832 if (!(val & 1)) 4833 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 4834 else 4835 val = (val >> 1) & 1; 4836 4837 if (val) { 4838 cp->port_mode = CHIP_4_PORT_MODE; 4839 cp->pfid = func >> 1; 4840 } else { 4841 cp->port_mode = CHIP_2_PORT_MODE; 4842 cp->pfid = func & 0x6; 4843 } 4844 } else { 4845 cp->pfid = func; 4846 } 4847 pfid = cp->pfid; 4848 4849 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4850 cp->iscsi_start_cid, 0); 4851 4852 if (ret) 4853 return -ENOMEM; 4854 4855 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4856 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, 4857 cp->fcoe_start_cid, 0); 4858 4859 if (ret) 4860 return -ENOMEM; 4861 } 4862 4863 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; 4864 4865 cnic_init_bnx2x_kcq(dev); 4866 4867 /* Only 1 EQ */ 4868 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4869 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4870 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); 4871 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4872 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), 4873 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 4874 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4875 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, 4876 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 4877 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4878 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 
0), 4879 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 4880 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4881 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, 4882 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 4883 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4884 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); 4885 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4886 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 4887 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4888 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 4889 HC_INDEX_ISCSI_EQ_CONS); 4890 4891 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4892 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), 4893 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 4894 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4895 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 4896 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4897 4898 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4899 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); 4900 4901 cnic_setup_bnx2x_context(dev); 4902 4903 ret = cnic_init_bnx2x_irq(dev); 4904 if (ret) 4905 return ret; 4906 4907 return 0; 4908 } 4909 4910 static void cnic_init_rings(struct cnic_dev *dev) 4911 { 4912 struct cnic_local *cp = dev->cnic_priv; 4913 struct cnic_uio_dev *udev = cp->udev; 4914 4915 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 4916 return; 4917 4918 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4919 cnic_init_bnx2_tx_ring(dev); 4920 cnic_init_bnx2_rx_ring(dev); 4921 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4922 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4923 u32 cli = cp->ethdev->iscsi_l2_client_id; 4924 u32 cid = cp->ethdev->iscsi_l2_cid; 4925 u32 cl_qzone_id; 4926 struct client_init_ramrod_data *data; 4927 union l5cm_specific_data l5_data; 4928 struct ustorm_eth_rx_producers rx_prods = {0}; 4929 u32 off, i, *cid_ptr; 4930 4931 rx_prods.bd_prod = 0; 4932 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4933 barrier(); 4934 4935 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4936 4937 off = BAR_USTRORM_INTMEM + 4938 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? 
4939 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 4940 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); 4941 4942 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4943 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4944 4945 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4946 4947 data = udev->l2_buf; 4948 cid_ptr = udev->l2_buf + 12; 4949 4950 memset(data, 0, sizeof(*data)); 4951 4952 cnic_init_bnx2x_tx_ring(dev, data); 4953 cnic_init_bnx2x_rx_ring(dev, data); 4954 4955 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 4956 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 4957 4958 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4959 4960 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4961 cid, ETH_CONNECTION_TYPE, &l5_data); 4962 4963 i = 0; 4964 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4965 ++i < 10) 4966 msleep(1); 4967 4968 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4969 netdev_err(dev->netdev, 4970 "iSCSI CLIENT_SETUP did not complete\n"); 4971 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 4972 cnic_ring_ctl(dev, cid, cli, 1); 4973 *cid_ptr = cid; 4974 } 4975 } 4976 4977 static void cnic_shutdown_rings(struct cnic_dev *dev) 4978 { 4979 struct cnic_local *cp = dev->cnic_priv; 4980 struct cnic_uio_dev *udev = cp->udev; 4981 void *rx_ring; 4982 4983 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 4984 return; 4985 4986 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4987 cnic_shutdown_bnx2_rx_ring(dev); 4988 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4989 u32 cli = cp->ethdev->iscsi_l2_client_id; 4990 u32 cid = cp->ethdev->iscsi_l2_cid; 4991 union l5cm_specific_data l5_data; 4992 int i; 4993 4994 cnic_ring_ctl(dev, cid, cli, 0); 4995 4996 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4997 4998 l5_data.phy_address.lo = cli; 4999 l5_data.phy_address.hi = 0; 5000 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 5001 cid, ETH_CONNECTION_TYPE, &l5_data); 5002 i = 0; 5003 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5004 ++i < 10) 5005 msleep(1); 5006 5007 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5008 netdev_err(dev->netdev, 5009 "iSCSI CLIENT_HALT did not complete\n"); 5010 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5011 5012 memset(&l5_data, 0, sizeof(l5_data)); 5013 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5014 cid, NONE_CONNECTION_TYPE, &l5_data); 5015 msleep(10); 5016 } 5017 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5018 rx_ring = udev->l2_ring + BCM_PAGE_SIZE; 5019 memset(rx_ring, 0, BCM_PAGE_SIZE); 5020 } 5021 5022 static int cnic_register_netdev(struct cnic_dev *dev) 5023 { 5024 struct cnic_local *cp = dev->cnic_priv; 5025 struct cnic_eth_dev *ethdev = cp->ethdev; 5026 int err; 5027 5028 if (!ethdev) 5029 return -ENODEV; 5030 5031 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 5032 return 0; 5033 5034 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 5035 if (err) 5036 netdev_err(dev->netdev, "register_cnic failed\n"); 5037 5038 return err; 5039 } 5040 5041 static void cnic_unregister_netdev(struct cnic_dev *dev) 5042 { 5043 struct cnic_local *cp = dev->cnic_priv; 5044 struct cnic_eth_dev *ethdev = cp->ethdev; 5045 5046 if (!ethdev) 5047 return; 5048 5049 ethdev->drv_unregister_cnic(dev->netdev); 5050 } 5051 5052 static int cnic_start_hw(struct cnic_dev *dev) 5053 { 5054 struct cnic_local *cp = dev->cnic_priv; 5055 struct cnic_eth_dev *ethdev = 
cp->ethdev; 5056 int err; 5057 5058 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 5059 return -EALREADY; 5060 5061 dev->regview = ethdev->io_base; 5062 pci_dev_get(dev->pcidev); 5063 cp->func = PCI_FUNC(dev->pcidev->devfn); 5064 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 5065 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 5066 5067 err = cp->alloc_resc(dev); 5068 if (err) { 5069 netdev_err(dev->netdev, "allocate resource failure\n"); 5070 goto err1; 5071 } 5072 5073 err = cp->start_hw(dev); 5074 if (err) 5075 goto err1; 5076 5077 err = cnic_cm_open(dev); 5078 if (err) 5079 goto err1; 5080 5081 set_bit(CNIC_F_CNIC_UP, &dev->flags); 5082 5083 cp->enable_int(dev); 5084 5085 return 0; 5086 5087 err1: 5088 cp->free_resc(dev); 5089 pci_dev_put(dev->pcidev); 5090 return err; 5091 } 5092 5093 static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 5094 { 5095 cnic_disable_bnx2_int_sync(dev); 5096 5097 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 5098 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 5099 5100 cnic_init_context(dev, KWQ_CID); 5101 cnic_init_context(dev, KCQ_CID); 5102 5103 cnic_setup_5709_context(dev, 0); 5104 cnic_free_irq(dev); 5105 5106 cnic_free_resc(dev); 5107 } 5108 5109 5110 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5111 { 5112 struct cnic_local *cp = dev->cnic_priv; 5113 5114 cnic_free_irq(dev); 5115 *cp->kcq1.hw_prod_idx_ptr = 0; 5116 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5117 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5118 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5119 cnic_free_resc(dev); 5120 } 5121 5122 static void cnic_stop_hw(struct cnic_dev *dev) 5123 { 5124 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5125 struct cnic_local *cp = dev->cnic_priv; 5126 int i = 0; 5127 5128 /* Need to wait for the ring shutdown event to complete 5129 * before clearing the CNIC_UP flag. 
5130 */ 5131 while (cp->udev->uio_dev != -1 && i < 15) { 5132 msleep(100); 5133 i++; 5134 } 5135 cnic_shutdown_rings(dev); 5136 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5137 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 5138 synchronize_rcu(); 5139 cnic_cm_shutdown(dev); 5140 cp->stop_hw(dev); 5141 pci_dev_put(dev->pcidev); 5142 } 5143 } 5144 5145 static void cnic_free_dev(struct cnic_dev *dev) 5146 { 5147 int i = 0; 5148 5149 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 5150 msleep(100); 5151 i++; 5152 } 5153 if (atomic_read(&dev->ref_count) != 0) 5154 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 5155 5156 netdev_info(dev->netdev, "Removed CNIC device\n"); 5157 dev_put(dev->netdev); 5158 kfree(dev); 5159 } 5160 5161 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 5162 struct pci_dev *pdev) 5163 { 5164 struct cnic_dev *cdev; 5165 struct cnic_local *cp; 5166 int alloc_size; 5167 5168 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 5169 5170 cdev = kzalloc(alloc_size , GFP_KERNEL); 5171 if (cdev == NULL) { 5172 netdev_err(dev, "allocate dev struct failure\n"); 5173 return NULL; 5174 } 5175 5176 cdev->netdev = dev; 5177 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 5178 cdev->register_device = cnic_register_device; 5179 cdev->unregister_device = cnic_unregister_device; 5180 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 5181 5182 cp = cdev->cnic_priv; 5183 cp->dev = cdev; 5184 cp->l2_single_buf_size = 0x400; 5185 cp->l2_rx_ring_size = 3; 5186 5187 spin_lock_init(&cp->cnic_ulp_lock); 5188 5189 netdev_info(dev, "Added CNIC device\n"); 5190 5191 return cdev; 5192 } 5193 5194 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 5195 { 5196 struct pci_dev *pdev; 5197 struct cnic_dev *cdev; 5198 struct cnic_local *cp; 5199 struct cnic_eth_dev *ethdev = NULL; 5200 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5201 5202 probe = symbol_get(bnx2_cnic_probe); 5203 if (probe) { 5204 ethdev = (*probe)(dev); 5205 symbol_put(bnx2_cnic_probe); 5206 } 5207 if (!ethdev) 5208 return NULL; 5209 5210 pdev = ethdev->pdev; 5211 if (!pdev) 5212 return NULL; 5213 5214 dev_hold(dev); 5215 pci_dev_get(pdev); 5216 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || 5217 pdev->device == PCI_DEVICE_ID_NX2_5709S) && 5218 (pdev->revision < 0x10)) { 5219 pci_dev_put(pdev); 5220 goto cnic_err; 5221 } 5222 pci_dev_put(pdev); 5223 5224 cdev = cnic_alloc_dev(dev, pdev); 5225 if (cdev == NULL) 5226 goto cnic_err; 5227 5228 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 5229 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 5230 5231 cp = cdev->cnic_priv; 5232 cp->ethdev = ethdev; 5233 cdev->pcidev = pdev; 5234 cp->chip_id = ethdev->chip_id; 5235 5236 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5237 5238 cp->cnic_ops = &cnic_bnx2_ops; 5239 cp->start_hw = cnic_start_bnx2_hw; 5240 cp->stop_hw = cnic_stop_bnx2_hw; 5241 cp->setup_pgtbl = cnic_setup_page_tbl; 5242 cp->alloc_resc = cnic_alloc_bnx2_resc; 5243 cp->free_resc = cnic_free_resc; 5244 cp->start_cm = cnic_cm_init_bnx2_hw; 5245 cp->stop_cm = cnic_cm_stop_bnx2_hw; 5246 cp->enable_int = cnic_enable_bnx2_int; 5247 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 5248 cp->close_conn = cnic_close_bnx2_conn; 5249 return cdev; 5250 5251 cnic_err: 5252 dev_put(dev); 5253 return NULL; 5254 } 5255 5256 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 5257 { 5258 struct pci_dev *pdev; 5259 struct cnic_dev *cdev; 5260 struct cnic_local *cp; 5261 struct cnic_eth_dev *ethdev 
= NULL; 5262 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5263 5264 probe = symbol_get(bnx2x_cnic_probe); 5265 if (probe) { 5266 ethdev = (*probe)(dev); 5267 symbol_put(bnx2x_cnic_probe); 5268 } 5269 if (!ethdev) 5270 return NULL; 5271 5272 pdev = ethdev->pdev; 5273 if (!pdev) 5274 return NULL; 5275 5276 dev_hold(dev); 5277 cdev = cnic_alloc_dev(dev, pdev); 5278 if (cdev == NULL) { 5279 dev_put(dev); 5280 return NULL; 5281 } 5282 5283 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 5284 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 5285 5286 cp = cdev->cnic_priv; 5287 cp->ethdev = ethdev; 5288 cdev->pcidev = pdev; 5289 cp->chip_id = ethdev->chip_id; 5290 5291 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5292 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5293 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 5294 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) 5295 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5296 5297 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5298 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; 5299 5300 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); 5301 5302 cp->cnic_ops = &cnic_bnx2x_ops; 5303 cp->start_hw = cnic_start_bnx2x_hw; 5304 cp->stop_hw = cnic_stop_bnx2x_hw; 5305 cp->setup_pgtbl = cnic_setup_page_tbl_le; 5306 cp->alloc_resc = cnic_alloc_bnx2x_resc; 5307 cp->free_resc = cnic_free_resc; 5308 cp->start_cm = cnic_cm_init_bnx2x_hw; 5309 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5310 cp->enable_int = cnic_enable_bnx2x_int; 5311 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5312 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 5313 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5314 else 5315 cp->ack_int = cnic_ack_bnx2x_msix; 5316 cp->close_conn = cnic_close_bnx2x_conn; 5317 return cdev; 5318 } 5319 5320 static struct cnic_dev *is_cnic_dev(struct net_device *dev) 5321 { 5322 struct ethtool_drvinfo drvinfo; 5323 struct cnic_dev *cdev = NULL; 5324 5325 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 5326 memset(&drvinfo, 0, sizeof(drvinfo)); 5327 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 5328 5329 if (!strcmp(drvinfo.driver, "bnx2")) 5330 cdev = init_bnx2_cnic(dev); 5331 if (!strcmp(drvinfo.driver, "bnx2x")) 5332 cdev = init_bnx2x_cnic(dev); 5333 if (cdev) { 5334 write_lock(&cnic_dev_lock); 5335 list_add(&cdev->list, &cnic_dev_list); 5336 write_unlock(&cnic_dev_lock); 5337 } 5338 } 5339 return cdev; 5340 } 5341 5342 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, 5343 u16 vlan_id) 5344 { 5345 int if_type; 5346 5347 rcu_read_lock(); 5348 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5349 struct cnic_ulp_ops *ulp_ops; 5350 void *ctx; 5351 5352 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5353 if (!ulp_ops || !ulp_ops->indicate_netevent) 5354 continue; 5355 5356 ctx = cp->ulp_handle[if_type]; 5357 5358 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5359 } 5360 rcu_read_unlock(); 5361 } 5362 5363 /** 5364 * netdev event handler 5365 */ 5366 static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5367 void *ptr) 5368 { 5369 struct net_device *netdev = ptr; 5370 struct cnic_dev *dev; 5371 int new_dev = 0; 5372 5373 dev = cnic_from_netdev(netdev); 5374 5375 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { 5376 /* Check for the hot-plug device */ 5377 dev = is_cnic_dev(netdev); 5378 if (dev) { 5379 new_dev = 1; 5380 cnic_hold(dev); 5381 } 5382 } 5383 if (dev) { 5384 struct cnic_local *cp = dev->cnic_priv; 5385 5386 if (new_dev) 5387 cnic_ulp_init(dev); 5388 else if (event == 
NETDEV_UNREGISTER) 5389 cnic_ulp_exit(dev); 5390 5391 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { 5392 if (cnic_register_netdev(dev) != 0) { 5393 cnic_put(dev); 5394 goto done; 5395 } 5396 if (!cnic_start_hw(dev)) 5397 cnic_ulp_start(dev); 5398 } 5399 5400 cnic_rcv_netevent(cp, event, 0); 5401 5402 if (event == NETDEV_GOING_DOWN) { 5403 cnic_ulp_stop(dev); 5404 cnic_stop_hw(dev); 5405 cnic_unregister_netdev(dev); 5406 } else if (event == NETDEV_UNREGISTER) { 5407 write_lock(&cnic_dev_lock); 5408 list_del_init(&dev->list); 5409 write_unlock(&cnic_dev_lock); 5410 5411 cnic_put(dev); 5412 cnic_free_dev(dev); 5413 goto done; 5414 } 5415 cnic_put(dev); 5416 } else { 5417 struct net_device *realdev; 5418 u16 vid; 5419 5420 vid = cnic_get_vlan(netdev, &realdev); 5421 if (realdev) { 5422 dev = cnic_from_netdev(realdev); 5423 if (dev) { 5424 vid |= VLAN_TAG_PRESENT; 5425 cnic_rcv_netevent(dev->cnic_priv, event, vid); 5426 cnic_put(dev); 5427 } 5428 } 5429 } 5430 done: 5431 return NOTIFY_DONE; 5432 } 5433 5434 static struct notifier_block cnic_netdev_notifier = { 5435 .notifier_call = cnic_netdev_event 5436 }; 5437 5438 static void cnic_release(void) 5439 { 5440 struct cnic_dev *dev; 5441 struct cnic_uio_dev *udev; 5442 5443 while (!list_empty(&cnic_dev_list)) { 5444 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 5445 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5446 cnic_ulp_stop(dev); 5447 cnic_stop_hw(dev); 5448 } 5449 5450 cnic_ulp_exit(dev); 5451 cnic_unregister_netdev(dev); 5452 list_del_init(&dev->list); 5453 cnic_free_dev(dev); 5454 } 5455 while (!list_empty(&cnic_udev_list)) { 5456 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5457 list); 5458 cnic_free_uio(udev); 5459 } 5460 } 5461 5462 static int __init cnic_init(void) 5463 { 5464 int rc = 0; 5465 5466 pr_info("%s", version); 5467 5468 rc = register_netdevice_notifier(&cnic_netdev_notifier); 5469 if (rc) { 5470 cnic_release(); 5471 return rc; 5472 } 5473 5474 cnic_wq = create_singlethread_workqueue("cnic_wq"); 5475 if (!cnic_wq) { 5476 cnic_release(); 5477 unregister_netdevice_notifier(&cnic_netdev_notifier); 5478 return -ENOMEM; 5479 } 5480 5481 return 0; 5482 } 5483 5484 static void __exit cnic_exit(void) 5485 { 5486 unregister_netdevice_notifier(&cnic_netdev_notifier); 5487 cnic_release(); 5488 destroy_workqueue(cnic_wq); 5489 } 5490 5491 module_init(cnic_init); 5492 module_exit(cnic_exit); 5493
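/* Lifecycle note: cnic_init() only registers the netdevice notifier and
 * creates cnic_wq.  cnic_dev instances are created on demand from
 * cnic_netdev_event() when a bnx2/bnx2x net_device registers or is already
 * running, and are torn down either from the same notifier on
 * NETDEV_UNREGISTER or by cnic_release() at module unload.
 */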