1 /* 2 * Copyright (c) 2011-2012 Qlogic Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qla_hw.c 30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 31 * Content: Contains Hardware dependant functions 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "qla_os.h" 38 #include "qla_reg.h" 39 #include "qla_hw.h" 40 #include "qla_def.h" 41 #include "qla_inline.h" 42 #include "qla_ver.h" 43 #include "qla_glbl.h" 44 #include "qla_dbg.h" 45 46 static uint32_t sysctl_num_rds_rings = 2; 47 static uint32_t sysctl_num_sds_rings = 4; 48 49 /* 50 * Static Functions 51 */ 52 53 static void qla_init_cntxt_regions(qla_host_t *ha); 54 static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp); 55 static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size); 56 static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, 57 uint16_t cntxt_id, uint32_t add_multi); 58 static void qla_del_rcv_cntxt(qla_host_t *ha); 59 static int qla_init_rcv_cntxt(qla_host_t *ha); 60 static void qla_del_xmt_cntxt(qla_host_t *ha); 61 static int qla_init_xmt_cntxt(qla_host_t *ha); 62 static int qla_get_max_rds(qla_host_t *ha); 63 static int qla_get_max_sds(qla_host_t *ha); 64 static int qla_get_max_rules(qla_host_t *ha); 65 static int qla_get_max_rcv_cntxts(qla_host_t *ha); 66 static int qla_get_max_tx_cntxts(qla_host_t *ha); 67 static int qla_get_max_mtu(qla_host_t *ha); 68 static int qla_get_max_lro(qla_host_t *ha); 69 static int qla_get_flow_control(qla_host_t *ha); 70 static void qla_hw_tx_done_locked(qla_host_t *ha); 71 72 int 73 qla_get_msix_count(qla_host_t *ha) 74 { 75 return (sysctl_num_sds_rings); 76 } 77 78 /* 79 * Name: qla_hw_add_sysctls 80 * Function: Add P3Plus specific sysctls 81 */ 82 void 83 qla_hw_add_sysctls(qla_host_t *ha) 84 { 85 device_t dev; 86 87 dev = ha->pci_dev; 88 89 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 90 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 91 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings, 92 sysctl_num_rds_rings, "Number of Rcv Descriptor Rings"); 93 94 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 95 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 96 OID_AUTO, "num_sds_rings", 
CTLFLAG_RD, &sysctl_num_sds_rings, 97 sysctl_num_sds_rings, "Number of Status Descriptor Rings"); 98 } 99 100 /* 101 * Name: qla_free_dma 102 * Function: Frees the DMA'able memory allocated in qla_alloc_dma() 103 */ 104 void 105 qla_free_dma(qla_host_t *ha) 106 { 107 uint32_t i; 108 109 if (ha->hw.dma_buf.flags.context) { 110 qla_free_dmabuf(ha, &ha->hw.dma_buf.context); 111 ha->hw.dma_buf.flags.context = 0; 112 } 113 114 if (ha->hw.dma_buf.flags.sds_ring) { 115 for (i = 0; i < ha->hw.num_sds_rings; i++) 116 qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); 117 ha->hw.dma_buf.flags.sds_ring = 0; 118 } 119 120 if (ha->hw.dma_buf.flags.rds_ring) { 121 for (i = 0; i < ha->hw.num_rds_rings; i++) 122 qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); 123 ha->hw.dma_buf.flags.rds_ring = 0; 124 } 125 126 if (ha->hw.dma_buf.flags.tx_ring) { 127 qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); 128 ha->hw.dma_buf.flags.tx_ring = 0; 129 } 130 } 131 132 /* 133 * Name: qla_alloc_dma 134 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. 135 */ 136 int 137 qla_alloc_dma(qla_host_t *ha) 138 { 139 device_t dev; 140 uint32_t i, j, size; 141 142 dev = ha->pci_dev; 143 144 QL_DPRINT2((dev, "%s: enter\n", __func__)); 145 146 ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings; 147 ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings; 148 149 /* 150 * Allocate Transmit Ring 151 */ 152 153 ha->hw.dma_buf.tx_ring.alignment = 8; 154 ha->hw.dma_buf.tx_ring.size = 155 (sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS; 156 157 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) { 158 device_printf(dev, "%s: tx ring alloc failed\n", __func__); 159 goto qla_alloc_dma_exit; 160 } 161 ha->hw.dma_buf.flags.tx_ring = 1; 162 163 QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n", 164 __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr), 165 ha->hw.dma_buf.tx_ring.dma_b)); 166 /* 167 * Allocate Receive Descriptor Rings 168 */ 169 170 for (i = 0; i < ha->hw.num_rds_rings; i++) { 171 ha->hw.dma_buf.rds_ring[i].alignment = 8; 172 173 if (i == RDS_RING_INDEX_NORMAL) { 174 ha->hw.dma_buf.rds_ring[i].size = 175 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; 176 } else if (i == RDS_RING_INDEX_JUMBO) { 177 ha->hw.dma_buf.rds_ring[i].size = 178 (sizeof(q80_recv_desc_t)) * 179 NUM_RX_JUMBO_DESCRIPTORS; 180 } else 181 break; 182 183 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) { 184 QL_DPRINT4((dev, "%s: rds ring alloc failed\n", 185 __func__)); 186 187 for (j = 0; j < i; j++) 188 qla_free_dmabuf(ha, 189 &ha->hw.dma_buf.rds_ring[j]); 190 191 goto qla_alloc_dma_exit; 192 } 193 QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n", 194 __func__, i, 195 (void *)(ha->hw.dma_buf.rds_ring[i].dma_addr), 196 ha->hw.dma_buf.rds_ring[i].dma_b)); 197 } 198 ha->hw.dma_buf.flags.rds_ring = 1; 199 200 /* 201 * Allocate Status Descriptor Rings 202 */ 203 204 for (i = 0; i < ha->hw.num_sds_rings; i++) { 205 ha->hw.dma_buf.sds_ring[i].alignment = 8; 206 ha->hw.dma_buf.sds_ring[i].size = 207 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; 208 209 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) { 210 device_printf(dev, "%s: sds ring alloc failed\n", 211 __func__); 212 213 for (j = 0; j < i; j++) 214 qla_free_dmabuf(ha, 215 &ha->hw.dma_buf.sds_ring[j]); 216 217 goto qla_alloc_dma_exit; 218 } 219 QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n", 220 __func__, i, 221 (void *)(ha->hw.dma_buf.sds_ring[i].dma_addr), 222 ha->hw.dma_buf.sds_ring[i].dma_b)); 223 } 224 ha->hw.dma_buf.flags.sds_ring = 1; 225 226 /* 227 * Allocate 
Context Area 228 */ 229 size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN); 230 231 size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN); 232 233 size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN); 234 235 size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN); 236 237 size += sizeof (uint32_t); /* for tx consumer index */ 238 239 size = QL_ALIGN(size, PAGE_SIZE); 240 241 ha->hw.dma_buf.context.alignment = 8; 242 ha->hw.dma_buf.context.size = size; 243 244 if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) { 245 device_printf(dev, "%s: context alloc failed\n", __func__); 246 goto qla_alloc_dma_exit; 247 } 248 ha->hw.dma_buf.flags.context = 1; 249 QL_DPRINT2((dev, "%s: context phys %p virt %p\n", 250 __func__, (void *)(ha->hw.dma_buf.context.dma_addr), 251 ha->hw.dma_buf.context.dma_b)); 252 253 qla_init_cntxt_regions(ha); 254 255 return 0; 256 257 qla_alloc_dma_exit: 258 qla_free_dma(ha); 259 return -1; 260 } 261 262 /* 263 * Name: qla_init_cntxt_regions 264 * Function: Initializes Tx/Rx Contexts. 265 */ 266 static void 267 qla_init_cntxt_regions(qla_host_t *ha) 268 { 269 qla_hw_t *hw; 270 q80_tx_cntxt_req_t *tx_cntxt_req; 271 q80_rcv_cntxt_req_t *rx_cntxt_req; 272 bus_addr_t phys_addr; 273 uint32_t i; 274 device_t dev; 275 uint32_t size; 276 277 dev = ha->pci_dev; 278 279 hw = &ha->hw; 280 281 hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b; 282 283 for (i = 0; i < ha->hw.num_sds_rings; i++) 284 hw->sds[i].sds_ring_base = 285 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; 286 287 288 phys_addr = hw->dma_buf.context.dma_addr; 289 290 memset((void *)hw->dma_buf.context.dma_b, 0, 291 ha->hw.dma_buf.context.size); 292 293 hw->tx_cntxt_req = 294 (q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b; 295 hw->tx_cntxt_req_paddr = phys_addr; 296 297 size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN); 298 299 hw->tx_cntxt_rsp = 300 (q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size); 301 hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size; 302 303 size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN); 304 305 hw->rx_cntxt_req = 306 (q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size); 307 hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size; 308 309 size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN); 310 311 hw->rx_cntxt_rsp = 312 (q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size); 313 hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size; 314 315 size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN); 316 317 hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size); 318 hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size; 319 320 /* 321 * Initialize the Transmit Context Request so that we don't need to 322 * do it everytime we need to create a context 323 */ 324 tx_cntxt_req = hw->tx_cntxt_req; 325 326 tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr); 327 328 tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr); 329 330 tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW | 331 CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO)); 332 333 tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED); 334 335 tx_cntxt_req->phys_addr = 336 qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr); 337 338 tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS); 339 340 /* 341 * Initialize the Receive Context Request 342 */ 343 344 rx_cntxt_req = hw->rx_cntxt_req; 345 346 rx_cntxt_req->rx_req.rsp_dma_addr = 347 
qla_host_to_le64(hw->rx_cntxt_rsp_paddr); 348 349 rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW | 350 CNTXT_CAP0_LEGACY_MN | 351 CNTXT_CAP0_JUMBO | 352 CNTXT_CAP0_LRO| 353 CNTXT_CAP0_HW_LRO); 354 355 rx_cntxt_req->rx_req.intr_mode = 356 qla_host_to_le32(CNTXT_INTR_MODE_SHARED); 357 358 rx_cntxt_req->rx_req.rds_intr_mode = 359 qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE); 360 361 rx_cntxt_req->rx_req.rds_ring_offset = 0; 362 rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32( 363 (hw->num_rds_rings * sizeof(q80_rq_rds_ring_t))); 364 rx_cntxt_req->rx_req.num_rds_rings = 365 qla_host_to_le16(hw->num_rds_rings); 366 rx_cntxt_req->rx_req.num_sds_rings = 367 qla_host_to_le16(hw->num_sds_rings); 368 369 for (i = 0; i < hw->num_rds_rings; i++) { 370 rx_cntxt_req->rds_req[i].phys_addr = 371 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); 372 373 if (i == RDS_RING_INDEX_NORMAL) { 374 rx_cntxt_req->rds_req[i].buf_size = 375 qla_host_to_le64(MCLBYTES); 376 rx_cntxt_req->rds_req[i].size = 377 qla_host_to_le32(NUM_RX_DESCRIPTORS); 378 } else { 379 rx_cntxt_req->rds_req[i].buf_size = 380 qla_host_to_le64(MJUM9BYTES); 381 rx_cntxt_req->rds_req[i].size = 382 qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS); 383 } 384 } 385 386 for (i = 0; i < hw->num_sds_rings; i++) { 387 rx_cntxt_req->sds_req[i].phys_addr = 388 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); 389 rx_cntxt_req->sds_req[i].size = 390 qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 391 rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i); 392 } 393 394 QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n", 395 __func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr)); 396 QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n", 397 __func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr)); 398 QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n", 399 __func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr)); 400 QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n", 401 __func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr)); 402 QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n", 403 __func__, hw->tx_cons, (void *)hw->tx_cons_paddr)); 404 } 405 406 /* 407 * Name: qla_issue_cmd 408 * Function: Issues commands on the CDRP interface and returns responses. 
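 *
 *	The CDRP handshake implemented below is: acquire the SEM5 hardware
 *	semaphore, write a signature derived from the PCI function into
 *	Q8_NX_CDRP_SIGNATURE, load the three command arguments, write the
 *	command code to Q8_NX_CDRP_CMD_RSP and poll that register (10ms
 *	intervals, 4 second limit) until bit 31 clears. A response value of 1
 *	indicates success; the response arguments are then read back from
 *	Q8_NX_CDRP_ARG1..ARG3. See the compiled-out usage sketch at the end
 *	of this file.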
409 */ 410 static int 411 qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp) 412 { 413 int ret = 0; 414 uint32_t signature; 415 uint32_t count = 400; /* 4 seconds or 400 10ms intervals */ 416 uint32_t data; 417 device_t dev; 418 419 dev = ha->pci_dev; 420 421 signature = 0xcafe0000 | 0x0100 | ha->pci_func; 422 423 ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func); 424 425 if (ret) { 426 device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__); 427 return (ret); 428 } 429 430 WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature); 431 432 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1)); 433 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2)); 434 WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3)); 435 436 WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd); 437 438 while (count) { 439 qla_mdelay(__func__, 10); 440 441 data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP); 442 443 if ((!(data & 0x80000000))) 444 break; 445 count--; 446 } 447 if ((!count) || (data != 1)) 448 ret = -1; 449 450 cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP); 451 cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1); 452 cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2); 453 cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3); 454 455 qla_sem_unlock(ha, Q8_SEM5_UNLOCK); 456 457 if (ret) { 458 device_printf(dev, "%s: " 459 "cmd[0x%08x] = 0x%08x\n" 460 "\tsig[0x%08x] = 0x%08x\n" 461 "\targ1[0x%08x] = 0x%08x\n" 462 "\targ2[0x%08x] = 0x%08x\n" 463 "\targ3[0x%08x] = 0x%08x\n", 464 __func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd, 465 Q8_NX_CDRP_SIGNATURE, signature, 466 Q8_NX_CDRP_ARG1, cdrp->cmd_arg1, 467 Q8_NX_CDRP_ARG2, cdrp->cmd_arg2, 468 Q8_NX_CDRP_ARG3, cdrp->cmd_arg3); 469 470 device_printf(dev, "%s: exit (ret = 0x%x)\n" 471 "\t\t rsp = 0x%08x\n" 472 "\t\t arg1 = 0x%08x\n" 473 "\t\t arg2 = 0x%08x\n" 474 "\t\t arg3 = 0x%08x\n", 475 __func__, ret, cdrp->rsp, 476 cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3); 477 } 478 479 return (ret); 480 } 481 482 #define QLA_TX_MIN_FREE 2 483 484 /* 485 * Name: qla_fw_cmd 486 * Function: Issues firmware control commands on the Tx Ring. 487 */ 488 static int 489 qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size) 490 { 491 device_t dev; 492 q80_tx_cmd_t *tx_cmd; 493 qla_hw_t *hw = &ha->hw; 494 int count = 100; 495 496 dev = ha->pci_dev; 497 498 QLA_TX_LOCK(ha); 499 500 if (hw->txr_free <= QLA_TX_MIN_FREE) { 501 while (count--) { 502 qla_hw_tx_done_locked(ha); 503 if (hw->txr_free > QLA_TX_MIN_FREE) 504 break; 505 506 QLA_TX_UNLOCK(ha); 507 qla_mdelay(__func__, 10); 508 QLA_TX_LOCK(ha); 509 } 510 if (hw->txr_free <= QLA_TX_MIN_FREE) { 511 QLA_TX_UNLOCK(ha); 512 device_printf(dev, "%s: xmit queue full\n", __func__); 513 return (-1); 514 } 515 } 516 tx_cmd = &hw->tx_ring_base[hw->txr_next]; 517 518 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 519 520 bcopy(fw_cmd, tx_cmd, size); 521 522 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); 523 hw->txr_free--; 524 525 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next); 526 527 QLA_TX_UNLOCK(ha); 528 529 return (0); 530 } 531 532 /* 533 * Name: qla_config_rss 534 * Function: Configure RSS for the context/interface. 
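 *
 *	The request below enables hashing of IPv4 and IPv6 TCP flows,
 *	programs the 40-byte hash key from rss_key[] and sets an indirection
 *	table mask of 0x7 (eight table entries).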
535 */ 536 const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 537 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 538 0x255b0ec26d5a56daULL }; 539 540 static int 541 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) 542 { 543 qla_fw_cds_config_rss_t rss_config; 544 int ret, i; 545 546 bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t)); 547 548 rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ; 549 rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS; 550 rss_config.hdr.cntxt_id = cntxt_id; 551 552 rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP | 553 Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP); 554 rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS; 555 556 rss_config.ind_tbl_mask = 0x7; 557 558 for (i = 0; i < 5; i++) 559 rss_config.rss_key[i] = rss_key[i]; 560 561 ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t)); 562 563 return ret; 564 } 565 566 /* 567 * Name: qla_config_intr_coalesce 568 * Function: Configure Interrupt Coalescing. 569 */ 570 static int 571 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable) 572 { 573 qla_fw_cds_config_intr_coalesc_t intr_coalesce; 574 int ret; 575 576 bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t)); 577 578 intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ; 579 intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING; 580 intr_coalesce.hdr.cntxt_id = cntxt_id; 581 582 intr_coalesce.flags = 0x04; 583 intr_coalesce.max_rcv_pkts = 256; 584 intr_coalesce.max_rcv_usecs = 3; 585 intr_coalesce.max_snd_pkts = 64; 586 intr_coalesce.max_snd_usecs = 4; 587 588 if (tenable) { 589 intr_coalesce.usecs_to = 1000; /* 1 millisecond */ 590 intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC; 591 intr_coalesce.sds_ring_bitmask = 592 Q8_FWCMD_INTR_COALESC_SDS_RING_0; 593 } 594 595 ret = qla_fw_cmd(ha, &intr_coalesce, 596 sizeof(qla_fw_cds_config_intr_coalesc_t)); 597 598 return ret; 599 } 600 601 602 /* 603 * Name: qla_config_mac_addr 604 * Function: binds a MAC address to the context/interface. 605 * Can be unicast, multicast or broadcast. 606 */ 607 static int 608 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id, 609 uint32_t add_multi) 610 { 611 qla_fw_cds_config_mac_addr_t mac_config; 612 int ret; 613 614 // device_printf(ha->pci_dev, 615 // "%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 616 // mac_addr[0], mac_addr[1], mac_addr[2], 617 // mac_addr[3], mac_addr[4], mac_addr[5]); 618 619 bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t)); 620 621 mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ; 622 mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR; 623 mac_config.hdr.cntxt_id = cntxt_id; 624 625 if (add_multi) 626 mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR; 627 else 628 mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR; 629 bcopy(mac_addr, mac_config.mac_addr,6); 630 631 ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t)); 632 633 return ret; 634 } 635 636 637 /* 638 * Name: qla_set_mac_rcv_mode 639 * Function: Enable/Disable AllMulticast and Promiscous Modes. 
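 *
 *	Used by qla_set_promisc(), qla_set_allmulti() and
 *	qla_reset_promisc_allmulti() below, which pass the receive context id
 *	from the receive context response along with the desired
 *	Q8_MAC_RCV_* mode.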
640 */ 641 static int 642 qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode) 643 { 644 qla_set_mac_rcv_mode_t rcv_mode; 645 int ret; 646 647 bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t)); 648 649 rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ; 650 rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE; 651 rcv_mode.hdr.cntxt_id = cntxt_id; 652 653 rcv_mode.mode = mode; 654 655 ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t)); 656 657 return ret; 658 } 659 660 void 661 qla_set_promisc(qla_host_t *ha) 662 { 663 (void)qla_set_mac_rcv_mode(ha, 664 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 665 Q8_MAC_RCV_ENABLE_PROMISCUOUS); 666 } 667 668 void 669 qla_set_allmulti(qla_host_t *ha) 670 { 671 (void)qla_set_mac_rcv_mode(ha, 672 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 673 Q8_MAC_RCV_ENABLE_ALLMULTI); 674 } 675 676 void 677 qla_reset_promisc_allmulti(qla_host_t *ha) 678 { 679 (void)qla_set_mac_rcv_mode(ha, 680 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 681 Q8_MAC_RCV_RESET_PROMISC_ALLMULTI); 682 } 683 684 /* 685 * Name: qla_config_ipv4_addr 686 * Function: Configures the Destination IP Addr for LRO. 687 */ 688 void 689 qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr) 690 { 691 qla_config_ipv4_t ip_conf; 692 693 bzero(&ip_conf, sizeof(qla_config_ipv4_t)); 694 695 ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ; 696 ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR; 697 ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id; 698 699 ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE; 700 ip_conf.ipv4_addr = (uint64_t)ipv4_addr; 701 702 (void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t)); 703 704 return; 705 } 706 707 /* 708 * Name: qla_tx_tso 709 * Function: Checks if the packet to be transmitted is a candidate for 710 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 711 * Ring Structure are plugged in. 
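 *
 *	Returns 0 if the frame is a TSO candidate and the complete header is
 *	resident in the first mbuf, 1 if it is a candidate but the header had
 *	to be copied into 'hdr' (the caller must then copy it into the
 *	additional tx descriptors), and -1 if the frame cannot be offloaded.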
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	uint8_t *tcp_opt;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:

		tcp_opt_off = ehdrlen + sizeof(struct ip) +
				sizeof(struct tcphdr);

		if (mp->m_len < tcp_opt_off) {
			m_copydata(mp, 0, tcp_opt_off, hdr);
			ip = (struct ip *)hdr;
		} else {
			ip = (struct ip *)(mp->m_data + ehdrlen);
		}

		ip_hlen = ip->ip_hl << 2;
		opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

		if ((ip->ip_p != IPPROTO_TCP) ||
			(ip_hlen != sizeof (struct ip))) {
			offload = 0;
		} else {
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		}
		break;

	default:
		QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
		offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;

	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) {

		/*
		 * If TCP options are present, only the time stamp option
		 * is supported.
		 */
		if ((tcp_hlen - sizeof(struct tcphdr)) != 10)
			return -1;
		else {

			if (mp->m_len < hdrlen) {
				tcp_opt = &hdr[tcp_opt_off];
			} else {
				tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off);
			}

			if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) ||
				(*(tcp_opt + 2) != 0x08) ||
				(*(tcp_opt + 3) != 10)) {
				return -1;
			}
		}

		tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen;
	} else {
		tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	}

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->ip_hdr_off = ehdrlen;
	tx_cmd->total_hdr_len = hdrlen;

	/* multicast destination: least significant bit of the MSB is set */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		return (1);
	}

	return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
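 *
 *	The frame must carry CSUM_TCP or CSUM_UDP in csum_flags and the IP
 *	(or IPv6) header must be resident in the first mbuf; otherwise -1 is
 *	returned and the caller transmits the frame as a plain Ethernet
 *	frame.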
833 */ 834 static int 835 qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd) 836 { 837 struct ether_vlan_header *eh; 838 struct ip *ip; 839 struct ip6_hdr *ip6; 840 uint32_t ehdrlen, ip_hlen; 841 uint16_t etype, opcode, offload = 1; 842 device_t dev; 843 844 dev = ha->pci_dev; 845 846 if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0) 847 return (-1); 848 849 eh = mtod(mp, struct ether_vlan_header *); 850 851 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 852 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 853 etype = ntohs(eh->evl_proto); 854 } else { 855 ehdrlen = ETHER_HDR_LEN; 856 etype = ntohs(eh->evl_encap_proto); 857 } 858 859 860 switch (etype) { 861 case ETHERTYPE_IP: 862 ip = (struct ip *)(mp->m_data + ehdrlen); 863 864 ip_hlen = sizeof (struct ip); 865 866 if (mp->m_len < (ehdrlen + ip_hlen)) { 867 device_printf(dev, "%s: ipv4 mlen\n", __func__); 868 offload = 0; 869 break; 870 } 871 872 if (ip->ip_p == IPPROTO_TCP) 873 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; 874 else if (ip->ip_p == IPPROTO_UDP) 875 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; 876 else { 877 device_printf(dev, "%s: ipv4\n", __func__); 878 offload = 0; 879 } 880 break; 881 882 case ETHERTYPE_IPV6: 883 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 884 885 ip_hlen = sizeof(struct ip6_hdr); 886 887 if (mp->m_len < (ehdrlen + ip_hlen)) { 888 device_printf(dev, "%s: ipv6 mlen\n", __func__); 889 offload = 0; 890 break; 891 } 892 893 if (ip6->ip6_nxt == IPPROTO_TCP) 894 opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; 895 else if (ip6->ip6_nxt == IPPROTO_UDP) 896 opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; 897 else { 898 device_printf(dev, "%s: ipv6\n", __func__); 899 offload = 0; 900 } 901 break; 902 903 default: 904 offload = 0; 905 break; 906 } 907 if (!offload) 908 return (-1); 909 910 tx_cmd->flags_opcode = opcode; 911 912 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 913 914 return (0); 915 } 916 917 /* 918 * Name: qla_hw_send 919 * Function: Transmits a packet. It first checks if the packet is a 920 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum 921 * offload. If either of these creteria are not met, it is transmitted 922 * as a regular ethernet frame. 
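 *
 *	Each tx command descriptor carries at most Q8_TX_CMD_MAX_SEGMENTS
 *	DMA segments; for TSO the full MAC/IP/TCP header is additionally
 *	copied into the tx command descriptors that immediately follow the
 *	data descriptors, so num_tx_cmds accounts for both.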
923 */ 924 int 925 qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, 926 uint32_t *tx_idx, struct mbuf *mp) 927 { 928 struct ether_vlan_header *eh; 929 qla_hw_t *hw = &ha->hw; 930 q80_tx_cmd_t *tx_cmd, tso_cmd; 931 bus_dma_segment_t *c_seg; 932 uint32_t num_tx_cmds, hdr_len = 0; 933 uint32_t total_length = 0, bytes, tx_cmd_count = 0; 934 device_t dev; 935 int i, ret; 936 uint8_t *src = NULL, *dst = NULL; 937 938 dev = ha->pci_dev; 939 940 /* 941 * Always make sure there is atleast one empty slot in the tx_ring 942 * tx_ring is considered full when there only one entry available 943 */ 944 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; 945 946 total_length = mp->m_pkthdr.len; 947 if (total_length > QLA_MAX_TSO_FRAME_SIZE) { 948 device_printf(dev, "%s: total length exceeds maxlen(%d)\n", 949 __func__, total_length); 950 return (-1); 951 } 952 eh = mtod(mp, struct ether_vlan_header *); 953 954 if ((mp->m_pkthdr.len > ha->max_frame_size)||(nsegs > Q8_TX_MAX_SEGMENTS)) { 955 956 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); 957 958 src = ha->hw.frame_hdr; 959 ret = qla_tx_tso(ha, mp, &tso_cmd, src); 960 961 if (!(ret & ~1)) { 962 /* find the additional tx_cmd descriptors required */ 963 964 hdr_len = tso_cmd.total_hdr_len; 965 966 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 967 bytes = QL_MIN(bytes, hdr_len); 968 969 num_tx_cmds++; 970 hdr_len -= bytes; 971 972 while (hdr_len) { 973 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 974 hdr_len -= bytes; 975 num_tx_cmds++; 976 } 977 hdr_len = tso_cmd.total_hdr_len; 978 979 if (ret == 0) 980 src = (uint8_t *)eh; 981 } 982 } 983 984 if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { 985 qla_hw_tx_done_locked(ha); 986 if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { 987 QL_DPRINT8((dev, "%s: (hw->txr_free <= " 988 "(num_tx_cmds + QLA_TX_MIN_FREE))\n", 989 __func__)); 990 return (-1); 991 } 992 } 993 994 *tx_idx = hw->txr_next; 995 996 tx_cmd = &hw->tx_ring_base[hw->txr_next]; 997 998 if (hdr_len == 0) { 999 if ((nsegs > Q8_TX_MAX_SEGMENTS) || 1000 (mp->m_pkthdr.len > ha->max_frame_size)){ 1001 /* TBD: copy into private buffer and send it */ 1002 device_printf(dev, 1003 "%s: (nsegs[%d, %d, 0x%x] > Q8_TX_MAX_SEGMENTS)\n", 1004 __func__, nsegs, mp->m_pkthdr.len, 1005 mp->m_pkthdr.csum_flags); 1006 qla_dump_buf8(ha, "qla_hw_send: wrong pkt", 1007 mtod(mp, char *), mp->m_len); 1008 return (EINVAL); 1009 } 1010 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 1011 if (qla_tx_chksum(ha, mp, tx_cmd) != 0) 1012 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; 1013 } else { 1014 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); 1015 } 1016 1017 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 1018 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; 1019 else if (mp->m_flags & M_VLANTAG) { 1020 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | 1021 Q8_TX_CMD_FLAGS_HW_VLAN_ID); 1022 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; 1023 } 1024 1025 1026 tx_cmd->n_bufs = (uint8_t)nsegs; 1027 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); 1028 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); 1029 tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); 1030 1031 c_seg = segs; 1032 1033 while (1) { 1034 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { 1035 1036 switch (i) { 1037 case 0: 1038 tx_cmd->buf1_addr = c_seg->ds_addr; 1039 tx_cmd->buf1_len = c_seg->ds_len; 1040 break; 1041 1042 case 1: 1043 tx_cmd->buf2_addr = c_seg->ds_addr; 1044 tx_cmd->buf2_len = c_seg->ds_len; 1045 
break; 1046 1047 case 2: 1048 tx_cmd->buf3_addr = c_seg->ds_addr; 1049 tx_cmd->buf3_len = c_seg->ds_len; 1050 break; 1051 1052 case 3: 1053 tx_cmd->buf4_addr = c_seg->ds_addr; 1054 tx_cmd->buf4_len = c_seg->ds_len; 1055 break; 1056 } 1057 1058 c_seg++; 1059 nsegs--; 1060 } 1061 1062 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); 1063 tx_cmd_count++; 1064 1065 if (!nsegs) 1066 break; 1067 1068 tx_cmd = &hw->tx_ring_base[hw->txr_next]; 1069 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 1070 } 1071 1072 if (hdr_len) { 1073 /* TSO : Copy the header in the following tx cmd descriptors */ 1074 1075 tx_cmd = &hw->tx_ring_base[hw->txr_next]; 1076 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 1077 1078 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 1079 bytes = QL_MIN(bytes, hdr_len); 1080 1081 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; 1082 1083 if (mp->m_flags & M_VLANTAG) { 1084 /* first copy the src/dst MAC addresses */ 1085 bcopy(src, dst, (ETHER_ADDR_LEN * 2)); 1086 dst += (ETHER_ADDR_LEN * 2); 1087 src += (ETHER_ADDR_LEN * 2); 1088 1089 hdr_len -= (ETHER_ADDR_LEN * 2); 1090 1091 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); 1092 dst += 2; 1093 *((uint16_t *)dst) = mp->m_pkthdr.ether_vtag; 1094 dst += 2; 1095 1096 bytes -= ((ETHER_ADDR_LEN * 2) + 4); 1097 1098 bcopy(src, dst, bytes); 1099 src += bytes; 1100 hdr_len -= bytes; 1101 } else { 1102 bcopy(src, dst, bytes); 1103 src += bytes; 1104 hdr_len -= bytes; 1105 } 1106 1107 hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); 1108 tx_cmd_count++; 1109 1110 while (hdr_len) { 1111 tx_cmd = &hw->tx_ring_base[hw->txr_next]; 1112 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 1113 1114 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 1115 1116 bcopy(src, tx_cmd, bytes); 1117 src += bytes; 1118 hdr_len -= bytes; 1119 hw->txr_next = 1120 (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); 1121 tx_cmd_count++; 1122 } 1123 } 1124 1125 hw->txr_free = hw->txr_free - tx_cmd_count; 1126 1127 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next); 1128 QL_DPRINT8((dev, "%s: return\n", __func__)); 1129 return (0); 1130 } 1131 1132 /* 1133 * Name: qla_del_hw_if 1134 * Function: Destroys the hardware specific entities corresponding to an 1135 * Ethernet Interface 1136 */ 1137 void 1138 qla_del_hw_if(qla_host_t *ha) 1139 { 1140 int i; 1141 1142 for (i = 0; i < ha->hw.num_sds_rings; i++) 1143 QL_DISABLE_INTERRUPTS(ha, i); 1144 1145 qla_del_rcv_cntxt(ha); 1146 qla_del_xmt_cntxt(ha); 1147 1148 ha->hw.flags.lro = 0; 1149 } 1150 1151 /* 1152 * Name: qla_init_hw_if 1153 * Function: Creates the hardware specific entities corresponding to an 1154 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address 1155 * corresponding to the interface. Enables LRO if allowed. 
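 *
 *	The sequence below is: create the receive context, post the initial
 *	RDS producer indices, create the transmit context, program the
 *	unicast and broadcast MAC addresses, configure RSS and interrupt
 *	coalescing, and finally enable interrupts on every SDS ring.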
1156 */ 1157 int 1158 qla_init_hw_if(qla_host_t *ha) 1159 { 1160 device_t dev; 1161 int i; 1162 uint8_t bcast_mac[6]; 1163 1164 qla_get_hw_caps(ha); 1165 1166 dev = ha->pci_dev; 1167 1168 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1169 bzero(ha->hw.dma_buf.sds_ring[i].dma_b, 1170 ha->hw.dma_buf.sds_ring[i].size); 1171 } 1172 /* 1173 * Create Receive Context 1174 */ 1175 if (qla_init_rcv_cntxt(ha)) { 1176 return (-1); 1177 } 1178 1179 ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2; 1180 ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2; 1181 ha->hw.rx_in = ha->hw.rxj_in = 0; 1182 1183 /* Update the RDS Producer Indices */ 1184 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next); 1185 QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next); 1186 1187 /* 1188 * Create Transmit Context 1189 */ 1190 if (qla_init_xmt_cntxt(ha)) { 1191 qla_del_rcv_cntxt(ha); 1192 return (-1); 1193 } 1194 1195 qla_config_mac_addr(ha, ha->hw.mac_addr, 1196 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1); 1197 1198 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 1199 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 1200 qla_config_mac_addr(ha, bcast_mac, 1201 (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1); 1202 1203 qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); 1204 1205 qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0); 1206 1207 for (i = 0; i < ha->hw.num_sds_rings; i++) 1208 QL_ENABLE_INTERRUPTS(ha, i); 1209 1210 return (0); 1211 } 1212 1213 /* 1214 * Name: qla_init_rcv_cntxt 1215 * Function: Creates the Receive Context. 1216 */ 1217 static int 1218 qla_init_rcv_cntxt(qla_host_t *ha) 1219 { 1220 device_t dev; 1221 qla_cdrp_t cdrp; 1222 q80_rcv_cntxt_rsp_t *rsp; 1223 q80_stat_desc_t *sdesc; 1224 bus_addr_t phys_addr; 1225 int i, j; 1226 qla_hw_t *hw = &ha->hw; 1227 1228 dev = ha->pci_dev; 1229 1230 /* 1231 * Create Receive Context 1232 */ 1233 1234 for (i = 0; i < hw->num_sds_rings; i++) { 1235 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; 1236 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { 1237 sdesc->data[0] = 1238 Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW); 1239 } 1240 } 1241 1242 phys_addr = ha->hw.rx_cntxt_req_paddr; 1243 1244 bzero(&cdrp, sizeof(qla_cdrp_t)); 1245 1246 cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT; 1247 cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32); 1248 cdrp.cmd_arg2 = (uint32_t)(phys_addr); 1249 cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t)); 1250 1251 if (qla_issue_cmd(ha, &cdrp)) { 1252 device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n", 1253 __func__); 1254 return (-1); 1255 } else { 1256 rsp = ha->hw.rx_cntxt_rsp; 1257 1258 QL_DPRINT2((dev, "%s: rcv cntxt successful" 1259 " rds_ring_offset = 0x%08x" 1260 " sds_ring_offset = 0x%08x" 1261 " cntxt_state = 0x%08x" 1262 " funcs_per_port = 0x%08x" 1263 " num_rds_rings = 0x%04x" 1264 " num_sds_rings = 0x%04x" 1265 " cntxt_id = 0x%04x" 1266 " phys_port = 0x%02x" 1267 " virt_port = 0x%02x\n", 1268 __func__, 1269 rsp->rx_rsp.rds_ring_offset, 1270 rsp->rx_rsp.sds_ring_offset, 1271 rsp->rx_rsp.cntxt_state, 1272 rsp->rx_rsp.funcs_per_port, 1273 rsp->rx_rsp.num_rds_rings, 1274 rsp->rx_rsp.num_sds_rings, 1275 rsp->rx_rsp.cntxt_id, 1276 rsp->rx_rsp.phys_port, 1277 rsp->rx_rsp.virt_port)); 1278 1279 for (i = 0; i < ha->hw.num_rds_rings; i++) { 1280 QL_DPRINT2((dev, 1281 "%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n", 1282 __func__, i, rsp->rds_rsp[i].producer_reg)); 1283 } 1284 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1285 QL_DPRINT2((dev, 1286 "%s: rcv cntxt sds[%i].consumer_reg = 
0x%08x" 1287 " sds[%i].intr_mask_reg = 0x%08x\n", 1288 __func__, i, rsp->sds_rsp[i].consumer_reg, 1289 i, rsp->sds_rsp[i].intr_mask_reg)); 1290 } 1291 } 1292 ha->hw.flags.init_rx_cnxt = 1; 1293 return (0); 1294 } 1295 1296 /* 1297 * Name: qla_del_rcv_cntxt 1298 * Function: Destroys the Receive Context. 1299 */ 1300 void 1301 qla_del_rcv_cntxt(qla_host_t *ha) 1302 { 1303 qla_cdrp_t cdrp; 1304 device_t dev = ha->pci_dev; 1305 1306 if (!ha->hw.flags.init_rx_cnxt) 1307 return; 1308 1309 bzero(&cdrp, sizeof(qla_cdrp_t)); 1310 1311 cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT; 1312 cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id; 1313 1314 if (qla_issue_cmd(ha, &cdrp)) { 1315 device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n", 1316 __func__); 1317 } 1318 ha->hw.flags.init_rx_cnxt = 0; 1319 } 1320 1321 /* 1322 * Name: qla_init_xmt_cntxt 1323 * Function: Creates the Transmit Context. 1324 */ 1325 static int 1326 qla_init_xmt_cntxt(qla_host_t *ha) 1327 { 1328 bus_addr_t phys_addr; 1329 device_t dev; 1330 q80_tx_cntxt_rsp_t *tx_rsp; 1331 qla_cdrp_t cdrp; 1332 qla_hw_t *hw = &ha->hw; 1333 1334 dev = ha->pci_dev; 1335 1336 /* 1337 * Create Transmit Context 1338 */ 1339 phys_addr = ha->hw.tx_cntxt_req_paddr; 1340 tx_rsp = ha->hw.tx_cntxt_rsp; 1341 1342 hw->txr_comp = hw->txr_next = 0; 1343 *(hw->tx_cons) = 0; 1344 1345 bzero(&cdrp, sizeof(qla_cdrp_t)); 1346 1347 cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT; 1348 cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32); 1349 cdrp.cmd_arg2 = (uint32_t)(phys_addr); 1350 cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t)); 1351 1352 if (qla_issue_cmd(ha, &cdrp)) { 1353 device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n", 1354 __func__); 1355 return (-1); 1356 } else { 1357 ha->hw.tx_prod_reg = tx_rsp->producer_reg; 1358 1359 QL_DPRINT2((dev, "%s: tx cntxt successful" 1360 " cntxt_state = 0x%08x " 1361 " cntxt_id = 0x%04x " 1362 " phys_port_id = 0x%02x " 1363 " virt_port_id = 0x%02x " 1364 " producer_reg = 0x%08x " 1365 " intr_mask_reg = 0x%08x\n", 1366 __func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id, 1367 tx_rsp->phys_port_id, tx_rsp->virt_port_id, 1368 tx_rsp->producer_reg, tx_rsp->intr_mask_reg)); 1369 } 1370 ha->hw.txr_free = NUM_TX_DESCRIPTORS; 1371 1372 ha->hw.flags.init_tx_cnxt = 1; 1373 return (0); 1374 } 1375 1376 /* 1377 * Name: qla_del_xmt_cntxt 1378 * Function: Destroys the Transmit Context. 1379 */ 1380 static void 1381 qla_del_xmt_cntxt(qla_host_t *ha) 1382 { 1383 qla_cdrp_t cdrp; 1384 device_t dev = ha->pci_dev; 1385 1386 if (!ha->hw.flags.init_tx_cnxt) 1387 return; 1388 1389 bzero(&cdrp, sizeof(qla_cdrp_t)); 1390 1391 cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT; 1392 cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id; 1393 1394 if (qla_issue_cmd(ha, &cdrp)) { 1395 device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n", 1396 __func__); 1397 } 1398 ha->hw.flags.init_tx_cnxt = 0; 1399 } 1400 1401 /* 1402 * Name: qla_get_max_rds 1403 * Function: Returns the maximum number of Receive Descriptor Rings per context. 
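 *
 *	This helper and the qla_get_max_*()/qla_get_flow_control() routines
 *	that follow all issue a CDRP read command through qla_issue_cmd()
 *	and consume the value returned in rsp_arg1.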
 */
static int
qla_get_max_rds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
			__func__, ha->hw.max_rds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_sds
 * Function: Returns the maximum number of Status Descriptor Rings per context.
 */
static int
qla_get_max_sds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
			__func__, ha->hw.max_sds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rules
 * Function: Returns the maximum number of Rules per context.
 */
static int
qla_get_max_rules(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
			__func__, ha->hw.max_rules_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rcv_cntxts
 * Function: Returns the maximum number of Receive Contexts supported.
 */
static int
qla_get_max_rcv_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
			__func__, ha->hw.max_rcv_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_tx_cntxts
 * Function: Returns the maximum number of Transmit Contexts supported.
 */
static int
qla_get_max_tx_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
			__func__, ha->hw.max_xmt_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_mtu
 * Function: Returns the maximum MTU supported for a context.
 */
static int
qla_get_max_mtu(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_MTU;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
			ha->hw.max_mtu));
	}
	return 0;
}

/*
 * Name: qla_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_SET_MTU;
	cdrp.cmd_arg1 = (uint32_t)cntxt_id;
	cdrp.cmd_arg2 = mtu;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
	}
	return 0;
}

/*
 * Name: qla_get_max_lro
 * Function: Returns the maximum number of TCP connections that can be
 *	supported with LRO.
 */
static int
qla_get_max_lro(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_LRO;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_lro = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
			ha->hw.max_lro));
	}
	return 0;
}

/*
 * Name: qla_get_flow_control
 * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
 *	the PCI function.
 */
static int
qla_get_flow_control(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
			__func__);
		return (-1);
	} else {
		QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
			cdrp.rsp_arg1));
	}
	return 0;
}

/*
 * Name: qla_get_hw_caps
 * Function: Retrieves hardware capabilities
 */
void
qla_get_hw_caps(qla_host_t *ha)
{
	//qla_read_mac_addr(ha);
	qla_get_max_rds(ha);
	qla_get_max_sds(ha);
	qla_get_max_rules(ha);
	qla_get_max_rcv_cntxts(ha);
	qla_get_max_tx_cntxts(ha);
	qla_get_max_mtu(ha);
	qla_get_max_lro(ha);
	qla_get_flow_control(ha);
	return;
}

/*
 * Name: qla_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *	hardware (for the given interface)
 */
void
qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_multi)
{
	q80_rcv_cntxt_rsp_t *rsp;
	int i;

	rsp = ha->hw.rx_cntxt_rsp;
	for (i = 0; i < mcnt; i++) {
		qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
		mta += Q8_MAC_ADDR_LEN;
	}
	return;
}

/*
 * Name: qla_hw_tx_done_locked
 * Function: Handle Transmit Completions
 */
static void
qla_hw_tx_done_locked(qla_host_t *ha)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw->tx_cons));

	while (comp_idx != hw->txr_comp) {

		txb = &ha->tx_buf[hw->txr_comp];

		hw->txr_comp++;
		if (hw->txr_comp == NUM_TX_DESCRIPTORS)
			hw->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			bus_dmamap_destroy(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->map = (bus_dmamap_t)0;
			txb->m_head = NULL;
		}
	}

	hw->txr_free += comp_count;

	QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n",
		__func__, hw->txr_comp, hw->txr_free, hw->txr_next,
		READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));

	return;
}

/*
 * Name: qla_hw_tx_done
 * Function: Handle Transmit Completions
 */
void
qla_hw_tx_done(qla_host_t *ha)
{
	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
		return;
	}
	qla_hw_tx_done_locked(ha);

	if (ha->hw.txr_free > free_pkt_thres)
		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mtx_unlock(&ha->tx_lock);
	return;
}

void
qla_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->hw.flags.link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state = ha->hw.flags.link_up;

	if (ha->pci_func == 0)
		ha->hw.flags.link_up = (((link_state & 0xF) == 1)? 1 : 0);
	else
		ha->hw.flags.link_up = ((((link_state >> 4) & 0xF) == 1)?
1 : 0); 1774 1775 if (prev_link_state != ha->hw.flags.link_up) { 1776 if (ha->hw.flags.link_up) { 1777 if_link_state_change(ha->ifp, LINK_STATE_UP); 1778 } else { 1779 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 1780 } 1781 } 1782 } 1783 1784 int 1785 qla_config_lro(qla_host_t *ha) 1786 { 1787 int i; 1788 qla_hw_t *hw = &ha->hw; 1789 struct lro_ctrl *lro; 1790 1791 for (i = 0; i < hw->num_sds_rings; i++) { 1792 lro = &hw->sds[i].lro; 1793 if (tcp_lro_init(lro)) { 1794 device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n", 1795 __func__); 1796 return (-1); 1797 } 1798 lro->ifp = ha->ifp; 1799 } 1800 ha->flags.lro_init = 1; 1801 1802 QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__)); 1803 return (0); 1804 } 1805 1806 void 1807 qla_free_lro(qla_host_t *ha) 1808 { 1809 int i; 1810 qla_hw_t *hw = &ha->hw; 1811 struct lro_ctrl *lro; 1812 1813 if (!ha->flags.lro_init) 1814 return; 1815 1816 for (i = 0; i < hw->num_sds_rings; i++) { 1817 lro = &hw->sds[i].lro; 1818 tcp_lro_free(lro); 1819 } 1820 ha->flags.lro_init = 0; 1821 } 1822 1823 void 1824 qla_hw_stop_rcv(qla_host_t *ha) 1825 { 1826 int i, done, count = 100; 1827 1828 while (count--) { 1829 done = 1; 1830 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1831 if (ha->hw.sds[i].rcv_active) 1832 done = 0; 1833 } 1834 if (done) 1835 break; 1836 else 1837 qla_mdelay(__func__, 10); 1838 } 1839 } 1840 1841
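
/*
 * Illustrative sketch only (compiled out): the calling pattern shared by the
 * CDRP query helpers above, referenced from the qla_issue_cmd() comment.
 * qla_cdrp_query_example() is a hypothetical name and is not part of the
 * driver; it simply mirrors qla_get_max_mtu().
 */
#if 0
static int
qla_cdrp_query_example(qla_host_t *ha)
{
	qla_cdrp_t cdrp;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_MTU;	/* any Q8_CMD_RD_* query opcode */

	if (qla_issue_cmd(ha, &cdrp))
		return (-1);		/* semaphore, timeout or rsp != 1 */

	/* the queried value is returned in rsp_arg1 */
	return ((int)cdrp.rsp_arg1);
}
#endif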