/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2012 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static uint32_t sysctl_num_rds_rings = 2;
static uint32_t sysctl_num_sds_rings = 4;

/*
 * Static Functions
 */

static void qla_init_cntxt_regions(qla_host_t *ha);
static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
	uint16_t cntxt_id, uint32_t add_multi);
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_get_max_rds(qla_host_t *ha);
static int qla_get_max_sds(qla_host_t *ha);
static int qla_get_max_rules(qla_host_t *ha);
static int qla_get_max_rcv_cntxts(qla_host_t *ha);
static int qla_get_max_tx_cntxts(qla_host_t *ha);
static int qla_get_max_mtu(qla_host_t *ha);
static int qla_get_max_lro(qla_host_t *ha);
static int qla_get_flow_control(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha);

int
qla_get_msix_count(qla_host_t *ha)
{
	return (sysctl_num_sds_rings);
}

/*
 * Name: qla_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
qla_hw_add_sysctls(qla_host_t *ha)
{
	device_t dev;

	dev = ha->pci_dev;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
	    sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
	    sysctl_num_sds_rings, "Number of Status Descriptor Rings");
}

/*
 * Name: qla_free_dma
 * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
 */
void
qla_free_dma(qla_host_t *ha)
{
	uint32_t i;

	if (ha->hw.dma_buf.flags.context) {
		qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
		ha->hw.dma_buf.flags.context = 0;
	}

	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++)
			qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++)
			qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.tx_ring) {
		qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
}

/*
 * Name: qla_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qla_alloc_dma(qla_host_t *ha)
{
	device_t dev;
	uint32_t i, j, size;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
	ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;

	/*
	 * Allocate Transmit Ring
	 */

	ha->hw.dma_buf.tx_ring.alignment = 8;
	ha->hw.dma_buf.tx_ring.size =
		(sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
		ha->hw.dma_buf.tx_ring.dma_b));

	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		ha->hw.dma_buf.rds_ring[i].alignment = 8;

		if (i == RDS_RING_INDEX_NORMAL) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
		} else if (i == RDS_RING_INDEX_JUMBO) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) *
					NUM_RX_JUMBO_DESCRIPTORS;
		} else
			break;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
			QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
				__func__));

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.rds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
			ha->hw.dma_buf.rds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.dma_buf.sds_ring[i].alignment = 8;
		ha->hw.dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.sds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
			ha->hw.dma_buf.sds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.sds_ring = 1;

	/*
	 * Allocate Context Area
	 */
	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += sizeof (uint32_t); /* for tx consumer index */

	size = QL_ALIGN(size, PAGE_SIZE);

	ha->hw.dma_buf.context.alignment = 8;
	ha->hw.dma_buf.context.size = size;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
		device_printf(dev, "%s: context alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.context = 1;
	QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.context.dma_addr),
		ha->hw.dma_buf.context.dma_b));

	qla_init_cntxt_regions(ha);

	return 0;

qla_alloc_dma_exit:
	qla_free_dma(ha);
	return -1;
}

/*
 * Name: qla_init_cntxt_regions
 * Function: Initializes Tx/Rx Contexts.
 */
static void
qla_init_cntxt_regions(qla_host_t *ha)
{
	qla_hw_t *hw;
	q80_tx_cntxt_req_t *tx_cntxt_req;
	q80_rcv_cntxt_req_t *rx_cntxt_req;
	bus_addr_t phys_addr;
	uint32_t i;
	device_t dev;
	uint32_t size;

	dev = ha->pci_dev;

	hw = &ha->hw;

	hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;

	phys_addr = hw->dma_buf.context.dma_addr;

	memset((void *)hw->dma_buf.context.dma_b, 0,
		ha->hw.dma_buf.context.size);

	hw->tx_cntxt_req =
		(q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
	hw->tx_cntxt_req_paddr = phys_addr;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->tx_cntxt_rsp =
		(q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
	hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_req =
		(q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
	hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_rsp =
		(q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
	hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
	hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;

	/*
	 * Initialize the Transmit Context Request so that we don't need to
	 * do it every time we need to create a context
	 */
	tx_cntxt_req = hw->tx_cntxt_req;

	tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);

	tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);

	tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
		CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));

	tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	tx_cntxt_req->phys_addr =
		qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);

	tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);

	/*
	 * Initialize the Receive Context Request
	 */

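	/*
	 * Summary of what the request below carries (see the assignments
	 * that follow): the response DMA address, the capability bits
	 * (base firmware, legacy MN, jumbo frames, LRO and hardware LRO),
	 * the interrupt modes, the RDS/SDS ring counts and offsets, and the
	 * physical address, size and buffer size (or MSI index) of each
	 * RDS and SDS ring.
	 */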
	rx_cntxt_req = hw->rx_cntxt_req;

	rx_cntxt_req->rx_req.rsp_dma_addr =
		qla_host_to_le64(hw->rx_cntxt_rsp_paddr);

	rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
			CNTXT_CAP0_LEGACY_MN |
			CNTXT_CAP0_JUMBO |
			CNTXT_CAP0_LRO |
			CNTXT_CAP0_HW_LRO);

	rx_cntxt_req->rx_req.intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	rx_cntxt_req->rx_req.rds_intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);

	rx_cntxt_req->rx_req.rds_ring_offset = 0;
	rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
		(hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
	rx_cntxt_req->rx_req.num_rds_rings =
		qla_host_to_le16(hw->num_rds_rings);
	rx_cntxt_req->rx_req.num_sds_rings =
		qla_host_to_le16(hw->num_sds_rings);

	for (i = 0; i < hw->num_rds_rings; i++) {
		rx_cntxt_req->rds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (i == RDS_RING_INDEX_NORMAL) {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MCLBYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_DESCRIPTORS);
		} else {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MJUM9BYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
		}
	}

	for (i = 0; i < hw->num_sds_rings; i++) {
		rx_cntxt_req->sds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rx_cntxt_req->sds_req[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
	}

	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
		__func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
		__func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n",
		__func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
}

/*
 * Name: qla_issue_cmd
 * Function: Issues commands on the CDRP interface and returns responses.
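 *
 * Flow of the implementation below: grab the SEM5 hardware semaphore, write
 * a per-PCI-function signature and the three command arguments, write the
 * command code to the CMD_RSP register, poll that register in 10ms steps
 * (up to 4 seconds) until bit 31 clears, read back the response and its
 * three arguments, then release the semaphore.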
 */
static int
qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
{
	int ret = 0;
	uint32_t signature;
	uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
	uint32_t data;
	device_t dev;

	dev = ha->pci_dev;

	signature = 0xcafe0000 | 0x0100 | ha->pci_func;

	ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);

	if (ret) {
		device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
		return (ret);
	}

	WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);

	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));

	WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);

	while (count) {
		qla_mdelay(__func__, 10);

		data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);

		if ((!(data & 0x80000000)))
			break;
		count--;
	}
	if ((!count) || (data != 1))
		ret = -1;

	cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
	cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
	cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
	cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);

	qla_sem_unlock(ha, Q8_SEM5_UNLOCK);

	if (ret) {
		device_printf(dev, "%s: "
			"cmd[0x%08x] = 0x%08x\n"
			"\tsig[0x%08x] = 0x%08x\n"
			"\targ1[0x%08x] = 0x%08x\n"
			"\targ2[0x%08x] = 0x%08x\n"
			"\targ3[0x%08x] = 0x%08x\n",
			__func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
			Q8_NX_CDRP_SIGNATURE, signature,
			Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
			Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
			Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);

		device_printf(dev, "%s: exit (ret = 0x%x)\n"
			"\t\t rsp = 0x%08x\n"
			"\t\t arg1 = 0x%08x\n"
			"\t\t arg2 = 0x%08x\n"
			"\t\t arg3 = 0x%08x\n",
			__func__, ret, cdrp->rsp,
			cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
	}

	return (ret);
}

#define QLA_TX_MIN_FREE	2

/*
 * Name: qla_fw_cmd
 * Function: Issues firmware control commands on the Tx Ring.
 */
static int
qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
{
	device_t dev;
	q80_tx_cmd_t *tx_cmd;
	qla_hw_t *hw = &ha->hw;
	int count = 100;

	dev = ha->pci_dev;

	QLA_TX_LOCK(ha);

	if (hw->txr_free <= QLA_TX_MIN_FREE) {
		while (count--) {
			qla_hw_tx_done_locked(ha);
			if (hw->txr_free > QLA_TX_MIN_FREE)
				break;

			QLA_TX_UNLOCK(ha);
			qla_mdelay(__func__, 10);
			QLA_TX_LOCK(ha);
		}
		if (hw->txr_free <= QLA_TX_MIN_FREE) {
			QLA_TX_UNLOCK(ha);
			device_printf(dev, "%s: xmit queue full\n", __func__);
			return (-1);
		}
	}
	tx_cmd = &hw->tx_ring_base[hw->txr_next];

	bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

	bcopy(fw_cmd, tx_cmd, size);

	hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	hw->txr_free--;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);

	QLA_TX_UNLOCK(ha);

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
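 *
 * The configuration sent below enables RSS with TCP/IPv4 and TCP/IPv6
 * hashing, programs a fixed 40-byte hash key (five 64-bit words from
 * rss_key[]) and an indirection table mask of 0x7, and is delivered to the
 * firmware as a control command on the Tx ring via qla_fw_cmd().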
 */
const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	qla_fw_cds_config_rss_t rss_config;
	int ret, i;

	bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));

	rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
	rss_config.hdr.cntxt_id = cntxt_id;

	rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
	rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;

	rss_config.ind_tbl_mask = 0x7;

	for (i = 0; i < 5; i++)
		rss_config.rss_key[i] = rss_key[i];

	ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));

	return ret;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
{
	qla_fw_cds_config_intr_coalesc_t intr_coalesce;
	int ret;

	bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));

	intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
	intr_coalesce.hdr.cntxt_id = cntxt_id;

	intr_coalesce.flags = 0x04;
	intr_coalesce.max_rcv_pkts = 256;
	intr_coalesce.max_rcv_usecs = 3;
	intr_coalesce.max_snd_pkts = 64;
	intr_coalesce.max_snd_usecs = 4;

	if (tenable) {
		intr_coalesce.usecs_to = 1000; /* 1 millisecond */
		intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
		intr_coalesce.sds_ring_bitmask =
			Q8_FWCMD_INTR_COALESC_SDS_RING_0;
	}

	ret = qla_fw_cmd(ha, &intr_coalesce,
			sizeof(qla_fw_cds_config_intr_coalesc_t));

	return ret;
}

/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
	uint32_t add_multi)
{
	qla_fw_cds_config_mac_addr_t mac_config;
	int ret;

//	device_printf(ha->pci_dev,
//		"%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
//		mac_addr[0], mac_addr[1], mac_addr[2],
//		mac_addr[3], mac_addr[4], mac_addr[5]);

	bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

	mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
	mac_config.hdr.cntxt_id = cntxt_id;

	if (add_multi)
		mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
	else
		mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
	bcopy(mac_addr, mac_config.mac_addr, 6);

	ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

	return ret;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
{
	qla_set_mac_rcv_mode_t rcv_mode;
	int ret;

	bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

	rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
	rcv_mode.hdr.cntxt_id = cntxt_id;

	rcv_mode.mode = mode;

	ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

	return ret;
}

void
qla_set_promisc(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_ENABLE_PROMISCUOUS);
}

void
qla_set_allmulti(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_ENABLE_ALLMULTI);
}

void
qla_reset_promisc_allmulti(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
}

/*
 * Name: qla_config_ipv4_addr
 * Function: Configures the Destination IP Addr for LRO.
 */
void
qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
{
	qla_config_ipv4_t ip_conf;

	bzero(&ip_conf, sizeof(qla_config_ipv4_t));

	ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
	ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

	ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
	ip_conf.ipv4_addr = (uint64_t)ipv4_addr;

	(void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));

	return;
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
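 *
 * Returns 0 when the packet is a TSO candidate and the complete header is
 * resident in the first mbuf, 1 when the header had to be gathered into the
 * caller-supplied 'hdr' buffer (and must later be copied into the Tx ring),
 * and -1 when the packet is not a TSO candidate.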
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	uint8_t *tcp_opt;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:

		tcp_opt_off = ehdrlen + sizeof(struct ip) +
				sizeof(struct tcphdr);

		if (mp->m_len < tcp_opt_off) {
			m_copydata(mp, 0, tcp_opt_off, hdr);
			ip = (struct ip *)hdr;
		} else {
			ip = (struct ip *)(mp->m_data + ehdrlen);
		}

		ip_hlen = ip->ip_hl << 2;
		opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

		if ((ip->ip_p != IPPROTO_TCP) ||
			(ip_hlen != sizeof (struct ip))) {
			offload = 0;
		} else {
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		}
		break;

	default:
		QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
		offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;

	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) {

		/*
		 * If TCP options are present, only the time stamp option
		 * is supported.
		 */
		if ((tcp_hlen - sizeof(struct tcphdr)) != 10)
			return -1;
		else {

			if (mp->m_len < hdrlen) {
				tcp_opt = &hdr[tcp_opt_off];
			} else {
				tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off);
			}

			if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) ||
				(*(tcp_opt + 2) != 0x08) ||
				(*(tcp_opt + 3) != 10)) {
				return -1;
			}
		}

		tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen;
	} else {
		tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	}

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->ip_hdr_off = ehdrlen;
	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode = Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		return (1);
	}

	return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof (struct ip);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			device_printf(dev, "%s: ipv4 mlen\n", __func__);
			offload = 0;
			break;
		}

		if (ip->ip_p == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
		else if (ip->ip_p == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
		else {
			device_printf(dev, "%s: ipv4\n", __func__);
			offload = 0;
		}
		break;

	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof(struct ip6_hdr);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			device_printf(dev, "%s: ipv6 mlen\n", __func__);
			offload = 0;
			break;
		}

		if (ip6->ip6_nxt == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
		else if (ip6->ip6_nxt == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
		else {
			device_printf(dev, "%s: ipv6\n", __func__);
			offload = 0;
		}
		break;

	default:
		offload = 0;
		break;
	}
	if (!offload)
		return (-1);

	tx_cmd->flags_opcode = opcode;

	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;

	return (0);
}

/*
 * Name: qla_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither of these criteria is met, it is transmitted
 *	as a regular ethernet frame.
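 *
 * Each Tx command descriptor holds up to Q8_TX_CMD_MAX_SEGMENTS DMA
 * segments; larger scatter/gather lists spill over into additional
 * descriptors. For TSO, the packet headers are copied into one or more
 * descriptors that follow the command descriptors, so the free-slot check
 * below accounts for those extra entries as well.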
 */
int
qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t *tx_idx, struct mbuf *mp)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is at least one empty slot in the tx_ring;
	 * the tx_ring is considered full when only one entry is available.
	 */
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if ((mp->m_pkthdr.len > ha->max_frame_size) ||
		(nsegs > Q8_TX_MAX_SEGMENTS)) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = ha->hw.frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			hdr_len = tso_cmd.total_hdr_len;

			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		}
	}

	if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha);
		if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8((dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	*tx_idx = hw->txr_next;

	tx_cmd = &hw->tx_ring_base[hw->txr_next];

	if (hdr_len == 0) {
		if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
			(mp->m_pkthdr.len > ha->max_frame_size)) {
			device_printf(dev,
				"%s: (nsegs[%d, %d, 0x%b] > Q8_TX_MAX_SEGMENTS)\n",
				__func__, nsegs, mp->m_pkthdr.len,
				(int)mp->m_pkthdr.csum_flags, CSUM_BITS);
			qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
				mtod(mp, char *), mp->m_len);
			return (EINVAL);
		}
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
		if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
	} else {
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
	else if (mp->m_flags & M_VLANTAG) {
		tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
			Q8_TX_CMD_FLAGS_HW_VLAN_ID);
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
	}

	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_ring_base[hw->txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (hdr_len) {
		/* TSO : Copy the header in the following tx cmd descriptors */

		tx_cmd = &hw->tx_ring_base[hw->txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			hdr_len -= (ETHER_ADDR_LEN * 2);

			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = mp->m_pkthdr.ether_vtag;
			dst += 2;

			bytes -= ((ETHER_ADDR_LEN * 2) + 4);

			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		while (hdr_len) {
			tx_cmd = &hw->tx_ring_base[hw->txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;
			hw->txr_next =
				(hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->txr_free = hw->txr_free - tx_cmd_count;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
	QL_DPRINT8((dev, "%s: return\n", __func__));
	return (0);
}

/*
 * Name: qla_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
qla_del_hw_if(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_DISABLE_INTERRUPTS(ha, i);

	qla_del_rcv_cntxt(ha);
	qla_del_xmt_cntxt(ha);

	ha->hw.flags.lro = 0;
}

/*
 * Name: qla_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
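 *
 * Bring-up order, as implemented below: query the hardware capabilities,
 * clear the status rings, create the receive context, prime the RDS
 * producer indices, create the transmit context, program the unicast and
 * broadcast MAC addresses, configure RSS and interrupt coalescing, and
 * enable interrupts on every status ring.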
 */
int
qla_init_hw_if(qla_host_t *ha)
{
	device_t dev;
	int i;
	uint8_t bcast_mac[6];

	qla_get_hw_caps(ha);

	dev = ha->pci_dev;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
	ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
	ha->hw.rx_in = ha->hw.rxj_in = 0;

	/* Update the RDS Producer Indices */
	QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
	QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);

	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}

	qla_config_mac_addr(ha, ha->hw.mac_addr,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
	qla_config_mac_addr(ha, bcast_mac,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

	qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);

	qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}

/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
	device_t dev;
	qla_cdrp_t cdrp;
	q80_rcv_cntxt_rsp_t *rsp;
	q80_stat_desc_t *sdesc;
	bus_addr_t phys_addr;
	int i, j;
	qla_hw_t *hw = &ha->hw;

	dev = ha->pci_dev;

	/*
	 * Create Receive Context
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
		}
	}

	phys_addr = ha->hw.rx_cntxt_req_paddr;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
	cdrp.cmd_arg2 = (uint32_t)(phys_addr);
	cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		rsp = ha->hw.rx_cntxt_rsp;

		QL_DPRINT2((dev, "%s: rcv cntxt successful"
			" rds_ring_offset = 0x%08x"
			" sds_ring_offset = 0x%08x"
			" cntxt_state = 0x%08x"
			" funcs_per_port = 0x%08x"
			" num_rds_rings = 0x%04x"
			" num_sds_rings = 0x%04x"
			" cntxt_id = 0x%04x"
			" phys_port = 0x%02x"
			" virt_port = 0x%02x\n",
			__func__,
			rsp->rx_rsp.rds_ring_offset,
			rsp->rx_rsp.sds_ring_offset,
			rsp->rx_rsp.cntxt_state,
			rsp->rx_rsp.funcs_per_port,
			rsp->rx_rsp.num_rds_rings,
			rsp->rx_rsp.num_sds_rings,
			rsp->rx_rsp.cntxt_id,
			rsp->rx_rsp.phys_port,
			rsp->rx_rsp.virt_port));

		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			QL_DPRINT2((dev,
				"%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
				__func__, i, rsp->rds_rsp[i].producer_reg));
		}
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			QL_DPRINT2((dev,
				"%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
				" sds[%i].intr_mask_reg = 0x%08x\n",
				__func__, i, rsp->sds_rsp[i].consumer_reg,
				i, rsp->sds_rsp[i].intr_mask_reg));
		}
	}
	ha->hw.flags.init_rx_cnxt = 1;
	return (0);
}

/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev = ha->pci_dev;

	if (!ha->hw.flags.init_rx_cnxt)
		return;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
			__func__);
	}
	ha->hw.flags.init_rx_cnxt = 0;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
	bus_addr_t phys_addr;
	device_t dev;
	q80_tx_cntxt_rsp_t *tx_rsp;
	qla_cdrp_t cdrp;
	qla_hw_t *hw = &ha->hw;

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	phys_addr = ha->hw.tx_cntxt_req_paddr;
	tx_rsp = ha->hw.tx_cntxt_rsp;

	hw->txr_comp = hw->txr_next = 0;
	*(hw->tx_cons) = 0;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
	cdrp.cmd_arg2 = (uint32_t)(phys_addr);
	cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.tx_prod_reg = tx_rsp->producer_reg;

		QL_DPRINT2((dev, "%s: tx cntxt successful"
			" cntxt_state = 0x%08x "
			" cntxt_id = 0x%04x "
			" phys_port_id = 0x%02x "
			" virt_port_id = 0x%02x "
			" producer_reg = 0x%08x "
			" intr_mask_reg = 0x%08x\n",
			__func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
			tx_rsp->phys_port_id, tx_rsp->virt_port_id,
			tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
	}
	ha->hw.txr_free = NUM_TX_DESCRIPTORS;

	ha->hw.flags.init_tx_cnxt = 1;
	return (0);
}

/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
 */
static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev = ha->pci_dev;

	if (!ha->hw.flags.init_tx_cnxt)
		return;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
			__func__);
	}
	ha->hw.flags.init_tx_cnxt = 0;
}

/*
 * Name: qla_get_max_rds
 * Function: Returns the maximum number of Receive Descriptor Rings per context.
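 *
 * This helper and the qla_get_max_*()/qla_get_flow_control() routines that
 * follow share one pattern: issue a read-only CDRP command through
 * qla_issue_cmd() and, for most of them, cache the value returned in
 * rsp_arg1 in ha->hw.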
 */
static int
qla_get_max_rds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
			__func__, ha->hw.max_rds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_sds
 * Function: Returns the maximum number of Status Descriptor Rings per context.
 */
static int
qla_get_max_sds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
			__func__, ha->hw.max_sds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rules
 * Function: Returns the maximum number of Rules per context.
 */
static int
qla_get_max_rules(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
			__func__, ha->hw.max_rules_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rcv_cntxts
 * Function: Returns the maximum number of Receive Contexts supported.
 */
static int
qla_get_max_rcv_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
			__func__, ha->hw.max_rcv_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_tx_cntxts
 * Function: Returns the maximum number of Transmit Contexts supported.
 */
static int
qla_get_max_tx_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
			__func__, ha->hw.max_xmt_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_mtu
 * Function: Returns the MTU supported for a context.
 */
static int
qla_get_max_mtu(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_MTU;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
			ha->hw.max_mtu));
	}
	return 0;
}

/*
 * Name: qla_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_SET_MTU;
	cdrp.cmd_arg1 = (uint32_t)cntxt_id;
	cdrp.cmd_arg2 = mtu;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
	}
	return 0;
}

/*
 * Name: qla_get_max_lro
 * Function: Returns the maximum number of TCP Connections which can be
 *	supported with LRO.
 */
static int
qla_get_max_lro(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_LRO;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_lro = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
			ha->hw.max_lro));
	}
	return 0;
}

/*
 * Name: qla_get_flow_control
 * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
 *	the PCI function.
 */
static int
qla_get_flow_control(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
			__func__);
		return (-1);
	} else {
		QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
			cdrp.rsp_arg1));
	}
	return 0;
}

/*
 * Name: qla_get_hw_caps
 * Function: Retrieves hardware capabilities
 */
void
qla_get_hw_caps(qla_host_t *ha)
{
	//qla_read_mac_addr(ha);
	qla_get_max_rds(ha);
	qla_get_max_sds(ha);
	qla_get_max_rules(ha);
	qla_get_max_rcv_cntxts(ha);
	qla_get_max_tx_cntxts(ha);
	qla_get_max_mtu(ha);
	qla_get_max_lro(ha);
	qla_get_flow_control(ha);
	return;
}

/*
 * Name: qla_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S into the
 *	hardware (for the given interface)
 */
void
qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_multi)
{
	q80_rcv_cntxt_rsp_t *rsp;
	int i;

	rsp = ha->hw.rx_cntxt_rsp;
	for (i = 0; i < mcnt; i++) {
		qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
		mta += Q8_MAC_ADDR_LEN;
	}
	return;
}

/*
 * Name: qla_hw_tx_done_locked
 * Function: Handle Transmit Completions
 */
static void
qla_hw_tx_done_locked(qla_host_t *ha)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw->tx_cons));

	while (comp_idx != hw->txr_comp) {

		txb = &ha->tx_buf[hw->txr_comp];

		hw->txr_comp++;
		if (hw->txr_comp == NUM_TX_DESCRIPTORS)
			hw->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			bus_dmamap_destroy(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->map = (bus_dmamap_t)0;
			txb->m_head = NULL;
		}
	}

	hw->txr_free += comp_count;

	QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n",
		__func__, hw->txr_comp, hw->txr_free, hw->txr_next,
		READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));

	return;
}

/*
 * Name: qla_hw_tx_done
 * Function: Handle Transmit Completions
 */
void
qla_hw_tx_done(qla_host_t *ha)
{
	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
		return;
	}
	qla_hw_tx_done_locked(ha);

	if (ha->hw.txr_free > free_pkt_thres)
		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mtx_unlock(&ha->tx_lock);
	return;
}

void
qla_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->hw.flags.link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state = ha->hw.flags.link_up;

	if (ha->pci_func == 0)
		ha->hw.flags.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
	else
		ha->hw.flags.link_up = ((((link_state >> 4) & 0xF) == 1) ?
			1 : 0);

	if (prev_link_state != ha->hw.flags.link_up) {
		if (ha->hw.flags.link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
}

int
qla_config_lro(qla_host_t *ha)
{
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;
		if (tcp_lro_init(lro)) {
			device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
				__func__);
			return (-1);
		}
		lro->ifp = ha->ifp;
	}
	ha->flags.lro_init = 1;

	QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
	return (0);
}

void
qla_free_lro(qla_host_t *ha)
{
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	if (!ha->flags.lro_init)
		return;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;
		tcp_lro_free(lro);
	}
	ha->flags.lro_init = 0;
}

void
qla_hw_stop_rcv(qla_host_t *ha)
{
	int i, done, count = 100;

	while (count--) {
		done = 1;
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			if (ha->hw.sds[i].rcv_active)
				done = 0;
		}
		if (done)
			break;
		else
			qla_mdelay(__func__, 10);
	}
}