/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011-2012 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static uint32_t sysctl_num_rds_rings = 2;
static uint32_t sysctl_num_sds_rings = 4;

/*
 * Static Functions
 */

static void qla_init_cntxt_regions(qla_host_t *ha);
static int qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp);
static int qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size);
static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr,
	uint16_t cntxt_id, uint32_t add_multi);
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_get_max_rds(qla_host_t *ha);
static int qla_get_max_sds(qla_host_t *ha);
static int qla_get_max_rules(qla_host_t *ha);
static int qla_get_max_rcv_cntxts(qla_host_t *ha);
static int qla_get_max_tx_cntxts(qla_host_t *ha);
static int qla_get_max_mtu(qla_host_t *ha);
static int qla_get_max_lro(qla_host_t *ha);
static int qla_get_flow_control(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha);

int
qla_get_msix_count(qla_host_t *ha)
{
	return (sysctl_num_sds_rings);
}

/*
 * Name: qla_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
qla_hw_add_sysctls(qla_host_t *ha)
{
	device_t dev;

	dev = ha->pci_dev;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "num_rds_rings", CTLFLAG_RD, &sysctl_num_rds_rings,
	    sysctl_num_rds_rings, "Number of Rcv Descriptor Rings");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "num_sds_rings", CTLFLAG_RD, &sysctl_num_sds_rings,
	    sysctl_num_sds_rings, "Number of Status Descriptor Rings");
}

/*
 * Name: qla_free_dma
 * Function: Frees the DMA'able memory allocated in qla_alloc_dma()
 */
void
qla_free_dma(qla_host_t *ha)
{
	uint32_t i;

	if (ha->hw.dma_buf.flags.context) {
		qla_free_dmabuf(ha, &ha->hw.dma_buf.context);
		ha->hw.dma_buf.flags.context = 0;
	}

	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++)
			qla_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++)
			qla_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.tx_ring) {
		qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
}

/*
 * Name: qla_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qla_alloc_dma(qla_host_t *ha)
{
	device_t dev;
	uint32_t i, j, size;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
	ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;

	/*
	 * Allocate Transmit Ring
	 */

	ha->hw.dma_buf.tx_ring.alignment = 8;
	ha->hw.dma_buf.tx_ring.size =
		(sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
		ha->hw.dma_buf.tx_ring.dma_b));

	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		ha->hw.dma_buf.rds_ring[i].alignment = 8;

		if (i == RDS_RING_INDEX_NORMAL) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
		} else if (i == RDS_RING_INDEX_JUMBO) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) *
					NUM_RX_JUMBO_DESCRIPTORS;
		} else
			break;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
			QL_DPRINT4((dev, "%s: rds ring alloc failed\n",
				__func__));

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.rds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
			ha->hw.dma_buf.rds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.dma_buf.sds_ring[i].alignment = 8;
		ha->hw.dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.sds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
			ha->hw.dma_buf.sds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.sds_ring = 1;

	/*
	 * Allocate Context Area
	 */
	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += sizeof (uint32_t); /* for tx consumer index */

	size = QL_ALIGN(size, PAGE_SIZE);

	ha->hw.dma_buf.context.alignment = 8;
	ha->hw.dma_buf.context.size = size;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
		device_printf(dev, "%s: context alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.context = 1;
	QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.context.dma_addr),
		ha->hw.dma_buf.context.dma_b));

	qla_init_cntxt_regions(ha);

	return 0;

qla_alloc_dma_exit:
	qla_free_dma(ha);
	return -1;
}

/*
 * Name: qla_init_cntxt_regions
 * Function: Initializes Tx/Rx Contexts.
 */
static void
qla_init_cntxt_regions(qla_host_t *ha)
{
	qla_hw_t *hw;
	q80_tx_cntxt_req_t *tx_cntxt_req;
	q80_rcv_cntxt_req_t *rx_cntxt_req;
	bus_addr_t phys_addr;
	uint32_t i;
	uint32_t size;

	hw = &ha->hw;

	hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;

	phys_addr = hw->dma_buf.context.dma_addr;

	memset((void *)hw->dma_buf.context.dma_b, 0,
		ha->hw.dma_buf.context.size);

	hw->tx_cntxt_req =
		(q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
	hw->tx_cntxt_req_paddr = phys_addr;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->tx_cntxt_rsp =
		(q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
	hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_req =
		(q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
	hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_rsp =
		(q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
	hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
	hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;

	/*
	 * Initialize the Transmit Context Request so that we don't need to
	 * do it every time we need to create a context
	 */
	tx_cntxt_req = hw->tx_cntxt_req;

	tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);

	tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);

	tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
				CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));

	tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	tx_cntxt_req->phys_addr =
		qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);

	tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);

	/*
	 * Initialize the Receive Context Request
	 */

	rx_cntxt_req = hw->rx_cntxt_req;

	rx_cntxt_req->rx_req.rsp_dma_addr =
		qla_host_to_le64(hw->rx_cntxt_rsp_paddr);
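
	/*
	 * Note: the remainder of the receive context request is filled in
	 * below. The capability bits advertise jumbo frames plus firmware
	 * and hardware LRO, and the two loops that follow describe every
	 * RDS and SDS ring (address, size and, for SDS rings, the MSI index).
	 */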

	rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
						CNTXT_CAP0_LEGACY_MN |
						CNTXT_CAP0_JUMBO |
						CNTXT_CAP0_LRO |
						CNTXT_CAP0_HW_LRO);

	rx_cntxt_req->rx_req.intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	rx_cntxt_req->rx_req.rds_intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);

	rx_cntxt_req->rx_req.rds_ring_offset = 0;
	rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
		(hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
	rx_cntxt_req->rx_req.num_rds_rings =
		qla_host_to_le16(hw->num_rds_rings);
	rx_cntxt_req->rx_req.num_sds_rings =
		qla_host_to_le16(hw->num_sds_rings);

	for (i = 0; i < hw->num_rds_rings; i++) {
		rx_cntxt_req->rds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (i == RDS_RING_INDEX_NORMAL) {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MCLBYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_DESCRIPTORS);
		} else {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MJUM9BYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
		}
	}

	for (i = 0; i < hw->num_sds_rings; i++) {
		rx_cntxt_req->sds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rx_cntxt_req->sds_req[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
	}

	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
		__func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
		__func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n",
		__func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
}

/*
 * Name: qla_issue_cmd
 * Function: Issues commands on the CDRP interface and returns responses.
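 *
 * A caller fills a qla_cdrp_t with the command code and up to three
 * arguments; on return, the rsp/rsp_arg fields hold the firmware's reply.
 * For example (see qla_get_max_rds() later in this file):
 *
 *	qla_cdrp_t cdrp;
 *
 *	bzero(&cdrp, sizeof(qla_cdrp_t));
 *	cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;
 *	if (qla_issue_cmd(ha, &cdrp) == 0)
 *		ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;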
 */
static int
qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
{
	int ret = 0;
	uint32_t signature;
	uint32_t count = 400; /* 4 seconds or 400 10ms intervals */
	uint32_t data;
	device_t dev;

	dev = ha->pci_dev;

	signature = 0xcafe0000 | 0x0100 | ha->pci_func;

	ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);

	if (ret) {
		device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
		return (ret);
	}

	WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);

	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));

	WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);

	while (count) {
		qla_mdelay(__func__, 10);

		data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);

		if ((!(data & 0x80000000)))
			break;
		count--;
	}
	if ((!count) || (data != 1))
		ret = -1;

	cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
	cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
	cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
	cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);

	qla_sem_unlock(ha, Q8_SEM5_UNLOCK);

	if (ret) {
		device_printf(dev, "%s: "
			"cmd[0x%08x] = 0x%08x\n"
			"\tsig[0x%08x] = 0x%08x\n"
			"\targ1[0x%08x] = 0x%08x\n"
			"\targ2[0x%08x] = 0x%08x\n"
			"\targ3[0x%08x] = 0x%08x\n",
			__func__, Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
			Q8_NX_CDRP_SIGNATURE, signature,
			Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
			Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
			Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);

		device_printf(dev, "%s: exit (ret = 0x%x)\n"
			"\t\t rsp = 0x%08x\n"
			"\t\t arg1 = 0x%08x\n"
			"\t\t arg2 = 0x%08x\n"
			"\t\t arg3 = 0x%08x\n",
			__func__, ret, cdrp->rsp,
			cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
	}

	return (ret);
}

#define QLA_TX_MIN_FREE	2

/*
 * Name: qla_fw_cmd
 * Function: Issues firmware control commands on the Tx Ring.
 */
static int
qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
{
	device_t dev;
	q80_tx_cmd_t *tx_cmd;
	qla_hw_t *hw = &ha->hw;
	int count = 100;

	dev = ha->pci_dev;

	QLA_TX_LOCK(ha);

	if (hw->txr_free <= QLA_TX_MIN_FREE) {
		while (count--) {
			qla_hw_tx_done_locked(ha);
			if (hw->txr_free > QLA_TX_MIN_FREE)
				break;

			QLA_TX_UNLOCK(ha);
			qla_mdelay(__func__, 10);
			QLA_TX_LOCK(ha);
		}
		if (hw->txr_free <= QLA_TX_MIN_FREE) {
			QLA_TX_UNLOCK(ha);
			device_printf(dev, "%s: xmit queue full\n", __func__);
			return (-1);
		}
	}
	tx_cmd = &hw->tx_ring_base[hw->txr_next];

	bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

	bcopy(fw_cmd, tx_cmd, size);

	hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	hw->txr_free--;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);

	QLA_TX_UNLOCK(ha);

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
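 *
 * The 40-byte hash key below is programmed as five 64-bit words, hashing is
 * enabled for IPv4 and IPv6 TCP traffic, and the ind_tbl_mask of 0x7
 * presumably selects an 8-entry RSS indirection table.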
 */
const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	qla_fw_cds_config_rss_t rss_config;
	int ret, i;

	bzero(&rss_config, sizeof(qla_fw_cds_config_rss_t));

	rss_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	rss_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_RSS;
	rss_config.hdr.cntxt_id = cntxt_id;

	rss_config.hash_type = (Q8_FWCD_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_FWCD_RSS_HASH_TYPE_IPV6_TCP_IP);
	rss_config.flags = Q8_FWCD_RSS_FLAGS_ENABLE_RSS;

	rss_config.ind_tbl_mask = 0x7;

	for (i = 0; i < 5; i++)
		rss_config.rss_key[i] = rss_key[i];

	ret = qla_fw_cmd(ha, &rss_config, sizeof(qla_fw_cds_config_rss_t));

	return ret;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
{
	qla_fw_cds_config_intr_coalesc_t intr_coalesce;
	int ret;

	bzero(&intr_coalesce, sizeof(qla_fw_cds_config_intr_coalesc_t));

	intr_coalesce.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	intr_coalesce.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_INTR_COALESCING;
	intr_coalesce.hdr.cntxt_id = cntxt_id;

	intr_coalesce.flags = 0x04;
	intr_coalesce.max_rcv_pkts = 256;
	intr_coalesce.max_rcv_usecs = 3;
	intr_coalesce.max_snd_pkts = 64;
	intr_coalesce.max_snd_usecs = 4;

	if (tenable) {
		intr_coalesce.usecs_to = 1000; /* 1 millisecond */
		intr_coalesce.timer_type = Q8_FWCMD_INTR_COALESC_TIMER_PERIODIC;
		intr_coalesce.sds_ring_bitmask =
			Q8_FWCMD_INTR_COALESC_SDS_RING_0;
	}

	ret = qla_fw_cmd(ha, &intr_coalesce,
			sizeof(qla_fw_cds_config_intr_coalesc_t));

	return ret;
}

/*
 * Name: qla_config_mac_addr
 * Function: Binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint16_t cntxt_id,
	uint32_t add_multi)
{
	qla_fw_cds_config_mac_addr_t mac_config;
	int ret;

//	device_printf(ha->pci_dev,
//		"%s: mac_addr %02x:%02x:%02x:%02x:%02x:%02x\n", __func__,
//		mac_addr[0], mac_addr[1], mac_addr[2],
//		mac_addr[3], mac_addr[4], mac_addr[5]);

	bzero(&mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

	mac_config.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	mac_config.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_ADDR;
	mac_config.hdr.cntxt_id = cntxt_id;

	if (add_multi)
		mac_config.cmd = Q8_FWCD_ADD_MAC_ADDR;
	else
		mac_config.cmd = Q8_FWCD_DEL_MAC_ADDR;
	bcopy(mac_addr, mac_config.mac_addr, 6);

	ret = qla_fw_cmd(ha, &mac_config, sizeof(qla_fw_cds_config_mac_addr_t));

	return ret;
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
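 *
 * The qla_set_promisc(), qla_set_allmulti() and qla_reset_promisc_allmulti()
 * wrappers below simply pass the corresponding Q8_MAC_RCV_* mode for the
 * receive context returned by the firmware.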
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint16_t cntxt_id, uint32_t mode)
{
	qla_set_mac_rcv_mode_t rcv_mode;
	int ret;

	bzero(&rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

	rcv_mode.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	rcv_mode.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_MAC_RCV_MODE;
	rcv_mode.hdr.cntxt_id = cntxt_id;

	rcv_mode.mode = mode;

	ret = qla_fw_cmd(ha, &rcv_mode, sizeof(qla_set_mac_rcv_mode_t));

	return ret;
}

void
qla_set_promisc(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_ENABLE_PROMISCUOUS);
}

void
qla_set_allmulti(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_ENABLE_ALLMULTI);
}

void
qla_reset_promisc_allmulti(qla_host_t *ha)
{
	(void)qla_set_mac_rcv_mode(ha,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id,
		Q8_MAC_RCV_RESET_PROMISC_ALLMULTI);
}

/*
 * Name: qla_config_ipv4_addr
 * Function: Configures the Destination IP Addr for LRO.
 */
void
qla_config_ipv4_addr(qla_host_t *ha, uint32_t ipv4_addr)
{
	qla_config_ipv4_t ip_conf;

	bzero(&ip_conf, sizeof(qla_config_ipv4_t));

	ip_conf.hdr.cmd = Q8_FWCD_CNTRL_REQ;
	ip_conf.hdr.opcode = Q8_FWCD_OPCODE_CONFIG_IPADDR;
	ip_conf.hdr.cntxt_id = (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

	ip_conf.cmd = (uint64_t)Q8_CONFIG_CMD_IP_ENABLE;
	ip_conf.ipv4_addr = (uint64_t)ipv4_addr;

	(void)qla_fw_cmd(ha, &ip_conf, sizeof(qla_config_ipv4_t));

	return;
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
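 *
 * Returns -1 if the frame is not a TSO candidate, 1 if the ethernet/IP/TCP
 * header had to be copied into the caller supplied 'hdr' buffer (the first
 * mbuf is too short to hold it), and 0 otherwise.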
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	uint8_t *tcp_opt;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:

		tcp_opt_off = ehdrlen + sizeof(struct ip) +
				sizeof(struct tcphdr);

		if (mp->m_len < tcp_opt_off) {
			m_copydata(mp, 0, tcp_opt_off, hdr);
			ip = (struct ip *)hdr;
		} else {
			ip = (struct ip *)(mp->m_data + ehdrlen);
		}

		ip_hlen = ip->ip_hl << 2;
		opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

		if ((ip->ip_p != IPPROTO_TCP) ||
			(ip_hlen != sizeof (struct ip))) {
			offload = 0;
		} else {
			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		}
		break;

	default:
		QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
		offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;

	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
		/* If TCP options are present, only the timestamp option is supported */
		if ((tcp_hlen - sizeof(struct tcphdr)) != 10)
			return -1;
		else {
			if (mp->m_len < hdrlen) {
				tcp_opt = &hdr[tcp_opt_off];
			} else {
				tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off);
			}

			if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) ||
				(*(tcp_opt + 2) != 0x08) ||
				(*(tcp_opt + 3) != 10)) {
				return -1;
			}
		}

		tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen;
	} else {
		tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	}

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->ip_hdr_off = ehdrlen;
	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast: least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode = Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		return (1);
	}

	return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
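 *
 * Returns 0 when an IPv4/IPv6 TCP or UDP checksum offload opcode was set up,
 * and -1 when the frame does not qualify for checksum offload.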
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof (struct ip);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			device_printf(dev, "%s: ipv4 mlen\n", __func__);
			offload = 0;
			break;
		}

		if (ip->ip_p == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
		else if (ip->ip_p == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
		else {
			device_printf(dev, "%s: ipv4\n", __func__);
			offload = 0;
		}
		break;

	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof(struct ip6_hdr);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			device_printf(dev, "%s: ipv6 mlen\n", __func__);
			offload = 0;
			break;
		}

		if (ip6->ip6_nxt == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
		else if (ip6->ip6_nxt == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
		else {
			device_printf(dev, "%s: ipv6\n", __func__);
			offload = 0;
		}
		break;

	default:
		offload = 0;
		break;
	}
	if (!offload)
		return (-1);

	tx_cmd->flags_opcode = opcode;

	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;

	return (0);
}

/*
 * Name: qla_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither criterion is met, it is transmitted as a regular
 *	ethernet frame.
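 *
 * Up to Q8_TX_CMD_MAX_SEGMENTS (four) DMA segments are packed into each tx
 * command descriptor; for TSO frames the copied header is spread across
 * additional descriptors immediately following the first one.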
 */
int
qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t *tx_idx, struct mbuf *mp)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is at least one empty slot in the tx_ring;
	 * tx_ring is considered full when there is only one entry available.
	 */
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if ((mp->m_pkthdr.len > ha->max_frame_size) ||
		(nsegs > Q8_TX_MAX_SEGMENTS)) {
		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = ha->hw.frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			hdr_len = tso_cmd.total_hdr_len;

			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		}
	}

	if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha);
		if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8((dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	*tx_idx = hw->txr_next;

	tx_cmd = &hw->tx_ring_base[hw->txr_next];

	if (hdr_len == 0) {
		if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
			(mp->m_pkthdr.len > ha->max_frame_size)) {
			device_printf(dev,
				"%s: (nsegs[%d, %d, 0x%b] > Q8_TX_MAX_SEGMENTS)\n",
				__func__, nsegs, mp->m_pkthdr.len,
				(int)mp->m_pkthdr.csum_flags, CSUM_BITS);
			qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
				mtod(mp, char *), mp->m_len);
			return (EINVAL);
		}
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
		if (qla_tx_chksum(ha, mp, tx_cmd) != 0)
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
	} else {
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
	else if (mp->m_flags & M_VLANTAG) {
		tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
	}

	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->port_cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_ring_base[hw->txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (hdr_len) {
		/* TSO : Copy the header in the following tx cmd descriptors */

		tx_cmd = &hw->tx_ring_base[hw->txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			hdr_len -= (ETHER_ADDR_LEN * 2);

			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = mp->m_pkthdr.ether_vtag;
			dst += 2;

			bytes -= ((ETHER_ADDR_LEN * 2) + 4);

			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		while (hdr_len) {
			tx_cmd = &hw->tx_ring_base[hw->txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;
			hw->txr_next =
				(hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->txr_free = hw->txr_free - tx_cmd_count;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);
	QL_DPRINT8((dev, "%s: return\n", __func__));
	return (0);
}

/*
 * Name: qla_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
qla_del_hw_if(qla_host_t *ha)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_DISABLE_INTERRUPTS(ha, i);

	qla_del_rcv_cntxt(ha);
	qla_del_xmt_cntxt(ha);

	ha->hw.flags.lro = 0;
}

/*
 * Name: qla_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
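 *
 * The sequence below is: create the Receive Context, seed the RDS producer
 * indices, create the Transmit Context, program the interface and broadcast
 * MAC addresses, configure RSS and interrupt coalescing, and finally enable
 * interrupts on every status ring.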
 */
int
qla_init_hw_if(qla_host_t *ha)
{
	int i;
	uint8_t bcast_mac[6];

	qla_get_hw_caps(ha);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	ha->hw.rx_next = NUM_RX_DESCRIPTORS - 2;
	ha->hw.rxj_next = NUM_RX_JUMBO_DESCRIPTORS - 2;
	ha->hw.rx_in = ha->hw.rxj_in = 0;

	/* Update the RDS Producer Indices */
	QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
	QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);

	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}

	qla_config_mac_addr(ha, ha->hw.mac_addr,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
	qla_config_mac_addr(ha, bcast_mac,
		(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 1);

	qla_config_rss(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);

	qla_config_intr_coalesce(ha, (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id, 0);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}

/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
	device_t dev;
	qla_cdrp_t cdrp;
	q80_rcv_cntxt_rsp_t *rsp;
	q80_stat_desc_t *sdesc;
	bus_addr_t phys_addr;
	int i, j;
	qla_hw_t *hw = &ha->hw;

	dev = ha->pci_dev;

	/*
	 * Create Receive Context
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
		}
	}

	phys_addr = ha->hw.rx_cntxt_req_paddr;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_CREATE_RX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
	cdrp.cmd_arg2 = (uint32_t)(phys_addr);
	cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_rcv_cntxt_req_t));

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_CREATE_RX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		rsp = ha->hw.rx_cntxt_rsp;

		QL_DPRINT2((dev, "%s: rcv cntxt successful"
			" rds_ring_offset = 0x%08x"
			" sds_ring_offset = 0x%08x"
			" cntxt_state = 0x%08x"
			" funcs_per_port = 0x%08x"
			" num_rds_rings = 0x%04x"
			" num_sds_rings = 0x%04x"
			" cntxt_id = 0x%04x"
			" phys_port = 0x%02x"
			" virt_port = 0x%02x\n",
			__func__,
			rsp->rx_rsp.rds_ring_offset,
			rsp->rx_rsp.sds_ring_offset,
			rsp->rx_rsp.cntxt_state,
			rsp->rx_rsp.funcs_per_port,
			rsp->rx_rsp.num_rds_rings,
			rsp->rx_rsp.num_sds_rings,
			rsp->rx_rsp.cntxt_id,
			rsp->rx_rsp.phys_port,
			rsp->rx_rsp.virt_port));

		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			QL_DPRINT2((dev,
				"%s: rcv cntxt rds[%i].producer_reg = 0x%08x\n",
				__func__, i, rsp->rds_rsp[i].producer_reg));
		}
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			QL_DPRINT2((dev,
				"%s: rcv cntxt sds[%i].consumer_reg = 0x%08x"
				" sds[%i].intr_mask_reg = 0x%08x\n",
				__func__, i, rsp->sds_rsp[i].consumer_reg,
				i, rsp->sds_rsp[i].intr_mask_reg));
		}
	}
	ha->hw.flags.init_rx_cnxt = 1;
	return (0);
}

/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev = ha->pci_dev;

	if (!ha->hw.flags.init_rx_cnxt)
		return;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_DESTROY_RX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t) (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_DESTROY_RX_CNTXT failed\n",
			__func__);
	}
	ha->hw.flags.init_rx_cnxt = 0;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
	bus_addr_t phys_addr;
	device_t dev;
	q80_tx_cntxt_rsp_t *tx_rsp;
	qla_cdrp_t cdrp;
	qla_hw_t *hw = &ha->hw;

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	phys_addr = ha->hw.tx_cntxt_req_paddr;
	tx_rsp = ha->hw.tx_cntxt_rsp;

	hw->txr_comp = hw->txr_next = 0;
	*(hw->tx_cons) = 0;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_CREATE_TX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t)(phys_addr >> 32);
	cdrp.cmd_arg2 = (uint32_t)(phys_addr);
	cdrp.cmd_arg3 = (uint32_t)(sizeof (q80_tx_cntxt_req_t));

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_CREATE_TX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.tx_prod_reg = tx_rsp->producer_reg;

		QL_DPRINT2((dev, "%s: tx cntxt successful"
			" cntxt_state = 0x%08x "
			" cntxt_id = 0x%04x "
			" phys_port_id = 0x%02x "
			" virt_port_id = 0x%02x "
			" producer_reg = 0x%08x "
			" intr_mask_reg = 0x%08x\n",
			__func__, tx_rsp->cntxt_state, tx_rsp->cntxt_id,
			tx_rsp->phys_port_id, tx_rsp->virt_port_id,
			tx_rsp->producer_reg, tx_rsp->intr_mask_reg));
	}
	ha->hw.txr_free = NUM_TX_DESCRIPTORS;

	ha->hw.flags.init_tx_cnxt = 1;
	return (0);
}

/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
 */
static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev = ha->pci_dev;

	if (!ha->hw.flags.init_tx_cnxt)
		return;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_DESTROY_TX_CNTXT;
	cdrp.cmd_arg1 = (uint32_t) (ha->hw.tx_cntxt_rsp)->cntxt_id;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_DESTROY_TX_CNTXT failed\n",
			__func__);
	}
	ha->hw.flags.init_tx_cnxt = 0;
}

/*
 * Name: qla_get_max_rds
 * Function: Returns the maximum number of Receive Descriptor Rings per context.
 */
static int
qla_get_max_rds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rds_per_context 0x%08x\n",
			__func__, ha->hw.max_rds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_sds
 * Function: Returns the maximum number of Status Descriptor Rings per context.
 */
static int
qla_get_max_sds(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_SDS_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_SDS_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_sds_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_sds_per_context 0x%08x\n",
			__func__, ha->hw.max_sds_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rules
 * Function: Returns the maximum number of Rules per context.
 */
static int
qla_get_max_rules(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RULES_PER_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RULES_PER_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rules_per_cntxt = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rules_per_cntxt 0x%08x\n",
			__func__, ha->hw.max_rules_per_cntxt));
	}
	return 0;
}

/*
 * Name: qla_get_max_rcv_cntxts
 * Function: Returns the maximum number of Receive Contexts supported.
 */
static int
qla_get_max_rcv_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_RX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_RX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_rcv_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_rcv_cntxts 0x%08x\n",
			__func__, ha->hw.max_rcv_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_tx_cntxts
 * Function: Returns the maximum number of Transmit Contexts supported.
 */
static int
qla_get_max_tx_cntxts(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_TX_CNTXT;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_TX_CNTXT failed\n",
			__func__);
		return (-1);
	} else {
		ha->hw.max_xmt_cntxts = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_xmt_cntxts 0x%08x\n",
			__func__, ha->hw.max_xmt_cntxts));
	}
	return 0;
}

/*
 * Name: qla_get_max_mtu
 * Function: Returns the MTU supported for a context.
 */
static int
qla_get_max_mtu(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_MTU;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_mtu 0x%08x\n", __func__,
			ha->hw.max_mtu));
	}
	return 0;
}

/*
 * Name: qla_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
qla_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_SET_MTU;
	cdrp.cmd_arg1 = (uint32_t)cntxt_id;
	cdrp.cmd_arg2 = mtu;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_SET_MTU failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_mtu = cdrp.rsp_arg1;
	}
	return 0;
}

/*
 * Name: qla_get_max_lro
 * Function: Returns the maximum number of TCP Connections which can be
 *	supported with LRO.
 */
static int
qla_get_max_lro(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_RD_MAX_LRO;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_RD_MAX_LRO failed\n", __func__);
		return (-1);
	} else {
		ha->hw.max_lro = cdrp.rsp_arg1;
		QL_DPRINT2((dev, "%s: max_lro 0x%08x\n", __func__,
			ha->hw.max_lro));
	}
	return 0;
}

/*
 * Name: qla_get_flow_control
 * Function: Returns the Receive/Transmit Flow Control (PAUSE) settings for
 *	the PCI function.
 */
static int
qla_get_flow_control(qla_host_t *ha)
{
	qla_cdrp_t cdrp;
	device_t dev;

	dev = ha->pci_dev;

	bzero(&cdrp, sizeof(qla_cdrp_t));

	cdrp.cmd = Q8_CMD_GET_FLOW_CNTRL;

	if (qla_issue_cmd(ha, &cdrp)) {
		device_printf(dev, "%s: Q8_CMD_GET_FLOW_CNTRL failed\n",
			__func__);
		return (-1);
	} else {
		QL_DPRINT2((dev, "%s: flow control 0x%08x\n", __func__,
			cdrp.rsp_arg1));
	}
	return 0;
}

/*
 * Name: qla_get_hw_caps
 * Function: Retrieves hardware capabilities
 */
void
qla_get_hw_caps(qla_host_t *ha)
{
	//qla_read_mac_addr(ha);
	qla_get_max_rds(ha);
	qla_get_max_sds(ha);
	qla_get_max_rules(ha);
	qla_get_max_rcv_cntxts(ha);
	qla_get_max_tx_cntxts(ha);
	qla_get_max_mtu(ha);
	qla_get_max_lro(ha);
	qla_get_flow_control(ha);
	return;
}

/*
 * Name: qla_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S. into the
 *	hardware (for the given interface)
 */
void
qla_hw_set_multi(qla_host_t *ha, uint8_t *mta, uint32_t mcnt,
	uint32_t add_multi)
{
	q80_rcv_cntxt_rsp_t *rsp;
	int i;

	rsp = ha->hw.rx_cntxt_rsp;
	for (i = 0; i < mcnt; i++) {
		qla_config_mac_addr(ha, mta, rsp->rx_rsp.cntxt_id, add_multi);
		mta += Q8_MAC_ADDR_LEN;
	}
	return;
}

/*
 * Name: qla_hw_tx_done_locked
 * Function: Handle Transmit Completions
 */
static void
qla_hw_tx_done_locked(qla_host_t *ha)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw->tx_cons));

	while (comp_idx != hw->txr_comp) {
		txb = &ha->tx_buf[hw->txr_comp];

		hw->txr_comp++;
		if (hw->txr_comp == NUM_TX_DESCRIPTORS)
			hw->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			bus_dmamap_destroy(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->map = (bus_dmamap_t)0;
			txb->m_head = NULL;
		}
	}

	hw->txr_free += comp_count;

	QL_DPRINT8((ha->pci_dev, "%s: return [c,f, p, pn][%d, %d, %d, %d]\n",
		__func__, hw->txr_comp, hw->txr_free, hw->txr_next,
		READ_REG32(ha, (ha->hw.tx_prod_reg + 0x1b2000))));

	return;
}

/*
 * Name: qla_hw_tx_done
 * Function: Handle Transmit Completions
 */
void
qla_hw_tx_done(qla_host_t *ha)
{
	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
		return;
	}
	qla_hw_tx_done_locked(ha);

	if (ha->hw.txr_free > free_pkt_thres)
		if_setdrvflagbits(ha->ifp, 0, IFF_DRV_OACTIVE);

	mtx_unlock(&ha->tx_lock);
	return;
}

void
qla_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING)) {
		ha->hw.flags.link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state = ha->hw.flags.link_up;

	if (ha->pci_func == 0)
		ha->hw.flags.link_up = (((link_state & 0xF) == 1) ?
			1 : 0);
	else
		ha->hw.flags.link_up = ((((link_state >> 4) & 0xF) == 1) ?
			1 : 0);

	if (prev_link_state != ha->hw.flags.link_up) {
		if (ha->hw.flags.link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
}

int
qla_config_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;
		if (tcp_lro_init(lro)) {
			device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
				__func__);
			return (-1);
		}
		lro->ifp = ha->ifp;
	}
	ha->flags.lro_init = 1;

	QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
#endif
	return (0);
}

void
qla_free_lro(qla_host_t *ha)
{
#if defined(INET) || defined(INET6)
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	if (!ha->flags.lro_init)
		return;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;
		tcp_lro_free(lro);
	}
	ha->flags.lro_init = 0;
#endif
}

void
qla_hw_stop_rcv(qla_host_t *ha)
{
	int i, done, count = 100;

	while (count--) {
		done = 1;
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			if (ha->hw.sds[i].rcv_active)
				done = 0;
		}
		if (done)
			break;
		else
			qla_mdelay(__func__, 10);
	}
}