/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
	uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);


static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
	int err = 0, ret;
	qla_host_t
*ha; 87 uint32_t i; 88 89 err = sysctl_handle_int(oidp, &ret, 0, req); 90 91 if (err || !req->newptr) 92 return (err); 93 94 if (ret == 1) { 95 96 ha = (qla_host_t *)arg1; 97 98 for (i = 0; i < ha->hw.num_sds_rings; i++) { 99 100 device_printf(ha->pci_dev, 101 "%s: sds_ring[%d] = %p\n", __func__,i, 102 (void *)ha->hw.sds[i].intr_count); 103 104 device_printf(ha->pci_dev, 105 "%s: sds_ring[%d].spurious_intr_count = %p\n", 106 __func__, 107 i, (void *)ha->hw.sds[i].spurious_intr_count); 108 109 device_printf(ha->pci_dev, 110 "%s: sds_ring[%d].rx_free = %d\n", __func__,i, 111 ha->hw.sds[i].rx_free); 112 } 113 114 for (i = 0; i < ha->hw.num_tx_rings; i++) 115 device_printf(ha->pci_dev, 116 "%s: tx[%d] = %p\n", __func__,i, 117 (void *)ha->tx_ring[i].count); 118 119 for (i = 0; i < ha->hw.num_rds_rings; i++) 120 device_printf(ha->pci_dev, 121 "%s: rds_ring[%d] = %p\n", __func__,i, 122 (void *)ha->hw.rds[i].count); 123 124 device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__, 125 (void *)ha->lro_pkt_count); 126 127 device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__, 128 (void *)ha->lro_bytes); 129 130 #ifdef QL_ENABLE_ISCSI_TLV 131 device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__, 132 (void *)ha->hw.iscsi_pkt_count); 133 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 134 135 } 136 return (err); 137 } 138 139 static int 140 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS) 141 { 142 int err, ret = 0; 143 qla_host_t *ha; 144 145 err = sysctl_handle_int(oidp, &ret, 0, req); 146 147 if (err || !req->newptr) 148 return (err); 149 150 if (ret == 1) { 151 ha = (qla_host_t *)arg1; 152 qla_get_quick_stats(ha); 153 } 154 return (err); 155 } 156 157 #ifdef QL_DBG 158 159 static void 160 qla_stop_pegs(qla_host_t *ha) 161 { 162 uint32_t val = 1; 163 164 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); 165 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); 166 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); 167 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); 168 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); 169 device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); 170 } 171 172 static int 173 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) 174 { 175 int err, ret = 0; 176 qla_host_t *ha; 177 178 err = sysctl_handle_int(oidp, &ret, 0, req); 179 180 181 if (err || !req->newptr) 182 return (err); 183 184 if (ret == 1) { 185 ha = (qla_host_t *)arg1; 186 QLA_LOCK(ha); 187 qla_stop_pegs(ha); 188 QLA_UNLOCK(ha); 189 } 190 191 return err; 192 } 193 #endif /* #ifdef QL_DBG */ 194 195 static int 196 qla_validate_set_port_cfg_bit(uint32_t bits) 197 { 198 if ((bits & 0xF) > 1) 199 return (-1); 200 201 if (((bits >> 4) & 0xF) > 2) 202 return (-1); 203 204 if (((bits >> 8) & 0xF) > 2) 205 return (-1); 206 207 return (0); 208 } 209 210 static int 211 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) 212 { 213 int err, ret = 0; 214 qla_host_t *ha; 215 uint32_t cfg_bits; 216 217 err = sysctl_handle_int(oidp, &ret, 0, req); 218 219 if (err || !req->newptr) 220 return (err); 221 222 if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { 223 224 ha = (qla_host_t *)arg1; 225 226 err = qla_get_port_config(ha, &cfg_bits); 227 228 if (err) 229 goto qla_sysctl_set_port_cfg_exit; 230 231 if (ret & 0x1) { 232 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; 233 } else { 234 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; 235 } 236 237 ret = ret >> 4; 238 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; 239 240 if ((ret & 0xF) == 0) { 241 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; 242 } else if ((ret & 0xF) == 1){ 243 cfg_bits |= 
Q8_PORT_CFG_BITS_PAUSE_STD; 244 } else { 245 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; 246 } 247 248 ret = ret >> 4; 249 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; 250 251 if (ret == 0) { 252 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; 253 } else if (ret == 1){ 254 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; 255 } else { 256 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; 257 } 258 259 err = qla_set_port_config(ha, cfg_bits); 260 } else { 261 ha = (qla_host_t *)arg1; 262 263 err = qla_get_port_config(ha, &cfg_bits); 264 } 265 266 qla_sysctl_set_port_cfg_exit: 267 return err; 268 } 269 270 static int 271 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) 272 { 273 int err, ret = 0; 274 qla_host_t *ha; 275 276 err = sysctl_handle_int(oidp, &ret, 0, req); 277 278 if (err || !req->newptr) 279 return (err); 280 281 ha = (qla_host_t *)arg1; 282 283 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || 284 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { 285 err = qla_set_cam_search_mode(ha, (uint32_t)ret); 286 } else { 287 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); 288 } 289 290 return (err); 291 } 292 293 static int 294 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) 295 { 296 int err, ret = 0; 297 qla_host_t *ha; 298 299 err = sysctl_handle_int(oidp, &ret, 0, req); 300 301 if (err || !req->newptr) 302 return (err); 303 304 ha = (qla_host_t *)arg1; 305 err = qla_get_cam_search_mode(ha); 306 307 return (err); 308 } 309 310 311 /* 312 * Name: ql_hw_add_sysctls 313 * Function: Add P3Plus specific sysctls 314 */ 315 void 316 ql_hw_add_sysctls(qla_host_t *ha) 317 { 318 device_t dev; 319 320 dev = ha->pci_dev; 321 322 ha->hw.num_sds_rings = MAX_SDS_RINGS; 323 ha->hw.num_rds_rings = MAX_RDS_RINGS; 324 ha->hw.num_tx_rings = NUM_TX_RINGS; 325 326 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 328 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, 329 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); 330 331 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 333 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, 334 ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); 335 336 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 337 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 338 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, 339 ha->hw.num_tx_rings, "Number of Transmit Rings"); 340 341 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 342 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 343 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, 344 ha->txr_idx, "Tx Ring Used"); 345 346 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 347 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 348 OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW, 349 (void *)ha, 0, 350 qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 351 352 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 353 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 354 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW, 355 (void *)ha, 0, 356 qla_sysctl_get_quick_stats, "I", "Quick Statistics"); 357 358 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 359 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 360 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, 361 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); 362 363 ha->hw.sds_cidx_thres = 32; 364 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 365 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 366 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, 
&ha->hw.sds_cidx_thres, 367 ha->hw.sds_cidx_thres, 368 "Number of SDS entries to process before updating" 369 " SDS Ring Consumer Index"); 370 371 ha->hw.rds_pidx_thres = 32; 372 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 373 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 374 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, 375 ha->hw.rds_pidx_thres, 376 "Number of Rcv Rings Entries to post before updating" 377 " RDS Ring Producer Index"); 378 379 ha->hw.rcv_intr_coalesce = (3 << 16) | 256; 380 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 381 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 382 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, 383 &ha->hw.rcv_intr_coalesce, 384 ha->hw.rcv_intr_coalesce, 385 "Rcv Intr Coalescing Parameters\n" 386 "\tbits 15:0 max packets\n" 387 "\tbits 31:16 max micro-seconds to wait\n" 388 "\tplease run\n" 389 "\tifconfig <if> down && ifconfig <if> up\n" 390 "\tto take effect \n"); 391 392 ha->hw.xmt_intr_coalesce = (64 << 16) | 64; 393 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 394 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 395 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, 396 &ha->hw.xmt_intr_coalesce, 397 ha->hw.xmt_intr_coalesce, 398 "Xmt Intr Coalescing Parameters\n" 399 "\tbits 15:0 max packets\n" 400 "\tbits 31:16 max micro-seconds to wait\n" 401 "\tplease run\n" 402 "\tifconfig <if> down && ifconfig <if> up\n" 403 "\tto take effect \n"); 404 405 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 407 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW, 408 (void *)ha, 0, 409 qla_sysctl_port_cfg, "I", 410 "Set Port Configuration if values below " 411 "otherwise Get Port Configuration\n" 412 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" 413 "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" 414 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" 415 " 1 = xmt only; 2 = rcv only;\n" 416 ); 417 418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 420 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 421 (void *)ha, 0, 422 qla_sysctl_set_cam_search_mode, "I", 423 "Set CAM Search Mode" 424 "\t 1 = search mode internal\n" 425 "\t 2 = search mode auto\n"); 426 427 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 428 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 429 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 430 (void *)ha, 0, 431 qla_sysctl_get_cam_search_mode, "I", 432 "Get CAM Search Mode" 433 "\t 1 = search mode internal\n" 434 "\t 2 = search mode auto\n"); 435 436 ha->hw.enable_9kb = 1; 437 438 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 439 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 440 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, 441 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); 442 443 ha->hw.enable_hw_lro = 1; 444 445 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 446 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 447 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro, 448 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n" 449 "\t 1 : Hardware LRO if LRO is enabled\n" 450 "\t 0 : Software LRO if LRO is enabled\n" 451 "\t Any change requires ifconfig down/up to take effect\n" 452 "\t Note that LRO may be turned off/on via ifconfig\n"); 453 454 ha->hw.mdump_active = 0; 455 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 456 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 457 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, 458 ha->hw.mdump_active, 459 "Minidump retrieval is Active"); 460 461 
	ha->hw.mdump_done = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "mdump_done", CTLFLAG_RW,
		&ha->hw.mdump_done, ha->hw.mdump_done,
		"Minidump has been done and available for retrieval");

	ha->hw.mdump_capture_mask = 0xF;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
		"Minidump capture mask");
#ifdef QL_DBG

	ha->err_inject = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "err_inject",
		CTLFLAG_RW, &ha->err_inject, ha->err_inject,
		"Error to be injected\n"
		"\t\t\t 0: No Errors\n"
		"\t\t\t 1: rcv: rxb struct invalid\n"
		"\t\t\t 2: rcv: mp == NULL\n"
		"\t\t\t 3: lro: rxb struct invalid\n"
		"\t\t\t 4: lro: mp == NULL\n"
		"\t\t\t 5: rcv: num handles invalid\n"
		"\t\t\t 6: reg: indirect reg rd_wr failure\n"
		"\t\t\t 7: ocm: offchip memory rd_wr failure\n"
		"\t\t\t 8: mbx: mailbox command failure\n"
		"\t\t\t 9: heartbeat failure\n"
		"\t\t\t A: temperature failure\n"
		"\t\t\t 11: m_getcl or m_getjcl failure\n" );

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

	ha->hw.user_pri_nic = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
		ha->hw.user_pri_nic,
		"VLAN Tag User Priority for Normal Ethernet Packets");

	ha->hw.user_pri_iscsi = 4;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
		ha->hw.user_pri_iscsi,
		"VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
break; 579 580 case 0x05: 581 device_printf(ha->pci_dev, "Module Type 10GE Active" 582 " Limiting Copper(Compliant)[%d m]\n", 583 ha->hw.cable_length); 584 break; 585 586 case 0x06: 587 device_printf(ha->pci_dev, 588 "Module Type 10GE Passive Copper" 589 " (Legacy, Best Effort)[%d m]\n", 590 ha->hw.cable_length); 591 break; 592 593 case 0x07: 594 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); 595 break; 596 597 case 0x08: 598 device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); 599 break; 600 601 case 0x09: 602 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); 603 break; 604 605 case 0x0A: 606 device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); 607 break; 608 609 case 0x0B: 610 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" 611 "(Legacy, Best Effort)\n"); 612 break; 613 614 default: 615 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", 616 ha->hw.module_type); 617 break; 618 } 619 620 if (ha->hw.link_faults == 1) 621 device_printf(ha->pci_dev, "SFP Power Fault\n"); 622 } 623 624 /* 625 * Name: ql_free_dma 626 * Function: Frees the DMA'able memory allocated in ql_alloc_dma() 627 */ 628 void 629 ql_free_dma(qla_host_t *ha) 630 { 631 uint32_t i; 632 633 if (ha->hw.dma_buf.flags.sds_ring) { 634 for (i = 0; i < ha->hw.num_sds_rings; i++) { 635 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); 636 } 637 ha->hw.dma_buf.flags.sds_ring = 0; 638 } 639 640 if (ha->hw.dma_buf.flags.rds_ring) { 641 for (i = 0; i < ha->hw.num_rds_rings; i++) { 642 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); 643 } 644 ha->hw.dma_buf.flags.rds_ring = 0; 645 } 646 647 if (ha->hw.dma_buf.flags.tx_ring) { 648 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); 649 ha->hw.dma_buf.flags.tx_ring = 0; 650 } 651 ql_minidump_free(ha); 652 } 653 654 /* 655 * Name: ql_alloc_dma 656 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. 
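 *
 * The transmit rings share one DMA buffer: the per-ring command rings are
 * laid out back to back, followed by one 32-bit consumer-index word per
 * ring carved out of the extra PAGE_SIZE appended to the allocation. Each
 * receive (RDS) and status (SDS) ring gets its own DMA buffer. Returns 0
 * on success; on any failure the partial allocations are released via
 * ql_free_dma() and -1 is returned.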
657 */ 658 int 659 ql_alloc_dma(qla_host_t *ha) 660 { 661 device_t dev; 662 uint32_t i, j, size, tx_ring_size; 663 qla_hw_t *hw; 664 qla_hw_tx_cntxt_t *tx_cntxt; 665 uint8_t *vaddr; 666 bus_addr_t paddr; 667 668 dev = ha->pci_dev; 669 670 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 671 672 hw = &ha->hw; 673 /* 674 * Allocate Transmit Ring 675 */ 676 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); 677 size = (tx_ring_size * ha->hw.num_tx_rings); 678 679 hw->dma_buf.tx_ring.alignment = 8; 680 hw->dma_buf.tx_ring.size = size + PAGE_SIZE; 681 682 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { 683 device_printf(dev, "%s: tx ring alloc failed\n", __func__); 684 goto ql_alloc_dma_exit; 685 } 686 687 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; 688 paddr = hw->dma_buf.tx_ring.dma_addr; 689 690 for (i = 0; i < ha->hw.num_tx_rings; i++) { 691 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 692 693 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; 694 tx_cntxt->tx_ring_paddr = paddr; 695 696 vaddr += tx_ring_size; 697 paddr += tx_ring_size; 698 } 699 700 for (i = 0; i < ha->hw.num_tx_rings; i++) { 701 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 702 703 tx_cntxt->tx_cons = (uint32_t *)vaddr; 704 tx_cntxt->tx_cons_paddr = paddr; 705 706 vaddr += sizeof (uint32_t); 707 paddr += sizeof (uint32_t); 708 } 709 710 ha->hw.dma_buf.flags.tx_ring = 1; 711 712 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", 713 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), 714 hw->dma_buf.tx_ring.dma_b)); 715 /* 716 * Allocate Receive Descriptor Rings 717 */ 718 719 for (i = 0; i < hw->num_rds_rings; i++) { 720 721 hw->dma_buf.rds_ring[i].alignment = 8; 722 hw->dma_buf.rds_ring[i].size = 723 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; 724 725 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { 726 device_printf(dev, "%s: rds ring[%d] alloc failed\n", 727 __func__, i); 728 729 for (j = 0; j < i; j++) 730 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); 731 732 goto ql_alloc_dma_exit; 733 } 734 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", 735 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), 736 hw->dma_buf.rds_ring[i].dma_b)); 737 } 738 739 hw->dma_buf.flags.rds_ring = 1; 740 741 /* 742 * Allocate Status Descriptor Rings 743 */ 744 745 for (i = 0; i < hw->num_sds_rings; i++) { 746 hw->dma_buf.sds_ring[i].alignment = 8; 747 hw->dma_buf.sds_ring[i].size = 748 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; 749 750 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { 751 device_printf(dev, "%s: sds ring alloc failed\n", 752 __func__); 753 754 for (j = 0; j < i; j++) 755 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); 756 757 goto ql_alloc_dma_exit; 758 } 759 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", 760 __func__, i, 761 (void *)(hw->dma_buf.sds_ring[i].dma_addr), 762 hw->dma_buf.sds_ring[i].dma_b)); 763 } 764 for (i = 0; i < hw->num_sds_rings; i++) { 765 hw->sds[i].sds_ring_base = 766 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; 767 } 768 769 hw->dma_buf.flags.sds_ring = 1; 770 771 return 0; 772 773 ql_alloc_dma_exit: 774 ql_free_dma(ha); 775 return -1; 776 } 777 778 #define Q8_MBX_MSEC_DELAY 5000 779 780 static int 781 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 782 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) 783 { 784 uint32_t i; 785 uint32_t data; 786 int ret = 0; 787 788 if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) { 789 ret = -3; 790 ha->qla_initiate_recovery = 1; 791 goto exit_qla_mbx_cmd; 792 } 793 794 if 
(no_pause) 795 i = 1000; 796 else 797 i = Q8_MBX_MSEC_DELAY; 798 799 while (i) { 800 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); 801 if (data == 0) 802 break; 803 if (no_pause) { 804 DELAY(1000); 805 } else { 806 qla_mdelay(__func__, 1); 807 } 808 i--; 809 } 810 811 if (i == 0) { 812 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", 813 __func__, data); 814 ret = -1; 815 ha->qla_initiate_recovery = 1; 816 goto exit_qla_mbx_cmd; 817 } 818 819 for (i = 0; i < n_hmbox; i++) { 820 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); 821 h_mbox++; 822 } 823 824 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); 825 826 827 i = Q8_MBX_MSEC_DELAY; 828 while (i) { 829 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); 830 831 if ((data & 0x3) == 1) { 832 data = READ_REG32(ha, Q8_FW_MBOX0); 833 if ((data & 0xF000) != 0x8000) 834 break; 835 } 836 if (no_pause) { 837 DELAY(1000); 838 } else { 839 qla_mdelay(__func__, 1); 840 } 841 i--; 842 } 843 if (i == 0) { 844 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", 845 __func__, data); 846 ret = -2; 847 ha->qla_initiate_recovery = 1; 848 goto exit_qla_mbx_cmd; 849 } 850 851 for (i = 0; i < n_fwmbox; i++) { 852 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); 853 } 854 855 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); 856 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 857 858 exit_qla_mbx_cmd: 859 return (ret); 860 } 861 862 int 863 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, 864 uint32_t *num_rcvq) 865 { 866 uint32_t *mbox, err; 867 device_t dev = ha->pci_dev; 868 869 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX)); 870 871 mbox = ha->hw.mbox; 872 873 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 874 875 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { 876 device_printf(dev, "%s: failed0\n", __func__); 877 return (-1); 878 } 879 err = mbox[0] >> 25; 880 881 if (supports_9kb != NULL) { 882 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ 883 *supports_9kb = 1; 884 else 885 *supports_9kb = 0; 886 } 887 888 if (num_rcvq != NULL) 889 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF); 890 891 if ((err != 1) && (err != 0)) { 892 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 893 return (-1); 894 } 895 return 0; 896 } 897 898 static int 899 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, 900 uint32_t create) 901 { 902 uint32_t i, err; 903 device_t dev = ha->pci_dev; 904 q80_config_intr_t *c_intr; 905 q80_config_intr_rsp_t *c_intr_rsp; 906 907 c_intr = (q80_config_intr_t *)ha->hw.mbox; 908 bzero(c_intr, (sizeof (q80_config_intr_t))); 909 910 c_intr->opcode = Q8_MBX_CONFIG_INTR; 911 912 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); 913 c_intr->count_version |= Q8_MBX_CMD_VERSION; 914 915 c_intr->nentries = num_intrs; 916 917 for (i = 0; i < num_intrs; i++) { 918 if (create) { 919 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; 920 c_intr->intr[i].msix_index = start_idx + 1 + i; 921 } else { 922 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; 923 c_intr->intr[i].msix_index = 924 ha->hw.intr_id[(start_idx + i)]; 925 } 926 927 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; 928 } 929 930 if (qla_mbx_cmd(ha, (uint32_t *)c_intr, 931 (sizeof (q80_config_intr_t) >> 2), 932 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { 933 device_printf(dev, "%s: failed0\n", __func__); 934 return (-1); 935 } 936 937 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; 938 939 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); 940 941 if (err) { 942 device_printf(dev, "%s: 
failed1 [0x%08x, %d]\n", __func__, err, 943 c_intr_rsp->nentries); 944 945 for (i = 0; i < c_intr_rsp->nentries; i++) { 946 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", 947 __func__, i, 948 c_intr_rsp->intr[i].status, 949 c_intr_rsp->intr[i].intr_id, 950 c_intr_rsp->intr[i].intr_src); 951 } 952 953 return (-1); 954 } 955 956 for (i = 0; ((i < num_intrs) && create); i++) { 957 if (!c_intr_rsp->intr[i].status) { 958 ha->hw.intr_id[(start_idx + i)] = 959 c_intr_rsp->intr[i].intr_id; 960 ha->hw.intr_src[(start_idx + i)] = 961 c_intr_rsp->intr[i].intr_src; 962 } 963 } 964 965 return (0); 966 } 967 968 /* 969 * Name: qla_config_rss 970 * Function: Configure RSS for the context/interface. 971 */ 972 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 973 0x8030f20c77cb2da3ULL, 974 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 975 0x255b0ec26d5a56daULL }; 976 977 static int 978 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) 979 { 980 q80_config_rss_t *c_rss; 981 q80_config_rss_rsp_t *c_rss_rsp; 982 uint32_t err, i; 983 device_t dev = ha->pci_dev; 984 985 c_rss = (q80_config_rss_t *)ha->hw.mbox; 986 bzero(c_rss, (sizeof (q80_config_rss_t))); 987 988 c_rss->opcode = Q8_MBX_CONFIG_RSS; 989 990 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); 991 c_rss->count_version |= Q8_MBX_CMD_VERSION; 992 993 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | 994 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); 995 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | 996 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); 997 998 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; 999 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; 1000 1001 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; 1002 1003 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; 1004 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; 1005 1006 c_rss->cntxt_id = cntxt_id; 1007 1008 for (i = 0; i < 5; i++) { 1009 c_rss->rss_key[i] = rss_key[i]; 1010 } 1011 1012 if (qla_mbx_cmd(ha, (uint32_t *)c_rss, 1013 (sizeof (q80_config_rss_t) >> 2), 1014 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) { 1015 device_printf(dev, "%s: failed0\n", __func__); 1016 return (-1); 1017 } 1018 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; 1019 1020 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); 1021 1022 if (err) { 1023 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1024 return (-1); 1025 } 1026 return 0; 1027 } 1028 1029 static int 1030 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, 1031 uint16_t cntxt_id, uint8_t *ind_table) 1032 { 1033 q80_config_rss_ind_table_t *c_rss_ind; 1034 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; 1035 uint32_t err; 1036 device_t dev = ha->pci_dev; 1037 1038 if ((count > Q8_RSS_IND_TBL_SIZE) || 1039 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { 1040 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, 1041 start_idx, count); 1042 return (-1); 1043 } 1044 1045 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; 1046 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); 1047 1048 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; 1049 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); 1050 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; 1051 1052 c_rss_ind->start_idx = start_idx; 1053 c_rss_ind->end_idx = start_idx + count - 1; 1054 c_rss_ind->cntxt_id = cntxt_id; 1055 bcopy(ind_table, c_rss_ind->ind_table, count); 1056 1057 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, 1058 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, 1059 
(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { 1060 device_printf(dev, "%s: failed0\n", __func__); 1061 return (-1); 1062 } 1063 1064 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; 1065 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); 1066 1067 if (err) { 1068 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1069 return (-1); 1070 } 1071 return 0; 1072 } 1073 1074 /* 1075 * Name: qla_config_intr_coalesce 1076 * Function: Configure Interrupt Coalescing. 1077 */ 1078 static int 1079 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, 1080 int rcv) 1081 { 1082 q80_config_intr_coalesc_t *intrc; 1083 q80_config_intr_coalesc_rsp_t *intrc_rsp; 1084 uint32_t err, i; 1085 device_t dev = ha->pci_dev; 1086 1087 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; 1088 bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); 1089 1090 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; 1091 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); 1092 intrc->count_version |= Q8_MBX_CMD_VERSION; 1093 1094 if (rcv) { 1095 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; 1096 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; 1097 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; 1098 } else { 1099 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; 1100 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; 1101 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; 1102 } 1103 1104 intrc->cntxt_id = cntxt_id; 1105 1106 if (tenable) { 1107 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; 1108 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; 1109 1110 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1111 intrc->sds_ring_mask |= (1 << i); 1112 } 1113 intrc->ms_timeout = 1000; 1114 } 1115 1116 if (qla_mbx_cmd(ha, (uint32_t *)intrc, 1117 (sizeof (q80_config_intr_coalesc_t) >> 2), 1118 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { 1119 device_printf(dev, "%s: failed0\n", __func__); 1120 return (-1); 1121 } 1122 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; 1123 1124 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); 1125 1126 if (err) { 1127 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1128 return (-1); 1129 } 1130 1131 return 0; 1132 } 1133 1134 1135 /* 1136 * Name: qla_config_mac_addr 1137 * Function: binds a MAC address to the context/interface. 1138 * Can be unicast, multicast or broadcast. 1139 */ 1140 static int 1141 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, 1142 uint32_t num_mac) 1143 { 1144 q80_config_mac_addr_t *cmac; 1145 q80_config_mac_addr_rsp_t *cmac_rsp; 1146 uint32_t err; 1147 device_t dev = ha->pci_dev; 1148 int i; 1149 uint8_t *mac_cpy = mac_addr; 1150 1151 if (num_mac > Q8_MAX_MAC_ADDRS) { 1152 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n", 1153 __func__, (add_mac ? 
"Add" : "Del"), num_mac); 1154 return (-1); 1155 } 1156 1157 cmac = (q80_config_mac_addr_t *)ha->hw.mbox; 1158 bzero(cmac, (sizeof (q80_config_mac_addr_t))); 1159 1160 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; 1161 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; 1162 cmac->count_version |= Q8_MBX_CMD_VERSION; 1163 1164 if (add_mac) 1165 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; 1166 else 1167 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; 1168 1169 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; 1170 1171 cmac->nmac_entries = num_mac; 1172 cmac->cntxt_id = ha->hw.rcv_cntxt_id; 1173 1174 for (i = 0; i < num_mac; i++) { 1175 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 1176 mac_addr = mac_addr + ETHER_ADDR_LEN; 1177 } 1178 1179 if (qla_mbx_cmd(ha, (uint32_t *)cmac, 1180 (sizeof (q80_config_mac_addr_t) >> 2), 1181 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { 1182 device_printf(dev, "%s: %s failed0\n", __func__, 1183 (add_mac ? "Add" : "Del")); 1184 return (-1); 1185 } 1186 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; 1187 1188 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); 1189 1190 if (err) { 1191 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__, 1192 (add_mac ? "Add" : "Del"), err); 1193 for (i = 0; i < num_mac; i++) { 1194 device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n", 1195 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2], 1196 mac_cpy[3], mac_cpy[4], mac_cpy[5]); 1197 mac_cpy += ETHER_ADDR_LEN; 1198 } 1199 return (-1); 1200 } 1201 1202 return 0; 1203 } 1204 1205 1206 /* 1207 * Name: qla_set_mac_rcv_mode 1208 * Function: Enable/Disable AllMulticast and Promiscous Modes. 1209 */ 1210 static int 1211 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) 1212 { 1213 q80_config_mac_rcv_mode_t *rcv_mode; 1214 uint32_t err; 1215 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; 1216 device_t dev = ha->pci_dev; 1217 1218 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; 1219 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); 1220 1221 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; 1222 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; 1223 rcv_mode->count_version |= Q8_MBX_CMD_VERSION; 1224 1225 rcv_mode->mode = mode; 1226 1227 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; 1228 1229 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, 1230 (sizeof (q80_config_mac_rcv_mode_t) >> 2), 1231 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { 1232 device_printf(dev, "%s: failed0\n", __func__); 1233 return (-1); 1234 } 1235 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; 1236 1237 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); 1238 1239 if (err) { 1240 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1241 return (-1); 1242 } 1243 1244 return 0; 1245 } 1246 1247 int 1248 ql_set_promisc(qla_host_t *ha) 1249 { 1250 int ret; 1251 1252 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1253 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1254 return (ret); 1255 } 1256 1257 void 1258 qla_reset_promisc(qla_host_t *ha) 1259 { 1260 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1261 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1262 } 1263 1264 int 1265 ql_set_allmulti(qla_host_t *ha) 1266 { 1267 int ret; 1268 1269 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; 1270 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1271 return (ret); 1272 } 1273 1274 void 1275 qla_reset_allmulti(qla_host_t *ha) 1276 { 1277 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; 1278 
(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1279 } 1280 1281 /* 1282 * Name: ql_set_max_mtu 1283 * Function: 1284 * Sets the maximum transfer unit size for the specified rcv context. 1285 */ 1286 int 1287 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) 1288 { 1289 device_t dev; 1290 q80_set_max_mtu_t *max_mtu; 1291 q80_set_max_mtu_rsp_t *max_mtu_rsp; 1292 uint32_t err; 1293 1294 dev = ha->pci_dev; 1295 1296 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; 1297 bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); 1298 1299 max_mtu->opcode = Q8_MBX_SET_MAX_MTU; 1300 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); 1301 max_mtu->count_version |= Q8_MBX_CMD_VERSION; 1302 1303 max_mtu->cntxt_id = cntxt_id; 1304 max_mtu->mtu = mtu; 1305 1306 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, 1307 (sizeof (q80_set_max_mtu_t) >> 2), 1308 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { 1309 device_printf(dev, "%s: failed\n", __func__); 1310 return -1; 1311 } 1312 1313 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; 1314 1315 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); 1316 1317 if (err) { 1318 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1319 } 1320 1321 return 0; 1322 } 1323 1324 static int 1325 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) 1326 { 1327 device_t dev; 1328 q80_link_event_t *lnk; 1329 q80_link_event_rsp_t *lnk_rsp; 1330 uint32_t err; 1331 1332 dev = ha->pci_dev; 1333 1334 lnk = (q80_link_event_t *)ha->hw.mbox; 1335 bzero(lnk, (sizeof (q80_link_event_t))); 1336 1337 lnk->opcode = Q8_MBX_LINK_EVENT_REQ; 1338 lnk->count_version = (sizeof (q80_link_event_t) >> 2); 1339 lnk->count_version |= Q8_MBX_CMD_VERSION; 1340 1341 lnk->cntxt_id = cntxt_id; 1342 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; 1343 1344 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), 1345 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { 1346 device_printf(dev, "%s: failed\n", __func__); 1347 return -1; 1348 } 1349 1350 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; 1351 1352 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); 1353 1354 if (err) { 1355 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1356 } 1357 1358 return 0; 1359 } 1360 1361 static int 1362 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) 1363 { 1364 device_t dev; 1365 q80_config_fw_lro_t *fw_lro; 1366 q80_config_fw_lro_rsp_t *fw_lro_rsp; 1367 uint32_t err; 1368 1369 dev = ha->pci_dev; 1370 1371 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox; 1372 bzero(fw_lro, sizeof(q80_config_fw_lro_t)); 1373 1374 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; 1375 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2); 1376 fw_lro->count_version |= Q8_MBX_CMD_VERSION; 1377 1378 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; 1379 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; 1380 1381 fw_lro->cntxt_id = cntxt_id; 1382 1383 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, 1384 (sizeof (q80_config_fw_lro_t) >> 2), 1385 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { 1386 device_printf(dev, "%s: failed\n", __func__); 1387 return -1; 1388 } 1389 1390 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; 1391 1392 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); 1393 1394 if (err) { 1395 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1396 } 1397 1398 return 0; 1399 } 1400 1401 static int 1402 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode) 1403 { 1404 device_t dev; 1405 q80_hw_config_t 
*hw_config; 1406 q80_hw_config_rsp_t *hw_config_rsp; 1407 uint32_t err; 1408 1409 dev = ha->pci_dev; 1410 1411 hw_config = (q80_hw_config_t *)ha->hw.mbox; 1412 bzero(hw_config, sizeof (q80_hw_config_t)); 1413 1414 hw_config->opcode = Q8_MBX_HW_CONFIG; 1415 hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT; 1416 hw_config->count_version |= Q8_MBX_CMD_VERSION; 1417 1418 hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE; 1419 1420 hw_config->u.set_cam_search_mode.mode = search_mode; 1421 1422 if (qla_mbx_cmd(ha, (uint32_t *)hw_config, 1423 (sizeof (q80_hw_config_t) >> 2), 1424 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { 1425 device_printf(dev, "%s: failed\n", __func__); 1426 return -1; 1427 } 1428 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; 1429 1430 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); 1431 1432 if (err) { 1433 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1434 } 1435 1436 return 0; 1437 } 1438 1439 static int 1440 qla_get_cam_search_mode(qla_host_t *ha) 1441 { 1442 device_t dev; 1443 q80_hw_config_t *hw_config; 1444 q80_hw_config_rsp_t *hw_config_rsp; 1445 uint32_t err; 1446 1447 dev = ha->pci_dev; 1448 1449 hw_config = (q80_hw_config_t *)ha->hw.mbox; 1450 bzero(hw_config, sizeof (q80_hw_config_t)); 1451 1452 hw_config->opcode = Q8_MBX_HW_CONFIG; 1453 hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT; 1454 hw_config->count_version |= Q8_MBX_CMD_VERSION; 1455 1456 hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE; 1457 1458 if (qla_mbx_cmd(ha, (uint32_t *)hw_config, 1459 (sizeof (q80_hw_config_t) >> 2), 1460 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { 1461 device_printf(dev, "%s: failed\n", __func__); 1462 return -1; 1463 } 1464 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; 1465 1466 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); 1467 1468 if (err) { 1469 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1470 } else { 1471 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__, 1472 hw_config_rsp->u.get_cam_search_mode.mode); 1473 } 1474 1475 return 0; 1476 } 1477 1478 1479 1480 static void 1481 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i) 1482 { 1483 device_t dev = ha->pci_dev; 1484 1485 if (i < ha->hw.num_tx_rings) { 1486 device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n", 1487 __func__, i, xstat->total_bytes); 1488 device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n", 1489 __func__, i, xstat->total_pkts); 1490 device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n", 1491 __func__, i, xstat->errors); 1492 device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n", 1493 __func__, i, xstat->pkts_dropped); 1494 device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n", 1495 __func__, i, xstat->switch_pkts); 1496 device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n", 1497 __func__, i, xstat->num_buffers); 1498 } else { 1499 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", 1500 __func__, xstat->total_bytes); 1501 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", 1502 __func__, xstat->total_pkts); 1503 device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n", 1504 __func__, xstat->errors); 1505 device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n", 1506 __func__, xstat->pkts_dropped); 1507 device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n", 1508 __func__, xstat->switch_pkts); 1509 device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n", 1510 __func__, xstat->num_buffers); 1511 } 1512 } 1513 1514 static void 1515 
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat) 1516 { 1517 device_t dev = ha->pci_dev; 1518 1519 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__, 1520 rstat->total_bytes); 1521 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__, 1522 rstat->total_pkts); 1523 device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__, 1524 rstat->lro_pkt_count); 1525 device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__, 1526 rstat->sw_pkt_count); 1527 device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__, 1528 rstat->ip_chksum_err); 1529 device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__, 1530 rstat->pkts_wo_acntxts); 1531 device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n", 1532 __func__, rstat->pkts_dropped_no_sds_card); 1533 device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n", 1534 __func__, rstat->pkts_dropped_no_sds_host); 1535 device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__, 1536 rstat->oversized_pkts); 1537 device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n", 1538 __func__, rstat->pkts_dropped_no_rds); 1539 device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n", 1540 __func__, rstat->unxpctd_mcast_pkts); 1541 device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__, 1542 rstat->re1_fbq_error); 1543 device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__, 1544 rstat->invalid_mac_addr); 1545 device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__, 1546 rstat->rds_prime_trys); 1547 device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__, 1548 rstat->rds_prime_success); 1549 device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__, 1550 rstat->lro_flows_added); 1551 device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__, 1552 rstat->lro_flows_deleted); 1553 device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__, 1554 rstat->lro_flows_active); 1555 device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n", 1556 __func__, rstat->pkts_droped_unknown); 1557 device_printf(dev, "%s: pkts_cnt_oversized\t\t%" PRIu64 "\n", 1558 __func__, rstat->pkts_cnt_oversized); 1559 } 1560 1561 static void 1562 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat) 1563 { 1564 device_t dev = ha->pci_dev; 1565 1566 device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__, 1567 mstat->xmt_frames); 1568 device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__, 1569 mstat->xmt_bytes); 1570 device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1571 mstat->xmt_mcast_pkts); 1572 device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1573 mstat->xmt_bcast_pkts); 1574 device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__, 1575 mstat->xmt_pause_frames); 1576 device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1577 mstat->xmt_cntrl_pkts); 1578 device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1579 __func__, mstat->xmt_pkt_lt_64bytes); 1580 device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1581 __func__, mstat->xmt_pkt_lt_127bytes); 1582 device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1583 __func__, mstat->xmt_pkt_lt_255bytes); 1584 device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1585 __func__, mstat->xmt_pkt_lt_511bytes); 1586 device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1587 __func__, mstat->xmt_pkt_lt_1023bytes); 1588 device_printf(dev, "%s: 
xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1589 __func__, mstat->xmt_pkt_lt_1518bytes); 1590 device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1591 __func__, mstat->xmt_pkt_gt_1518bytes); 1592 1593 device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__, 1594 mstat->rcv_frames); 1595 device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__, 1596 mstat->rcv_bytes); 1597 device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1598 mstat->rcv_mcast_pkts); 1599 device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1600 mstat->rcv_bcast_pkts); 1601 device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__, 1602 mstat->rcv_pause_frames); 1603 device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1604 mstat->rcv_cntrl_pkts); 1605 device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1606 __func__, mstat->rcv_pkt_lt_64bytes); 1607 device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1608 __func__, mstat->rcv_pkt_lt_127bytes); 1609 device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1610 __func__, mstat->rcv_pkt_lt_255bytes); 1611 device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1612 __func__, mstat->rcv_pkt_lt_511bytes); 1613 device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1614 __func__, mstat->rcv_pkt_lt_1023bytes); 1615 device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1616 __func__, mstat->rcv_pkt_lt_1518bytes); 1617 device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1618 __func__, mstat->rcv_pkt_gt_1518bytes); 1619 1620 device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__, 1621 mstat->rcv_len_error); 1622 device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__, 1623 mstat->rcv_len_small); 1624 device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__, 1625 mstat->rcv_len_large); 1626 device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__, 1627 mstat->rcv_jabber); 1628 device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__, 1629 mstat->rcv_dropped); 1630 device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__, 1631 mstat->fcs_error); 1632 device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__, 1633 mstat->align_error); 1634 } 1635 1636 1637 static int 1638 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) 1639 { 1640 device_t dev; 1641 q80_get_stats_t *stat; 1642 q80_get_stats_rsp_t *stat_rsp; 1643 uint32_t err; 1644 1645 dev = ha->pci_dev; 1646 1647 stat = (q80_get_stats_t *)ha->hw.mbox; 1648 bzero(stat, (sizeof (q80_get_stats_t))); 1649 1650 stat->opcode = Q8_MBX_GET_STATS; 1651 stat->count_version = 2; 1652 stat->count_version |= Q8_MBX_CMD_VERSION; 1653 1654 stat->cmd = cmd; 1655 1656 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, 1657 ha->hw.mbox, (rsp_size >> 2), 0)) { 1658 device_printf(dev, "%s: failed\n", __func__); 1659 return -1; 1660 } 1661 1662 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1663 1664 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); 1665 1666 if (err) { 1667 return -1; 1668 } 1669 1670 return 0; 1671 } 1672 1673 void 1674 ql_get_stats(qla_host_t *ha) 1675 { 1676 q80_get_stats_rsp_t *stat_rsp; 1677 q80_mac_stats_t *mstat; 1678 q80_xmt_stats_t *xstat; 1679 q80_rcv_stats_t *rstat; 1680 uint32_t cmd; 1681 int i; 1682 1683 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1684 /* 1685 * Get MAC Statistics 1686 */ 1687 cmd = Q8_GET_STATS_CMD_TYPE_MAC; 1688 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1689 1690 cmd |= ((ha->pci_func & 0x1) << 
16); 1691 1692 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1693 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; 1694 qla_mac_stats(ha, mstat); 1695 } else { 1696 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", 1697 __func__, ha->hw.mbox[0]); 1698 } 1699 /* 1700 * Get RCV Statistics 1701 */ 1702 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; 1703 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1704 cmd |= (ha->hw.rcv_cntxt_id << 16); 1705 1706 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1707 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; 1708 qla_rcv_stats(ha, rstat); 1709 } else { 1710 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", 1711 __func__, ha->hw.mbox[0]); 1712 } 1713 /* 1714 * Get XMT Statistics 1715 */ 1716 for (i = 0 ; i < ha->hw.num_tx_rings; i++) { 1717 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; 1718 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1719 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); 1720 1721 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) 1722 == 0) { 1723 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; 1724 qla_xmt_stats(ha, xstat, i); 1725 } else { 1726 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", 1727 __func__, ha->hw.mbox[0]); 1728 } 1729 } 1730 return; 1731 } 1732 1733 static void 1734 qla_get_quick_stats(qla_host_t *ha) 1735 { 1736 q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp; 1737 q80_mac_stats_t *mstat; 1738 q80_xmt_stats_t *xstat; 1739 q80_rcv_stats_t *rstat; 1740 uint32_t cmd; 1741 1742 stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox; 1743 1744 cmd = Q8_GET_STATS_CMD_TYPE_ALL; 1745 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1746 1747 // cmd |= ((ha->pci_func & 0x3) << 16); 1748 cmd |= (0xFFFF << 16); 1749 1750 if (qla_get_hw_stats(ha, cmd, 1751 sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) { 1752 1753 mstat = (q80_mac_stats_t *)&stat_rsp->mac; 1754 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv; 1755 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt; 1756 qla_mac_stats(ha, mstat); 1757 qla_rcv_stats(ha, rstat); 1758 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings); 1759 } else { 1760 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n", 1761 __func__, ha->hw.mbox[0]); 1762 } 1763 return; 1764 } 1765 1766 /* 1767 * Name: qla_tx_tso 1768 * Function: Checks if the packet to be transmitted is a candidate for 1769 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 1770 * Ring Structure are plugged in. 
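 *
 * Return values (consumed by ql_hw_send() below): -1 if the frame is not
 * a TSO candidate (unsupported ethertype, IP options, or a non-TCP
 * payload), 1 if the L2/L3/L4 headers were copied into 'hdr' because the
 * first mbuf does not contain the complete header, and 0 if the header is
 * contiguous in the first mbuf.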
1771 */ 1772 static int 1773 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) 1774 { 1775 struct ether_vlan_header *eh; 1776 struct ip *ip = NULL; 1777 struct ip6_hdr *ip6 = NULL; 1778 struct tcphdr *th = NULL; 1779 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; 1780 uint16_t etype, opcode, offload = 1; 1781 device_t dev; 1782 1783 dev = ha->pci_dev; 1784 1785 1786 eh = mtod(mp, struct ether_vlan_header *); 1787 1788 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1789 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1790 etype = ntohs(eh->evl_proto); 1791 } else { 1792 ehdrlen = ETHER_HDR_LEN; 1793 etype = ntohs(eh->evl_encap_proto); 1794 } 1795 1796 hdrlen = 0; 1797 1798 switch (etype) { 1799 case ETHERTYPE_IP: 1800 1801 tcp_opt_off = ehdrlen + sizeof(struct ip) + 1802 sizeof(struct tcphdr); 1803 1804 if (mp->m_len < tcp_opt_off) { 1805 m_copydata(mp, 0, tcp_opt_off, hdr); 1806 ip = (struct ip *)(hdr + ehdrlen); 1807 } else { 1808 ip = (struct ip *)(mp->m_data + ehdrlen); 1809 } 1810 1811 ip_hlen = ip->ip_hl << 2; 1812 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; 1813 1814 1815 if ((ip->ip_p != IPPROTO_TCP) || 1816 (ip_hlen != sizeof (struct ip))){ 1817 /* IP Options are not supported */ 1818 1819 offload = 0; 1820 } else 1821 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 1822 1823 break; 1824 1825 case ETHERTYPE_IPV6: 1826 1827 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + 1828 sizeof (struct tcphdr); 1829 1830 if (mp->m_len < tcp_opt_off) { 1831 m_copydata(mp, 0, tcp_opt_off, hdr); 1832 ip6 = (struct ip6_hdr *)(hdr + ehdrlen); 1833 } else { 1834 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1835 } 1836 1837 ip_hlen = sizeof(struct ip6_hdr); 1838 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; 1839 1840 if (ip6->ip6_nxt != IPPROTO_TCP) { 1841 //device_printf(dev, "%s: ipv6\n", __func__); 1842 offload = 0; 1843 } else 1844 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 1845 break; 1846 1847 default: 1848 QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); 1849 offload = 0; 1850 break; 1851 } 1852 1853 if (!offload) 1854 return (-1); 1855 1856 tcp_hlen = th->th_off << 2; 1857 hdrlen = ehdrlen + ip_hlen + tcp_hlen; 1858 1859 if (mp->m_len < hdrlen) { 1860 if (mp->m_len < tcp_opt_off) { 1861 if (tcp_hlen > sizeof(struct tcphdr)) { 1862 m_copydata(mp, tcp_opt_off, 1863 (tcp_hlen - sizeof(struct tcphdr)), 1864 &hdr[tcp_opt_off]); 1865 } 1866 } else { 1867 m_copydata(mp, 0, hdrlen, hdr); 1868 } 1869 } 1870 1871 tx_cmd->mss = mp->m_pkthdr.tso_segsz; 1872 1873 tx_cmd->flags_opcode = opcode ; 1874 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 1875 tx_cmd->total_hdr_len = hdrlen; 1876 1877 /* Check for Multicast least significant bit of MSB == 1 */ 1878 if (eh->evl_dhost[0] & 0x01) { 1879 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; 1880 } 1881 1882 if (mp->m_len < hdrlen) { 1883 printf("%d\n", hdrlen); 1884 return (1); 1885 } 1886 1887 return (0); 1888 } 1889 1890 /* 1891 * Name: qla_tx_chksum 1892 * Function: Checks if the packet to be transmitted is a candidate for 1893 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx 1894 * Ring Structure are plugged in. 
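 *
 * Returns 0 and fills in *op_code and *tcp_hdr_off when CSUM_TCP/CSUM_UDP
 * is requested and the protocol is recognized; returns -1 otherwise, in
 * which case *op_code remains 0 and the frame is transmitted without
 * checksum offload.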
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
	uint32_t *tcp_hdr_off)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	*op_code = 0;

	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}


	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof (struct ip);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
			ip = (struct ip *)buf;
		}

		if (ip->ip_p == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
		else if (ip->ip_p == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
		else {
			//device_printf(dev, "%s: ipv4\n", __func__);
			offload = 0;
		}
		break;

	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

		ip_hlen = sizeof(struct ip6_hdr);

		if (mp->m_len < (ehdrlen + ip_hlen)) {
			m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
				buf);
			ip6 = (struct ip6_hdr *)buf;
		}

		if (ip6->ip6_nxt == IPPROTO_TCP)
			opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
		else if (ip6->ip6_nxt == IPPROTO_UDP)
			opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
		else {
			//device_printf(dev, "%s: ipv6\n", __func__);
			offload = 0;
		}
		break;

	default:
		offload = 0;
		break;
	}
	if (!offload)
		return (-1);

	*op_code = opcode;
	*tcp_hdr_off = (ip_hlen + ehdrlen);

	return (0);
}

#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria are not met, it is transmitted
 *	as a regular ethernet frame.
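 *
 * Returns 0 once the descriptors are queued and the ring producer index
 * has been updated, -1 if the frame exceeds QLA_MAX_TSO_FRAME_SIZE or the
 * ring lacks (num_tx_cmds + QLA_TX_MIN_FREE) free entries even after
 * reaping completions, and EINVAL if TSO header parsing fails.
 *
 * Rough calling sketch, illustrative only (tx_tag, map, segs, nsegs and
 * ret are assumed names; the real transmit path lives in ql_os.c and is
 * presumed to hold the transmit lock, since ql_hw_tx_done_locked() is
 * called directly from here):
 *
 *	if (bus_dmamap_load_mbuf_sg(tx_tag, map, mp, segs, &nsegs,
 *	    BUS_DMA_NOWAIT) == 0)
 *		ret = ql_hw_send(ha, segs, nsegs,
 *			ha->hw.tx_cntxt[txr_idx].txr_next, mp, txr_idx, 0);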
1988 */ 1989 int 1990 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, 1991 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) 1992 { 1993 struct ether_vlan_header *eh; 1994 qla_hw_t *hw = &ha->hw; 1995 q80_tx_cmd_t *tx_cmd, tso_cmd; 1996 bus_dma_segment_t *c_seg; 1997 uint32_t num_tx_cmds, hdr_len = 0; 1998 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; 1999 device_t dev; 2000 int i, ret; 2001 uint8_t *src = NULL, *dst = NULL; 2002 uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; 2003 uint32_t op_code = 0; 2004 uint32_t tcp_hdr_off = 0; 2005 2006 dev = ha->pci_dev; 2007 2008 /* 2009 * Always make sure there is atleast one empty slot in the tx_ring 2010 * tx_ring is considered full when there only one entry available 2011 */ 2012 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; 2013 2014 total_length = mp->m_pkthdr.len; 2015 if (total_length > QLA_MAX_TSO_FRAME_SIZE) { 2016 device_printf(dev, "%s: total length exceeds maxlen(%d)\n", 2017 __func__, total_length); 2018 return (-1); 2019 } 2020 eh = mtod(mp, struct ether_vlan_header *); 2021 2022 if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 2023 2024 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); 2025 2026 src = frame_hdr; 2027 ret = qla_tx_tso(ha, mp, &tso_cmd, src); 2028 2029 if (!(ret & ~1)) { 2030 /* find the additional tx_cmd descriptors required */ 2031 2032 if (mp->m_flags & M_VLANTAG) 2033 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; 2034 2035 hdr_len = tso_cmd.total_hdr_len; 2036 2037 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2038 bytes = QL_MIN(bytes, hdr_len); 2039 2040 num_tx_cmds++; 2041 hdr_len -= bytes; 2042 2043 while (hdr_len) { 2044 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2045 hdr_len -= bytes; 2046 num_tx_cmds++; 2047 } 2048 hdr_len = tso_cmd.total_hdr_len; 2049 2050 if (ret == 0) 2051 src = (uint8_t *)eh; 2052 } else 2053 return (EINVAL); 2054 } else { 2055 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); 2056 } 2057 2058 if (iscsi_pdu) 2059 ha->hw.iscsi_pkt_count++; 2060 2061 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { 2062 ql_hw_tx_done_locked(ha, txr_idx); 2063 if (hw->tx_cntxt[txr_idx].txr_free <= 2064 (num_tx_cmds + QLA_TX_MIN_FREE)) { 2065 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " 2066 "(num_tx_cmds + QLA_TX_MIN_FREE))\n", 2067 __func__)); 2068 return (-1); 2069 } 2070 } 2071 2072 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; 2073 2074 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { 2075 2076 if (nsegs > ha->hw.max_tx_segs) 2077 ha->hw.max_tx_segs = nsegs; 2078 2079 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2080 2081 if (op_code) { 2082 tx_cmd->flags_opcode = op_code; 2083 tx_cmd->tcp_hdr_off = tcp_hdr_off; 2084 2085 } else { 2086 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; 2087 } 2088 } else { 2089 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); 2090 ha->tx_tso_frames++; 2091 } 2092 2093 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2094 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; 2095 2096 if (iscsi_pdu) 2097 eh->evl_tag |= ha->hw.user_pri_iscsi << 13; 2098 2099 } else if (mp->m_flags & M_VLANTAG) { 2100 2101 if (hdr_len) { /* TSO */ 2102 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | 2103 Q8_TX_CMD_FLAGS_HW_VLAN_ID); 2104 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; 2105 } else 2106 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; 2107 2108 ha->hw_vlan_tx_frames++; 2109 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; 2110 2111 if (iscsi_pdu) { 2112 tx_cmd->vlan_tci |= 
ha->hw.user_pri_iscsi << 13; 2113 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci; 2114 } 2115 } 2116 2117 2118 tx_cmd->n_bufs = (uint8_t)nsegs; 2119 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); 2120 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); 2121 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); 2122 2123 c_seg = segs; 2124 2125 while (1) { 2126 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { 2127 2128 switch (i) { 2129 case 0: 2130 tx_cmd->buf1_addr = c_seg->ds_addr; 2131 tx_cmd->buf1_len = c_seg->ds_len; 2132 break; 2133 2134 case 1: 2135 tx_cmd->buf2_addr = c_seg->ds_addr; 2136 tx_cmd->buf2_len = c_seg->ds_len; 2137 break; 2138 2139 case 2: 2140 tx_cmd->buf3_addr = c_seg->ds_addr; 2141 tx_cmd->buf3_len = c_seg->ds_len; 2142 break; 2143 2144 case 3: 2145 tx_cmd->buf4_addr = c_seg->ds_addr; 2146 tx_cmd->buf4_len = c_seg->ds_len; 2147 break; 2148 } 2149 2150 c_seg++; 2151 nsegs--; 2152 } 2153 2154 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2155 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2156 (NUM_TX_DESCRIPTORS - 1); 2157 tx_cmd_count++; 2158 2159 if (!nsegs) 2160 break; 2161 2162 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2163 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2164 } 2165 2166 if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 2167 2168 /* TSO : Copy the header in the following tx cmd descriptors */ 2169 2170 txr_next = hw->tx_cntxt[txr_idx].txr_next; 2171 2172 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2173 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2174 2175 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2176 bytes = QL_MIN(bytes, hdr_len); 2177 2178 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; 2179 2180 if (mp->m_flags & M_VLANTAG) { 2181 /* first copy the src/dst MAC addresses */ 2182 bcopy(src, dst, (ETHER_ADDR_LEN * 2)); 2183 dst += (ETHER_ADDR_LEN * 2); 2184 src += (ETHER_ADDR_LEN * 2); 2185 2186 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); 2187 dst += 2; 2188 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); 2189 dst += 2; 2190 2191 /* bytes left in src header */ 2192 hdr_len -= ((ETHER_ADDR_LEN * 2) + 2193 ETHER_VLAN_ENCAP_LEN); 2194 2195 /* bytes left in TxCmd Entry */ 2196 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); 2197 2198 2199 bcopy(src, dst, bytes); 2200 src += bytes; 2201 hdr_len -= bytes; 2202 } else { 2203 bcopy(src, dst, bytes); 2204 src += bytes; 2205 hdr_len -= bytes; 2206 } 2207 2208 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2209 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2210 (NUM_TX_DESCRIPTORS - 1); 2211 tx_cmd_count++; 2212 2213 while (hdr_len) { 2214 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2215 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2216 2217 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2218 2219 bcopy(src, tx_cmd, bytes); 2220 src += bytes; 2221 hdr_len -= bytes; 2222 2223 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2224 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2225 (NUM_TX_DESCRIPTORS - 1); 2226 tx_cmd_count++; 2227 } 2228 } 2229 2230 hw->tx_cntxt[txr_idx].txr_free = 2231 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; 2232 2233 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ 2234 txr_idx); 2235 QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); 2236 2237 return (0); 2238 } 2239 2240 2241 2242 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ 2243 static int 2244 qla_config_rss_ind_table(qla_host_t *ha) 2245 { 2246 uint32_t i, count; 2247 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; 2248 2249 2250 
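	/*
	 * Spread the SDS rings evenly across the indirection table entries,
	 * then program the table to the firmware in chunks of
	 * Q8_CONFIG_IND_TBL_SIZE entries.
	 */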
for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { 2251 rss_ind_tbl[i] = i % ha->hw.num_sds_rings; 2252 } 2253 2254 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; 2255 i = i + Q8_CONFIG_IND_TBL_SIZE) { 2256 2257 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { 2258 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; 2259 } else { 2260 count = Q8_CONFIG_IND_TBL_SIZE; 2261 } 2262 2263 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, 2264 rss_ind_tbl)) 2265 return (-1); 2266 } 2267 2268 return (0); 2269 } 2270 2271 static int 2272 qla_config_soft_lro(qla_host_t *ha) 2273 { 2274 int i; 2275 qla_hw_t *hw = &ha->hw; 2276 struct lro_ctrl *lro; 2277 2278 for (i = 0; i < hw->num_sds_rings; i++) { 2279 lro = &hw->sds[i].lro; 2280 2281 bzero(lro, sizeof(struct lro_ctrl)); 2282 2283 #if (__FreeBSD_version >= 1100101) 2284 if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) { 2285 device_printf(ha->pci_dev, 2286 "%s: tcp_lro_init_args [%d] failed\n", 2287 __func__, i); 2288 return (-1); 2289 } 2290 #else 2291 if (tcp_lro_init(lro)) { 2292 device_printf(ha->pci_dev, 2293 "%s: tcp_lro_init [%d] failed\n", 2294 __func__, i); 2295 return (-1); 2296 } 2297 #endif /* #if (__FreeBSD_version >= 1100101) */ 2298 2299 lro->ifp = ha->ifp; 2300 } 2301 2302 QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__)); 2303 return (0); 2304 } 2305 2306 static void 2307 qla_drain_soft_lro(qla_host_t *ha) 2308 { 2309 int i; 2310 qla_hw_t *hw = &ha->hw; 2311 struct lro_ctrl *lro; 2312 2313 for (i = 0; i < hw->num_sds_rings; i++) { 2314 lro = &hw->sds[i].lro; 2315 2316 #if (__FreeBSD_version >= 1100101) 2317 tcp_lro_flush_all(lro); 2318 #else 2319 struct lro_entry *queued; 2320 2321 while ((!SLIST_EMPTY(&lro->lro_active))) { 2322 queued = SLIST_FIRST(&lro->lro_active); 2323 SLIST_REMOVE_HEAD(&lro->lro_active, next); 2324 tcp_lro_flush(lro, queued); 2325 } 2326 #endif /* #if (__FreeBSD_version >= 1100101) */ 2327 } 2328 2329 return; 2330 } 2331 2332 static void 2333 qla_free_soft_lro(qla_host_t *ha) 2334 { 2335 int i; 2336 qla_hw_t *hw = &ha->hw; 2337 struct lro_ctrl *lro; 2338 2339 for (i = 0; i < hw->num_sds_rings; i++) { 2340 lro = &hw->sds[i].lro; 2341 tcp_lro_free(lro); 2342 } 2343 2344 return; 2345 } 2346 2347 2348 /* 2349 * Name: ql_del_hw_if 2350 * Function: Destroys the hardware specific entities corresponding to an 2351 * Ethernet Interface 2352 */ 2353 void 2354 ql_del_hw_if(qla_host_t *ha) 2355 { 2356 uint32_t i; 2357 uint32_t num_msix; 2358 2359 (void)qla_stop_nic_func(ha); 2360 2361 qla_del_rcv_cntxt(ha); 2362 2363 qla_del_xmt_cntxt(ha); 2364 2365 if (ha->hw.flags.init_intr_cnxt) { 2366 for (i = 0; i < ha->hw.num_sds_rings; ) { 2367 2368 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2369 num_msix = Q8_MAX_INTR_VECTORS; 2370 else 2371 num_msix = ha->hw.num_sds_rings - i; 2372 qla_config_intr_cntxt(ha, i, num_msix, 0); 2373 2374 i += num_msix; 2375 } 2376 2377 ha->hw.flags.init_intr_cnxt = 0; 2378 } 2379 2380 if (ha->hw.enable_soft_lro) { 2381 qla_drain_soft_lro(ha); 2382 qla_free_soft_lro(ha); 2383 } 2384 2385 return; 2386 } 2387 2388 void 2389 qla_confirm_9kb_enable(qla_host_t *ha) 2390 { 2391 uint32_t supports_9kb = 0; 2392 2393 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); 2394 2395 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ 2396 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); 2397 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 2398 2399 qla_get_nic_partition(ha, &supports_9kb, NULL); 2400 2401 if (!supports_9kb) 2402 ha->hw.enable_9kb = 0; 
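	/*
	 * When the NIC partition does not report 9KB support, the receive
	 * rings fall back to standard (MCLBYTES) clusters instead of
	 * MJUM9BYTES buffers.
	 */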
2403 2404 return; 2405 } 2406 2407 /* 2408 * Name: ql_init_hw_if 2409 * Function: Creates the hardware specific entities corresponding to an 2410 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address 2411 * corresponding to the interface. Enables LRO if allowed. 2412 */ 2413 int 2414 ql_init_hw_if(qla_host_t *ha) 2415 { 2416 device_t dev; 2417 uint32_t i; 2418 uint8_t bcast_mac[6]; 2419 qla_rdesc_t *rdesc; 2420 uint32_t num_msix; 2421 2422 dev = ha->pci_dev; 2423 2424 for (i = 0; i < ha->hw.num_sds_rings; i++) { 2425 bzero(ha->hw.dma_buf.sds_ring[i].dma_b, 2426 ha->hw.dma_buf.sds_ring[i].size); 2427 } 2428 2429 for (i = 0; i < ha->hw.num_sds_rings; ) { 2430 2431 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2432 num_msix = Q8_MAX_INTR_VECTORS; 2433 else 2434 num_msix = ha->hw.num_sds_rings - i; 2435 2436 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { 2437 2438 if (i > 0) { 2439 2440 num_msix = i; 2441 2442 for (i = 0; i < num_msix; ) { 2443 qla_config_intr_cntxt(ha, i, 2444 Q8_MAX_INTR_VECTORS, 0); 2445 i += Q8_MAX_INTR_VECTORS; 2446 } 2447 } 2448 return (-1); 2449 } 2450 2451 i = i + num_msix; 2452 } 2453 2454 ha->hw.flags.init_intr_cnxt = 1; 2455 2456 /* 2457 * Create Receive Context 2458 */ 2459 if (qla_init_rcv_cntxt(ha)) { 2460 return (-1); 2461 } 2462 2463 for (i = 0; i < ha->hw.num_rds_rings; i++) { 2464 rdesc = &ha->hw.rds[i]; 2465 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; 2466 rdesc->rx_in = 0; 2467 /* Update the RDS Producer Indices */ 2468 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ 2469 rdesc->rx_next); 2470 } 2471 2472 2473 /* 2474 * Create Transmit Context 2475 */ 2476 if (qla_init_xmt_cntxt(ha)) { 2477 qla_del_rcv_cntxt(ha); 2478 return (-1); 2479 } 2480 ha->hw.max_tx_segs = 0; 2481 2482 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) 2483 return(-1); 2484 2485 ha->hw.flags.unicast_mac = 1; 2486 2487 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2488 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2489 2490 if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) 2491 return (-1); 2492 2493 ha->hw.flags.bcast_mac = 1; 2494 2495 /* 2496 * program any cached multicast addresses 2497 */ 2498 if (qla_hw_add_all_mcast(ha)) 2499 return (-1); 2500 2501 if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id)) 2502 return (-1); 2503 2504 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) 2505 return (-1); 2506 2507 if (qla_config_rss_ind_table(ha)) 2508 return (-1); 2509 2510 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) 2511 return (-1); 2512 2513 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) 2514 return (-1); 2515 2516 if (ha->ifp->if_capenable & IFCAP_LRO) { 2517 if (ha->hw.enable_hw_lro) { 2518 ha->hw.enable_soft_lro = 0; 2519 2520 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) 2521 return (-1); 2522 } else { 2523 ha->hw.enable_soft_lro = 1; 2524 2525 if (qla_config_soft_lro(ha)) 2526 return (-1); 2527 } 2528 } 2529 2530 if (qla_init_nic_func(ha)) 2531 return (-1); 2532 2533 if (qla_query_fw_dcbx_caps(ha)) 2534 return (-1); 2535 2536 for (i = 0; i < ha->hw.num_sds_rings; i++) 2537 QL_ENABLE_INTERRUPTS(ha, i); 2538 2539 return (0); 2540 } 2541 2542 static int 2543 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) 2544 { 2545 device_t dev = ha->pci_dev; 2546 q80_rq_map_sds_to_rds_t *map_rings; 2547 q80_rsp_map_sds_to_rds_t *map_rings_rsp; 2548 uint32_t i, err; 2549 qla_hw_t *hw = &ha->hw; 2550 2551 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; 2552 bzero(map_rings, 
sizeof(q80_rq_map_sds_to_rds_t)); 2553 2554 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; 2555 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); 2556 map_rings->count_version |= Q8_MBX_CMD_VERSION; 2557 2558 map_rings->cntxt_id = hw->rcv_cntxt_id; 2559 map_rings->num_rings = num_idx; 2560 2561 for (i = 0; i < num_idx; i++) { 2562 map_rings->sds_rds[i].sds_ring = i + start_idx; 2563 map_rings->sds_rds[i].rds_ring = i + start_idx; 2564 } 2565 2566 if (qla_mbx_cmd(ha, (uint32_t *)map_rings, 2567 (sizeof (q80_rq_map_sds_to_rds_t) >> 2), 2568 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2569 device_printf(dev, "%s: failed0\n", __func__); 2570 return (-1); 2571 } 2572 2573 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; 2574 2575 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); 2576 2577 if (err) { 2578 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2579 return (-1); 2580 } 2581 2582 return (0); 2583 } 2584 2585 /* 2586 * Name: qla_init_rcv_cntxt 2587 * Function: Creates the Receive Context. 2588 */ 2589 static int 2590 qla_init_rcv_cntxt(qla_host_t *ha) 2591 { 2592 q80_rq_rcv_cntxt_t *rcntxt; 2593 q80_rsp_rcv_cntxt_t *rcntxt_rsp; 2594 q80_stat_desc_t *sdesc; 2595 int i, j; 2596 qla_hw_t *hw = &ha->hw; 2597 device_t dev; 2598 uint32_t err; 2599 uint32_t rcntxt_sds_rings; 2600 uint32_t rcntxt_rds_rings; 2601 uint32_t max_idx; 2602 2603 dev = ha->pci_dev; 2604 2605 /* 2606 * Create Receive Context 2607 */ 2608 2609 for (i = 0; i < hw->num_sds_rings; i++) { 2610 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; 2611 2612 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { 2613 sdesc->data[0] = 1ULL; 2614 sdesc->data[1] = 1ULL; 2615 } 2616 } 2617 2618 rcntxt_sds_rings = hw->num_sds_rings; 2619 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) 2620 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; 2621 2622 rcntxt_rds_rings = hw->num_rds_rings; 2623 2624 if (hw->num_rds_rings > MAX_RDS_RING_SETS) 2625 rcntxt_rds_rings = MAX_RDS_RING_SETS; 2626 2627 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; 2628 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); 2629 2630 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; 2631 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); 2632 rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2633 2634 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | 2635 Q8_RCV_CNTXT_CAP0_LRO | 2636 Q8_RCV_CNTXT_CAP0_HW_LRO | 2637 Q8_RCV_CNTXT_CAP0_RSS | 2638 Q8_RCV_CNTXT_CAP0_SGL_LRO; 2639 2640 if (ha->hw.enable_9kb) 2641 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; 2642 else 2643 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; 2644 2645 if (ha->hw.num_rds_rings > 1) { 2646 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); 2647 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; 2648 } else 2649 rcntxt->nrds_sets_rings = 0x1 | (1 << 5); 2650 2651 rcntxt->nsds_rings = rcntxt_sds_rings; 2652 2653 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; 2654 2655 rcntxt->rcv_vpid = 0; 2656 2657 for (i = 0; i < rcntxt_sds_rings; i++) { 2658 rcntxt->sds[i].paddr = 2659 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); 2660 rcntxt->sds[i].size = 2661 qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2662 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); 2663 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); 2664 } 2665 2666 for (i = 0; i < rcntxt_rds_rings; i++) { 2667 rcntxt->rds[i].paddr_std = 2668 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); 2669 2670 if (ha->hw.enable_9kb) 2671 rcntxt->rds[i].std_bsize = 2672 qla_host_to_le64(MJUM9BYTES); 2673 else 
2674 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2675 2676 rcntxt->rds[i].std_nentries = 2677 qla_host_to_le32(NUM_RX_DESCRIPTORS); 2678 } 2679 2680 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2681 (sizeof (q80_rq_rcv_cntxt_t) >> 2), 2682 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { 2683 device_printf(dev, "%s: failed0\n", __func__); 2684 return (-1); 2685 } 2686 2687 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; 2688 2689 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2690 2691 if (err) { 2692 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2693 return (-1); 2694 } 2695 2696 for (i = 0; i < rcntxt_sds_rings; i++) { 2697 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; 2698 } 2699 2700 for (i = 0; i < rcntxt_rds_rings; i++) { 2701 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; 2702 } 2703 2704 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; 2705 2706 ha->hw.flags.init_rx_cnxt = 1; 2707 2708 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { 2709 2710 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { 2711 2712 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) 2713 max_idx = MAX_RCNTXT_SDS_RINGS; 2714 else 2715 max_idx = hw->num_sds_rings - i; 2716 2717 err = qla_add_rcv_rings(ha, i, max_idx); 2718 if (err) 2719 return -1; 2720 2721 i += max_idx; 2722 } 2723 } 2724 2725 if (hw->num_rds_rings > 1) { 2726 2727 for (i = 0; i < hw->num_rds_rings; ) { 2728 2729 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) 2730 max_idx = MAX_SDS_TO_RDS_MAP; 2731 else 2732 max_idx = hw->num_rds_rings - i; 2733 2734 err = qla_map_sds_to_rds(ha, i, max_idx); 2735 if (err) 2736 return -1; 2737 2738 i += max_idx; 2739 } 2740 } 2741 2742 return (0); 2743 } 2744 2745 static int 2746 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) 2747 { 2748 device_t dev = ha->pci_dev; 2749 q80_rq_add_rcv_rings_t *add_rcv; 2750 q80_rsp_add_rcv_rings_t *add_rcv_rsp; 2751 uint32_t i,j, err; 2752 qla_hw_t *hw = &ha->hw; 2753 2754 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; 2755 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); 2756 2757 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; 2758 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); 2759 add_rcv->count_version |= Q8_MBX_CMD_VERSION; 2760 2761 add_rcv->nrds_sets_rings = nsds | (1 << 5); 2762 add_rcv->nsds_rings = nsds; 2763 add_rcv->cntxt_id = hw->rcv_cntxt_id; 2764 2765 for (i = 0; i < nsds; i++) { 2766 2767 j = i + sds_idx; 2768 2769 add_rcv->sds[i].paddr = 2770 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); 2771 2772 add_rcv->sds[i].size = 2773 qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2774 2775 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); 2776 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); 2777 2778 } 2779 2780 for (i = 0; (i < nsds); i++) { 2781 j = i + sds_idx; 2782 2783 add_rcv->rds[i].paddr_std = 2784 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); 2785 2786 if (ha->hw.enable_9kb) 2787 add_rcv->rds[i].std_bsize = 2788 qla_host_to_le64(MJUM9BYTES); 2789 else 2790 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2791 2792 add_rcv->rds[i].std_nentries = 2793 qla_host_to_le32(NUM_RX_DESCRIPTORS); 2794 } 2795 2796 2797 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, 2798 (sizeof (q80_rq_add_rcv_rings_t) >> 2), 2799 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2800 device_printf(dev, "%s: failed0\n", __func__); 2801 return (-1); 2802 } 2803 2804 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; 2805 2806 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); 2807 
2808 if (err) { 2809 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2810 return (-1); 2811 } 2812 2813 for (i = 0; i < nsds; i++) { 2814 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; 2815 } 2816 2817 for (i = 0; i < nsds; i++) { 2818 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; 2819 } 2820 2821 return (0); 2822 } 2823 2824 /* 2825 * Name: qla_del_rcv_cntxt 2826 * Function: Destroys the Receive Context. 2827 */ 2828 static void 2829 qla_del_rcv_cntxt(qla_host_t *ha) 2830 { 2831 device_t dev = ha->pci_dev; 2832 q80_rcv_cntxt_destroy_t *rcntxt; 2833 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; 2834 uint32_t err; 2835 uint8_t bcast_mac[6]; 2836 2837 if (!ha->hw.flags.init_rx_cnxt) 2838 return; 2839 2840 if (qla_hw_del_all_mcast(ha)) 2841 return; 2842 2843 if (ha->hw.flags.bcast_mac) { 2844 2845 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2846 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2847 2848 if (qla_config_mac_addr(ha, bcast_mac, 0, 1)) 2849 return; 2850 ha->hw.flags.bcast_mac = 0; 2851 2852 } 2853 2854 if (ha->hw.flags.unicast_mac) { 2855 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1)) 2856 return; 2857 ha->hw.flags.unicast_mac = 0; 2858 } 2859 2860 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; 2861 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); 2862 2863 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; 2864 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); 2865 rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2866 2867 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; 2868 2869 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2870 (sizeof (q80_rcv_cntxt_destroy_t) >> 2), 2871 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { 2872 device_printf(dev, "%s: failed0\n", __func__); 2873 return; 2874 } 2875 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; 2876 2877 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2878 2879 if (err) { 2880 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2881 } 2882 2883 ha->hw.flags.init_rx_cnxt = 0; 2884 return; 2885 } 2886 2887 /* 2888 * Name: qla_init_xmt_cntxt 2889 * Function: Creates the Transmit Context. 
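 * One transmit context is created per transmit ring by
 * qla_init_xmt_cntxt_i(); qla_init_xmt_cntxt() tears down any contexts
 * already created if a later ring fails.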
2890 */ 2891 static int 2892 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2893 { 2894 device_t dev; 2895 qla_hw_t *hw = &ha->hw; 2896 q80_rq_tx_cntxt_t *tcntxt; 2897 q80_rsp_tx_cntxt_t *tcntxt_rsp; 2898 uint32_t err; 2899 qla_hw_tx_cntxt_t *hw_tx_cntxt; 2900 uint32_t intr_idx; 2901 2902 hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 2903 2904 dev = ha->pci_dev; 2905 2906 /* 2907 * Create Transmit Context 2908 */ 2909 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; 2910 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); 2911 2912 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; 2913 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); 2914 tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2915 2916 intr_idx = txr_idx; 2917 2918 #ifdef QL_ENABLE_ISCSI_TLV 2919 2920 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | 2921 Q8_TX_CNTXT_CAP0_TC; 2922 2923 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { 2924 tcntxt->traffic_class = 1; 2925 } 2926 2927 intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); 2928 2929 #else 2930 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; 2931 2932 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 2933 2934 tcntxt->ntx_rings = 1; 2935 2936 tcntxt->tx_ring[0].paddr = 2937 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); 2938 tcntxt->tx_ring[0].tx_consumer = 2939 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); 2940 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); 2941 2942 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); 2943 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); 2944 2945 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; 2946 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; 2947 2948 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2949 (sizeof (q80_rq_tx_cntxt_t) >> 2), 2950 ha->hw.mbox, 2951 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) { 2952 device_printf(dev, "%s: failed0\n", __func__); 2953 return (-1); 2954 } 2955 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; 2956 2957 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2958 2959 if (err) { 2960 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2961 return -1; 2962 } 2963 2964 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; 2965 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; 2966 2967 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) 2968 return (-1); 2969 2970 return (0); 2971 } 2972 2973 2974 /* 2975 * Name: qla_del_xmt_cntxt 2976 * Function: Destroys the Transmit Context. 
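 * qla_del_xmt_cntxt_i() issues the destroy mailbox command for one ring;
 * qla_del_xmt_cntxt() walks every transmit ring and stops on the first
 * failure.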
2977 */ 2978 static int 2979 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2980 { 2981 device_t dev = ha->pci_dev; 2982 q80_tx_cntxt_destroy_t *tcntxt; 2983 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; 2984 uint32_t err; 2985 2986 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; 2987 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); 2988 2989 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; 2990 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); 2991 tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2992 2993 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; 2994 2995 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2996 (sizeof (q80_tx_cntxt_destroy_t) >> 2), 2997 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { 2998 device_printf(dev, "%s: failed0\n", __func__); 2999 return (-1); 3000 } 3001 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; 3002 3003 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 3004 3005 if (err) { 3006 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 3007 return (-1); 3008 } 3009 3010 return (0); 3011 } 3012 static void 3013 qla_del_xmt_cntxt(qla_host_t *ha) 3014 { 3015 uint32_t i; 3016 3017 if (!ha->hw.flags.init_tx_cnxt) 3018 return; 3019 3020 for (i = 0; i < ha->hw.num_tx_rings; i++) { 3021 if (qla_del_xmt_cntxt_i(ha, i)) 3022 break; 3023 } 3024 ha->hw.flags.init_tx_cnxt = 0; 3025 } 3026 3027 static int 3028 qla_init_xmt_cntxt(qla_host_t *ha) 3029 { 3030 uint32_t i, j; 3031 3032 for (i = 0; i < ha->hw.num_tx_rings; i++) { 3033 if (qla_init_xmt_cntxt_i(ha, i) != 0) { 3034 for (j = 0; j < i; j++) 3035 qla_del_xmt_cntxt_i(ha, j); 3036 return (-1); 3037 } 3038 } 3039 ha->hw.flags.init_tx_cnxt = 1; 3040 return (0); 3041 } 3042 3043 static int 3044 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) 3045 { 3046 int i, nmcast; 3047 uint32_t count = 0; 3048 uint8_t *mcast; 3049 3050 nmcast = ha->hw.nmcast; 3051 3052 QL_DPRINT2(ha, (ha->pci_dev, 3053 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); 3054 3055 mcast = ha->hw.mac_addr_arr; 3056 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3057 3058 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { 3059 if ((ha->hw.mcast[i].addr[0] != 0) || 3060 (ha->hw.mcast[i].addr[1] != 0) || 3061 (ha->hw.mcast[i].addr[2] != 0) || 3062 (ha->hw.mcast[i].addr[3] != 0) || 3063 (ha->hw.mcast[i].addr[4] != 0) || 3064 (ha->hw.mcast[i].addr[5] != 0)) { 3065 3066 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); 3067 mcast = mcast + ETHER_ADDR_LEN; 3068 count++; 3069 3070 if (count == Q8_MAX_MAC_ADDRS) { 3071 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 3072 add_mcast, count)) { 3073 device_printf(ha->pci_dev, 3074 "%s: failed\n", __func__); 3075 return (-1); 3076 } 3077 3078 count = 0; 3079 mcast = ha->hw.mac_addr_arr; 3080 memset(mcast, 0, 3081 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3082 } 3083 3084 nmcast--; 3085 } 3086 } 3087 3088 if (count) { 3089 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, 3090 count)) { 3091 device_printf(ha->pci_dev, "%s: failed\n", __func__); 3092 return (-1); 3093 } 3094 } 3095 QL_DPRINT2(ha, (ha->pci_dev, 3096 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); 3097 3098 return 0; 3099 } 3100 3101 static int 3102 qla_hw_add_all_mcast(qla_host_t *ha) 3103 { 3104 int ret; 3105 3106 ret = qla_hw_all_mcast(ha, 1); 3107 3108 return (ret); 3109 } 3110 3111 static int 3112 qla_hw_del_all_mcast(qla_host_t *ha) 3113 { 3114 int ret; 3115 3116 ret = qla_hw_all_mcast(ha, 0); 3117 3118 bzero(ha->hw.mcast, (sizeof 
(qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); 3119 ha->hw.nmcast = 0; 3120 3121 return (ret); 3122 } 3123 3124 static int 3125 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) 3126 { 3127 int i; 3128 3129 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3130 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) 3131 return (0); /* its been already added */ 3132 } 3133 return (-1); 3134 } 3135 3136 static int 3137 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3138 { 3139 int i; 3140 3141 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3142 3143 if ((ha->hw.mcast[i].addr[0] == 0) && 3144 (ha->hw.mcast[i].addr[1] == 0) && 3145 (ha->hw.mcast[i].addr[2] == 0) && 3146 (ha->hw.mcast[i].addr[3] == 0) && 3147 (ha->hw.mcast[i].addr[4] == 0) && 3148 (ha->hw.mcast[i].addr[5] == 0)) { 3149 3150 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); 3151 ha->hw.nmcast++; 3152 3153 mta = mta + ETHER_ADDR_LEN; 3154 nmcast--; 3155 3156 if (nmcast == 0) 3157 break; 3158 } 3159 3160 } 3161 return 0; 3162 } 3163 3164 static int 3165 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3166 { 3167 int i; 3168 3169 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3170 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { 3171 3172 ha->hw.mcast[i].addr[0] = 0; 3173 ha->hw.mcast[i].addr[1] = 0; 3174 ha->hw.mcast[i].addr[2] = 0; 3175 ha->hw.mcast[i].addr[3] = 0; 3176 ha->hw.mcast[i].addr[4] = 0; 3177 ha->hw.mcast[i].addr[5] = 0; 3178 3179 ha->hw.nmcast--; 3180 3181 mta = mta + ETHER_ADDR_LEN; 3182 nmcast--; 3183 3184 if (nmcast == 0) 3185 break; 3186 } 3187 } 3188 return 0; 3189 } 3190 3191 /* 3192 * Name: ql_hw_set_multi 3193 * Function: Sets the Multicast Addresses provided by the host O.S into the 3194 * hardware (for the given interface) 3195 */ 3196 int 3197 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, 3198 uint32_t add_mac) 3199 { 3200 uint8_t *mta = mcast_addr; 3201 int i; 3202 int ret = 0; 3203 uint32_t count = 0; 3204 uint8_t *mcast; 3205 3206 mcast = ha->hw.mac_addr_arr; 3207 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3208 3209 for (i = 0; i < mcnt; i++) { 3210 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { 3211 if (add_mac) { 3212 if (qla_hw_mac_addr_present(ha, mta) != 0) { 3213 bcopy(mta, mcast, ETHER_ADDR_LEN); 3214 mcast = mcast + ETHER_ADDR_LEN; 3215 count++; 3216 } 3217 } else { 3218 if (qla_hw_mac_addr_present(ha, mta) == 0) { 3219 bcopy(mta, mcast, ETHER_ADDR_LEN); 3220 mcast = mcast + ETHER_ADDR_LEN; 3221 count++; 3222 } 3223 } 3224 } 3225 if (count == Q8_MAX_MAC_ADDRS) { 3226 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 3227 add_mac, count)) { 3228 device_printf(ha->pci_dev, "%s: failed\n", 3229 __func__); 3230 return (-1); 3231 } 3232 3233 if (add_mac) { 3234 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, 3235 count); 3236 } else { 3237 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, 3238 count); 3239 } 3240 3241 count = 0; 3242 mcast = ha->hw.mac_addr_arr; 3243 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3244 } 3245 3246 mta += Q8_MAC_ADDR_LEN; 3247 } 3248 3249 if (count) { 3250 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, 3251 count)) { 3252 device_printf(ha->pci_dev, "%s: failed\n", __func__); 3253 return (-1); 3254 } 3255 if (add_mac) { 3256 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); 3257 } else { 3258 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); 3259 } 3260 } 3261 3262 return (ret); 3263 } 3264 3265 /* 3266 * Name: ql_hw_tx_done_locked 3267 * Function: Handle Transmit Completions 
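 * The _locked suffix indicates the caller is expected to hold the ring's
 * transmit lock. Completed mbufs are unmapped and freed, and the
 * corresponding descriptors are returned to the ring's free count.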
3268 */ 3269 void 3270 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) 3271 { 3272 qla_tx_buf_t *txb; 3273 qla_hw_t *hw = &ha->hw; 3274 uint32_t comp_idx, comp_count = 0; 3275 qla_hw_tx_cntxt_t *hw_tx_cntxt; 3276 3277 hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 3278 3279 /* retrieve index of last entry in tx ring completed */ 3280 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); 3281 3282 while (comp_idx != hw_tx_cntxt->txr_comp) { 3283 3284 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; 3285 3286 hw_tx_cntxt->txr_comp++; 3287 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) 3288 hw_tx_cntxt->txr_comp = 0; 3289 3290 comp_count++; 3291 3292 if (txb->m_head) { 3293 if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); 3294 3295 bus_dmamap_sync(ha->tx_tag, txb->map, 3296 BUS_DMASYNC_POSTWRITE); 3297 bus_dmamap_unload(ha->tx_tag, txb->map); 3298 m_freem(txb->m_head); 3299 3300 txb->m_head = NULL; 3301 } 3302 } 3303 3304 hw_tx_cntxt->txr_free += comp_count; 3305 return; 3306 } 3307 3308 void 3309 ql_update_link_state(qla_host_t *ha) 3310 { 3311 uint32_t link_state; 3312 uint32_t prev_link_state; 3313 3314 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3315 ha->hw.link_up = 0; 3316 return; 3317 } 3318 link_state = READ_REG32(ha, Q8_LINK_STATE); 3319 3320 prev_link_state = ha->hw.link_up; 3321 3322 if (ha->pci_func == 0) 3323 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0); 3324 else 3325 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0); 3326 3327 if (prev_link_state != ha->hw.link_up) { 3328 if (ha->hw.link_up) { 3329 if_link_state_change(ha->ifp, LINK_STATE_UP); 3330 } else { 3331 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 3332 } 3333 } 3334 return; 3335 } 3336 3337 void 3338 ql_hw_stop_rcv(qla_host_t *ha) 3339 { 3340 int i, done, count = 100; 3341 3342 ha->flags.stop_rcv = 1; 3343 3344 while (count) { 3345 done = 1; 3346 for (i = 0; i < ha->hw.num_sds_rings; i++) { 3347 if (ha->hw.sds[i].rcv_active) 3348 done = 0; 3349 } 3350 if (done) 3351 break; 3352 else 3353 qla_mdelay(__func__, 10); 3354 count--; 3355 } 3356 if (!count) 3357 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__); 3358 3359 return; 3360 } 3361 3362 int 3363 ql_hw_check_health(qla_host_t *ha) 3364 { 3365 uint32_t val; 3366 3367 ha->hw.health_count++; 3368 3369 if (ha->hw.health_count < 500) 3370 return 0; 3371 3372 ha->hw.health_count = 0; 3373 3374 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); 3375 3376 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || 3377 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { 3378 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n", 3379 __func__, val); 3380 return -1; 3381 } 3382 3383 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); 3384 3385 if ((val != ha->hw.hbeat_value) && 3386 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { 3387 ha->hw.hbeat_value = val; 3388 ha->hw.hbeat_failure = 0; 3389 return 0; 3390 } 3391 3392 ha->hw.hbeat_failure++; 3393 3394 if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */ 3395 return 0; 3396 else 3397 device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n", 3398 __func__, val); 3399 3400 3401 return -1; 3402 } 3403 3404 static int 3405 qla_init_nic_func(qla_host_t *ha) 3406 { 3407 device_t dev; 3408 q80_init_nic_func_t *init_nic; 3409 q80_init_nic_func_rsp_t *init_nic_rsp; 3410 uint32_t err; 3411 3412 dev = ha->pci_dev; 3413 3414 init_nic = (q80_init_nic_func_t *)ha->hw.mbox; 3415 bzero(init_nic, sizeof(q80_init_nic_func_t)); 3416 3417 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; 3418 
init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); 3419 init_nic->count_version |= Q8_MBX_CMD_VERSION; 3420 3421 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; 3422 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; 3423 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; 3424 3425 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); 3426 if (qla_mbx_cmd(ha, (uint32_t *)init_nic, 3427 (sizeof (q80_init_nic_func_t) >> 2), 3428 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { 3429 device_printf(dev, "%s: failed\n", __func__); 3430 return -1; 3431 } 3432 3433 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; 3434 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); 3435 3436 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); 3437 3438 if (err) { 3439 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3440 } 3441 3442 return 0; 3443 } 3444 3445 static int 3446 qla_stop_nic_func(qla_host_t *ha) 3447 { 3448 device_t dev; 3449 q80_stop_nic_func_t *stop_nic; 3450 q80_stop_nic_func_rsp_t *stop_nic_rsp; 3451 uint32_t err; 3452 3453 dev = ha->pci_dev; 3454 3455 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; 3456 bzero(stop_nic, sizeof(q80_stop_nic_func_t)); 3457 3458 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; 3459 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); 3460 stop_nic->count_version |= Q8_MBX_CMD_VERSION; 3461 3462 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; 3463 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; 3464 3465 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); 3466 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, 3467 (sizeof (q80_stop_nic_func_t) >> 2), 3468 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { 3469 device_printf(dev, "%s: failed\n", __func__); 3470 return -1; 3471 } 3472 3473 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; 3474 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); 3475 3476 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); 3477 3478 if (err) { 3479 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3480 } 3481 3482 return 0; 3483 } 3484 3485 static int 3486 qla_query_fw_dcbx_caps(qla_host_t *ha) 3487 { 3488 device_t dev; 3489 q80_query_fw_dcbx_caps_t *fw_dcbx; 3490 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; 3491 uint32_t err; 3492 3493 dev = ha->pci_dev; 3494 3495 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; 3496 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); 3497 3498 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; 3499 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); 3500 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; 3501 3502 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); 3503 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, 3504 (sizeof (q80_query_fw_dcbx_caps_t) >> 2), 3505 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { 3506 device_printf(dev, "%s: failed\n", __func__); 3507 return -1; 3508 } 3509 3510 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; 3511 ql_dump_buf8(ha, __func__, fw_dcbx_rsp, 3512 sizeof (q80_query_fw_dcbx_caps_rsp_t)); 3513 3514 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); 3515 3516 if (err) { 3517 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3518 } 3519 3520 return 0; 3521 } 3522 3523 static int 3524 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, 3525 uint32_t aen_mb3, uint32_t aen_mb4) 3526 { 3527 device_t dev; 3528 q80_idc_ack_t *idc_ack; 3529 
q80_idc_ack_rsp_t *idc_ack_rsp; 3530 uint32_t err; 3531 int count = 300; 3532 3533 dev = ha->pci_dev; 3534 3535 idc_ack = (q80_idc_ack_t *)ha->hw.mbox; 3536 bzero(idc_ack, sizeof(q80_idc_ack_t)); 3537 3538 idc_ack->opcode = Q8_MBX_IDC_ACK; 3539 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); 3540 idc_ack->count_version |= Q8_MBX_CMD_VERSION; 3541 3542 idc_ack->aen_mb1 = aen_mb1; 3543 idc_ack->aen_mb2 = aen_mb2; 3544 idc_ack->aen_mb3 = aen_mb3; 3545 idc_ack->aen_mb4 = aen_mb4; 3546 3547 ha->hw.imd_compl= 0; 3548 3549 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, 3550 (sizeof (q80_idc_ack_t) >> 2), 3551 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { 3552 device_printf(dev, "%s: failed\n", __func__); 3553 return -1; 3554 } 3555 3556 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; 3557 3558 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); 3559 3560 if (err) { 3561 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3562 return(-1); 3563 } 3564 3565 while (count && !ha->hw.imd_compl) { 3566 qla_mdelay(__func__, 100); 3567 count--; 3568 } 3569 3570 if (!count) 3571 return -1; 3572 else 3573 device_printf(dev, "%s: count %d\n", __func__, count); 3574 3575 return (0); 3576 } 3577 3578 static int 3579 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) 3580 { 3581 device_t dev; 3582 q80_set_port_cfg_t *pcfg; 3583 q80_set_port_cfg_rsp_t *pfg_rsp; 3584 uint32_t err; 3585 int count = 300; 3586 3587 dev = ha->pci_dev; 3588 3589 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; 3590 bzero(pcfg, sizeof(q80_set_port_cfg_t)); 3591 3592 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; 3593 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); 3594 pcfg->count_version |= Q8_MBX_CMD_VERSION; 3595 3596 pcfg->cfg_bits = cfg_bits; 3597 3598 device_printf(dev, "%s: cfg_bits" 3599 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3600 " [0x%x, 0x%x, 0x%x]\n", __func__, 3601 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3602 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3603 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0)); 3604 3605 ha->hw.imd_compl= 0; 3606 3607 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3608 (sizeof (q80_set_port_cfg_t) >> 2), 3609 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { 3610 device_printf(dev, "%s: failed\n", __func__); 3611 return -1; 3612 } 3613 3614 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; 3615 3616 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); 3617 3618 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { 3619 while (count && !ha->hw.imd_compl) { 3620 qla_mdelay(__func__, 100); 3621 count--; 3622 } 3623 if (count) { 3624 device_printf(dev, "%s: count %d\n", __func__, count); 3625 3626 err = 0; 3627 } 3628 } 3629 3630 if (err) { 3631 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3632 return(-1); 3633 } 3634 3635 return (0); 3636 } 3637 3638 3639 static int 3640 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) 3641 { 3642 uint32_t err; 3643 device_t dev = ha->pci_dev; 3644 q80_config_md_templ_size_t *md_size; 3645 q80_config_md_templ_size_rsp_t *md_size_rsp; 3646 3647 #ifndef QL_LDFLASH_FW 3648 3649 ql_minidump_template_hdr_t *hdr; 3650 3651 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; 3652 *size = hdr->size_of_template; 3653 return (0); 3654 3655 #endif /* #ifdef QL_LDFLASH_FW */ 3656 3657 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; 3658 bzero(md_size, sizeof(q80_config_md_templ_size_t)); 3659 3660 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; 3661 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); 3662 md_size->count_version |= Q8_MBX_CMD_VERSION; 3663 3664 if (qla_mbx_cmd(ha, (uint32_t *) md_size, 3665 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, 3666 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { 3667 3668 device_printf(dev, "%s: failed\n", __func__); 3669 3670 return (-1); 3671 } 3672 3673 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; 3674 3675 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); 3676 3677 if (err) { 3678 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3679 return(-1); 3680 } 3681 3682 *size = md_size_rsp->templ_size; 3683 3684 return (0); 3685 } 3686 3687 static int 3688 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) 3689 { 3690 device_t dev; 3691 q80_get_port_cfg_t *pcfg; 3692 q80_get_port_cfg_rsp_t *pcfg_rsp; 3693 uint32_t err; 3694 3695 dev = ha->pci_dev; 3696 3697 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; 3698 bzero(pcfg, sizeof(q80_get_port_cfg_t)); 3699 3700 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; 3701 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); 3702 pcfg->count_version |= Q8_MBX_CMD_VERSION; 3703 3704 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3705 (sizeof (q80_get_port_cfg_t) >> 2), 3706 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { 3707 device_printf(dev, "%s: failed\n", __func__); 3708 return -1; 3709 } 3710 3711 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; 3712 3713 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); 3714 3715 if (err) { 3716 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3717 return(-1); 3718 } 3719 3720 device_printf(dev, "%s: [cfg_bits, port type]" 3721 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3722 " [0x%x, 0x%x, 0x%x]\n", __func__, 3723 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, 3724 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3725 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3726 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0) 3727 ); 3728 3729 *cfg_bits = pcfg_rsp->cfg_bits; 3730 3731 return (0); 3732 } 3733 3734 int 3735 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) 3736 { 3737 struct ether_vlan_header *eh; 3738 uint16_t etype; 3739 struct ip *ip = NULL; 3740 struct ip6_hdr *ip6 = NULL; 3741 struct tcphdr *th = NULL; 3742 uint32_t hdrlen; 3743 uint32_t offset; 3744 uint8_t buf[sizeof(struct ip6_hdr)]; 3745 3746 eh = mtod(mp, struct ether_vlan_header *); 3747 3748 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3749 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3750 etype = ntohs(eh->evl_proto); 3751 } else { 3752 hdrlen = ETHER_HDR_LEN; 3753 etype = ntohs(eh->evl_encap_proto); 3754 } 3755 3756 if (etype == ETHERTYPE_IP) { 3757 3758 offset = (hdrlen + sizeof (struct ip)); 3759 3760 if (mp->m_len >= offset) { 3761 ip = (struct ip *)(mp->m_data + hdrlen); 3762 } else { 3763 m_copydata(mp, hdrlen, sizeof (struct ip), buf); 3764 ip = (struct ip *)buf; 3765 } 3766 3767 if (ip->ip_p == IPPROTO_TCP) { 3768 3769 hdrlen += ip->ip_hl << 2; 3770 offset = hdrlen + 4; 3771 3772 if (mp->m_len >= offset) { 3773 th = (struct tcphdr *)(mp->m_data + hdrlen);; 3774 } else { 3775 m_copydata(mp, hdrlen, 4, buf); 3776 th = (struct tcphdr *)buf; 3777 } 3778 } 3779 3780 } else if (etype == ETHERTYPE_IPV6) { 3781 3782 offset = (hdrlen + sizeof (struct ip6_hdr)); 3783 3784 if (mp->m_len >= offset) { 3785 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); 3786 } else { 3787 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); 3788 ip6 = (struct ip6_hdr *)buf; 3789 } 3790 3791 if (ip6->ip6_nxt == IPPROTO_TCP) { 3792 3793 hdrlen += sizeof(struct ip6_hdr); 3794 offset = hdrlen + 4; 3795 3796 if (mp->m_len >= offset) { 3797 th = (struct tcphdr *)(mp->m_data + hdrlen);; 3798 } else { 3799 m_copydata(mp, hdrlen, 4, buf); 3800 th = (struct tcphdr *)buf; 3801 } 3802 } 3803 } 3804 3805 if (th != NULL) { 3806 if ((th->th_sport == htons(3260)) || 3807 (th->th_dport == htons(3260))) 3808 return 0; 3809 } 3810 return (-1); 3811 } 3812 3813 void 3814 qla_hw_async_event(qla_host_t *ha) 3815 { 3816 switch (ha->hw.aen_mb0) { 3817 case 0x8101: 3818 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, 3819 ha->hw.aen_mb3, ha->hw.aen_mb4); 3820 3821 break; 3822 3823 default: 3824 break; 3825 } 3826 3827 return; 3828 } 3829 3830 #ifdef QL_LDFLASH_FW 3831 static int 3832 ql_get_minidump_template(qla_host_t *ha) 3833 { 3834 uint32_t err; 3835 device_t dev = ha->pci_dev; 3836 q80_config_md_templ_cmd_t *md_templ; 3837 q80_config_md_templ_cmd_rsp_t *md_templ_rsp; 3838 3839 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; 3840 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); 3841 3842 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; 3843 md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); 3844 md_templ->count_version |= Q8_MBX_CMD_VERSION; 3845 3846 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; 3847 md_templ->buff_size = ha->hw.dma_buf.minidump.size; 3848 3849 if (qla_mbx_cmd(ha, (uint32_t *) md_templ, 3850 (sizeof(q80_config_md_templ_cmd_t) >> 2), 3851 ha->hw.mbox, 3852 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { 3853 3854 device_printf(dev, "%s: failed\n", __func__); 3855 3856 return (-1); 3857 } 3858 3859 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; 3860 3861 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); 3862 3863 if (err) { 3864 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3865 return (-1); 3866 } 3867 3868 return (0); 3869 3870 } 3871 #endif /* #ifdef QL_LDFLASH_FW */ 
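
/*
 * Minidump capture flow (summary):
 *   ql_minidump_init() - queries the template size, obtains the template
 *       (from flash via ql_get_minidump_template() or from the built-in
 *       ql83xx_minidump image) and allocates the capture buffers.
 *   ql_minidump()      - stops the firmware sequence, runs
 *       ql_parse_template() to capture each entry into the dump buffer,
 *       then restarts the sequence.
 *   ql_minidump_free() - releases the template and capture buffers.
 */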
3872 3873 /* 3874 * Minidump related functionality 3875 */ 3876 3877 static int ql_parse_template(qla_host_t *ha); 3878 3879 static uint32_t ql_rdcrb(qla_host_t *ha, 3880 ql_minidump_entry_rdcrb_t *crb_entry, 3881 uint32_t * data_buff); 3882 3883 static uint32_t ql_pollrd(qla_host_t *ha, 3884 ql_minidump_entry_pollrd_t *entry, 3885 uint32_t * data_buff); 3886 3887 static uint32_t ql_pollrd_modify_write(qla_host_t *ha, 3888 ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 3889 uint32_t *data_buff); 3890 3891 static uint32_t ql_L2Cache(qla_host_t *ha, 3892 ql_minidump_entry_cache_t *cacheEntry, 3893 uint32_t * data_buff); 3894 3895 static uint32_t ql_L1Cache(qla_host_t *ha, 3896 ql_minidump_entry_cache_t *cacheEntry, 3897 uint32_t *data_buff); 3898 3899 static uint32_t ql_rdocm(qla_host_t *ha, 3900 ql_minidump_entry_rdocm_t *ocmEntry, 3901 uint32_t *data_buff); 3902 3903 static uint32_t ql_rdmem(qla_host_t *ha, 3904 ql_minidump_entry_rdmem_t *mem_entry, 3905 uint32_t *data_buff); 3906 3907 static uint32_t ql_rdrom(qla_host_t *ha, 3908 ql_minidump_entry_rdrom_t *romEntry, 3909 uint32_t *data_buff); 3910 3911 static uint32_t ql_rdmux(qla_host_t *ha, 3912 ql_minidump_entry_mux_t *muxEntry, 3913 uint32_t *data_buff); 3914 3915 static uint32_t ql_rdmux2(qla_host_t *ha, 3916 ql_minidump_entry_mux2_t *muxEntry, 3917 uint32_t *data_buff); 3918 3919 static uint32_t ql_rdqueue(qla_host_t *ha, 3920 ql_minidump_entry_queue_t *queueEntry, 3921 uint32_t *data_buff); 3922 3923 static uint32_t ql_cntrl(qla_host_t *ha, 3924 ql_minidump_template_hdr_t *template_hdr, 3925 ql_minidump_entry_cntrl_t *crbEntry); 3926 3927 3928 static uint32_t 3929 ql_minidump_size(qla_host_t *ha) 3930 { 3931 uint32_t i, k; 3932 uint32_t size = 0; 3933 ql_minidump_template_hdr_t *hdr; 3934 3935 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; 3936 3937 i = 0x2; 3938 3939 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { 3940 if (i & ha->hw.mdump_capture_mask) 3941 size += hdr->capture_size_array[k]; 3942 i = i << 1; 3943 } 3944 return (size); 3945 } 3946 3947 static void 3948 ql_free_minidump_buffer(qla_host_t *ha) 3949 { 3950 if (ha->hw.mdump_buffer != NULL) { 3951 free(ha->hw.mdump_buffer, M_QLA83XXBUF); 3952 ha->hw.mdump_buffer = NULL; 3953 ha->hw.mdump_buffer_size = 0; 3954 } 3955 return; 3956 } 3957 3958 static int 3959 ql_alloc_minidump_buffer(qla_host_t *ha) 3960 { 3961 ha->hw.mdump_buffer_size = ql_minidump_size(ha); 3962 3963 if (!ha->hw.mdump_buffer_size) 3964 return (-1); 3965 3966 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, 3967 M_NOWAIT); 3968 3969 if (ha->hw.mdump_buffer == NULL) 3970 return (-1); 3971 3972 return (0); 3973 } 3974 3975 static void 3976 ql_free_minidump_template_buffer(qla_host_t *ha) 3977 { 3978 if (ha->hw.mdump_template != NULL) { 3979 free(ha->hw.mdump_template, M_QLA83XXBUF); 3980 ha->hw.mdump_template = NULL; 3981 ha->hw.mdump_template_size = 0; 3982 } 3983 return; 3984 } 3985 3986 static int 3987 ql_alloc_minidump_template_buffer(qla_host_t *ha) 3988 { 3989 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; 3990 3991 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, 3992 M_QLA83XXBUF, M_NOWAIT); 3993 3994 if (ha->hw.mdump_template == NULL) 3995 return (-1); 3996 3997 return (0); 3998 } 3999 4000 static int 4001 ql_alloc_minidump_buffers(qla_host_t *ha) 4002 { 4003 int ret; 4004 4005 ret = ql_alloc_minidump_template_buffer(ha); 4006 4007 if (ret) 4008 return (ret); 4009 4010 ret = ql_alloc_minidump_buffer(ha); 4011 4012 if (ret) 4013 
ql_free_minidump_template_buffer(ha); 4014 4015 return (ret); 4016 } 4017 4018 4019 static uint32_t 4020 ql_validate_minidump_checksum(qla_host_t *ha) 4021 { 4022 uint64_t sum = 0; 4023 int count; 4024 uint32_t *template_buff; 4025 4026 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); 4027 template_buff = ha->hw.dma_buf.minidump.dma_b; 4028 4029 while (count-- > 0) { 4030 sum += *template_buff++; 4031 } 4032 4033 while (sum >> 32) { 4034 sum = (sum & 0xFFFFFFFF) + (sum >> 32); 4035 } 4036 4037 return (~sum); 4038 } 4039 4040 int 4041 ql_minidump_init(qla_host_t *ha) 4042 { 4043 int ret = 0; 4044 uint32_t template_size = 0; 4045 device_t dev = ha->pci_dev; 4046 4047 /* 4048 * Get Minidump Template Size 4049 */ 4050 ret = qla_get_minidump_tmplt_size(ha, &template_size); 4051 4052 if (ret || (template_size == 0)) { 4053 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, 4054 template_size); 4055 return (-1); 4056 } 4057 4058 /* 4059 * Allocate Memory for Minidump Template 4060 */ 4061 4062 ha->hw.dma_buf.minidump.alignment = 8; 4063 ha->hw.dma_buf.minidump.size = template_size; 4064 4065 #ifdef QL_LDFLASH_FW 4066 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { 4067 4068 device_printf(dev, "%s: minidump dma alloc failed\n", __func__); 4069 4070 return (-1); 4071 } 4072 ha->hw.dma_buf.flags.minidump = 1; 4073 4074 /* 4075 * Retrieve Minidump Template 4076 */ 4077 ret = ql_get_minidump_template(ha); 4078 #else 4079 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; 4080 4081 #endif /* #ifdef QL_LDFLASH_FW */ 4082 4083 if (ret == 0) { 4084 4085 ret = ql_validate_minidump_checksum(ha); 4086 4087 if (ret == 0) { 4088 4089 ret = ql_alloc_minidump_buffers(ha); 4090 4091 if (ret == 0) 4092 ha->hw.mdump_init = 1; 4093 else 4094 device_printf(dev, 4095 "%s: ql_alloc_minidump_buffers" 4096 " failed\n", __func__); 4097 } else { 4098 device_printf(dev, "%s: ql_validate_minidump_checksum" 4099 " failed\n", __func__); 4100 } 4101 } else { 4102 device_printf(dev, "%s: ql_get_minidump_template failed\n", 4103 __func__); 4104 } 4105 4106 if (ret) 4107 ql_minidump_free(ha); 4108 4109 return (ret); 4110 } 4111 4112 static void 4113 ql_minidump_free(qla_host_t *ha) 4114 { 4115 ha->hw.mdump_init = 0; 4116 if (ha->hw.dma_buf.flags.minidump) { 4117 ha->hw.dma_buf.flags.minidump = 0; 4118 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); 4119 } 4120 4121 ql_free_minidump_template_buffer(ha); 4122 ql_free_minidump_buffer(ha); 4123 4124 return; 4125 } 4126 4127 void 4128 ql_minidump(qla_host_t *ha) 4129 { 4130 if (!ha->hw.mdump_init) 4131 return; 4132 4133 if (ha->hw.mdump_done) 4134 return; 4135 4136 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); 4137 4138 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size); 4139 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size); 4140 4141 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template, 4142 ha->hw.mdump_template_size); 4143 4144 ql_parse_template(ha); 4145 4146 ql_start_sequence(ha, ha->hw.mdump_start_seq_index); 4147 4148 ha->hw.mdump_done = 1; 4149 4150 return; 4151 } 4152 4153 4154 /* 4155 * helper routines 4156 */ 4157 static void 4158 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize) 4159 { 4160 if (esize != entry->hdr.entry_capture_size) { 4161 entry->hdr.entry_capture_size = esize; 4162 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG; 4163 } 4164 return; 4165 } 4166 4167 4168 static int 4169 ql_parse_template(qla_host_t *ha) 4170 { 4171 uint32_t num_of_entries, buff_level, e_cnt, esize; 4172 uint32_t end_cnt, rv = 0; 4173 
char *dump_buff, *dbuff; 4174 int sane_start = 0, sane_end = 0; 4175 ql_minidump_template_hdr_t *template_hdr; 4176 ql_minidump_entry_t *entry; 4177 uint32_t capture_mask; 4178 uint32_t dump_size; 4179 4180 /* Setup parameters */ 4181 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template; 4182 4183 if (template_hdr->entry_type == TLHDR) 4184 sane_start = 1; 4185 4186 dump_buff = (char *) ha->hw.mdump_buffer; 4187 4188 num_of_entries = template_hdr->num_of_entries; 4189 4190 entry = (ql_minidump_entry_t *) ((char *)template_hdr 4191 + template_hdr->first_entry_offset ); 4192 4193 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] = 4194 template_hdr->ocm_window_array[ha->pci_func]; 4195 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func; 4196 4197 capture_mask = ha->hw.mdump_capture_mask; 4198 dump_size = ha->hw.mdump_buffer_size; 4199 4200 template_hdr->driver_capture_mask = capture_mask; 4201 4202 QL_DPRINT80(ha, (ha->pci_dev, 4203 "%s: sane_start = %d num_of_entries = %d " 4204 "capture_mask = 0x%x dump_size = %d \n", 4205 __func__, sane_start, num_of_entries, capture_mask, dump_size)); 4206 4207 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) { 4208 4209 /* 4210 * If the capture_mask of the entry does not match capture mask 4211 * skip the entry after marking the driver_flags indicator. 4212 */ 4213 4214 if (!(entry->hdr.entry_capture_mask & capture_mask)) { 4215 4216 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4217 entry = (ql_minidump_entry_t *) ((char *) entry 4218 + entry->hdr.entry_size); 4219 continue; 4220 } 4221 4222 /* 4223 * This is ONLY needed in implementations where 4224 * the capture buffer allocated is too small to capture 4225 * all of the required entries for a given capture mask. 4226 * We need to empty the buffer contents to a file 4227 * if possible, before processing the next entry 4228 * If the buff_full_flag is set, no further capture will happen 4229 * and all remaining non-control entries will be skipped. 
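		 * In this implementation an entry that would overflow the
		 * remaining buffer space is simply marked QL_DBG_SKIPPED_FLAG
		 * and skipped.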
4230 */ 4231 if (entry->hdr.entry_capture_size != 0) { 4232 if ((buff_level + entry->hdr.entry_capture_size) > 4233 dump_size) { 4234 /* Try to recover by emptying buffer to file */ 4235 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4236 entry = (ql_minidump_entry_t *) ((char *) entry 4237 + entry->hdr.entry_size); 4238 continue; 4239 } 4240 } 4241 4242 /* 4243 * Decode the entry type and process it accordingly 4244 */ 4245 4246 switch (entry->hdr.entry_type) { 4247 case RDNOP: 4248 break; 4249 4250 case RDEND: 4251 if (sane_end == 0) { 4252 end_cnt = e_cnt; 4253 } 4254 sane_end++; 4255 break; 4256 4257 case RDCRB: 4258 dbuff = dump_buff + buff_level; 4259 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff); 4260 ql_entry_err_chk(entry, esize); 4261 buff_level += esize; 4262 break; 4263 4264 case POLLRD: 4265 dbuff = dump_buff + buff_level; 4266 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff); 4267 ql_entry_err_chk(entry, esize); 4268 buff_level += esize; 4269 break; 4270 4271 case POLLRDMWR: 4272 dbuff = dump_buff + buff_level; 4273 esize = ql_pollrd_modify_write(ha, (void *)entry, 4274 (void *)dbuff); 4275 ql_entry_err_chk(entry, esize); 4276 buff_level += esize; 4277 break; 4278 4279 case L2ITG: 4280 case L2DTG: 4281 case L2DAT: 4282 case L2INS: 4283 dbuff = dump_buff + buff_level; 4284 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff); 4285 if (esize == -1) { 4286 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4287 } else { 4288 ql_entry_err_chk(entry, esize); 4289 buff_level += esize; 4290 } 4291 break; 4292 4293 case L1DAT: 4294 case L1INS: 4295 dbuff = dump_buff + buff_level; 4296 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff); 4297 ql_entry_err_chk(entry, esize); 4298 buff_level += esize; 4299 break; 4300 4301 case RDOCM: 4302 dbuff = dump_buff + buff_level; 4303 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff); 4304 ql_entry_err_chk(entry, esize); 4305 buff_level += esize; 4306 break; 4307 4308 case RDMEM: 4309 dbuff = dump_buff + buff_level; 4310 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff); 4311 ql_entry_err_chk(entry, esize); 4312 buff_level += esize; 4313 break; 4314 4315 case BOARD: 4316 case RDROM: 4317 dbuff = dump_buff + buff_level; 4318 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff); 4319 ql_entry_err_chk(entry, esize); 4320 buff_level += esize; 4321 break; 4322 4323 case RDMUX: 4324 dbuff = dump_buff + buff_level; 4325 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff); 4326 ql_entry_err_chk(entry, esize); 4327 buff_level += esize; 4328 break; 4329 4330 case RDMUX2: 4331 dbuff = dump_buff + buff_level; 4332 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff); 4333 ql_entry_err_chk(entry, esize); 4334 buff_level += esize; 4335 break; 4336 4337 case QUEUE: 4338 dbuff = dump_buff + buff_level; 4339 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff); 4340 ql_entry_err_chk(entry, esize); 4341 buff_level += esize; 4342 break; 4343 4344 case CNTRL: 4345 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) { 4346 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4347 } 4348 break; 4349 default: 4350 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4351 break; 4352 } 4353 /* next entry in the template */ 4354 entry = (ql_minidump_entry_t *) ((char *) entry 4355 + entry->hdr.entry_size); 4356 } 4357 4358 if (!sane_start || (sane_end > 1)) { 4359 device_printf(ha->pci_dev, 4360 "\n%s: Template configuration error. 
Check Template\n", 4361 __func__); 4362 } 4363 4364 QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n", 4365 __func__, template_hdr->num_of_entries)); 4366 4367 return 0; 4368 } 4369 4370 /* 4371 * Read CRB operation. 4372 */ 4373 static uint32_t 4374 ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry, 4375 uint32_t * data_buff) 4376 { 4377 int loop_cnt; 4378 int ret; 4379 uint32_t op_count, addr, stride, value = 0; 4380 4381 addr = crb_entry->addr; 4382 op_count = crb_entry->op_count; 4383 stride = crb_entry->addr_stride; 4384 4385 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { 4386 4387 ret = ql_rdwr_indreg32(ha, addr, &value, 1); 4388 4389 if (ret) 4390 return (0); 4391 4392 *data_buff++ = addr; 4393 *data_buff++ = value; 4394 addr = addr + stride; 4395 } 4396 4397 /* 4398 * for testing purpose we return amount of data written 4399 */ 4400 return (op_count * (2 * sizeof(uint32_t))); 4401 } 4402 4403 /* 4404 * Handle L2 Cache. 4405 */ 4406 4407 static uint32_t 4408 ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, 4409 uint32_t * data_buff) 4410 { 4411 int i, k; 4412 int loop_cnt; 4413 int ret; 4414 4415 uint32_t read_value; 4416 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w; 4417 uint32_t tag_value, read_cnt; 4418 volatile uint8_t cntl_value_r; 4419 long timeout; 4420 uint32_t data; 4421 4422 loop_cnt = cacheEntry->op_count; 4423 4424 read_addr = cacheEntry->read_addr; 4425 cntrl_addr = cacheEntry->control_addr; 4426 cntl_value_w = (uint32_t) cacheEntry->write_value; 4427 4428 tag_reg_addr = cacheEntry->tag_reg_addr; 4429 4430 tag_value = cacheEntry->init_tag_value; 4431 read_cnt = cacheEntry->read_addr_cnt; 4432 4433 for (i = 0; i < loop_cnt; i++) { 4434 4435 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); 4436 if (ret) 4437 return (0); 4438 4439 if (cacheEntry->write_value != 0) { 4440 4441 ret = ql_rdwr_indreg32(ha, cntrl_addr, 4442 &cntl_value_w, 0); 4443 if (ret) 4444 return (0); 4445 } 4446 4447 if (cacheEntry->poll_mask != 0) { 4448 4449 timeout = cacheEntry->poll_wait; 4450 4451 ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); 4452 if (ret) 4453 return (0); 4454 4455 cntl_value_r = (uint8_t)data; 4456 4457 while ((cntl_value_r & cacheEntry->poll_mask) != 0) { 4458 4459 if (timeout) { 4460 qla_mdelay(__func__, 1); 4461 timeout--; 4462 } else 4463 break; 4464 4465 ret = ql_rdwr_indreg32(ha, cntrl_addr, 4466 &data, 1); 4467 if (ret) 4468 return (0); 4469 4470 cntl_value_r = (uint8_t)data; 4471 } 4472 if (!timeout) { 4473 /* Report timeout error. 4474 * core dump capture failed 4475 * Skip remaining entries. 4476 * Write buffer out to file 4477 * Use driver specific fields in template header 4478 * to report this error. 4479 */ 4480 return (-1); 4481 } 4482 } 4483 4484 addr = read_addr; 4485 for (k = 0; k < read_cnt; k++) { 4486 4487 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4488 if (ret) 4489 return (0); 4490 4491 *data_buff++ = read_value; 4492 addr += cacheEntry->read_addr_stride; 4493 } 4494 4495 tag_value += cacheEntry->tag_value_stride; 4496 } 4497 4498 return (read_cnt * loop_cnt * sizeof(uint32_t)); 4499 } 4500 4501 /* 4502 * Handle L1 Cache. 
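 * Per iteration the tag register and the control register are written,
 * then read_addr_cnt words are read starting at read_addr (stepping by
 * read_addr_stride) into the capture buffer; the tag value advances by
 * tag_value_stride.  Unlike the L2 path above there is no poll/timeout.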
4503 */ 4504 4505 static uint32_t 4506 ql_L1Cache(qla_host_t *ha, 4507 ql_minidump_entry_cache_t *cacheEntry, 4508 uint32_t *data_buff) 4509 { 4510 int ret; 4511 int i, k; 4512 int loop_cnt; 4513 4514 uint32_t read_value; 4515 uint32_t addr, read_addr, cntrl_addr, tag_reg_addr; 4516 uint32_t tag_value, read_cnt; 4517 uint32_t cntl_value_w; 4518 4519 loop_cnt = cacheEntry->op_count; 4520 4521 read_addr = cacheEntry->read_addr; 4522 cntrl_addr = cacheEntry->control_addr; 4523 cntl_value_w = (uint32_t) cacheEntry->write_value; 4524 4525 tag_reg_addr = cacheEntry->tag_reg_addr; 4526 4527 tag_value = cacheEntry->init_tag_value; 4528 read_cnt = cacheEntry->read_addr_cnt; 4529 4530 for (i = 0; i < loop_cnt; i++) { 4531 4532 ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); 4533 if (ret) 4534 return (0); 4535 4536 ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); 4537 if (ret) 4538 return (0); 4539 4540 addr = read_addr; 4541 for (k = 0; k < read_cnt; k++) { 4542 4543 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4544 if (ret) 4545 return (0); 4546 4547 *data_buff++ = read_value; 4548 addr += cacheEntry->read_addr_stride; 4549 } 4550 4551 tag_value += cacheEntry->tag_value_stride; 4552 } 4553 4554 return (read_cnt * loop_cnt * sizeof(uint32_t)); 4555 } 4556 4557 /* 4558 * Reading OCM memory 4559 */ 4560 4561 static uint32_t 4562 ql_rdocm(qla_host_t *ha, 4563 ql_minidump_entry_rdocm_t *ocmEntry, 4564 uint32_t *data_buff) 4565 { 4566 int i, loop_cnt; 4567 volatile uint32_t addr; 4568 volatile uint32_t value; 4569 4570 addr = ocmEntry->read_addr; 4571 loop_cnt = ocmEntry->op_count; 4572 4573 for (i = 0; i < loop_cnt; i++) { 4574 value = READ_REG32(ha, addr); 4575 *data_buff++ = value; 4576 addr += ocmEntry->read_addr_stride; 4577 } 4578 return (loop_cnt * sizeof(value)); 4579 } 4580 4581 /* 4582 * Read memory 4583 */ 4584 4585 static uint32_t 4586 ql_rdmem(qla_host_t *ha, 4587 ql_minidump_entry_rdmem_t *mem_entry, 4588 uint32_t *data_buff) 4589 { 4590 int ret; 4591 int i, loop_cnt; 4592 volatile uint32_t addr; 4593 q80_offchip_mem_val_t val; 4594 4595 addr = mem_entry->read_addr; 4596 4597 /* size in bytes / 16 */ 4598 loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4); 4599 4600 for (i = 0; i < loop_cnt; i++) { 4601 4602 ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1); 4603 if (ret) 4604 return (0); 4605 4606 *data_buff++ = val.data_lo; 4607 *data_buff++ = val.data_hi; 4608 *data_buff++ = val.data_ulo; 4609 *data_buff++ = val.data_uhi; 4610 4611 addr += (sizeof(uint32_t) * 4); 4612 } 4613 4614 return (loop_cnt * (sizeof(uint32_t) * 4)); 4615 } 4616 4617 /* 4618 * Read Rom 4619 */ 4620 4621 static uint32_t 4622 ql_rdrom(qla_host_t *ha, 4623 ql_minidump_entry_rdrom_t *romEntry, 4624 uint32_t *data_buff) 4625 { 4626 int ret; 4627 int i, loop_cnt; 4628 uint32_t addr; 4629 uint32_t value; 4630 4631 addr = romEntry->read_addr; 4632 loop_cnt = romEntry->read_data_size; /* This is size in bytes */ 4633 loop_cnt /= sizeof(value); 4634 4635 for (i = 0; i < loop_cnt; i++) { 4636 4637 ret = ql_rd_flash32(ha, addr, &value); 4638 if (ret) 4639 return (0); 4640 4641 *data_buff++ = value; 4642 addr += sizeof(value); 4643 } 4644 4645 return (loop_cnt * sizeof(value)); 4646 } 4647 4648 /* 4649 * Read MUX data 4650 */ 4651 4652 static uint32_t 4653 ql_rdmux(qla_host_t *ha, 4654 ql_minidump_entry_mux_t *muxEntry, 4655 uint32_t *data_buff) 4656 { 4657 int ret; 4658 int loop_cnt; 4659 uint32_t read_value, sel_value; 4660 uint32_t read_addr, select_addr; 4661 4662 select_addr = 
muxEntry->select_addr; 4663 sel_value = muxEntry->select_value; 4664 read_addr = muxEntry->read_addr; 4665 4666 for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) { 4667 4668 ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0); 4669 if (ret) 4670 return (0); 4671 4672 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4673 if (ret) 4674 return (0); 4675 4676 *data_buff++ = sel_value; 4677 *data_buff++ = read_value; 4678 4679 sel_value += muxEntry->select_value_stride; 4680 } 4681 4682 return (loop_cnt * (2 * sizeof(uint32_t))); 4683 } 4684 4685 static uint32_t 4686 ql_rdmux2(qla_host_t *ha, 4687 ql_minidump_entry_mux2_t *muxEntry, 4688 uint32_t *data_buff) 4689 { 4690 int ret; 4691 int loop_cnt; 4692 4693 uint32_t select_addr_1, select_addr_2; 4694 uint32_t select_value_1, select_value_2; 4695 uint32_t select_value_count, select_value_mask; 4696 uint32_t read_addr, read_value; 4697 4698 select_addr_1 = muxEntry->select_addr_1; 4699 select_addr_2 = muxEntry->select_addr_2; 4700 select_value_1 = muxEntry->select_value_1; 4701 select_value_2 = muxEntry->select_value_2; 4702 select_value_count = muxEntry->select_value_count; 4703 select_value_mask = muxEntry->select_value_mask; 4704 4705 read_addr = muxEntry->read_addr; 4706 4707 for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count; 4708 loop_cnt++) { 4709 4710 uint32_t temp_sel_val; 4711 4712 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0); 4713 if (ret) 4714 return (0); 4715 4716 temp_sel_val = select_value_1 & select_value_mask; 4717 4718 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); 4719 if (ret) 4720 return (0); 4721 4722 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4723 if (ret) 4724 return (0); 4725 4726 *data_buff++ = temp_sel_val; 4727 *data_buff++ = read_value; 4728 4729 ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0); 4730 if (ret) 4731 return (0); 4732 4733 temp_sel_val = select_value_2 & select_value_mask; 4734 4735 ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); 4736 if (ret) 4737 return (0); 4738 4739 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4740 if (ret) 4741 return (0); 4742 4743 *data_buff++ = temp_sel_val; 4744 *data_buff++ = read_value; 4745 4746 select_value_1 += muxEntry->select_value_stride; 4747 select_value_2 += muxEntry->select_value_stride; 4748 } 4749 4750 return (loop_cnt * (4 * sizeof(uint32_t))); 4751 } 4752 4753 /* 4754 * Handling Queue State Reads. 4755 */ 4756 4757 static uint32_t 4758 ql_rdqueue(qla_host_t *ha, 4759 ql_minidump_entry_queue_t *queueEntry, 4760 uint32_t *data_buff) 4761 { 4762 int ret; 4763 int loop_cnt, k; 4764 uint32_t read_value; 4765 uint32_t read_addr, read_stride, select_addr; 4766 uint32_t queue_id, read_cnt; 4767 4768 read_cnt = queueEntry->read_addr_cnt; 4769 read_stride = queueEntry->read_addr_stride; 4770 select_addr = queueEntry->select_addr; 4771 4772 for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; 4773 loop_cnt++) { 4774 4775 ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0); 4776 if (ret) 4777 return (0); 4778 4779 read_addr = queueEntry->read_addr; 4780 4781 for (k = 0; k < read_cnt; k++) { 4782 4783 ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4784 if (ret) 4785 return (0); 4786 4787 *data_buff++ = read_value; 4788 read_addr += read_stride; 4789 } 4790 4791 queue_id += queueEntry->queue_id_stride; 4792 } 4793 4794 return (loop_cnt * (read_cnt * sizeof(uint32_t))); 4795 } 4796 4797 /* 4798 * Handling control entries. 
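 * Each control entry carries a bit-mask opcode that is applied at
 * entry_addr for op_count iterations (the address advancing by
 * addr_stride): WR writes value_1, RW reads the register and writes the
 * value back, AND/OR mask in value_2/value_3, POLL waits up to
 * poll_timeout ms for (reg & value_2) == value_1, and RDSTATE, WRSTATE
 * and MDSTATE read, write and rewrite entries of the template header's
 * saved_state_array.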
4799 */ 4800 4801 static uint32_t 4802 ql_cntrl(qla_host_t *ha, 4803 ql_minidump_template_hdr_t *template_hdr, 4804 ql_minidump_entry_cntrl_t *crbEntry) 4805 { 4806 int ret; 4807 int count; 4808 uint32_t opcode, read_value, addr, entry_addr; 4809 long timeout; 4810 4811 entry_addr = crbEntry->addr; 4812 4813 for (count = 0; count < crbEntry->op_count; count++) { 4814 opcode = crbEntry->opcode; 4815 4816 if (opcode & QL_DBG_OPCODE_WR) { 4817 4818 ret = ql_rdwr_indreg32(ha, entry_addr, 4819 &crbEntry->value_1, 0); 4820 if (ret) 4821 return (0); 4822 4823 opcode &= ~QL_DBG_OPCODE_WR; 4824 } 4825 4826 if (opcode & QL_DBG_OPCODE_RW) { 4827 4828 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4829 if (ret) 4830 return (0); 4831 4832 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4833 if (ret) 4834 return (0); 4835 4836 opcode &= ~QL_DBG_OPCODE_RW; 4837 } 4838 4839 if (opcode & QL_DBG_OPCODE_AND) { 4840 4841 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4842 if (ret) 4843 return (0); 4844 4845 read_value &= crbEntry->value_2; 4846 opcode &= ~QL_DBG_OPCODE_AND; 4847 4848 if (opcode & QL_DBG_OPCODE_OR) { 4849 read_value |= crbEntry->value_3; 4850 opcode &= ~QL_DBG_OPCODE_OR; 4851 } 4852 4853 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4854 if (ret) 4855 return (0); 4856 } 4857 4858 if (opcode & QL_DBG_OPCODE_OR) { 4859 4860 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4861 if (ret) 4862 return (0); 4863 4864 read_value |= crbEntry->value_3; 4865 4866 ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4867 if (ret) 4868 return (0); 4869 4870 opcode &= ~QL_DBG_OPCODE_OR; 4871 } 4872 4873 if (opcode & QL_DBG_OPCODE_POLL) { 4874 4875 opcode &= ~QL_DBG_OPCODE_POLL; 4876 timeout = crbEntry->poll_timeout; 4877 addr = entry_addr; 4878 4879 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4880 if (ret) 4881 return (0); 4882 4883 while ((read_value & crbEntry->value_2) 4884 != crbEntry->value_1) { 4885 4886 if (timeout) { 4887 qla_mdelay(__func__, 1); 4888 timeout--; 4889 } else 4890 break; 4891 4892 ret = ql_rdwr_indreg32(ha, addr, 4893 &read_value, 1); 4894 if (ret) 4895 return (0); 4896 } 4897 4898 if (!timeout) { 4899 /* 4900 * Report timeout error. 4901 * core dump capture failed 4902 * Skip remaining entries. 4903 * Write buffer out to file 4904 * Use driver specific fields in template header 4905 * to report this error. 4906 */ 4907 return (-1); 4908 } 4909 } 4910 4911 if (opcode & QL_DBG_OPCODE_RDSTATE) { 4912 /* 4913 * decide which address to use. 4914 */ 4915 if (crbEntry->state_index_a) { 4916 addr = template_hdr->saved_state_array[ 4917 crbEntry-> state_index_a]; 4918 } else { 4919 addr = entry_addr; 4920 } 4921 4922 ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4923 if (ret) 4924 return (0); 4925 4926 template_hdr->saved_state_array[crbEntry->state_index_v] 4927 = read_value; 4928 opcode &= ~QL_DBG_OPCODE_RDSTATE; 4929 } 4930 4931 if (opcode & QL_DBG_OPCODE_WRSTATE) { 4932 /* 4933 * decide which value to use. 4934 */ 4935 if (crbEntry->state_index_v) { 4936 read_value = template_hdr->saved_state_array[ 4937 crbEntry->state_index_v]; 4938 } else { 4939 read_value = crbEntry->value_1; 4940 } 4941 /* 4942 * decide which address to use. 
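			 * (As in the RDSTATE case above: a non-zero
			 * state_index_a selects an address from
			 * saved_state_array, otherwise the entry
			 * address itself is used.)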
4943 */ 4944 if (crbEntry->state_index_a) { 4945 addr = template_hdr->saved_state_array[ 4946 crbEntry-> state_index_a]; 4947 } else { 4948 addr = entry_addr; 4949 } 4950 4951 ret = ql_rdwr_indreg32(ha, addr, &read_value, 0); 4952 if (ret) 4953 return (0); 4954 4955 opcode &= ~QL_DBG_OPCODE_WRSTATE; 4956 } 4957 4958 if (opcode & QL_DBG_OPCODE_MDSTATE) { 4959 /* Read value from saved state using index */ 4960 read_value = template_hdr->saved_state_array[ 4961 crbEntry->state_index_v]; 4962 4963 read_value <<= crbEntry->shl; /*Shift left operation */ 4964 read_value >>= crbEntry->shr; /*Shift right operation */ 4965 4966 if (crbEntry->value_2) { 4967 /* check if AND mask is provided */ 4968 read_value &= crbEntry->value_2; 4969 } 4970 4971 read_value |= crbEntry->value_3; /* OR operation */ 4972 read_value += crbEntry->value_1; /* increment op */ 4973 4974 /* Write value back to state area. */ 4975 4976 template_hdr->saved_state_array[crbEntry->state_index_v] 4977 = read_value; 4978 opcode &= ~QL_DBG_OPCODE_MDSTATE; 4979 } 4980 4981 entry_addr += crbEntry->addr_stride; 4982 } 4983 4984 return (0); 4985 } 4986 4987 /* 4988 * Handling rd poll entry. 4989 */ 4990 4991 static uint32_t 4992 ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, 4993 uint32_t *data_buff) 4994 { 4995 int ret; 4996 int loop_cnt; 4997 uint32_t op_count, select_addr, select_value_stride, select_value; 4998 uint32_t read_addr, poll, mask, data_size, data; 4999 uint32_t wait_count = 0; 5000 5001 select_addr = entry->select_addr; 5002 read_addr = entry->read_addr; 5003 select_value = entry->select_value; 5004 select_value_stride = entry->select_value_stride; 5005 op_count = entry->op_count; 5006 poll = entry->poll; 5007 mask = entry->mask; 5008 data_size = entry->data_size; 5009 5010 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { 5011 5012 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0); 5013 if (ret) 5014 return (0); 5015 5016 wait_count = 0; 5017 5018 while (wait_count < poll) { 5019 5020 uint32_t temp; 5021 5022 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1); 5023 if (ret) 5024 return (0); 5025 5026 if ( (temp & mask) != 0 ) { 5027 break; 5028 } 5029 wait_count++; 5030 } 5031 5032 if (wait_count == poll) { 5033 device_printf(ha->pci_dev, 5034 "%s: Error in processing entry\n", __func__); 5035 device_printf(ha->pci_dev, 5036 "%s: wait_count <0x%x> poll <0x%x>\n", 5037 __func__, wait_count, poll); 5038 return 0; 5039 } 5040 5041 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1); 5042 if (ret) 5043 return (0); 5044 5045 *data_buff++ = select_value; 5046 *data_buff++ = data; 5047 select_value = select_value + select_value_stride; 5048 } 5049 5050 /* 5051 * for testing purpose we return amount of data written 5052 */ 5053 return (loop_cnt * (2 * sizeof(uint32_t))); 5054 } 5055 5056 5057 /* 5058 * Handling rd modify write poll entry. 
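 * The sequence below is: write value_1 to addr_1, poll addr_1 until a
 * mask bit is set (up to 'poll' reads), read addr_2, AND the value with
 * modify_mask and write it back to addr_2, write value_2 to addr_1 and
 * poll once more; the addr_2/data pair (two 32-bit words) is then
 * recorded in the capture buffer.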
5059 */ 5060 5061 static uint32_t 5062 ql_pollrd_modify_write(qla_host_t *ha, 5063 ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 5064 uint32_t *data_buff) 5065 { 5066 int ret; 5067 uint32_t addr_1, addr_2, value_1, value_2, data; 5068 uint32_t poll, mask, data_size, modify_mask; 5069 uint32_t wait_count = 0; 5070 5071 addr_1 = entry->addr_1; 5072 addr_2 = entry->addr_2; 5073 value_1 = entry->value_1; 5074 value_2 = entry->value_2; 5075 5076 poll = entry->poll; 5077 mask = entry->mask; 5078 modify_mask = entry->modify_mask; 5079 data_size = entry->data_size; 5080 5081 5082 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0); 5083 if (ret) 5084 return (0); 5085 5086 wait_count = 0; 5087 while (wait_count < poll) { 5088 5089 uint32_t temp; 5090 5091 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 5092 if (ret) 5093 return (0); 5094 5095 if ( (temp & mask) != 0 ) { 5096 break; 5097 } 5098 wait_count++; 5099 } 5100 5101 if (wait_count == poll) { 5102 device_printf(ha->pci_dev, "%s Error in processing entry\n", 5103 __func__); 5104 } else { 5105 5106 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1); 5107 if (ret) 5108 return (0); 5109 5110 data = (data & modify_mask); 5111 5112 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0); 5113 if (ret) 5114 return (0); 5115 5116 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0); 5117 if (ret) 5118 return (0); 5119 5120 /* Poll again */ 5121 wait_count = 0; 5122 while (wait_count < poll) { 5123 5124 uint32_t temp; 5125 5126 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 5127 if (ret) 5128 return (0); 5129 5130 if ( (temp & mask) != 0 ) { 5131 break; 5132 } 5133 wait_count++; 5134 } 5135 *data_buff++ = addr_2; 5136 *data_buff++ = data; 5137 } 5138 5139 /* 5140 * for testing purpose we return amount of data written 5141 */ 5142 return (2 * sizeof(uint32_t)); 5143 } 5144 5145 5146
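/*
 * Added summary (for reference; the functions named are those defined
 * above in this file):
 *
 *	ql_minidump_init(ha);	// fetch + checksum the template and
 *				// allocate the capture buffers
 *	...
 *	ql_minidump(ha);	// stop the sequence via ql_stop_sequence(),
 *				// run ql_parse_template() to fill
 *				// mdump_buffer, restart via ql_start_sequence()
 *
 * The RD*, POLL* and QUEUE helpers return the number of bytes they
 * appended to the capture buffer (0 if a register access fails);
 * ql_L2Cache() and ql_cntrl() additionally return -1 on a poll timeout,
 * and ql_entry_err_chk() records any mismatch with the template's
 * expected entry_capture_size in the entry's driver_flags.
 */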