1 /* 2 * Copyright (c) 2013-2016 Qlogic Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: ql_hw.c 30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 31 * Content: Contains Hardware dependent functions 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "ql_os.h" 38 #include "ql_hw.h" 39 #include "ql_def.h" 40 #include "ql_inline.h" 41 #include "ql_ver.h" 42 #include "ql_glbl.h" 43 #include "ql_dbg.h" 44 #include "ql_minidump.h" 45 46 /* 47 * Static Functions 48 */ 49 50 static void qla_del_rcv_cntxt(qla_host_t *ha); 51 static int qla_init_rcv_cntxt(qla_host_t *ha); 52 static void qla_del_xmt_cntxt(qla_host_t *ha); 53 static int qla_init_xmt_cntxt(qla_host_t *ha); 54 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 55 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); 56 static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, 57 uint32_t num_intrs, uint32_t create); 58 static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); 59 static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, 60 int tenable, int rcv); 61 static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); 62 static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); 63 64 static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, 65 uint8_t *hdr); 66 static int qla_hw_add_all_mcast(qla_host_t *ha); 67 static int qla_hw_del_all_mcast(qla_host_t *ha); 68 static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); 69 70 static int qla_init_nic_func(qla_host_t *ha); 71 static int qla_stop_nic_func(qla_host_t *ha); 72 static int qla_query_fw_dcbx_caps(qla_host_t *ha); 73 static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); 74 static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); 75 static void qla_get_quick_stats(qla_host_t *ha); 76 static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); 77 static int qla_get_cam_search_mode(qla_host_t *ha); 78 79 static void ql_minidump_free(qla_host_t *ha); 80 81 82 static int 83 qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) 84 { 85 int err = 0, ret; 86 qla_host_t 
*ha; 87 uint32_t i; 88 89 err = sysctl_handle_int(oidp, &ret, 0, req); 90 91 if (err || !req->newptr) 92 return (err); 93 94 if (ret == 1) { 95 96 ha = (qla_host_t *)arg1; 97 98 for (i = 0; i < ha->hw.num_sds_rings; i++) { 99 100 device_printf(ha->pci_dev, 101 "%s: sds_ring[%d] = %p\n", __func__,i, 102 (void *)ha->hw.sds[i].intr_count); 103 104 device_printf(ha->pci_dev, 105 "%s: sds_ring[%d].spurious_intr_count = %p\n", 106 __func__, 107 i, (void *)ha->hw.sds[i].spurious_intr_count); 108 109 device_printf(ha->pci_dev, 110 "%s: sds_ring[%d].rx_free = %d\n", __func__,i, 111 ha->hw.sds[i].rx_free); 112 } 113 114 for (i = 0; i < ha->hw.num_tx_rings; i++) 115 device_printf(ha->pci_dev, 116 "%s: tx[%d] = %p\n", __func__,i, 117 (void *)ha->tx_ring[i].count); 118 119 for (i = 0; i < ha->hw.num_rds_rings; i++) 120 device_printf(ha->pci_dev, 121 "%s: rds_ring[%d] = %p\n", __func__,i, 122 (void *)ha->hw.rds[i].count); 123 124 device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__, 125 (void *)ha->lro_pkt_count); 126 127 device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__, 128 (void *)ha->lro_bytes); 129 130 #ifdef QL_ENABLE_ISCSI_TLV 131 device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__, 132 (void *)ha->hw.iscsi_pkt_count); 133 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 134 135 } 136 return (err); 137 } 138 139 static int 140 qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS) 141 { 142 int err, ret = 0; 143 qla_host_t *ha; 144 145 err = sysctl_handle_int(oidp, &ret, 0, req); 146 147 if (err || !req->newptr) 148 return (err); 149 150 if (ret == 1) { 151 ha = (qla_host_t *)arg1; 152 qla_get_quick_stats(ha); 153 } 154 return (err); 155 } 156 157 #ifdef QL_DBG 158 159 static void 160 qla_stop_pegs(qla_host_t *ha) 161 { 162 uint32_t val = 1; 163 164 ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); 165 ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); 166 ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); 167 ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); 168 ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); 169 device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); 170 } 171 172 static int 173 qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) 174 { 175 int err, ret = 0; 176 qla_host_t *ha; 177 178 err = sysctl_handle_int(oidp, &ret, 0, req); 179 180 181 if (err || !req->newptr) 182 return (err); 183 184 if (ret == 1) { 185 ha = (qla_host_t *)arg1; 186 (void)QLA_LOCK(ha, __func__, 0); 187 qla_stop_pegs(ha); 188 QLA_UNLOCK(ha, __func__); 189 } 190 191 return err; 192 } 193 #endif /* #ifdef QL_DBG */ 194 195 static int 196 qla_validate_set_port_cfg_bit(uint32_t bits) 197 { 198 if ((bits & 0xF) > 1) 199 return (-1); 200 201 if (((bits >> 4) & 0xF) > 2) 202 return (-1); 203 204 if (((bits >> 8) & 0xF) > 2) 205 return (-1); 206 207 return (0); 208 } 209 210 static int 211 qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) 212 { 213 int err, ret = 0; 214 qla_host_t *ha; 215 uint32_t cfg_bits; 216 217 err = sysctl_handle_int(oidp, &ret, 0, req); 218 219 if (err || !req->newptr) 220 return (err); 221 222 if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { 223 224 ha = (qla_host_t *)arg1; 225 226 err = qla_get_port_config(ha, &cfg_bits); 227 228 if (err) 229 goto qla_sysctl_set_port_cfg_exit; 230 231 if (ret & 0x1) { 232 cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; 233 } else { 234 cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; 235 } 236 237 ret = ret >> 4; 238 cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; 239 240 if ((ret & 0xF) == 0) { 241 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; 242 } else if ((ret & 0xF) == 1){ 243 
cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; 244 } else { 245 cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; 246 } 247 248 ret = ret >> 4; 249 cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; 250 251 if (ret == 0) { 252 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; 253 } else if (ret == 1){ 254 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; 255 } else { 256 cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; 257 } 258 259 err = qla_set_port_config(ha, cfg_bits); 260 } else { 261 ha = (qla_host_t *)arg1; 262 263 err = qla_get_port_config(ha, &cfg_bits); 264 } 265 266 qla_sysctl_set_port_cfg_exit: 267 return err; 268 } 269 270 static int 271 qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) 272 { 273 int err, ret = 0; 274 qla_host_t *ha; 275 276 err = sysctl_handle_int(oidp, &ret, 0, req); 277 278 if (err || !req->newptr) 279 return (err); 280 281 ha = (qla_host_t *)arg1; 282 283 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || 284 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { 285 err = qla_set_cam_search_mode(ha, (uint32_t)ret); 286 } else { 287 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); 288 } 289 290 return (err); 291 } 292 293 static int 294 qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) 295 { 296 int err, ret = 0; 297 qla_host_t *ha; 298 299 err = sysctl_handle_int(oidp, &ret, 0, req); 300 301 if (err || !req->newptr) 302 return (err); 303 304 ha = (qla_host_t *)arg1; 305 err = qla_get_cam_search_mode(ha); 306 307 return (err); 308 } 309 310 311 /* 312 * Name: ql_hw_add_sysctls 313 * Function: Add P3Plus specific sysctls 314 */ 315 void 316 ql_hw_add_sysctls(qla_host_t *ha) 317 { 318 device_t dev; 319 320 dev = ha->pci_dev; 321 322 ha->hw.num_sds_rings = MAX_SDS_RINGS; 323 ha->hw.num_rds_rings = MAX_RDS_RINGS; 324 ha->hw.num_tx_rings = NUM_TX_RINGS; 325 326 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 328 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, 329 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); 330 331 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 333 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, 334 ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); 335 336 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 337 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 338 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, 339 ha->hw.num_tx_rings, "Number of Transmit Rings"); 340 341 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 342 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 343 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, 344 ha->txr_idx, "Tx Ring Used"); 345 346 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 347 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 348 OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW, 349 (void *)ha, 0, 350 qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 351 352 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 353 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 354 OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW, 355 (void *)ha, 0, 356 qla_sysctl_get_quick_stats, "I", "Quick Statistics"); 357 358 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 359 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 360 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, 361 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); 362 363 ha->hw.sds_cidx_thres = 32; 364 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 365 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 366 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, 
&ha->hw.sds_cidx_thres, 367 ha->hw.sds_cidx_thres, 368 "Number of SDS entries to process before updating" 369 " SDS Ring Consumer Index"); 370 371 ha->hw.rds_pidx_thres = 32; 372 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 373 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 374 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, 375 ha->hw.rds_pidx_thres, 376 "Number of Rcv Rings Entries to post before updating" 377 " RDS Ring Producer Index"); 378 379 ha->hw.rcv_intr_coalesce = (3 << 16) | 256; 380 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 381 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 382 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, 383 &ha->hw.rcv_intr_coalesce, 384 ha->hw.rcv_intr_coalesce, 385 "Rcv Intr Coalescing Parameters\n" 386 "\tbits 15:0 max packets\n" 387 "\tbits 31:16 max micro-seconds to wait\n" 388 "\tplease run\n" 389 "\tifconfig <if> down && ifconfig <if> up\n" 390 "\tto take effect \n"); 391 392 ha->hw.xmt_intr_coalesce = (64 << 16) | 64; 393 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 394 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 395 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, 396 &ha->hw.xmt_intr_coalesce, 397 ha->hw.xmt_intr_coalesce, 398 "Xmt Intr Coalescing Parameters\n" 399 "\tbits 15:0 max packets\n" 400 "\tbits 31:16 max micro-seconds to wait\n" 401 "\tplease run\n" 402 "\tifconfig <if> down && ifconfig <if> up\n" 403 "\tto take effect \n"); 404 405 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 407 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW, 408 (void *)ha, 0, 409 qla_sysctl_port_cfg, "I", 410 "Set Port Configuration if values below " 411 "otherwise Get Port Configuration\n" 412 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" 413 "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" 414 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" 415 " 1 = xmt only; 2 = rcv only;\n" 416 ); 417 418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 420 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 421 (void *)ha, 0, 422 qla_sysctl_set_cam_search_mode, "I", 423 "Set CAM Search Mode" 424 "\t 1 = search mode internal\n" 425 "\t 2 = search mode auto\n"); 426 427 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 428 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 429 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 430 (void *)ha, 0, 431 qla_sysctl_get_cam_search_mode, "I", 432 "Get CAM Search Mode" 433 "\t 1 = search mode internal\n" 434 "\t 2 = search mode auto\n"); 435 436 ha->hw.enable_9kb = 1; 437 438 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 439 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 440 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, 441 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); 442 443 ha->hw.mdump_active = 0; 444 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 445 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 446 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, 447 ha->hw.mdump_active, 448 "Minidump retrieval is Active"); 449 450 ha->hw.mdump_done = 0; 451 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 452 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 453 OID_AUTO, "mdump_done", CTLFLAG_RW, 454 &ha->hw.mdump_done, ha->hw.mdump_done, 455 "Minidump has been done and available for retrieval"); 456 457 ha->hw.mdump_capture_mask = 0xF; 458 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 459 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 460 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, 461 &ha->hw.mdump_capture_mask, 
ha->hw.mdump_capture_mask, 462 "Minidump capture mask"); 463 #ifdef QL_DBG 464 465 ha->err_inject = 0; 466 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 467 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 468 OID_AUTO, "err_inject", 469 CTLFLAG_RW, &ha->err_inject, ha->err_inject, 470 "Error to be injected\n" 471 "\t\t\t 0: No Errors\n" 472 "\t\t\t 1: rcv: rxb struct invalid\n" 473 "\t\t\t 2: rcv: mp == NULL\n" 474 "\t\t\t 3: lro: rxb struct invalid\n" 475 "\t\t\t 4: lro: mp == NULL\n" 476 "\t\t\t 5: rcv: num handles invalid\n" 477 "\t\t\t 6: reg: indirect reg rd_wr failure\n" 478 "\t\t\t 7: ocm: offchip memory rd_wr failure\n" 479 "\t\t\t 8: mbx: mailbox command failure\n" 480 "\t\t\t 9: heartbeat failure\n" 481 "\t\t\t A: temperature failure\n" 482 "\t\t\t 11: m_getcl or m_getjcl failure\n" ); 483 484 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 485 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 486 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW, 487 (void *)ha, 0, 488 qla_sysctl_stop_pegs, "I", "Peg Stop"); 489 490 #endif /* #ifdef QL_DBG */ 491 492 ha->hw.user_pri_nic = 0; 493 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 494 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 495 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, 496 ha->hw.user_pri_nic, 497 "VLAN Tag User Priority for Normal Ethernet Packets"); 498 499 ha->hw.user_pri_iscsi = 4; 500 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 501 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 502 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, 503 ha->hw.user_pri_iscsi, 504 "VLAN Tag User Priority for iSCSI Packets"); 505 506 } 507 508 void 509 ql_hw_link_status(qla_host_t *ha) 510 { 511 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); 512 513 if (ha->hw.link_up) { 514 device_printf(ha->pci_dev, "link Up\n"); 515 } else { 516 device_printf(ha->pci_dev, "link Down\n"); 517 } 518 519 if (ha->hw.flags.fduplex) { 520 device_printf(ha->pci_dev, "Full Duplex\n"); 521 } else { 522 device_printf(ha->pci_dev, "Half Duplex\n"); 523 } 524 525 if (ha->hw.flags.autoneg) { 526 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); 527 } else { 528 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); 529 } 530 531 switch (ha->hw.link_speed) { 532 case 0x710: 533 device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); 534 break; 535 536 case 0x3E8: 537 device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); 538 break; 539 540 case 0x64: 541 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); 542 break; 543 544 default: 545 device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); 546 break; 547 } 548 549 switch (ha->hw.module_type) { 550 551 case 0x01: 552 device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); 553 break; 554 555 case 0x02: 556 device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); 557 break; 558 559 case 0x03: 560 device_printf(ha->pci_dev, "Module Type 10GBase-SR\n"); 561 break; 562 563 case 0x04: 564 device_printf(ha->pci_dev, 565 "Module Type 10GE Passive Copper(Compliant)[%d m]\n", 566 ha->hw.cable_length); 567 break; 568 569 case 0x05: 570 device_printf(ha->pci_dev, "Module Type 10GE Active" 571 " Limiting Copper(Compliant)[%d m]\n", 572 ha->hw.cable_length); 573 break; 574 575 case 0x06: 576 device_printf(ha->pci_dev, 577 "Module Type 10GE Passive Copper" 578 " (Legacy, Best Effort)[%d m]\n", 579 ha->hw.cable_length); 580 break; 581 582 case 0x07: 583 device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); 584 break; 585 586 case 0x08: 587 device_printf(ha->pci_dev, "Module Type 
1000Base-LX\n"); 588 break; 589 590 case 0x09: 591 device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); 592 break; 593 594 case 0x0A: 595 device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); 596 break; 597 598 case 0x0B: 599 device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" 600 "(Legacy, Best Effort)\n"); 601 break; 602 603 default: 604 device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", 605 ha->hw.module_type); 606 break; 607 } 608 609 if (ha->hw.link_faults == 1) 610 device_printf(ha->pci_dev, "SFP Power Fault\n"); 611 } 612 613 /* 614 * Name: ql_free_dma 615 * Function: Frees the DMA'able memory allocated in ql_alloc_dma() 616 */ 617 void 618 ql_free_dma(qla_host_t *ha) 619 { 620 uint32_t i; 621 622 if (ha->hw.dma_buf.flags.sds_ring) { 623 for (i = 0; i < ha->hw.num_sds_rings; i++) { 624 ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); 625 } 626 ha->hw.dma_buf.flags.sds_ring = 0; 627 } 628 629 if (ha->hw.dma_buf.flags.rds_ring) { 630 for (i = 0; i < ha->hw.num_rds_rings; i++) { 631 ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); 632 } 633 ha->hw.dma_buf.flags.rds_ring = 0; 634 } 635 636 if (ha->hw.dma_buf.flags.tx_ring) { 637 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); 638 ha->hw.dma_buf.flags.tx_ring = 0; 639 } 640 ql_minidump_free(ha); 641 } 642 643 /* 644 * Name: ql_alloc_dma 645 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. 646 */ 647 int 648 ql_alloc_dma(qla_host_t *ha) 649 { 650 device_t dev; 651 uint32_t i, j, size, tx_ring_size; 652 qla_hw_t *hw; 653 qla_hw_tx_cntxt_t *tx_cntxt; 654 uint8_t *vaddr; 655 bus_addr_t paddr; 656 657 dev = ha->pci_dev; 658 659 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 660 661 hw = &ha->hw; 662 /* 663 * Allocate Transmit Ring 664 */ 665 tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); 666 size = (tx_ring_size * ha->hw.num_tx_rings); 667 668 hw->dma_buf.tx_ring.alignment = 8; 669 hw->dma_buf.tx_ring.size = size + PAGE_SIZE; 670 671 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { 672 device_printf(dev, "%s: tx ring alloc failed\n", __func__); 673 goto ql_alloc_dma_exit; 674 } 675 676 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; 677 paddr = hw->dma_buf.tx_ring.dma_addr; 678 679 for (i = 0; i < ha->hw.num_tx_rings; i++) { 680 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 681 682 tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; 683 tx_cntxt->tx_ring_paddr = paddr; 684 685 vaddr += tx_ring_size; 686 paddr += tx_ring_size; 687 } 688 689 for (i = 0; i < ha->hw.num_tx_rings; i++) { 690 tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 691 692 tx_cntxt->tx_cons = (uint32_t *)vaddr; 693 tx_cntxt->tx_cons_paddr = paddr; 694 695 vaddr += sizeof (uint32_t); 696 paddr += sizeof (uint32_t); 697 } 698 699 ha->hw.dma_buf.flags.tx_ring = 1; 700 701 QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", 702 __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), 703 hw->dma_buf.tx_ring.dma_b)); 704 /* 705 * Allocate Receive Descriptor Rings 706 */ 707 708 for (i = 0; i < hw->num_rds_rings; i++) { 709 710 hw->dma_buf.rds_ring[i].alignment = 8; 711 hw->dma_buf.rds_ring[i].size = 712 (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; 713 714 if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { 715 device_printf(dev, "%s: rds ring[%d] alloc failed\n", 716 __func__, i); 717 718 for (j = 0; j < i; j++) 719 ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); 720 721 goto ql_alloc_dma_exit; 722 } 723 QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", 724 __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), 725 
hw->dma_buf.rds_ring[i].dma_b)); 726 } 727 728 hw->dma_buf.flags.rds_ring = 1; 729 730 /* 731 * Allocate Status Descriptor Rings 732 */ 733 734 for (i = 0; i < hw->num_sds_rings; i++) { 735 hw->dma_buf.sds_ring[i].alignment = 8; 736 hw->dma_buf.sds_ring[i].size = 737 (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; 738 739 if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { 740 device_printf(dev, "%s: sds ring alloc failed\n", 741 __func__); 742 743 for (j = 0; j < i; j++) 744 ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); 745 746 goto ql_alloc_dma_exit; 747 } 748 QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", 749 __func__, i, 750 (void *)(hw->dma_buf.sds_ring[i].dma_addr), 751 hw->dma_buf.sds_ring[i].dma_b)); 752 } 753 for (i = 0; i < hw->num_sds_rings; i++) { 754 hw->sds[i].sds_ring_base = 755 (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; 756 } 757 758 hw->dma_buf.flags.sds_ring = 1; 759 760 return 0; 761 762 ql_alloc_dma_exit: 763 ql_free_dma(ha); 764 return -1; 765 } 766 767 #define Q8_MBX_MSEC_DELAY 5000 768 769 static int 770 qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 771 uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) 772 { 773 uint32_t i; 774 uint32_t data; 775 int ret = 0; 776 777 if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) { 778 ret = -3; 779 ha->qla_initiate_recovery = 1; 780 goto exit_qla_mbx_cmd; 781 } 782 783 if (no_pause) 784 i = 1000; 785 else 786 i = Q8_MBX_MSEC_DELAY; 787 788 while (i) { 789 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); 790 if (data == 0) 791 break; 792 if (no_pause) { 793 DELAY(1000); 794 } else { 795 qla_mdelay(__func__, 1); 796 } 797 i--; 798 } 799 800 if (i == 0) { 801 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", 802 __func__, data); 803 ret = -1; 804 ha->qla_initiate_recovery = 1; 805 goto exit_qla_mbx_cmd; 806 } 807 808 for (i = 0; i < n_hmbox; i++) { 809 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); 810 h_mbox++; 811 } 812 813 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); 814 815 816 i = Q8_MBX_MSEC_DELAY; 817 while (i) { 818 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); 819 820 if ((data & 0x3) == 1) { 821 data = READ_REG32(ha, Q8_FW_MBOX0); 822 if ((data & 0xF000) != 0x8000) 823 break; 824 } 825 if (no_pause) { 826 DELAY(1000); 827 } else { 828 qla_mdelay(__func__, 1); 829 } 830 i--; 831 } 832 if (i == 0) { 833 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", 834 __func__, data); 835 ret = -2; 836 ha->qla_initiate_recovery = 1; 837 goto exit_qla_mbx_cmd; 838 } 839 840 for (i = 0; i < n_fwmbox; i++) { 841 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); 842 } 843 844 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); 845 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 846 847 exit_qla_mbx_cmd: 848 return (ret); 849 } 850 851 int 852 qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, 853 uint32_t *num_rcvq) 854 { 855 uint32_t *mbox, err; 856 device_t dev = ha->pci_dev; 857 858 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX)); 859 860 mbox = ha->hw.mbox; 861 862 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 863 864 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { 865 device_printf(dev, "%s: failed0\n", __func__); 866 return (-1); 867 } 868 err = mbox[0] >> 25; 869 870 if (supports_9kb != NULL) { 871 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ 872 *supports_9kb = 1; 873 else 874 *supports_9kb = 0; 875 } 876 877 if (num_rcvq != NULL) 878 *num_rcvq = ((mbox[6] >> 16) & 0xFFFF); 879 880 if ((err != 1) && (err != 0)) { 881 device_printf(dev, "%s: failed1 
[0x%08x]\n", __func__, err); 882 return (-1); 883 } 884 return 0; 885 } 886 887 static int 888 qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, 889 uint32_t create) 890 { 891 uint32_t i, err; 892 device_t dev = ha->pci_dev; 893 q80_config_intr_t *c_intr; 894 q80_config_intr_rsp_t *c_intr_rsp; 895 896 c_intr = (q80_config_intr_t *)ha->hw.mbox; 897 bzero(c_intr, (sizeof (q80_config_intr_t))); 898 899 c_intr->opcode = Q8_MBX_CONFIG_INTR; 900 901 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); 902 c_intr->count_version |= Q8_MBX_CMD_VERSION; 903 904 c_intr->nentries = num_intrs; 905 906 for (i = 0; i < num_intrs; i++) { 907 if (create) { 908 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; 909 c_intr->intr[i].msix_index = start_idx + 1 + i; 910 } else { 911 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; 912 c_intr->intr[i].msix_index = 913 ha->hw.intr_id[(start_idx + i)]; 914 } 915 916 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; 917 } 918 919 if (qla_mbx_cmd(ha, (uint32_t *)c_intr, 920 (sizeof (q80_config_intr_t) >> 2), 921 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { 922 device_printf(dev, "%s: failed0\n", __func__); 923 return (-1); 924 } 925 926 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; 927 928 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); 929 930 if (err) { 931 device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err, 932 c_intr_rsp->nentries); 933 934 for (i = 0; i < c_intr_rsp->nentries; i++) { 935 device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", 936 __func__, i, 937 c_intr_rsp->intr[i].status, 938 c_intr_rsp->intr[i].intr_id, 939 c_intr_rsp->intr[i].intr_src); 940 } 941 942 return (-1); 943 } 944 945 for (i = 0; ((i < num_intrs) && create); i++) { 946 if (!c_intr_rsp->intr[i].status) { 947 ha->hw.intr_id[(start_idx + i)] = 948 c_intr_rsp->intr[i].intr_id; 949 ha->hw.intr_src[(start_idx + i)] = 950 c_intr_rsp->intr[i].intr_src; 951 } 952 } 953 954 return (0); 955 } 956 957 /* 958 * Name: qla_config_rss 959 * Function: Configure RSS for the context/interface. 
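 *
 * The request built below enables hashing on the IPv4/IPv6 TCP+IP tuples
 * and loads the 40-byte key from rss_key[] (presumably a Toeplitz-style
 * key); changing the key or the indirection table redistributes flows
 * across the status (SDS) rings.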
960 */ 961 static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 962 0x8030f20c77cb2da3ULL, 963 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 964 0x255b0ec26d5a56daULL }; 965 966 static int 967 qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) 968 { 969 q80_config_rss_t *c_rss; 970 q80_config_rss_rsp_t *c_rss_rsp; 971 uint32_t err, i; 972 device_t dev = ha->pci_dev; 973 974 c_rss = (q80_config_rss_t *)ha->hw.mbox; 975 bzero(c_rss, (sizeof (q80_config_rss_t))); 976 977 c_rss->opcode = Q8_MBX_CONFIG_RSS; 978 979 c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); 980 c_rss->count_version |= Q8_MBX_CMD_VERSION; 981 982 c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | 983 Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); 984 //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | 985 // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); 986 987 c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; 988 c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; 989 990 c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; 991 992 c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; 993 c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; 994 995 c_rss->cntxt_id = cntxt_id; 996 997 for (i = 0; i < 5; i++) { 998 c_rss->rss_key[i] = rss_key[i]; 999 } 1000 1001 if (qla_mbx_cmd(ha, (uint32_t *)c_rss, 1002 (sizeof (q80_config_rss_t) >> 2), 1003 ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) { 1004 device_printf(dev, "%s: failed0\n", __func__); 1005 return (-1); 1006 } 1007 c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; 1008 1009 err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); 1010 1011 if (err) { 1012 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1013 return (-1); 1014 } 1015 return 0; 1016 } 1017 1018 static int 1019 qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, 1020 uint16_t cntxt_id, uint8_t *ind_table) 1021 { 1022 q80_config_rss_ind_table_t *c_rss_ind; 1023 q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; 1024 uint32_t err; 1025 device_t dev = ha->pci_dev; 1026 1027 if ((count > Q8_RSS_IND_TBL_SIZE) || 1028 ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { 1029 device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, 1030 start_idx, count); 1031 return (-1); 1032 } 1033 1034 c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; 1035 bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); 1036 1037 c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; 1038 c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); 1039 c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; 1040 1041 c_rss_ind->start_idx = start_idx; 1042 c_rss_ind->end_idx = start_idx + count - 1; 1043 c_rss_ind->cntxt_id = cntxt_id; 1044 bcopy(ind_table, c_rss_ind->ind_table, count); 1045 1046 if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, 1047 (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, 1048 (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { 1049 device_printf(dev, "%s: failed0\n", __func__); 1050 return (-1); 1051 } 1052 1053 c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; 1054 err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); 1055 1056 if (err) { 1057 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1058 return (-1); 1059 } 1060 return 0; 1061 } 1062 1063 /* 1064 * Name: qla_config_intr_coalesce 1065 * Function: Configure Interrupt Coalescing. 
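 *
 * The coalescing parameters are taken from ha->hw.rcv_intr_coalesce or
 * ha->hw.xmt_intr_coalesce: bits 15:0 hold the maximum packet count and
 * bits 31:16 the maximum wait (micro-seconds, per the sysctl help text).
 * When tenable is set, a periodic timer (ms_timeout = 1000) is also armed
 * across all SDS rings.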
1066 */ 1067 static int 1068 qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, 1069 int rcv) 1070 { 1071 q80_config_intr_coalesc_t *intrc; 1072 q80_config_intr_coalesc_rsp_t *intrc_rsp; 1073 uint32_t err, i; 1074 device_t dev = ha->pci_dev; 1075 1076 intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; 1077 bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); 1078 1079 intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; 1080 intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); 1081 intrc->count_version |= Q8_MBX_CMD_VERSION; 1082 1083 if (rcv) { 1084 intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; 1085 intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; 1086 intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; 1087 } else { 1088 intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; 1089 intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; 1090 intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; 1091 } 1092 1093 intrc->cntxt_id = cntxt_id; 1094 1095 if (tenable) { 1096 intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; 1097 intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; 1098 1099 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1100 intrc->sds_ring_mask |= (1 << i); 1101 } 1102 intrc->ms_timeout = 1000; 1103 } 1104 1105 if (qla_mbx_cmd(ha, (uint32_t *)intrc, 1106 (sizeof (q80_config_intr_coalesc_t) >> 2), 1107 ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { 1108 device_printf(dev, "%s: failed0\n", __func__); 1109 return (-1); 1110 } 1111 intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; 1112 1113 err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); 1114 1115 if (err) { 1116 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1117 return (-1); 1118 } 1119 1120 return 0; 1121 } 1122 1123 1124 /* 1125 * Name: qla_config_mac_addr 1126 * Function: binds a MAC address to the context/interface. 1127 * Can be unicast, multicast or broadcast. 1128 */ 1129 static int 1130 qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, 1131 uint32_t num_mac) 1132 { 1133 q80_config_mac_addr_t *cmac; 1134 q80_config_mac_addr_rsp_t *cmac_rsp; 1135 uint32_t err; 1136 device_t dev = ha->pci_dev; 1137 int i; 1138 uint8_t *mac_cpy = mac_addr; 1139 1140 if (num_mac > Q8_MAX_MAC_ADDRS) { 1141 device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n", 1142 __func__, (add_mac ? "Add" : "Del"), num_mac); 1143 return (-1); 1144 } 1145 1146 cmac = (q80_config_mac_addr_t *)ha->hw.mbox; 1147 bzero(cmac, (sizeof (q80_config_mac_addr_t))); 1148 1149 cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; 1150 cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; 1151 cmac->count_version |= Q8_MBX_CMD_VERSION; 1152 1153 if (add_mac) 1154 cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; 1155 else 1156 cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; 1157 1158 cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; 1159 1160 cmac->nmac_entries = num_mac; 1161 cmac->cntxt_id = ha->hw.rcv_cntxt_id; 1162 1163 for (i = 0; i < num_mac; i++) { 1164 bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); 1165 mac_addr = mac_addr + ETHER_ADDR_LEN; 1166 } 1167 1168 if (qla_mbx_cmd(ha, (uint32_t *)cmac, 1169 (sizeof (q80_config_mac_addr_t) >> 2), 1170 ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { 1171 device_printf(dev, "%s: %s failed0\n", __func__, 1172 (add_mac ? 
"Add" : "Del")); 1173 return (-1); 1174 } 1175 cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; 1176 1177 err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); 1178 1179 if (err) { 1180 device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__, 1181 (add_mac ? "Add" : "Del"), err); 1182 for (i = 0; i < num_mac; i++) { 1183 device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n", 1184 __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2], 1185 mac_cpy[3], mac_cpy[4], mac_cpy[5]); 1186 mac_cpy += ETHER_ADDR_LEN; 1187 } 1188 return (-1); 1189 } 1190 1191 return 0; 1192 } 1193 1194 1195 /* 1196 * Name: qla_set_mac_rcv_mode 1197 * Function: Enable/Disable AllMulticast and Promiscous Modes. 1198 */ 1199 static int 1200 qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) 1201 { 1202 q80_config_mac_rcv_mode_t *rcv_mode; 1203 uint32_t err; 1204 q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; 1205 device_t dev = ha->pci_dev; 1206 1207 rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; 1208 bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); 1209 1210 rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; 1211 rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; 1212 rcv_mode->count_version |= Q8_MBX_CMD_VERSION; 1213 1214 rcv_mode->mode = mode; 1215 1216 rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; 1217 1218 if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, 1219 (sizeof (q80_config_mac_rcv_mode_t) >> 2), 1220 ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { 1221 device_printf(dev, "%s: failed0\n", __func__); 1222 return (-1); 1223 } 1224 rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; 1225 1226 err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); 1227 1228 if (err) { 1229 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1230 return (-1); 1231 } 1232 1233 return 0; 1234 } 1235 1236 int 1237 ql_set_promisc(qla_host_t *ha) 1238 { 1239 int ret; 1240 1241 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1242 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1243 return (ret); 1244 } 1245 1246 void 1247 qla_reset_promisc(qla_host_t *ha) 1248 { 1249 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1250 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1251 } 1252 1253 int 1254 ql_set_allmulti(qla_host_t *ha) 1255 { 1256 int ret; 1257 1258 ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; 1259 ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1260 return (ret); 1261 } 1262 1263 void 1264 qla_reset_allmulti(qla_host_t *ha) 1265 { 1266 ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; 1267 (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1268 } 1269 1270 /* 1271 * Name: ql_set_max_mtu 1272 * Function: 1273 * Sets the maximum transfer unit size for the specified rcv context. 
1274 */ 1275 int 1276 ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) 1277 { 1278 device_t dev; 1279 q80_set_max_mtu_t *max_mtu; 1280 q80_set_max_mtu_rsp_t *max_mtu_rsp; 1281 uint32_t err; 1282 1283 dev = ha->pci_dev; 1284 1285 max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; 1286 bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); 1287 1288 max_mtu->opcode = Q8_MBX_SET_MAX_MTU; 1289 max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); 1290 max_mtu->count_version |= Q8_MBX_CMD_VERSION; 1291 1292 max_mtu->cntxt_id = cntxt_id; 1293 max_mtu->mtu = mtu; 1294 1295 if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, 1296 (sizeof (q80_set_max_mtu_t) >> 2), 1297 ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { 1298 device_printf(dev, "%s: failed\n", __func__); 1299 return -1; 1300 } 1301 1302 max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; 1303 1304 err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); 1305 1306 if (err) { 1307 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1308 } 1309 1310 return 0; 1311 } 1312 1313 static int 1314 qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) 1315 { 1316 device_t dev; 1317 q80_link_event_t *lnk; 1318 q80_link_event_rsp_t *lnk_rsp; 1319 uint32_t err; 1320 1321 dev = ha->pci_dev; 1322 1323 lnk = (q80_link_event_t *)ha->hw.mbox; 1324 bzero(lnk, (sizeof (q80_link_event_t))); 1325 1326 lnk->opcode = Q8_MBX_LINK_EVENT_REQ; 1327 lnk->count_version = (sizeof (q80_link_event_t) >> 2); 1328 lnk->count_version |= Q8_MBX_CMD_VERSION; 1329 1330 lnk->cntxt_id = cntxt_id; 1331 lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; 1332 1333 if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), 1334 ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { 1335 device_printf(dev, "%s: failed\n", __func__); 1336 return -1; 1337 } 1338 1339 lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; 1340 1341 err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); 1342 1343 if (err) { 1344 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1345 } 1346 1347 return 0; 1348 } 1349 1350 static int 1351 qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) 1352 { 1353 device_t dev; 1354 q80_config_fw_lro_t *fw_lro; 1355 q80_config_fw_lro_rsp_t *fw_lro_rsp; 1356 uint32_t err; 1357 1358 dev = ha->pci_dev; 1359 1360 fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox; 1361 bzero(fw_lro, sizeof(q80_config_fw_lro_t)); 1362 1363 fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; 1364 fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2); 1365 fw_lro->count_version |= Q8_MBX_CMD_VERSION; 1366 1367 fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; 1368 fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; 1369 1370 fw_lro->cntxt_id = cntxt_id; 1371 1372 if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, 1373 (sizeof (q80_config_fw_lro_t) >> 2), 1374 ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { 1375 device_printf(dev, "%s: failed\n", __func__); 1376 return -1; 1377 } 1378 1379 fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; 1380 1381 err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); 1382 1383 if (err) { 1384 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1385 } 1386 1387 return 0; 1388 } 1389 1390 static int 1391 qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode) 1392 { 1393 device_t dev; 1394 q80_hw_config_t *hw_config; 1395 q80_hw_config_rsp_t *hw_config_rsp; 1396 uint32_t err; 1397 1398 dev = ha->pci_dev; 1399 1400 hw_config = (q80_hw_config_t *)ha->hw.mbox; 1401 bzero(hw_config, sizeof 
(q80_hw_config_t)); 1402 1403 hw_config->opcode = Q8_MBX_HW_CONFIG; 1404 hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT; 1405 hw_config->count_version |= Q8_MBX_CMD_VERSION; 1406 1407 hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE; 1408 1409 hw_config->u.set_cam_search_mode.mode = search_mode; 1410 1411 if (qla_mbx_cmd(ha, (uint32_t *)hw_config, 1412 (sizeof (q80_hw_config_t) >> 2), 1413 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { 1414 device_printf(dev, "%s: failed\n", __func__); 1415 return -1; 1416 } 1417 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; 1418 1419 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); 1420 1421 if (err) { 1422 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1423 } 1424 1425 return 0; 1426 } 1427 1428 static int 1429 qla_get_cam_search_mode(qla_host_t *ha) 1430 { 1431 device_t dev; 1432 q80_hw_config_t *hw_config; 1433 q80_hw_config_rsp_t *hw_config_rsp; 1434 uint32_t err; 1435 1436 dev = ha->pci_dev; 1437 1438 hw_config = (q80_hw_config_t *)ha->hw.mbox; 1439 bzero(hw_config, sizeof (q80_hw_config_t)); 1440 1441 hw_config->opcode = Q8_MBX_HW_CONFIG; 1442 hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT; 1443 hw_config->count_version |= Q8_MBX_CMD_VERSION; 1444 1445 hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE; 1446 1447 if (qla_mbx_cmd(ha, (uint32_t *)hw_config, 1448 (sizeof (q80_hw_config_t) >> 2), 1449 ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { 1450 device_printf(dev, "%s: failed\n", __func__); 1451 return -1; 1452 } 1453 hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; 1454 1455 err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); 1456 1457 if (err) { 1458 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1459 } else { 1460 device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__, 1461 hw_config_rsp->u.get_cam_search_mode.mode); 1462 } 1463 1464 return 0; 1465 } 1466 1467 1468 1469 static void 1470 qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i) 1471 { 1472 device_t dev = ha->pci_dev; 1473 1474 if (i < ha->hw.num_tx_rings) { 1475 device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n", 1476 __func__, i, xstat->total_bytes); 1477 device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n", 1478 __func__, i, xstat->total_pkts); 1479 device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n", 1480 __func__, i, xstat->errors); 1481 device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n", 1482 __func__, i, xstat->pkts_dropped); 1483 device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n", 1484 __func__, i, xstat->switch_pkts); 1485 device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n", 1486 __func__, i, xstat->num_buffers); 1487 } else { 1488 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", 1489 __func__, xstat->total_bytes); 1490 device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", 1491 __func__, xstat->total_pkts); 1492 device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n", 1493 __func__, xstat->errors); 1494 device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n", 1495 __func__, xstat->pkts_dropped); 1496 device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n", 1497 __func__, xstat->switch_pkts); 1498 device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n", 1499 __func__, xstat->num_buffers); 1500 } 1501 } 1502 1503 static void 1504 qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat) 1505 { 1506 device_t dev = ha->pci_dev; 1507 1508 device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__, 1509 rstat->total_bytes); 1510 
device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__, 1511 rstat->total_pkts); 1512 device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__, 1513 rstat->lro_pkt_count); 1514 device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__, 1515 rstat->sw_pkt_count); 1516 device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__, 1517 rstat->ip_chksum_err); 1518 device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__, 1519 rstat->pkts_wo_acntxts); 1520 device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n", 1521 __func__, rstat->pkts_dropped_no_sds_card); 1522 device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n", 1523 __func__, rstat->pkts_dropped_no_sds_host); 1524 device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__, 1525 rstat->oversized_pkts); 1526 device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n", 1527 __func__, rstat->pkts_dropped_no_rds); 1528 device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n", 1529 __func__, rstat->unxpctd_mcast_pkts); 1530 device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__, 1531 rstat->re1_fbq_error); 1532 device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__, 1533 rstat->invalid_mac_addr); 1534 device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__, 1535 rstat->rds_prime_trys); 1536 device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__, 1537 rstat->rds_prime_success); 1538 device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__, 1539 rstat->lro_flows_added); 1540 device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__, 1541 rstat->lro_flows_deleted); 1542 device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__, 1543 rstat->lro_flows_active); 1544 device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n", 1545 __func__, rstat->pkts_droped_unknown); 1546 } 1547 1548 static void 1549 qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat) 1550 { 1551 device_t dev = ha->pci_dev; 1552 1553 device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__, 1554 mstat->xmt_frames); 1555 device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__, 1556 mstat->xmt_bytes); 1557 device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1558 mstat->xmt_mcast_pkts); 1559 device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1560 mstat->xmt_bcast_pkts); 1561 device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__, 1562 mstat->xmt_pause_frames); 1563 device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1564 mstat->xmt_cntrl_pkts); 1565 device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1566 __func__, mstat->xmt_pkt_lt_64bytes); 1567 device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1568 __func__, mstat->xmt_pkt_lt_127bytes); 1569 device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1570 __func__, mstat->xmt_pkt_lt_255bytes); 1571 device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1572 __func__, mstat->xmt_pkt_lt_511bytes); 1573 device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1574 __func__, mstat->xmt_pkt_lt_1023bytes); 1575 device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1576 __func__, mstat->xmt_pkt_lt_1518bytes); 1577 device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1578 __func__, mstat->xmt_pkt_gt_1518bytes); 1579 1580 device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__, 1581 mstat->rcv_frames); 1582 device_printf(dev, 
"%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__, 1583 mstat->rcv_bytes); 1584 device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1585 mstat->rcv_mcast_pkts); 1586 device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1587 mstat->rcv_bcast_pkts); 1588 device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__, 1589 mstat->rcv_pause_frames); 1590 device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1591 mstat->rcv_cntrl_pkts); 1592 device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1593 __func__, mstat->rcv_pkt_lt_64bytes); 1594 device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1595 __func__, mstat->rcv_pkt_lt_127bytes); 1596 device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1597 __func__, mstat->rcv_pkt_lt_255bytes); 1598 device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1599 __func__, mstat->rcv_pkt_lt_511bytes); 1600 device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1601 __func__, mstat->rcv_pkt_lt_1023bytes); 1602 device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1603 __func__, mstat->rcv_pkt_lt_1518bytes); 1604 device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1605 __func__, mstat->rcv_pkt_gt_1518bytes); 1606 1607 device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__, 1608 mstat->rcv_len_error); 1609 device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__, 1610 mstat->rcv_len_small); 1611 device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__, 1612 mstat->rcv_len_large); 1613 device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__, 1614 mstat->rcv_jabber); 1615 device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__, 1616 mstat->rcv_dropped); 1617 device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__, 1618 mstat->fcs_error); 1619 device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__, 1620 mstat->align_error); 1621 } 1622 1623 1624 static int 1625 qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) 1626 { 1627 device_t dev; 1628 q80_get_stats_t *stat; 1629 q80_get_stats_rsp_t *stat_rsp; 1630 uint32_t err; 1631 1632 dev = ha->pci_dev; 1633 1634 stat = (q80_get_stats_t *)ha->hw.mbox; 1635 bzero(stat, (sizeof (q80_get_stats_t))); 1636 1637 stat->opcode = Q8_MBX_GET_STATS; 1638 stat->count_version = 2; 1639 stat->count_version |= Q8_MBX_CMD_VERSION; 1640 1641 stat->cmd = cmd; 1642 1643 if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, 1644 ha->hw.mbox, (rsp_size >> 2), 0)) { 1645 device_printf(dev, "%s: failed\n", __func__); 1646 return -1; 1647 } 1648 1649 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1650 1651 err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); 1652 1653 if (err) { 1654 return -1; 1655 } 1656 1657 return 0; 1658 } 1659 1660 void 1661 ql_get_stats(qla_host_t *ha) 1662 { 1663 q80_get_stats_rsp_t *stat_rsp; 1664 q80_mac_stats_t *mstat; 1665 q80_xmt_stats_t *xstat; 1666 q80_rcv_stats_t *rstat; 1667 uint32_t cmd; 1668 int i; 1669 1670 stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1671 /* 1672 * Get MAC Statistics 1673 */ 1674 cmd = Q8_GET_STATS_CMD_TYPE_MAC; 1675 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1676 1677 cmd |= ((ha->pci_func & 0x1) << 16); 1678 1679 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1680 mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; 1681 qla_mac_stats(ha, mstat); 1682 } else { 1683 device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", 1684 __func__, ha->hw.mbox[0]); 1685 } 1686 /* 1687 * Get RCV Statistics 1688 */ 1689 
cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; 1690 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1691 cmd |= (ha->hw.rcv_cntxt_id << 16); 1692 1693 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1694 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; 1695 qla_rcv_stats(ha, rstat); 1696 } else { 1697 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", 1698 __func__, ha->hw.mbox[0]); 1699 } 1700 /* 1701 * Get XMT Statistics 1702 */ 1703 for (i = 0 ; i < ha->hw.num_tx_rings; i++) { 1704 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; 1705 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1706 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); 1707 1708 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) 1709 == 0) { 1710 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; 1711 qla_xmt_stats(ha, xstat, i); 1712 } else { 1713 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", 1714 __func__, ha->hw.mbox[0]); 1715 } 1716 } 1717 return; 1718 } 1719 1720 static void 1721 qla_get_quick_stats(qla_host_t *ha) 1722 { 1723 q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp; 1724 q80_mac_stats_t *mstat; 1725 q80_xmt_stats_t *xstat; 1726 q80_rcv_stats_t *rstat; 1727 uint32_t cmd; 1728 1729 stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox; 1730 1731 cmd = Q8_GET_STATS_CMD_TYPE_ALL; 1732 // cmd |= Q8_GET_STATS_CMD_CLEAR; 1733 1734 // cmd |= ((ha->pci_func & 0x3) << 16); 1735 cmd |= (0xFFFF << 16); 1736 1737 if (qla_get_hw_stats(ha, cmd, 1738 sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) { 1739 1740 mstat = (q80_mac_stats_t *)&stat_rsp->mac; 1741 rstat = (q80_rcv_stats_t *)&stat_rsp->rcv; 1742 xstat = (q80_xmt_stats_t *)&stat_rsp->xmt; 1743 qla_mac_stats(ha, mstat); 1744 qla_rcv_stats(ha, rstat); 1745 qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings); 1746 } else { 1747 device_printf(ha->pci_dev, "%s: failed [0x%08x]\n", 1748 __func__, ha->hw.mbox[0]); 1749 } 1750 return; 1751 } 1752 1753 /* 1754 * Name: qla_tx_tso 1755 * Function: Checks if the packet to be transmitted is a candidate for 1756 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 1757 * Ring Structure are plugged in. 
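 *
 * Return values, as consumed by ql_hw_send(): 0 when the complete header
 * already resides in the first mbuf, 1 when the header had to be copied
 * into the caller-supplied hdr buffer, and -1 when the frame is not a
 * TSO candidate (for example, IPv4 options present or a non-TCP payload).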
1758 */ 1759 static int 1760 qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) 1761 { 1762 struct ether_vlan_header *eh; 1763 struct ip *ip = NULL; 1764 struct ip6_hdr *ip6 = NULL; 1765 struct tcphdr *th = NULL; 1766 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; 1767 uint16_t etype, opcode, offload = 1; 1768 device_t dev; 1769 1770 dev = ha->pci_dev; 1771 1772 1773 eh = mtod(mp, struct ether_vlan_header *); 1774 1775 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1776 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1777 etype = ntohs(eh->evl_proto); 1778 } else { 1779 ehdrlen = ETHER_HDR_LEN; 1780 etype = ntohs(eh->evl_encap_proto); 1781 } 1782 1783 hdrlen = 0; 1784 1785 switch (etype) { 1786 case ETHERTYPE_IP: 1787 1788 tcp_opt_off = ehdrlen + sizeof(struct ip) + 1789 sizeof(struct tcphdr); 1790 1791 if (mp->m_len < tcp_opt_off) { 1792 m_copydata(mp, 0, tcp_opt_off, hdr); 1793 ip = (struct ip *)(hdr + ehdrlen); 1794 } else { 1795 ip = (struct ip *)(mp->m_data + ehdrlen); 1796 } 1797 1798 ip_hlen = ip->ip_hl << 2; 1799 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; 1800 1801 1802 if ((ip->ip_p != IPPROTO_TCP) || 1803 (ip_hlen != sizeof (struct ip))){ 1804 /* IP Options are not supported */ 1805 1806 offload = 0; 1807 } else 1808 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 1809 1810 break; 1811 1812 case ETHERTYPE_IPV6: 1813 1814 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + 1815 sizeof (struct tcphdr); 1816 1817 if (mp->m_len < tcp_opt_off) { 1818 m_copydata(mp, 0, tcp_opt_off, hdr); 1819 ip6 = (struct ip6_hdr *)(hdr + ehdrlen); 1820 } else { 1821 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1822 } 1823 1824 ip_hlen = sizeof(struct ip6_hdr); 1825 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; 1826 1827 if (ip6->ip6_nxt != IPPROTO_TCP) { 1828 //device_printf(dev, "%s: ipv6\n", __func__); 1829 offload = 0; 1830 } else 1831 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 1832 break; 1833 1834 default: 1835 QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); 1836 offload = 0; 1837 break; 1838 } 1839 1840 if (!offload) 1841 return (-1); 1842 1843 tcp_hlen = th->th_off << 2; 1844 hdrlen = ehdrlen + ip_hlen + tcp_hlen; 1845 1846 if (mp->m_len < hdrlen) { 1847 if (mp->m_len < tcp_opt_off) { 1848 if (tcp_hlen > sizeof(struct tcphdr)) { 1849 m_copydata(mp, tcp_opt_off, 1850 (tcp_hlen - sizeof(struct tcphdr)), 1851 &hdr[tcp_opt_off]); 1852 } 1853 } else { 1854 m_copydata(mp, 0, hdrlen, hdr); 1855 } 1856 } 1857 1858 tx_cmd->mss = mp->m_pkthdr.tso_segsz; 1859 1860 tx_cmd->flags_opcode = opcode ; 1861 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 1862 tx_cmd->total_hdr_len = hdrlen; 1863 1864 /* Check for Multicast least significant bit of MSB == 1 */ 1865 if (eh->evl_dhost[0] & 0x01) { 1866 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; 1867 } 1868 1869 if (mp->m_len < hdrlen) { 1870 printf("%d\n", hdrlen); 1871 return (1); 1872 } 1873 1874 return (0); 1875 } 1876 1877 /* 1878 * Name: qla_tx_chksum 1879 * Function: Checks if the packet to be transmitted is a candidate for 1880 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx 1881 * Ring Structure are plugged in. 
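 *
 * On success only the transmit opcode and the TCP/UDP header offset are
 * returned through op_code and tcp_hdr_off; a return of -1 means checksum
 * offload was not requested or the protocol cannot be offloaded.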
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
    uint32_t *tcp_hdr_off)
{
    struct ether_vlan_header *eh;
    struct ip *ip;
    struct ip6_hdr *ip6;
    uint32_t ehdrlen, ip_hlen;
    uint16_t etype, opcode, offload = 1;
    device_t dev;
    uint8_t buf[sizeof(struct ip6_hdr)];

    dev = ha->pci_dev;

    *op_code = 0;

    if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
        return (-1);

    eh = mtod(mp, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        etype = ntohs(eh->evl_proto);
    } else {
        ehdrlen = ETHER_HDR_LEN;
        etype = ntohs(eh->evl_encap_proto);
    }


    switch (etype) {
    case ETHERTYPE_IP:
        ip = (struct ip *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof (struct ip);

        if (mp->m_len < (ehdrlen + ip_hlen)) {
            m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
            ip = (struct ip *)buf;
        }

        if (ip->ip_p == IPPROTO_TCP)
            opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
        else if (ip->ip_p == IPPROTO_UDP)
            opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
        else {
            //device_printf(dev, "%s: ipv4\n", __func__);
            offload = 0;
        }
        break;

    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

        ip_hlen = sizeof(struct ip6_hdr);

        if (mp->m_len < (ehdrlen + ip_hlen)) {
            m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
                buf);
            ip6 = (struct ip6_hdr *)buf;
        }

        if (ip6->ip6_nxt == IPPROTO_TCP)
            opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
        else if (ip6->ip6_nxt == IPPROTO_UDP)
            opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
        else {
            //device_printf(dev, "%s: ipv6\n", __func__);
            offload = 0;
        }
        break;

    default:
        offload = 0;
        break;
    }
    if (!offload)
        return (-1);

    *op_code = opcode;
    *tcp_hdr_off = (ip_hlen + ehdrlen);

    return (0);
}

#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria is not met, it is transmitted
 *	as a regular ethernet frame.
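 *
 * A transmit command descriptor holds at most Q8_TX_CMD_MAX_SEGMENTS (4)
 * buffer pointers, so frames with more DMA segments span several
 * descriptors; for TSO the MAC/IP/TCP header is additionally replicated
 * into follow-on descriptors so the adapter can segment the payload.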
1975 */ 1976 int 1977 ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, 1978 uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) 1979 { 1980 struct ether_vlan_header *eh; 1981 qla_hw_t *hw = &ha->hw; 1982 q80_tx_cmd_t *tx_cmd, tso_cmd; 1983 bus_dma_segment_t *c_seg; 1984 uint32_t num_tx_cmds, hdr_len = 0; 1985 uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; 1986 device_t dev; 1987 int i, ret; 1988 uint8_t *src = NULL, *dst = NULL; 1989 uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; 1990 uint32_t op_code = 0; 1991 uint32_t tcp_hdr_off = 0; 1992 1993 dev = ha->pci_dev; 1994 1995 /* 1996 * Always make sure there is at least one empty slot in the tx_ring; 1997 * the tx_ring is considered full when there is only one entry available 1998 */ 1999 num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; 2000 2001 total_length = mp->m_pkthdr.len; 2002 if (total_length > QLA_MAX_TSO_FRAME_SIZE) { 2003 device_printf(dev, "%s: total length exceeds maxlen(%d)\n", 2004 __func__, total_length); 2005 return (-1); 2006 } 2007 eh = mtod(mp, struct ether_vlan_header *); 2008 2009 if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 2010 2011 bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); 2012 2013 src = frame_hdr; 2014 ret = qla_tx_tso(ha, mp, &tso_cmd, src); 2015 2016 if (!(ret & ~1)) { 2017 /* find the additional tx_cmd descriptors required */ 2018 2019 if (mp->m_flags & M_VLANTAG) 2020 tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; 2021 2022 hdr_len = tso_cmd.total_hdr_len; 2023 2024 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2025 bytes = QL_MIN(bytes, hdr_len); 2026 2027 num_tx_cmds++; 2028 hdr_len -= bytes; 2029 2030 while (hdr_len) { 2031 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2032 hdr_len -= bytes; 2033 num_tx_cmds++; 2034 } 2035 hdr_len = tso_cmd.total_hdr_len; 2036 2037 if (ret == 0) 2038 src = (uint8_t *)eh; 2039 } else 2040 return (EINVAL); 2041 } else { 2042 (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); 2043 } 2044 2045 if (iscsi_pdu) 2046 ha->hw.iscsi_pkt_count++; 2047 2048 if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { 2049 ql_hw_tx_done_locked(ha, txr_idx); 2050 if (hw->tx_cntxt[txr_idx].txr_free <= 2051 (num_tx_cmds + QLA_TX_MIN_FREE)) { 2052 QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " 2053 "(num_tx_cmds + QLA_TX_MIN_FREE))\n", 2054 __func__)); 2055 return (-1); 2056 } 2057 } 2058 2059 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; 2060 2061 if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { 2062 2063 if (nsegs > ha->hw.max_tx_segs) 2064 ha->hw.max_tx_segs = nsegs; 2065 2066 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2067 2068 if (op_code) { 2069 tx_cmd->flags_opcode = op_code; 2070 tx_cmd->tcp_hdr_off = tcp_hdr_off; 2071 2072 } else { 2073 tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; 2074 } 2075 } else { 2076 bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); 2077 ha->tx_tso_frames++; 2078 } 2079 2080 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2081 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; 2082 2083 if (iscsi_pdu) 2084 eh->evl_tag |= ha->hw.user_pri_iscsi << 13; 2085 2086 } else if (mp->m_flags & M_VLANTAG) { 2087 2088 if (hdr_len) { /* TSO */ 2089 tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | 2090 Q8_TX_CMD_FLAGS_HW_VLAN_ID); 2091 tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; 2092 } else 2093 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; 2094 2095 ha->hw_vlan_tx_frames++; 2096 tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; 2097 2098 if (iscsi_pdu) { 2099 tx_cmd->vlan_tci |=
ha->hw.user_pri_iscsi << 13; 2100 mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci; 2101 } 2102 } 2103 2104 2105 tx_cmd->n_bufs = (uint8_t)nsegs; 2106 tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); 2107 tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); 2108 tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); 2109 2110 c_seg = segs; 2111 2112 while (1) { 2113 for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { 2114 2115 switch (i) { 2116 case 0: 2117 tx_cmd->buf1_addr = c_seg->ds_addr; 2118 tx_cmd->buf1_len = c_seg->ds_len; 2119 break; 2120 2121 case 1: 2122 tx_cmd->buf2_addr = c_seg->ds_addr; 2123 tx_cmd->buf2_len = c_seg->ds_len; 2124 break; 2125 2126 case 2: 2127 tx_cmd->buf3_addr = c_seg->ds_addr; 2128 tx_cmd->buf3_len = c_seg->ds_len; 2129 break; 2130 2131 case 3: 2132 tx_cmd->buf4_addr = c_seg->ds_addr; 2133 tx_cmd->buf4_len = c_seg->ds_len; 2134 break; 2135 } 2136 2137 c_seg++; 2138 nsegs--; 2139 } 2140 2141 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2142 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2143 (NUM_TX_DESCRIPTORS - 1); 2144 tx_cmd_count++; 2145 2146 if (!nsegs) 2147 break; 2148 2149 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2150 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2151 } 2152 2153 if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 2154 2155 /* TSO : Copy the header in the following tx cmd descriptors */ 2156 2157 txr_next = hw->tx_cntxt[txr_idx].txr_next; 2158 2159 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2160 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2161 2162 bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2163 bytes = QL_MIN(bytes, hdr_len); 2164 2165 dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; 2166 2167 if (mp->m_flags & M_VLANTAG) { 2168 /* first copy the src/dst MAC addresses */ 2169 bcopy(src, dst, (ETHER_ADDR_LEN * 2)); 2170 dst += (ETHER_ADDR_LEN * 2); 2171 src += (ETHER_ADDR_LEN * 2); 2172 2173 *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); 2174 dst += 2; 2175 *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); 2176 dst += 2; 2177 2178 /* bytes left in src header */ 2179 hdr_len -= ((ETHER_ADDR_LEN * 2) + 2180 ETHER_VLAN_ENCAP_LEN); 2181 2182 /* bytes left in TxCmd Entry */ 2183 bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); 2184 2185 2186 bcopy(src, dst, bytes); 2187 src += bytes; 2188 hdr_len -= bytes; 2189 } else { 2190 bcopy(src, dst, bytes); 2191 src += bytes; 2192 hdr_len -= bytes; 2193 } 2194 2195 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2196 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2197 (NUM_TX_DESCRIPTORS - 1); 2198 tx_cmd_count++; 2199 2200 while (hdr_len) { 2201 tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2202 bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2203 2204 bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2205 2206 bcopy(src, tx_cmd, bytes); 2207 src += bytes; 2208 hdr_len -= bytes; 2209 2210 txr_next = hw->tx_cntxt[txr_idx].txr_next = 2211 (hw->tx_cntxt[txr_idx].txr_next + 1) & 2212 (NUM_TX_DESCRIPTORS - 1); 2213 tx_cmd_count++; 2214 } 2215 } 2216 2217 hw->tx_cntxt[txr_idx].txr_free = 2218 hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; 2219 2220 QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ 2221 txr_idx); 2222 QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); 2223 2224 return (0); 2225 } 2226 2227 2228 2229 #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ 2230 static int 2231 qla_config_rss_ind_table(qla_host_t *ha) 2232 { 2233 uint32_t i, count; 2234 uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; 2235 2236 2237 
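/*
 * Fill the indirection table round-robin over the SDS rings, then program it
 * to the firmware in Q8_CONFIG_IND_TBL_SIZE-entry chunks until the full
 * table (Q8_RSS_IND_TBL_MAX_IDX) is covered. Illustrative example only
 * (four rings is an assumed configuration, not a driver default): with
 * num_sds_rings == 4 the 32-entry pattern is 0,1,2,3,0,1,2,3,... and the
 * same pattern is replayed for every chunk of the table.
 */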
for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { 2238 rss_ind_tbl[i] = i % ha->hw.num_sds_rings; 2239 } 2240 2241 for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; 2242 i = i + Q8_CONFIG_IND_TBL_SIZE) { 2243 2244 if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { 2245 count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; 2246 } else { 2247 count = Q8_CONFIG_IND_TBL_SIZE; 2248 } 2249 2250 if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, 2251 rss_ind_tbl)) 2252 return (-1); 2253 } 2254 2255 return (0); 2256 } 2257 2258 /* 2259 * Name: ql_del_hw_if 2260 * Function: Destroys the hardware specific entities corresponding to an 2261 * Ethernet Interface 2262 */ 2263 void 2264 ql_del_hw_if(qla_host_t *ha) 2265 { 2266 uint32_t i; 2267 uint32_t num_msix; 2268 2269 (void)qla_stop_nic_func(ha); 2270 2271 qla_del_rcv_cntxt(ha); 2272 2273 qla_del_xmt_cntxt(ha); 2274 2275 if (ha->hw.flags.init_intr_cnxt) { 2276 for (i = 0; i < ha->hw.num_sds_rings; ) { 2277 2278 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2279 num_msix = Q8_MAX_INTR_VECTORS; 2280 else 2281 num_msix = ha->hw.num_sds_rings - i; 2282 qla_config_intr_cntxt(ha, i, num_msix, 0); 2283 2284 i += num_msix; 2285 } 2286 2287 ha->hw.flags.init_intr_cnxt = 0; 2288 } 2289 2290 return; 2291 } 2292 2293 void 2294 qla_confirm_9kb_enable(qla_host_t *ha) 2295 { 2296 uint32_t supports_9kb = 0; 2297 2298 ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); 2299 2300 /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ 2301 WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); 2302 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 2303 2304 qla_get_nic_partition(ha, &supports_9kb, NULL); 2305 2306 if (!supports_9kb) 2307 ha->hw.enable_9kb = 0; 2308 2309 return; 2310 } 2311 2312 2313 /* 2314 * Name: ql_init_hw_if 2315 * Function: Creates the hardware specific entities corresponding to an 2316 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address 2317 * corresponding to the interface. Enables LRO if allowed. 
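 *
 * Bring-up order: interrupt contexts are configured in groups of at most
 * Q8_MAX_INTR_VECTORS, the receive context is created and the RDS producer
 * indices are posted, the transmit contexts are created, the unicast,
 * broadcast and any cached multicast MAC addresses are programmed, and then
 * RSS, the RSS indirection table, interrupt coalescing, link event
 * notification, firmware LRO, the NIC function and the DCBX capability query
 * are set up before the per-ring interrupts are finally enabled.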
2318 */ 2319 int 2320 ql_init_hw_if(qla_host_t *ha) 2321 { 2322 device_t dev; 2323 uint32_t i; 2324 uint8_t bcast_mac[6]; 2325 qla_rdesc_t *rdesc; 2326 uint32_t num_msix; 2327 2328 dev = ha->pci_dev; 2329 2330 for (i = 0; i < ha->hw.num_sds_rings; i++) { 2331 bzero(ha->hw.dma_buf.sds_ring[i].dma_b, 2332 ha->hw.dma_buf.sds_ring[i].size); 2333 } 2334 2335 for (i = 0; i < ha->hw.num_sds_rings; ) { 2336 2337 if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2338 num_msix = Q8_MAX_INTR_VECTORS; 2339 else 2340 num_msix = ha->hw.num_sds_rings - i; 2341 2342 if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { 2343 2344 if (i > 0) { 2345 2346 num_msix = i; 2347 2348 for (i = 0; i < num_msix; ) { 2349 qla_config_intr_cntxt(ha, i, 2350 Q8_MAX_INTR_VECTORS, 0); 2351 i += Q8_MAX_INTR_VECTORS; 2352 } 2353 } 2354 return (-1); 2355 } 2356 2357 i = i + num_msix; 2358 } 2359 2360 ha->hw.flags.init_intr_cnxt = 1; 2361 2362 /* 2363 * Create Receive Context 2364 */ 2365 if (qla_init_rcv_cntxt(ha)) { 2366 return (-1); 2367 } 2368 2369 for (i = 0; i < ha->hw.num_rds_rings; i++) { 2370 rdesc = &ha->hw.rds[i]; 2371 rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; 2372 rdesc->rx_in = 0; 2373 /* Update the RDS Producer Indices */ 2374 QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ 2375 rdesc->rx_next); 2376 } 2377 2378 2379 /* 2380 * Create Transmit Context 2381 */ 2382 if (qla_init_xmt_cntxt(ha)) { 2383 qla_del_rcv_cntxt(ha); 2384 return (-1); 2385 } 2386 ha->hw.max_tx_segs = 0; 2387 2388 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) 2389 return(-1); 2390 2391 ha->hw.flags.unicast_mac = 1; 2392 2393 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2394 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2395 2396 if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) 2397 return (-1); 2398 2399 ha->hw.flags.bcast_mac = 1; 2400 2401 /* 2402 * program any cached multicast addresses 2403 */ 2404 if (qla_hw_add_all_mcast(ha)) 2405 return (-1); 2406 2407 if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) 2408 return (-1); 2409 2410 if (qla_config_rss_ind_table(ha)) 2411 return (-1); 2412 2413 if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) 2414 return (-1); 2415 2416 if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) 2417 return (-1); 2418 2419 if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) 2420 return (-1); 2421 2422 if (qla_init_nic_func(ha)) 2423 return (-1); 2424 2425 if (qla_query_fw_dcbx_caps(ha)) 2426 return (-1); 2427 2428 for (i = 0; i < ha->hw.num_sds_rings; i++) 2429 QL_ENABLE_INTERRUPTS(ha, i); 2430 2431 return (0); 2432 } 2433 2434 static int 2435 qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) 2436 { 2437 device_t dev = ha->pci_dev; 2438 q80_rq_map_sds_to_rds_t *map_rings; 2439 q80_rsp_map_sds_to_rds_t *map_rings_rsp; 2440 uint32_t i, err; 2441 qla_hw_t *hw = &ha->hw; 2442 2443 map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; 2444 bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); 2445 2446 map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; 2447 map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); 2448 map_rings->count_version |= Q8_MBX_CMD_VERSION; 2449 2450 map_rings->cntxt_id = hw->rcv_cntxt_id; 2451 map_rings->num_rings = num_idx; 2452 2453 for (i = 0; i < num_idx; i++) { 2454 map_rings->sds_rds[i].sds_ring = i + start_idx; 2455 map_rings->sds_rds[i].rds_ring = i + start_idx; 2456 } 2457 2458 if (qla_mbx_cmd(ha, (uint32_t *)map_rings, 2459 (sizeof (q80_rq_map_sds_to_rds_t) >> 2), 2460 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 
2461 device_printf(dev, "%s: failed0\n", __func__); 2462 return (-1); 2463 } 2464 2465 map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; 2466 2467 err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); 2468 2469 if (err) { 2470 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2471 return (-1); 2472 } 2473 2474 return (0); 2475 } 2476 2477 /* 2478 * Name: qla_init_rcv_cntxt 2479 * Function: Creates the Receive Context. 2480 */ 2481 static int 2482 qla_init_rcv_cntxt(qla_host_t *ha) 2483 { 2484 q80_rq_rcv_cntxt_t *rcntxt; 2485 q80_rsp_rcv_cntxt_t *rcntxt_rsp; 2486 q80_stat_desc_t *sdesc; 2487 int i, j; 2488 qla_hw_t *hw = &ha->hw; 2489 device_t dev; 2490 uint32_t err; 2491 uint32_t rcntxt_sds_rings; 2492 uint32_t rcntxt_rds_rings; 2493 uint32_t max_idx; 2494 2495 dev = ha->pci_dev; 2496 2497 /* 2498 * Create Receive Context 2499 */ 2500 2501 for (i = 0; i < hw->num_sds_rings; i++) { 2502 sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; 2503 2504 for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { 2505 sdesc->data[0] = 1ULL; 2506 sdesc->data[1] = 1ULL; 2507 } 2508 } 2509 2510 rcntxt_sds_rings = hw->num_sds_rings; 2511 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) 2512 rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; 2513 2514 rcntxt_rds_rings = hw->num_rds_rings; 2515 2516 if (hw->num_rds_rings > MAX_RDS_RING_SETS) 2517 rcntxt_rds_rings = MAX_RDS_RING_SETS; 2518 2519 rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; 2520 bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); 2521 2522 rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; 2523 rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); 2524 rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2525 2526 rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | 2527 Q8_RCV_CNTXT_CAP0_LRO | 2528 Q8_RCV_CNTXT_CAP0_HW_LRO | 2529 Q8_RCV_CNTXT_CAP0_RSS | 2530 Q8_RCV_CNTXT_CAP0_SGL_LRO; 2531 2532 if (ha->hw.enable_9kb) 2533 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; 2534 else 2535 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; 2536 2537 if (ha->hw.num_rds_rings > 1) { 2538 rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); 2539 rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; 2540 } else 2541 rcntxt->nrds_sets_rings = 0x1 | (1 << 5); 2542 2543 rcntxt->nsds_rings = rcntxt_sds_rings; 2544 2545 rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; 2546 2547 rcntxt->rcv_vpid = 0; 2548 2549 for (i = 0; i < rcntxt_sds_rings; i++) { 2550 rcntxt->sds[i].paddr = 2551 qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); 2552 rcntxt->sds[i].size = 2553 qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2554 rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); 2555 rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); 2556 } 2557 2558 for (i = 0; i < rcntxt_rds_rings; i++) { 2559 rcntxt->rds[i].paddr_std = 2560 qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); 2561 2562 if (ha->hw.enable_9kb) 2563 rcntxt->rds[i].std_bsize = 2564 qla_host_to_le64(MJUM9BYTES); 2565 else 2566 rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2567 2568 rcntxt->rds[i].std_nentries = 2569 qla_host_to_le32(NUM_RX_DESCRIPTORS); 2570 } 2571 2572 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2573 (sizeof (q80_rq_rcv_cntxt_t) >> 2), 2574 ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { 2575 device_printf(dev, "%s: failed0\n", __func__); 2576 return (-1); 2577 } 2578 2579 rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; 2580 2581 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2582 2583 if (err) { 2584 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2585 return (-1); 2586 } 2587 2588 
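/*
 * The create-context response returns, per ring, the register offsets used
 * from here on: the status-ring consumer for each SDS ring and the
 * standard-buffer producer for each RDS ring, along with the receive
 * context id. SDS rings beyond MAX_RCNTXT_SDS_RINGS are attached below with
 * qla_add_rcv_rings(), and multi-RDS configurations are then mapped to
 * their SDS rings in chunks of MAX_SDS_TO_RDS_MAP.
 */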
for (i = 0; i < rcntxt_sds_rings; i++) { 2589 hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; 2590 } 2591 2592 for (i = 0; i < rcntxt_rds_rings; i++) { 2593 hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; 2594 } 2595 2596 hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; 2597 2598 ha->hw.flags.init_rx_cnxt = 1; 2599 2600 if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { 2601 2602 for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { 2603 2604 if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) 2605 max_idx = MAX_RCNTXT_SDS_RINGS; 2606 else 2607 max_idx = hw->num_sds_rings - i; 2608 2609 err = qla_add_rcv_rings(ha, i, max_idx); 2610 if (err) 2611 return -1; 2612 2613 i += max_idx; 2614 } 2615 } 2616 2617 if (hw->num_rds_rings > 1) { 2618 2619 for (i = 0; i < hw->num_rds_rings; ) { 2620 2621 if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) 2622 max_idx = MAX_SDS_TO_RDS_MAP; 2623 else 2624 max_idx = hw->num_rds_rings - i; 2625 2626 err = qla_map_sds_to_rds(ha, i, max_idx); 2627 if (err) 2628 return -1; 2629 2630 i += max_idx; 2631 } 2632 } 2633 2634 return (0); 2635 } 2636 2637 static int 2638 qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) 2639 { 2640 device_t dev = ha->pci_dev; 2641 q80_rq_add_rcv_rings_t *add_rcv; 2642 q80_rsp_add_rcv_rings_t *add_rcv_rsp; 2643 uint32_t i,j, err; 2644 qla_hw_t *hw = &ha->hw; 2645 2646 add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; 2647 bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); 2648 2649 add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; 2650 add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); 2651 add_rcv->count_version |= Q8_MBX_CMD_VERSION; 2652 2653 add_rcv->nrds_sets_rings = nsds | (1 << 5); 2654 add_rcv->nsds_rings = nsds; 2655 add_rcv->cntxt_id = hw->rcv_cntxt_id; 2656 2657 for (i = 0; i < nsds; i++) { 2658 2659 j = i + sds_idx; 2660 2661 add_rcv->sds[i].paddr = 2662 qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); 2663 2664 add_rcv->sds[i].size = 2665 qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2666 2667 add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); 2668 add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); 2669 2670 } 2671 2672 for (i = 0; (i < nsds); i++) { 2673 j = i + sds_idx; 2674 2675 add_rcv->rds[i].paddr_std = 2676 qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); 2677 2678 if (ha->hw.enable_9kb) 2679 add_rcv->rds[i].std_bsize = 2680 qla_host_to_le64(MJUM9BYTES); 2681 else 2682 add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2683 2684 add_rcv->rds[i].std_nentries = 2685 qla_host_to_le32(NUM_RX_DESCRIPTORS); 2686 } 2687 2688 2689 if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, 2690 (sizeof (q80_rq_add_rcv_rings_t) >> 2), 2691 ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2692 device_printf(dev, "%s: failed0\n", __func__); 2693 return (-1); 2694 } 2695 2696 add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; 2697 2698 err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); 2699 2700 if (err) { 2701 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2702 return (-1); 2703 } 2704 2705 for (i = 0; i < nsds; i++) { 2706 hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; 2707 } 2708 2709 for (i = 0; i < nsds; i++) { 2710 hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; 2711 } 2712 2713 return (0); 2714 } 2715 2716 /* 2717 * Name: qla_del_rcv_cntxt 2718 * Function: Destroys the Receive Context. 
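 * Teardown mirrors ql_init_hw_if(): the multicast list and the broadcast
 * and unicast CAM entries are removed first, and only then is the
 * Q8_MBX_DESTROY_RX_CNTXT mailbox command issued. A failure in any of the
 * earlier steps returns early and leaves init_rx_cnxt set.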
2719 */ 2720 static void 2721 qla_del_rcv_cntxt(qla_host_t *ha) 2722 { 2723 device_t dev = ha->pci_dev; 2724 q80_rcv_cntxt_destroy_t *rcntxt; 2725 q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; 2726 uint32_t err; 2727 uint8_t bcast_mac[6]; 2728 2729 if (!ha->hw.flags.init_rx_cnxt) 2730 return; 2731 2732 if (qla_hw_del_all_mcast(ha)) 2733 return; 2734 2735 if (ha->hw.flags.bcast_mac) { 2736 2737 bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2738 bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2739 2740 if (qla_config_mac_addr(ha, bcast_mac, 0, 1)) 2741 return; 2742 ha->hw.flags.bcast_mac = 0; 2743 2744 } 2745 2746 if (ha->hw.flags.unicast_mac) { 2747 if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1)) 2748 return; 2749 ha->hw.flags.unicast_mac = 0; 2750 } 2751 2752 rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; 2753 bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); 2754 2755 rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; 2756 rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); 2757 rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2758 2759 rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; 2760 2761 if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2762 (sizeof (q80_rcv_cntxt_destroy_t) >> 2), 2763 ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { 2764 device_printf(dev, "%s: failed0\n", __func__); 2765 return; 2766 } 2767 rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; 2768 2769 err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2770 2771 if (err) { 2772 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2773 } 2774 2775 ha->hw.flags.init_rx_cnxt = 0; 2776 return; 2777 } 2778 2779 /* 2780 * Name: qla_init_xmt_cntxt 2781 * Function: Creates the Transmit Context. 2782 */ 2783 static int 2784 qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2785 { 2786 device_t dev; 2787 qla_hw_t *hw = &ha->hw; 2788 q80_rq_tx_cntxt_t *tcntxt; 2789 q80_rsp_tx_cntxt_t *tcntxt_rsp; 2790 uint32_t err; 2791 qla_hw_tx_cntxt_t *hw_tx_cntxt; 2792 uint32_t intr_idx; 2793 2794 hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 2795 2796 dev = ha->pci_dev; 2797 2798 /* 2799 * Create Transmit Context 2800 */ 2801 tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; 2802 bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); 2803 2804 tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; 2805 tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); 2806 tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2807 2808 intr_idx = txr_idx; 2809 2810 #ifdef QL_ENABLE_ISCSI_TLV 2811 2812 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | 2813 Q8_TX_CNTXT_CAP0_TC; 2814 2815 if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { 2816 tcntxt->traffic_class = 1; 2817 } 2818 2819 intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); 2820 2821 #else 2822 tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; 2823 2824 #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 2825 2826 tcntxt->ntx_rings = 1; 2827 2828 tcntxt->tx_ring[0].paddr = 2829 qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); 2830 tcntxt->tx_ring[0].tx_consumer = 2831 qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); 2832 tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); 2833 2834 tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); 2835 tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); 2836 2837 hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; 2838 hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; 2839 2840 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2841 (sizeof (q80_rq_tx_cntxt_t) >> 2), 2842 ha->hw.mbox, 2843 (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) 
{ 2844 device_printf(dev, "%s: failed0\n", __func__); 2845 return (-1); 2846 } 2847 tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; 2848 2849 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2850 2851 if (err) { 2852 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2853 return -1; 2854 } 2855 2856 hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; 2857 hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; 2858 2859 if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) 2860 return (-1); 2861 2862 return (0); 2863 } 2864 2865 2866 /* 2867 * Name: qla_del_xmt_cntxt 2868 * Function: Destroys the Transmit Context. 2869 */ 2870 static int 2871 qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2872 { 2873 device_t dev = ha->pci_dev; 2874 q80_tx_cntxt_destroy_t *tcntxt; 2875 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; 2876 uint32_t err; 2877 2878 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; 2879 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); 2880 2881 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; 2882 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); 2883 tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2884 2885 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; 2886 2887 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2888 (sizeof (q80_tx_cntxt_destroy_t) >> 2), 2889 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { 2890 device_printf(dev, "%s: failed0\n", __func__); 2891 return (-1); 2892 } 2893 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; 2894 2895 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2896 2897 if (err) { 2898 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2899 return (-1); 2900 } 2901 2902 return (0); 2903 } 2904 static void 2905 qla_del_xmt_cntxt(qla_host_t *ha) 2906 { 2907 uint32_t i; 2908 2909 if (!ha->hw.flags.init_tx_cnxt) 2910 return; 2911 2912 for (i = 0; i < ha->hw.num_tx_rings; i++) { 2913 if (qla_del_xmt_cntxt_i(ha, i)) 2914 break; 2915 } 2916 ha->hw.flags.init_tx_cnxt = 0; 2917 } 2918 2919 static int 2920 qla_init_xmt_cntxt(qla_host_t *ha) 2921 { 2922 uint32_t i, j; 2923 2924 for (i = 0; i < ha->hw.num_tx_rings; i++) { 2925 if (qla_init_xmt_cntxt_i(ha, i) != 0) { 2926 for (j = 0; j < i; j++) 2927 qla_del_xmt_cntxt_i(ha, j); 2928 return (-1); 2929 } 2930 } 2931 ha->hw.flags.init_tx_cnxt = 1; 2932 return (0); 2933 } 2934 2935 static int 2936 qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) 2937 { 2938 int i, nmcast; 2939 uint32_t count = 0; 2940 uint8_t *mcast; 2941 2942 nmcast = ha->hw.nmcast; 2943 2944 QL_DPRINT2(ha, (ha->pci_dev, 2945 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); 2946 2947 mcast = ha->hw.mac_addr_arr; 2948 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 2949 2950 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { 2951 if ((ha->hw.mcast[i].addr[0] != 0) || 2952 (ha->hw.mcast[i].addr[1] != 0) || 2953 (ha->hw.mcast[i].addr[2] != 0) || 2954 (ha->hw.mcast[i].addr[3] != 0) || 2955 (ha->hw.mcast[i].addr[4] != 0) || 2956 (ha->hw.mcast[i].addr[5] != 0)) { 2957 2958 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); 2959 mcast = mcast + ETHER_ADDR_LEN; 2960 count++; 2961 2962 if (count == Q8_MAX_MAC_ADDRS) { 2963 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 2964 add_mcast, count)) { 2965 device_printf(ha->pci_dev, 2966 "%s: failed\n", __func__); 2967 return (-1); 2968 } 2969 2970 count = 0; 2971 mcast = ha->hw.mac_addr_arr; 2972 memset(mcast, 0, 2973 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 2974 } 2975 2976 nmcast--; 2977 
} 2978 } 2979 2980 if (count) { 2981 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, 2982 count)) { 2983 device_printf(ha->pci_dev, "%s: failed\n", __func__); 2984 return (-1); 2985 } 2986 } 2987 QL_DPRINT2(ha, (ha->pci_dev, 2988 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); 2989 2990 return 0; 2991 } 2992 2993 static int 2994 qla_hw_add_all_mcast(qla_host_t *ha) 2995 { 2996 int ret; 2997 2998 ret = qla_hw_all_mcast(ha, 1); 2999 3000 return (ret); 3001 } 3002 3003 static int 3004 qla_hw_del_all_mcast(qla_host_t *ha) 3005 { 3006 int ret; 3007 3008 ret = qla_hw_all_mcast(ha, 0); 3009 3010 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); 3011 ha->hw.nmcast = 0; 3012 3013 return (ret); 3014 } 3015 3016 static int 3017 qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) 3018 { 3019 int i; 3020 3021 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3022 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) 3023 return (0); /* its been already added */ 3024 } 3025 return (-1); 3026 } 3027 3028 static int 3029 qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3030 { 3031 int i; 3032 3033 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3034 3035 if ((ha->hw.mcast[i].addr[0] == 0) && 3036 (ha->hw.mcast[i].addr[1] == 0) && 3037 (ha->hw.mcast[i].addr[2] == 0) && 3038 (ha->hw.mcast[i].addr[3] == 0) && 3039 (ha->hw.mcast[i].addr[4] == 0) && 3040 (ha->hw.mcast[i].addr[5] == 0)) { 3041 3042 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); 3043 ha->hw.nmcast++; 3044 3045 mta = mta + ETHER_ADDR_LEN; 3046 nmcast--; 3047 3048 if (nmcast == 0) 3049 break; 3050 } 3051 3052 } 3053 return 0; 3054 } 3055 3056 static int 3057 qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3058 { 3059 int i; 3060 3061 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3062 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { 3063 3064 ha->hw.mcast[i].addr[0] = 0; 3065 ha->hw.mcast[i].addr[1] = 0; 3066 ha->hw.mcast[i].addr[2] = 0; 3067 ha->hw.mcast[i].addr[3] = 0; 3068 ha->hw.mcast[i].addr[4] = 0; 3069 ha->hw.mcast[i].addr[5] = 0; 3070 3071 ha->hw.nmcast--; 3072 3073 mta = mta + ETHER_ADDR_LEN; 3074 nmcast--; 3075 3076 if (nmcast == 0) 3077 break; 3078 } 3079 } 3080 return 0; 3081 } 3082 3083 /* 3084 * Name: ql_hw_set_multi 3085 * Function: Sets the Multicast Addresses provided by the host O.S into the 3086 * hardware (for the given interface) 3087 */ 3088 int 3089 ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, 3090 uint32_t add_mac) 3091 { 3092 uint8_t *mta = mcast_addr; 3093 int i; 3094 int ret = 0; 3095 uint32_t count = 0; 3096 uint8_t *mcast; 3097 3098 mcast = ha->hw.mac_addr_arr; 3099 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3100 3101 for (i = 0; i < mcnt; i++) { 3102 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { 3103 if (add_mac) { 3104 if (qla_hw_mac_addr_present(ha, mta) != 0) { 3105 bcopy(mta, mcast, ETHER_ADDR_LEN); 3106 mcast = mcast + ETHER_ADDR_LEN; 3107 count++; 3108 } 3109 } else { 3110 if (qla_hw_mac_addr_present(ha, mta) == 0) { 3111 bcopy(mta, mcast, ETHER_ADDR_LEN); 3112 mcast = mcast + ETHER_ADDR_LEN; 3113 count++; 3114 } 3115 } 3116 } 3117 if (count == Q8_MAX_MAC_ADDRS) { 3118 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 3119 add_mac, count)) { 3120 device_printf(ha->pci_dev, "%s: failed\n", 3121 __func__); 3122 return (-1); 3123 } 3124 3125 if (add_mac) { 3126 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, 3127 count); 3128 } else { 3129 qla_hw_del_mcast(ha, 
ha->hw.mac_addr_arr, 3130 count); 3131 } 3132 3133 count = 0; 3134 mcast = ha->hw.mac_addr_arr; 3135 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3136 } 3137 3138 mta += Q8_MAC_ADDR_LEN; 3139 } 3140 3141 if (count) { 3142 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, 3143 count)) { 3144 device_printf(ha->pci_dev, "%s: failed\n", __func__); 3145 return (-1); 3146 } 3147 if (add_mac) { 3148 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); 3149 } else { 3150 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); 3151 } 3152 } 3153 3154 return (ret); 3155 } 3156 3157 /* 3158 * Name: ql_hw_tx_done_locked 3159 * Function: Handle Transmit Completions 3160 */ 3161 void 3162 ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) 3163 { 3164 qla_tx_buf_t *txb; 3165 qla_hw_t *hw = &ha->hw; 3166 uint32_t comp_idx, comp_count = 0; 3167 qla_hw_tx_cntxt_t *hw_tx_cntxt; 3168 3169 hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 3170 3171 /* retrieve index of last entry in tx ring completed */ 3172 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); 3173 3174 while (comp_idx != hw_tx_cntxt->txr_comp) { 3175 3176 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; 3177 3178 hw_tx_cntxt->txr_comp++; 3179 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) 3180 hw_tx_cntxt->txr_comp = 0; 3181 3182 comp_count++; 3183 3184 if (txb->m_head) { 3185 if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); 3186 3187 bus_dmamap_sync(ha->tx_tag, txb->map, 3188 BUS_DMASYNC_POSTWRITE); 3189 bus_dmamap_unload(ha->tx_tag, txb->map); 3190 m_freem(txb->m_head); 3191 3192 txb->m_head = NULL; 3193 } 3194 } 3195 3196 hw_tx_cntxt->txr_free += comp_count; 3197 return; 3198 } 3199 3200 void 3201 ql_update_link_state(qla_host_t *ha) 3202 { 3203 uint32_t link_state; 3204 uint32_t prev_link_state; 3205 3206 if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3207 ha->hw.link_up = 0; 3208 return; 3209 } 3210 link_state = READ_REG32(ha, Q8_LINK_STATE); 3211 3212 prev_link_state = ha->hw.link_up; 3213 3214 if (ha->pci_func == 0) 3215 ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0); 3216 else 3217 ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 
1 : 0); 3218 3219 if (prev_link_state != ha->hw.link_up) { 3220 if (ha->hw.link_up) { 3221 if_link_state_change(ha->ifp, LINK_STATE_UP); 3222 } else { 3223 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 3224 } 3225 } 3226 return; 3227 } 3228 3229 void 3230 ql_hw_stop_rcv(qla_host_t *ha) 3231 { 3232 int i, done, count = 100; 3233 3234 ha->flags.stop_rcv = 1; 3235 3236 while (count) { 3237 done = 1; 3238 for (i = 0; i < ha->hw.num_sds_rings; i++) { 3239 if (ha->hw.sds[i].rcv_active) 3240 done = 0; 3241 } 3242 if (done) 3243 break; 3244 else 3245 qla_mdelay(__func__, 10); 3246 count--; 3247 } 3248 if (!count) 3249 device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__); 3250 3251 return; 3252 } 3253 3254 int 3255 ql_hw_check_health(qla_host_t *ha) 3256 { 3257 uint32_t val; 3258 3259 ha->hw.health_count++; 3260 3261 if (ha->hw.health_count < 1000) 3262 return 0; 3263 3264 ha->hw.health_count = 0; 3265 3266 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); 3267 3268 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || 3269 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { 3270 device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n", 3271 __func__, val); 3272 return -1; 3273 } 3274 3275 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); 3276 3277 if ((val != ha->hw.hbeat_value) && 3278 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { 3279 ha->hw.hbeat_value = val; 3280 return 0; 3281 } 3282 device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n", 3283 __func__, val); 3284 3285 return -1; 3286 } 3287 3288 static int 3289 qla_init_nic_func(qla_host_t *ha) 3290 { 3291 device_t dev; 3292 q80_init_nic_func_t *init_nic; 3293 q80_init_nic_func_rsp_t *init_nic_rsp; 3294 uint32_t err; 3295 3296 dev = ha->pci_dev; 3297 3298 init_nic = (q80_init_nic_func_t *)ha->hw.mbox; 3299 bzero(init_nic, sizeof(q80_init_nic_func_t)); 3300 3301 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; 3302 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); 3303 init_nic->count_version |= Q8_MBX_CMD_VERSION; 3304 3305 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; 3306 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; 3307 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; 3308 3309 //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); 3310 if (qla_mbx_cmd(ha, (uint32_t *)init_nic, 3311 (sizeof (q80_init_nic_func_t) >> 2), 3312 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { 3313 device_printf(dev, "%s: failed\n", __func__); 3314 return -1; 3315 } 3316 3317 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; 3318 // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); 3319 3320 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); 3321 3322 if (err) { 3323 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3324 } 3325 3326 return 0; 3327 } 3328 3329 static int 3330 qla_stop_nic_func(qla_host_t *ha) 3331 { 3332 device_t dev; 3333 q80_stop_nic_func_t *stop_nic; 3334 q80_stop_nic_func_rsp_t *stop_nic_rsp; 3335 uint32_t err; 3336 3337 dev = ha->pci_dev; 3338 3339 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; 3340 bzero(stop_nic, sizeof(q80_stop_nic_func_t)); 3341 3342 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; 3343 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); 3344 stop_nic->count_version |= Q8_MBX_CMD_VERSION; 3345 3346 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; 3347 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; 3348 3349 //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); 3350 if (qla_mbx_cmd(ha, 
(uint32_t *)stop_nic, 3351 (sizeof (q80_stop_nic_func_t) >> 2), 3352 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { 3353 device_printf(dev, "%s: failed\n", __func__); 3354 return -1; 3355 } 3356 3357 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; 3358 //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); 3359 3360 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); 3361 3362 if (err) { 3363 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3364 } 3365 3366 return 0; 3367 } 3368 3369 static int 3370 qla_query_fw_dcbx_caps(qla_host_t *ha) 3371 { 3372 device_t dev; 3373 q80_query_fw_dcbx_caps_t *fw_dcbx; 3374 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; 3375 uint32_t err; 3376 3377 dev = ha->pci_dev; 3378 3379 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; 3380 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); 3381 3382 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; 3383 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); 3384 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; 3385 3386 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); 3387 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, 3388 (sizeof (q80_query_fw_dcbx_caps_t) >> 2), 3389 ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { 3390 device_printf(dev, "%s: failed\n", __func__); 3391 return -1; 3392 } 3393 3394 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; 3395 ql_dump_buf8(ha, __func__, fw_dcbx_rsp, 3396 sizeof (q80_query_fw_dcbx_caps_rsp_t)); 3397 3398 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); 3399 3400 if (err) { 3401 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3402 } 3403 3404 return 0; 3405 } 3406 3407 static int 3408 qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, 3409 uint32_t aen_mb3, uint32_t aen_mb4) 3410 { 3411 device_t dev; 3412 q80_idc_ack_t *idc_ack; 3413 q80_idc_ack_rsp_t *idc_ack_rsp; 3414 uint32_t err; 3415 int count = 300; 3416 3417 dev = ha->pci_dev; 3418 3419 idc_ack = (q80_idc_ack_t *)ha->hw.mbox; 3420 bzero(idc_ack, sizeof(q80_idc_ack_t)); 3421 3422 idc_ack->opcode = Q8_MBX_IDC_ACK; 3423 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); 3424 idc_ack->count_version |= Q8_MBX_CMD_VERSION; 3425 3426 idc_ack->aen_mb1 = aen_mb1; 3427 idc_ack->aen_mb2 = aen_mb2; 3428 idc_ack->aen_mb3 = aen_mb3; 3429 idc_ack->aen_mb4 = aen_mb4; 3430 3431 ha->hw.imd_compl= 0; 3432 3433 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, 3434 (sizeof (q80_idc_ack_t) >> 2), 3435 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { 3436 device_printf(dev, "%s: failed\n", __func__); 3437 return -1; 3438 } 3439 3440 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; 3441 3442 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); 3443 3444 if (err) { 3445 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3446 return(-1); 3447 } 3448 3449 while (count && !ha->hw.imd_compl) { 3450 qla_mdelay(__func__, 100); 3451 count--; 3452 } 3453 3454 if (!count) 3455 return -1; 3456 else 3457 device_printf(dev, "%s: count %d\n", __func__, count); 3458 3459 return (0); 3460 } 3461 3462 static int 3463 qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) 3464 { 3465 device_t dev; 3466 q80_set_port_cfg_t *pcfg; 3467 q80_set_port_cfg_rsp_t *pfg_rsp; 3468 uint32_t err; 3469 int count = 300; 3470 3471 dev = ha->pci_dev; 3472 3473 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; 3474 bzero(pcfg, sizeof(q80_set_port_cfg_t)); 3475 3476 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; 3477 pcfg->count_version = (sizeof 
(q80_set_port_cfg_t) >> 2); 3478 pcfg->count_version |= Q8_MBX_CMD_VERSION; 3479 3480 pcfg->cfg_bits = cfg_bits; 3481 3482 device_printf(dev, "%s: cfg_bits" 3483 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3484 " [0x%x, 0x%x, 0x%x]\n", __func__, 3485 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3486 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3487 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)); 3488 3489 ha->hw.imd_compl= 0; 3490 3491 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3492 (sizeof (q80_set_port_cfg_t) >> 2), 3493 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { 3494 device_printf(dev, "%s: failed\n", __func__); 3495 return -1; 3496 } 3497 3498 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; 3499 3500 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); 3501 3502 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { 3503 while (count && !ha->hw.imd_compl) { 3504 qla_mdelay(__func__, 100); 3505 count--; 3506 } 3507 if (count) { 3508 device_printf(dev, "%s: count %d\n", __func__, count); 3509 3510 err = 0; 3511 } 3512 } 3513 3514 if (err) { 3515 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3516 return(-1); 3517 } 3518 3519 return (0); 3520 } 3521 3522 3523 static int 3524 qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) 3525 { 3526 uint32_t err; 3527 device_t dev = ha->pci_dev; 3528 q80_config_md_templ_size_t *md_size; 3529 q80_config_md_templ_size_rsp_t *md_size_rsp; 3530 3531 #ifndef QL_LDFLASH_FW 3532 3533 ql_minidump_template_hdr_t *hdr; 3534 3535 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; 3536 *size = hdr->size_of_template; 3537 return (0); 3538 3539 #endif /* #ifdef QL_LDFLASH_FW */ 3540 3541 md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; 3542 bzero(md_size, sizeof(q80_config_md_templ_size_t)); 3543 3544 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; 3545 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); 3546 md_size->count_version |= Q8_MBX_CMD_VERSION; 3547 3548 if (qla_mbx_cmd(ha, (uint32_t *) md_size, 3549 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, 3550 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { 3551 3552 device_printf(dev, "%s: failed\n", __func__); 3553 3554 return (-1); 3555 } 3556 3557 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; 3558 3559 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); 3560 3561 if (err) { 3562 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3563 return(-1); 3564 } 3565 3566 *size = md_size_rsp->templ_size; 3567 3568 return (0); 3569 } 3570 3571 static int 3572 qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) 3573 { 3574 device_t dev; 3575 q80_get_port_cfg_t *pcfg; 3576 q80_get_port_cfg_rsp_t *pcfg_rsp; 3577 uint32_t err; 3578 3579 dev = ha->pci_dev; 3580 3581 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; 3582 bzero(pcfg, sizeof(q80_get_port_cfg_t)); 3583 3584 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; 3585 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); 3586 pcfg->count_version |= Q8_MBX_CMD_VERSION; 3587 3588 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3589 (sizeof (q80_get_port_cfg_t) >> 2), 3590 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { 3591 device_printf(dev, "%s: failed\n", __func__); 3592 return -1; 3593 } 3594 3595 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; 3596 3597 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); 3598 3599 if (err) { 3600 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3601 return(-1); 3602 } 3603 3604 device_printf(dev, "%s: [cfg_bits, port type]" 3605 " 
[0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3606 " [0x%x, 0x%x, 0x%x]\n", __func__, 3607 pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, 3608 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3609 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3610 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0) 3611 ); 3612 3613 *cfg_bits = pcfg_rsp->cfg_bits; 3614 3615 return (0); 3616 } 3617 3618 int 3619 ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) 3620 { 3621 struct ether_vlan_header *eh; 3622 uint16_t etype; 3623 struct ip *ip = NULL; 3624 struct ip6_hdr *ip6 = NULL; 3625 struct tcphdr *th = NULL; 3626 uint32_t hdrlen; 3627 uint32_t offset; 3628 uint8_t buf[sizeof(struct ip6_hdr)]; 3629 3630 eh = mtod(mp, struct ether_vlan_header *); 3631 3632 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3633 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3634 etype = ntohs(eh->evl_proto); 3635 } else { 3636 hdrlen = ETHER_HDR_LEN; 3637 etype = ntohs(eh->evl_encap_proto); 3638 } 3639 3640 if (etype == ETHERTYPE_IP) { 3641 3642 offset = (hdrlen + sizeof (struct ip)); 3643 3644 if (mp->m_len >= offset) { 3645 ip = (struct ip *)(mp->m_data + hdrlen); 3646 } else { 3647 m_copydata(mp, hdrlen, sizeof (struct ip), buf); 3648 ip = (struct ip *)buf; 3649 } 3650 3651 if (ip->ip_p == IPPROTO_TCP) { 3652 3653 hdrlen += ip->ip_hl << 2; 3654 offset = hdrlen + 4; 3655 3656 if (mp->m_len >= offset) { 3657 th = (struct tcphdr *)(mp->m_data + hdrlen);; 3658 } else { 3659 m_copydata(mp, hdrlen, 4, buf); 3660 th = (struct tcphdr *)buf; 3661 } 3662 } 3663 3664 } else if (etype == ETHERTYPE_IPV6) { 3665 3666 offset = (hdrlen + sizeof (struct ip6_hdr)); 3667 3668 if (mp->m_len >= offset) { 3669 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); 3670 } else { 3671 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); 3672 ip6 = (struct ip6_hdr *)buf; 3673 } 3674 3675 if (ip6->ip6_nxt == IPPROTO_TCP) { 3676 3677 hdrlen += sizeof(struct ip6_hdr); 3678 offset = hdrlen + 4; 3679 3680 if (mp->m_len >= offset) { 3681 th = (struct tcphdr *)(mp->m_data + hdrlen);; 3682 } else { 3683 m_copydata(mp, hdrlen, 4, buf); 3684 th = (struct tcphdr *)buf; 3685 } 3686 } 3687 } 3688 3689 if (th != NULL) { 3690 if ((th->th_sport == htons(3260)) || 3691 (th->th_dport == htons(3260))) 3692 return 0; 3693 } 3694 return (-1); 3695 } 3696 3697 void 3698 qla_hw_async_event(qla_host_t *ha) 3699 { 3700 switch (ha->hw.aen_mb0) { 3701 case 0x8101: 3702 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, 3703 ha->hw.aen_mb3, ha->hw.aen_mb4); 3704 3705 break; 3706 3707 default: 3708 break; 3709 } 3710 3711 return; 3712 } 3713 3714 #ifdef QL_LDFLASH_FW 3715 static int 3716 ql_get_minidump_template(qla_host_t *ha) 3717 { 3718 uint32_t err; 3719 device_t dev = ha->pci_dev; 3720 q80_config_md_templ_cmd_t *md_templ; 3721 q80_config_md_templ_cmd_rsp_t *md_templ_rsp; 3722 3723 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; 3724 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); 3725 3726 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; 3727 md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); 3728 md_templ->count_version |= Q8_MBX_CMD_VERSION; 3729 3730 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; 3731 md_templ->buff_size = ha->hw.dma_buf.minidump.size; 3732 3733 if (qla_mbx_cmd(ha, (uint32_t *) md_templ, 3734 (sizeof(q80_config_md_templ_cmd_t) >> 2), 3735 ha->hw.mbox, 3736 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { 3737 3738 device_printf(dev, "%s: failed\n", __func__); 3739 3740 
return (-1); 3741 } 3742 3743 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; 3744 3745 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); 3746 3747 if (err) { 3748 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3749 return (-1); 3750 } 3751 3752 return (0); 3753 3754 } 3755 #endif /* #ifdef QL_LDFLASH_FW */ 3756 3757 /* 3758 * Minidump related functionality 3759 */ 3760 3761 static int ql_parse_template(qla_host_t *ha); 3762 3763 static uint32_t ql_rdcrb(qla_host_t *ha, 3764 ql_minidump_entry_rdcrb_t *crb_entry, 3765 uint32_t * data_buff); 3766 3767 static uint32_t ql_pollrd(qla_host_t *ha, 3768 ql_minidump_entry_pollrd_t *entry, 3769 uint32_t * data_buff); 3770 3771 static uint32_t ql_pollrd_modify_write(qla_host_t *ha, 3772 ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 3773 uint32_t *data_buff); 3774 3775 static uint32_t ql_L2Cache(qla_host_t *ha, 3776 ql_minidump_entry_cache_t *cacheEntry, 3777 uint32_t * data_buff); 3778 3779 static uint32_t ql_L1Cache(qla_host_t *ha, 3780 ql_minidump_entry_cache_t *cacheEntry, 3781 uint32_t *data_buff); 3782 3783 static uint32_t ql_rdocm(qla_host_t *ha, 3784 ql_minidump_entry_rdocm_t *ocmEntry, 3785 uint32_t *data_buff); 3786 3787 static uint32_t ql_rdmem(qla_host_t *ha, 3788 ql_minidump_entry_rdmem_t *mem_entry, 3789 uint32_t *data_buff); 3790 3791 static uint32_t ql_rdrom(qla_host_t *ha, 3792 ql_minidump_entry_rdrom_t *romEntry, 3793 uint32_t *data_buff); 3794 3795 static uint32_t ql_rdmux(qla_host_t *ha, 3796 ql_minidump_entry_mux_t *muxEntry, 3797 uint32_t *data_buff); 3798 3799 static uint32_t ql_rdmux2(qla_host_t *ha, 3800 ql_minidump_entry_mux2_t *muxEntry, 3801 uint32_t *data_buff); 3802 3803 static uint32_t ql_rdqueue(qla_host_t *ha, 3804 ql_minidump_entry_queue_t *queueEntry, 3805 uint32_t *data_buff); 3806 3807 static uint32_t ql_cntrl(qla_host_t *ha, 3808 ql_minidump_template_hdr_t *template_hdr, 3809 ql_minidump_entry_cntrl_t *crbEntry); 3810 3811 3812 static uint32_t 3813 ql_minidump_size(qla_host_t *ha) 3814 { 3815 uint32_t i, k; 3816 uint32_t size = 0; 3817 ql_minidump_template_hdr_t *hdr; 3818 3819 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; 3820 3821 i = 0x2; 3822 3823 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { 3824 if (i & ha->hw.mdump_capture_mask) 3825 size += hdr->capture_size_array[k]; 3826 i = i << 1; 3827 } 3828 return (size); 3829 } 3830 3831 static void 3832 ql_free_minidump_buffer(qla_host_t *ha) 3833 { 3834 if (ha->hw.mdump_buffer != NULL) { 3835 free(ha->hw.mdump_buffer, M_QLA83XXBUF); 3836 ha->hw.mdump_buffer = NULL; 3837 ha->hw.mdump_buffer_size = 0; 3838 } 3839 return; 3840 } 3841 3842 static int 3843 ql_alloc_minidump_buffer(qla_host_t *ha) 3844 { 3845 ha->hw.mdump_buffer_size = ql_minidump_size(ha); 3846 3847 if (!ha->hw.mdump_buffer_size) 3848 return (-1); 3849 3850 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, 3851 M_NOWAIT); 3852 3853 if (ha->hw.mdump_buffer == NULL) 3854 return (-1); 3855 3856 return (0); 3857 } 3858 3859 static void 3860 ql_free_minidump_template_buffer(qla_host_t *ha) 3861 { 3862 if (ha->hw.mdump_template != NULL) { 3863 free(ha->hw.mdump_template, M_QLA83XXBUF); 3864 ha->hw.mdump_template = NULL; 3865 ha->hw.mdump_template_size = 0; 3866 } 3867 return; 3868 } 3869 3870 static int 3871 ql_alloc_minidump_template_buffer(qla_host_t *ha) 3872 { 3873 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; 3874 3875 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, 3876 M_QLA83XXBUF, M_NOWAIT); 3877 
3878 if (ha->hw.mdump_template == NULL) 3879 return (-1); 3880 3881 return (0); 3882 } 3883 3884 static int 3885 ql_alloc_minidump_buffers(qla_host_t *ha) 3886 { 3887 int ret; 3888 3889 ret = ql_alloc_minidump_template_buffer(ha); 3890 3891 if (ret) 3892 return (ret); 3893 3894 ret = ql_alloc_minidump_buffer(ha); 3895 3896 if (ret) 3897 ql_free_minidump_template_buffer(ha); 3898 3899 return (ret); 3900 } 3901 3902 3903 static uint32_t 3904 ql_validate_minidump_checksum(qla_host_t *ha) 3905 { 3906 uint64_t sum = 0; 3907 int count; 3908 uint32_t *template_buff; 3909 3910 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); 3911 template_buff = ha->hw.dma_buf.minidump.dma_b; 3912 3913 while (count-- > 0) { 3914 sum += *template_buff++; 3915 } 3916 3917 while (sum >> 32) { 3918 sum = (sum & 0xFFFFFFFF) + (sum >> 32); 3919 } 3920 3921 return (~sum); 3922 } 3923 3924 int 3925 ql_minidump_init(qla_host_t *ha) 3926 { 3927 int ret = 0; 3928 uint32_t template_size = 0; 3929 device_t dev = ha->pci_dev; 3930 3931 /* 3932 * Get Minidump Template Size 3933 */ 3934 ret = qla_get_minidump_tmplt_size(ha, &template_size); 3935 3936 if (ret || (template_size == 0)) { 3937 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, 3938 template_size); 3939 return (-1); 3940 } 3941 3942 /* 3943 * Allocate Memory for Minidump Template 3944 */ 3945 3946 ha->hw.dma_buf.minidump.alignment = 8; 3947 ha->hw.dma_buf.minidump.size = template_size; 3948 3949 #ifdef QL_LDFLASH_FW 3950 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { 3951 3952 device_printf(dev, "%s: minidump dma alloc failed\n", __func__); 3953 3954 return (-1); 3955 } 3956 ha->hw.dma_buf.flags.minidump = 1; 3957 3958 /* 3959 * Retrieve Minidump Template 3960 */ 3961 ret = ql_get_minidump_template(ha); 3962 #else 3963 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; 3964 3965 #endif /* #ifdef QL_LDFLASH_FW */ 3966 3967 if (ret == 0) { 3968 3969 ret = ql_validate_minidump_checksum(ha); 3970 3971 if (ret == 0) { 3972 3973 ret = ql_alloc_minidump_buffers(ha); 3974 3975 if (ret == 0) 3976 ha->hw.mdump_init = 1; 3977 else 3978 device_printf(dev, 3979 "%s: ql_alloc_minidump_buffers" 3980 " failed\n", __func__); 3981 } else { 3982 device_printf(dev, "%s: ql_validate_minidump_checksum" 3983 " failed\n", __func__); 3984 } 3985 } else { 3986 device_printf(dev, "%s: ql_get_minidump_template failed\n", 3987 __func__); 3988 } 3989 3990 if (ret) 3991 ql_minidump_free(ha); 3992 3993 return (ret); 3994 } 3995 3996 static void 3997 ql_minidump_free(qla_host_t *ha) 3998 { 3999 ha->hw.mdump_init = 0; 4000 if (ha->hw.dma_buf.flags.minidump) { 4001 ha->hw.dma_buf.flags.minidump = 0; 4002 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); 4003 } 4004 4005 ql_free_minidump_template_buffer(ha); 4006 ql_free_minidump_buffer(ha); 4007 4008 return; 4009 } 4010 4011 void 4012 ql_minidump(qla_host_t *ha) 4013 { 4014 if (!ha->hw.mdump_init) 4015 return; 4016 4017 if (ha->hw.mdump_done) 4018 return; 4019 4020 ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); 4021 4022 bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size); 4023 bzero(ha->hw.mdump_template, ha->hw.mdump_template_size); 4024 4025 bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template, 4026 ha->hw.mdump_template_size); 4027 4028 ql_parse_template(ha); 4029 4030 ql_start_sequence(ha, ha->hw.mdump_start_seq_index); 4031 4032 ha->hw.mdump_done = 1; 4033 4034 return; 4035 } 4036 4037 4038 /* 4039 * helper routines 4040 */ 4041 static void 4042 ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize) 4043 { 4044 
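/*
 * If the number of bytes actually captured differs from the size the
 * template advertised for this entry, record the real size and set
 * QL_DBG_SIZE_ERR_FLAG so tools post-processing the dump can detect the
 * mismatch.
 */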
if (esize != entry->hdr.entry_capture_size) { 4045 entry->hdr.entry_capture_size = esize; 4046 entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG; 4047 } 4048 return; 4049 } 4050 4051 4052 static int 4053 ql_parse_template(qla_host_t *ha) 4054 { 4055 uint32_t num_of_entries, buff_level, e_cnt, esize; 4056 uint32_t end_cnt, rv = 0; 4057 char *dump_buff, *dbuff; 4058 int sane_start = 0, sane_end = 0; 4059 ql_minidump_template_hdr_t *template_hdr; 4060 ql_minidump_entry_t *entry; 4061 uint32_t capture_mask; 4062 uint32_t dump_size; 4063 4064 /* Setup parameters */ 4065 template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template; 4066 4067 if (template_hdr->entry_type == TLHDR) 4068 sane_start = 1; 4069 4070 dump_buff = (char *) ha->hw.mdump_buffer; 4071 4072 num_of_entries = template_hdr->num_of_entries; 4073 4074 entry = (ql_minidump_entry_t *) ((char *)template_hdr 4075 + template_hdr->first_entry_offset ); 4076 4077 template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] = 4078 template_hdr->ocm_window_array[ha->pci_func]; 4079 template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func; 4080 4081 capture_mask = ha->hw.mdump_capture_mask; 4082 dump_size = ha->hw.mdump_buffer_size; 4083 4084 template_hdr->driver_capture_mask = capture_mask; 4085 4086 QL_DPRINT80(ha, (ha->pci_dev, 4087 "%s: sane_start = %d num_of_entries = %d " 4088 "capture_mask = 0x%x dump_size = %d \n", 4089 __func__, sane_start, num_of_entries, capture_mask, dump_size)); 4090 4091 for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) { 4092 4093 /* 4094 * If the capture_mask of the entry does not match capture mask 4095 * skip the entry after marking the driver_flags indicator. 4096 */ 4097 4098 if (!(entry->hdr.entry_capture_mask & capture_mask)) { 4099 4100 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4101 entry = (ql_minidump_entry_t *) ((char *) entry 4102 + entry->hdr.entry_size); 4103 continue; 4104 } 4105 4106 /* 4107 * This is ONLY needed in implementations where 4108 * the capture buffer allocated is too small to capture 4109 * all of the required entries for a given capture mask. 4110 * We need to empty the buffer contents to a file 4111 * if possible, before processing the next entry 4112 * If the buff_full_flag is set, no further capture will happen 4113 * and all remaining non-control entries will be skipped. 
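 * The walk below always advances by entry->hdr.entry_size, so a skipped
 * entry never desynchronizes traversal of the template; control (CNTRL)
 * entries, which capture no data, continue to be executed.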
4114 */ 4115 if (entry->hdr.entry_capture_size != 0) { 4116 if ((buff_level + entry->hdr.entry_capture_size) > 4117 dump_size) { 4118 /* Try to recover by emptying buffer to file */ 4119 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4120 entry = (ql_minidump_entry_t *) ((char *) entry 4121 + entry->hdr.entry_size); 4122 continue; 4123 } 4124 } 4125 4126 /* 4127 * Decode the entry type and process it accordingly 4128 */ 4129 4130 switch (entry->hdr.entry_type) { 4131 case RDNOP: 4132 break; 4133 4134 case RDEND: 4135 if (sane_end == 0) { 4136 end_cnt = e_cnt; 4137 } 4138 sane_end++; 4139 break; 4140 4141 case RDCRB: 4142 dbuff = dump_buff + buff_level; 4143 esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff); 4144 ql_entry_err_chk(entry, esize); 4145 buff_level += esize; 4146 break; 4147 4148 case POLLRD: 4149 dbuff = dump_buff + buff_level; 4150 esize = ql_pollrd(ha, (void *)entry, (void *)dbuff); 4151 ql_entry_err_chk(entry, esize); 4152 buff_level += esize; 4153 break; 4154 4155 case POLLRDMWR: 4156 dbuff = dump_buff + buff_level; 4157 esize = ql_pollrd_modify_write(ha, (void *)entry, 4158 (void *)dbuff); 4159 ql_entry_err_chk(entry, esize); 4160 buff_level += esize; 4161 break; 4162 4163 case L2ITG: 4164 case L2DTG: 4165 case L2DAT: 4166 case L2INS: 4167 dbuff = dump_buff + buff_level; 4168 esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff); 4169 if (esize == -1) { 4170 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4171 } else { 4172 ql_entry_err_chk(entry, esize); 4173 buff_level += esize; 4174 } 4175 break; 4176 4177 case L1DAT: 4178 case L1INS: 4179 dbuff = dump_buff + buff_level; 4180 esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff); 4181 ql_entry_err_chk(entry, esize); 4182 buff_level += esize; 4183 break; 4184 4185 case RDOCM: 4186 dbuff = dump_buff + buff_level; 4187 esize = ql_rdocm(ha, (void *)entry, (void *)dbuff); 4188 ql_entry_err_chk(entry, esize); 4189 buff_level += esize; 4190 break; 4191 4192 case RDMEM: 4193 dbuff = dump_buff + buff_level; 4194 esize = ql_rdmem(ha, (void *)entry, (void *)dbuff); 4195 ql_entry_err_chk(entry, esize); 4196 buff_level += esize; 4197 break; 4198 4199 case BOARD: 4200 case RDROM: 4201 dbuff = dump_buff + buff_level; 4202 esize = ql_rdrom(ha, (void *)entry, (void *)dbuff); 4203 ql_entry_err_chk(entry, esize); 4204 buff_level += esize; 4205 break; 4206 4207 case RDMUX: 4208 dbuff = dump_buff + buff_level; 4209 esize = ql_rdmux(ha, (void *)entry, (void *)dbuff); 4210 ql_entry_err_chk(entry, esize); 4211 buff_level += esize; 4212 break; 4213 4214 case RDMUX2: 4215 dbuff = dump_buff + buff_level; 4216 esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff); 4217 ql_entry_err_chk(entry, esize); 4218 buff_level += esize; 4219 break; 4220 4221 case QUEUE: 4222 dbuff = dump_buff + buff_level; 4223 esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff); 4224 ql_entry_err_chk(entry, esize); 4225 buff_level += esize; 4226 break; 4227 4228 case CNTRL: 4229 if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) { 4230 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4231 } 4232 break; 4233 default: 4234 entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4235 break; 4236 } 4237 /* next entry in the template */ 4238 entry = (ql_minidump_entry_t *) ((char *) entry 4239 + entry->hdr.entry_size); 4240 } 4241 4242 if (!sane_start || (sane_end > 1)) { 4243 device_printf(ha->pci_dev, 4244 "\n%s: Template configuration error. 
/*
 * Read CRB operation.
 */
static uint32_t
ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry,
	uint32_t *data_buff)
{
	int loop_cnt;
	int ret;
	uint32_t op_count, addr, stride, value = 0;

	addr = crb_entry->addr;
	op_count = crb_entry->op_count;
	stride = crb_entry->addr_stride;

	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, addr, &value, 1);

		if (ret)
			return (0);

		*data_buff++ = addr;
		*data_buff++ = value;
		addr = addr + stride;
	}

	/*
	 * Return the amount of data written so that the caller can
	 * advance the capture buffer level accordingly.
	 */
	return (op_count * (2 * sizeof(uint32_t)));
}

/*
 * Handle L2 Cache.
 */
static uint32_t
ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
	uint32_t *data_buff)
{
	int i, k;
	int loop_cnt;
	int ret;

	uint32_t read_value;
	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
	uint32_t tag_value, read_cnt;
	volatile uint8_t cntl_value_r;
	long timeout;
	uint32_t data;

	loop_cnt = cacheEntry->op_count;

	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	cntl_value_w = (uint32_t)cacheEntry->write_value;

	tag_reg_addr = cacheEntry->tag_reg_addr;

	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
		if (ret)
			return (0);

		if (cacheEntry->write_value != 0) {

			ret = ql_rdwr_indreg32(ha, cntrl_addr,
					&cntl_value_w, 0);
			if (ret)
				return (0);
		}

		if (cacheEntry->poll_mask != 0) {

			timeout = cacheEntry->poll_wait;

			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
			if (ret)
				return (0);

			cntl_value_r = (uint8_t)data;

			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {

				if (timeout) {
					qla_mdelay(__func__, 1);
					timeout--;
				} else
					break;

				ret = ql_rdwr_indreg32(ha, cntrl_addr,
						&data, 1);
				if (ret)
					return (0);

				cntl_value_r = (uint8_t)data;
			}

			if (!timeout) {
				/*
				 * Report a timeout error: the core dump
				 * capture failed.  Skip the remaining
				 * entries, write the buffer out to a file
				 * and use the driver specific fields in the
				 * template header to report this error.
				 */
				return (-1);
			}
		}

		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}

		tag_value += cacheEntry->tag_value_stride;
	}

	return (read_cnt * loop_cnt * sizeof(uint32_t));
}
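/*
 * Added note: each tag iteration above writes the tag register, optionally
 * kicks the control register, waits for the poll_mask bits to clear and
 * then bursts read_addr_cnt words out of read_addr.  As a purely
 * illustrative example, op_count = 8 with read_addr_cnt = 4 yields
 * 8 * 4 * sizeof(uint32_t) = 128 bytes of capture data.
 */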
/*
 * Handle L1 Cache.
 */
static uint32_t
ql_L1Cache(qla_host_t *ha,
	ql_minidump_entry_cache_t *cacheEntry,
	uint32_t *data_buff)
{
	int ret;
	int i, k;
	int loop_cnt;

	uint32_t read_value;
	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
	uint32_t tag_value, read_cnt;
	uint32_t cntl_value_w;

	loop_cnt = cacheEntry->op_count;

	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	cntl_value_w = (uint32_t)cacheEntry->write_value;

	tag_reg_addr = cacheEntry->tag_reg_addr;

	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
		if (ret)
			return (0);

		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}

		tag_value += cacheEntry->tag_value_stride;
	}

	return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Reading OCM memory
 */
static uint32_t
ql_rdocm(qla_host_t *ha,
	ql_minidump_entry_rdocm_t *ocmEntry,
	uint32_t *data_buff)
{
	int i, loop_cnt;
	volatile uint32_t addr;
	volatile uint32_t value;

	addr = ocmEntry->read_addr;
	loop_cnt = ocmEntry->op_count;

	for (i = 0; i < loop_cnt; i++) {
		value = READ_REG32(ha, addr);
		*data_buff++ = value;
		addr += ocmEntry->read_addr_stride;
	}

	return (loop_cnt * sizeof(value));
}

/*
 * Read memory
 */
static uint32_t
ql_rdmem(qla_host_t *ha,
	ql_minidump_entry_rdmem_t *mem_entry,
	uint32_t *data_buff)
{
	int ret;
	int i, loop_cnt;
	volatile uint32_t addr;
	q80_offchip_mem_val_t val;

	addr = mem_entry->read_addr;

	/* read_data_size is in bytes; each iteration reads 16 bytes */
	loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
		if (ret)
			return (0);

		*data_buff++ = val.data_lo;
		*data_buff++ = val.data_hi;
		*data_buff++ = val.data_ulo;
		*data_buff++ = val.data_uhi;

		addr += (sizeof(uint32_t) * 4);
	}

	return (loop_cnt * (sizeof(uint32_t) * 4));
}
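/*
 * Added note, with made-up numbers for illustration only: a RDMEM entry
 * with read_data_size = 256 results in loop_cnt = 256 / 16 = 16 off-chip
 * reads; each read stores the four 32-bit words of one
 * q80_offchip_mem_val_t, so 256 bytes land in the capture buffer, which
 * matches the value returned to ql_parse_template().
 */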
/*
 * Read Rom
 */
static uint32_t
ql_rdrom(qla_host_t *ha,
	ql_minidump_entry_rdrom_t *romEntry,
	uint32_t *data_buff)
{
	int ret;
	int i, loop_cnt;
	uint32_t addr;
	uint32_t value;

	addr = romEntry->read_addr;
	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
	loop_cnt /= sizeof(value);

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rd_flash32(ha, addr, &value);
		if (ret)
			return (0);

		*data_buff++ = value;
		addr += sizeof(value);
	}

	return (loop_cnt * sizeof(value));
}

/*
 * Read MUX data
 */
static uint32_t
ql_rdmux(qla_host_t *ha,
	ql_minidump_entry_mux_t *muxEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;
	uint32_t read_value, sel_value;
	uint32_t read_addr, select_addr;

	select_addr = muxEntry->select_addr;
	sel_value = muxEntry->select_value;
	read_addr = muxEntry->read_addr;

	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = sel_value;
		*data_buff++ = read_value;

		sel_value += muxEntry->select_value_stride;
	}

	return (loop_cnt * (2 * sizeof(uint32_t)));
}

static uint32_t
ql_rdmux2(qla_host_t *ha,
	ql_minidump_entry_mux2_t *muxEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;

	uint32_t select_addr_1, select_addr_2;
	uint32_t select_value_1, select_value_2;
	uint32_t select_value_count, select_value_mask;
	uint32_t read_addr, read_value;

	select_addr_1 = muxEntry->select_addr_1;
	select_addr_2 = muxEntry->select_addr_2;
	select_value_1 = muxEntry->select_value_1;
	select_value_2 = muxEntry->select_value_2;
	select_value_count = muxEntry->select_value_count;
	select_value_mask = muxEntry->select_value_mask;

	read_addr = muxEntry->read_addr;

	for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
		loop_cnt++) {

		uint32_t temp_sel_val;

		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
		if (ret)
			return (0);

		temp_sel_val = select_value_1 & select_value_mask;

		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = temp_sel_val;
		*data_buff++ = read_value;

		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
		if (ret)
			return (0);

		temp_sel_val = select_value_2 & select_value_mask;

		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = temp_sel_val;
		*data_buff++ = read_value;

		select_value_1 += muxEntry->select_value_stride;
		select_value_2 += muxEntry->select_value_stride;
	}

	return (loop_cnt * (4 * sizeof(uint32_t)));
}

/*
 * Handling Queue State Reads.
 */
static uint32_t
ql_rdqueue(qla_host_t *ha,
	ql_minidump_entry_queue_t *queueEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt, k;
	uint32_t read_value;
	uint32_t read_addr, read_stride, select_addr;
	uint32_t queue_id, read_cnt;

	read_cnt = queueEntry->read_addr_cnt;
	read_stride = queueEntry->read_addr_stride;
	select_addr = queueEntry->select_addr;

	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
		loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
		if (ret)
			return (0);

		read_addr = queueEntry->read_addr;

		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			read_addr += read_stride;
		}

		queue_id += queueEntry->queue_id_stride;
	}

	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
}
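/*
 * Added note: the helpers above share one convention -- the capture buffer
 * receives (selector, value) style records and the return value is exactly
 * the byte count written.  ql_rdmux2() emits four words per iteration (two
 * selector/value pairs) and ql_rdqueue() emits read_addr_cnt words per
 * queue id, so, purely as an example, op_count = 2 with read_addr_cnt = 3
 * captures 2 * 3 * sizeof(uint32_t) = 24 bytes.
 */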
/*
 * Handling control entries.
 */
static uint32_t
ql_cntrl(qla_host_t *ha,
	ql_minidump_template_hdr_t *template_hdr,
	ql_minidump_entry_cntrl_t *crbEntry)
{
	int ret;
	int count;
	uint32_t opcode, read_value, addr, entry_addr;
	long timeout;

	entry_addr = crbEntry->addr;

	for (count = 0; count < crbEntry->op_count; count++) {
		opcode = crbEntry->opcode;

		if (opcode & QL_DBG_OPCODE_WR) {

			ret = ql_rdwr_indreg32(ha, entry_addr,
					&crbEntry->value_1, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_WR;
		}

		if (opcode & QL_DBG_OPCODE_RW) {

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_RW;
		}

		if (opcode & QL_DBG_OPCODE_AND) {

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			read_value &= crbEntry->value_2;
			opcode &= ~QL_DBG_OPCODE_AND;

			if (opcode & QL_DBG_OPCODE_OR) {
				read_value |= crbEntry->value_3;
				opcode &= ~QL_DBG_OPCODE_OR;
			}

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);
		}

		if (opcode & QL_DBG_OPCODE_OR) {

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			read_value |= crbEntry->value_3;

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_OR;
		}

		if (opcode & QL_DBG_OPCODE_POLL) {

			opcode &= ~QL_DBG_OPCODE_POLL;
			timeout = crbEntry->poll_timeout;
			addr = entry_addr;

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			while ((read_value & crbEntry->value_2)
				!= crbEntry->value_1) {

				if (timeout) {
					qla_mdelay(__func__, 1);
					timeout--;
				} else
					break;

				ret = ql_rdwr_indreg32(ha, addr,
						&read_value, 1);
				if (ret)
					return (0);
			}

			if (!timeout) {
				/*
				 * Report a timeout error: the core dump
				 * capture failed.  Skip the remaining
				 * entries, write the buffer out to a file
				 * and use the driver specific fields in the
				 * template header to report this error.
				 */
				return (-1);
			}
		}

		if (opcode & QL_DBG_OPCODE_RDSTATE) {
			/*
			 * Decide which address to use.
			 */
			if (crbEntry->state_index_a) {
				addr = template_hdr->saved_state_array[
						crbEntry->state_index_a];
			} else {
				addr = entry_addr;
			}

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			template_hdr->saved_state_array[crbEntry->state_index_v]
				= read_value;
			opcode &= ~QL_DBG_OPCODE_RDSTATE;
		}

		if (opcode & QL_DBG_OPCODE_WRSTATE) {
			/*
			 * Decide which value to use.
			 */
			if (crbEntry->state_index_v) {
				read_value = template_hdr->saved_state_array[
						crbEntry->state_index_v];
			} else {
				read_value = crbEntry->value_1;
			}

			/*
			 * Decide which address to use.
			 */
			if (crbEntry->state_index_a) {
				addr = template_hdr->saved_state_array[
						crbEntry->state_index_a];
			} else {
				addr = entry_addr;
			}

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_WRSTATE;
		}

		if (opcode & QL_DBG_OPCODE_MDSTATE) {
			/* Read value from saved state using index */
			read_value = template_hdr->saved_state_array[
					crbEntry->state_index_v];

			read_value <<= crbEntry->shl; /* Shift left operation */
			read_value >>= crbEntry->shr; /* Shift right operation */

			if (crbEntry->value_2) {
				/* check if AND mask is provided */
				read_value &= crbEntry->value_2;
			}

			read_value |= crbEntry->value_3; /* OR operation */
			read_value += crbEntry->value_1; /* increment op */

			/* Write value back to state area. */
			template_hdr->saved_state_array[crbEntry->state_index_v]
				= read_value;
			opcode &= ~QL_DBG_OPCODE_MDSTATE;
		}

		entry_addr += crbEntry->addr_stride;
	}

	return (0);
}
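/*
 * Added summary of the dispatch above: a CNTRL entry may combine several
 * opcode bits and they are consumed in a fixed order -- WR, RW, AND
 * (optionally folding in OR), OR, POLL, RDSTATE, WRSTATE and finally
 * MDSTATE -- with each handled bit cleared from the local opcode copy.
 * CNTRL entries only manipulate registers and the saved_state_array; they
 * never write into the capture buffer, which is why ql_parse_template()
 * does not advance buff_level for them.
 */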
/*
 * Handling rd poll entry.
 */
static uint32_t
ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;
	uint32_t op_count, select_addr, select_value_stride, select_value;
	uint32_t read_addr, poll, mask, data_size, data;
	uint32_t wait_count = 0;

	select_addr = entry->select_addr;
	read_addr = entry->read_addr;
	select_value = entry->select_value;
	select_value_stride = entry->select_value_stride;
	op_count = entry->op_count;
	poll = entry->poll;
	mask = entry->mask;
	data_size = entry->data_size;

	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
		if (ret)
			return (0);

		wait_count = 0;

		while (wait_count < poll) {

			uint32_t temp;

			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
			if (ret)
				return (0);

			if ((temp & mask) != 0) {
				break;
			}
			wait_count++;
		}

		if (wait_count == poll) {
			device_printf(ha->pci_dev,
				"%s: Error in processing entry\n", __func__);
			device_printf(ha->pci_dev,
				"%s: wait_count <0x%x> poll <0x%x>\n",
				__func__, wait_count, poll);
			return 0;
		}

		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
		if (ret)
			return (0);

		*data_buff++ = select_value;
		*data_buff++ = data;
		select_value = select_value + select_value_stride;
	}

	/*
	 * Return the amount of data written so that the caller can
	 * advance the capture buffer level accordingly.
	 */
	return (loop_cnt * (2 * sizeof(uint32_t)));
}
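/*
 * Added note: POLLRD records (select_value, data) pairs, so a successful
 * pass returns op_count * 2 * sizeof(uint32_t) bytes.  If the selected
 * status bits never assert within 'poll' iterations the entry is abandoned
 * and 0 is returned, which ql_entry_err_chk() then flags as a size error
 * whenever a non-zero capture size was expected.
 */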
/*
 * Handling rd modify write poll entry.
 */
static uint32_t
ql_pollrd_modify_write(qla_host_t *ha,
	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
	uint32_t *data_buff)
{
	int ret;
	uint32_t addr_1, addr_2, value_1, value_2, data;
	uint32_t poll, mask, data_size, modify_mask;
	uint32_t wait_count = 0;

	addr_1 = entry->addr_1;
	addr_2 = entry->addr_2;
	value_1 = entry->value_1;
	value_2 = entry->value_2;

	poll = entry->poll;
	mask = entry->mask;
	modify_mask = entry->modify_mask;
	data_size = entry->data_size;

	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
	if (ret)
		return (0);

	wait_count = 0;
	while (wait_count < poll) {

		uint32_t temp;

		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
		if (ret)
			return (0);

		if ((temp & mask) != 0) {
			break;
		}
		wait_count++;
	}

	if (wait_count == poll) {
		device_printf(ha->pci_dev, "%s: Error in processing entry\n",
			__func__);
	} else {

		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
		if (ret)
			return (0);

		data = (data & modify_mask);

		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
		if (ret)
			return (0);

		/* Poll again */
		wait_count = 0;
		while (wait_count < poll) {

			uint32_t temp;

			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
			if (ret)
				return (0);

			if ((temp & mask) != 0) {
				break;
			}
			wait_count++;
		}

		*data_buff++ = addr_2;
		*data_buff++ = data;
	}

	/*
	 * Return the amount of data written so that the caller can
	 * advance the capture buffer level accordingly.
	 */
	return (2 * sizeof(uint32_t));
}
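/*
 * Added note: unlike the other helpers, POLLRDMWR stores at most one
 * (addr_2, data) pair but always reports 2 * sizeof(uint32_t) bytes, even
 * when the initial poll times out and nothing was written; the template's
 * entry_capture_size for this entry type is presumably sized on that
 * assumption.
 */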