Lines Matching +full:mbox +full:-
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2013-2014 Qlogic Corporation
96 return (ha->num_rx_rings); in qls_get_msix_count()
107 if (err || !req->newptr) in qls_syctl_mpi_dump()
125 if (err || !req->newptr) in qls_syctl_link_status()
141 dev = ha->pci_dev; in qls_hw_add_sysctls()
143 ha->num_rx_rings = MAX_RX_RINGS; ha->num_tx_rings = MAX_TX_RINGS; in qls_hw_add_sysctls()
147 OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings, in qls_hw_add_sysctls()
148 ha->num_rx_rings, "Number of Completion Queues"); in qls_hw_add_sysctls()
152 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->num_tx_rings, in qls_hw_add_sysctls()
153 ha->num_tx_rings, "Number of Transmit Rings"); in qls_hw_add_sysctls()
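The hits above (file lines 141-153) show qls_hw_add_sysctls() publishing the ring counts as read-only sysctls. A minimal sketch of that call shape, assuming FreeBSD's sysctl(9) helpers and the driver's softc pointer ha:

    /* Read-only sysctl for the RX ring count; mirrors the
     * SYSCTL_ADD_UINT usage implied by file lines 147-148. */
    SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "num_rx_rings", CTLFLAG_RD, &ha->num_rx_rings,
        ha->num_rx_rings, "Number of Completion Queues");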
190 return (-1); in qls_alloc_dma()
194 return (-1); in qls_alloc_dma()
200 return (-1); in qls_alloc_dma()
207 return (-1); in qls_alloc_dma()
219 while (count--) { in qls_wait_for_mac_proto_idx_ready()
227 ha->qla_initiate_recovery = 1; in qls_wait_for_mac_proto_idx_ready()
228 return (-1); in qls_wait_for_mac_proto_idx_ready()
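File lines 219-228 are the tail of qls_wait_for_mac_proto_idx_ready(): a bounded polling loop that samples a status register until a ready bit appears and, when the retry budget runs out, sets qla_initiate_recovery so the watchdog can reset the adapter. A self-contained sketch of the pattern, with delay_ms() as a hypothetical stand-in for the driver's pause helper:

    #include <stdint.h>

    extern void delay_ms(int ms);           /* hypothetical pause helper */

    /* Poll *reg until ready_bit is set; after `count` misses, flag the
     * adapter for recovery and fail (sketch of the driver's pattern). */
    static int
    wait_for_ready(volatile uint32_t *reg, uint32_t ready_bit, int count,
        int *initiate_recovery)
    {
            while (count--) {
                    if (*reg & ready_bit)
                            return (0);
                    delay_ms(10);
            }
            *initiate_recovery = 1;
            return (-1);
    }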
245 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_config_unicast_mac_addr()
246 return(-1); in qls_config_unicast_mac_addr()
250 mac_upper = (ha->mac_addr[0] << 8) | ha->mac_addr[1]; in qls_config_unicast_mac_addr()
251 mac_lower = (ha->mac_addr[2] << 24) | (ha->mac_addr[3] << 16) | in qls_config_unicast_mac_addr()
252 (ha->mac_addr[4] << 8) | ha->mac_addr[5]; in qls_config_unicast_mac_addr()
258 index = 128 * (ha->pci_func & 0x1); /* index */ in qls_config_unicast_mac_addr()
286 ((ha->pci_func & 0x1) << Q81_CAM_MAC_OFF2_FUNC_SHIFT) | in qls_config_unicast_mac_addr()
311 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_config_mcast_mac_addr()
312 return(-1); in qls_config_mcast_mac_addr()
358 while (count--) { in qls_wait_for_route_idx_ready()
366 ha->qla_initiate_recovery = 1; in qls_wait_for_route_idx_ready()
367 return (-1); in qls_wait_for_route_idx_ready()
378 device_printf(ha->pci_dev, "%s: [0x%08x, 0x%08x] failed\n", in qls_load_route_idx_reg()
397 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_load_route_idx_reg_locked()
398 return(-1); in qls_load_route_idx_reg_locked()
415 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_clear_routing_table()
416 return(-1); in qls_clear_routing_table()
478 return (-1); in qls_init_fw_routing_table()
482 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_init_fw_routing_table()
483 return(-1); in qls_init_fw_routing_table()
498 if (ha->num_rx_rings > 1 ) { in qls_init_fw_routing_table()
542 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in qls_tx_tso_chksum()
544 etype = ntohs(eh->evl_proto); in qls_tx_tso_chksum()
547 etype = ntohs(eh->evl_encap_proto); in qls_tx_tso_chksum()
553 ip = (struct ip *)(mp->m_data + ehdrlen); in qls_tx_tso_chksum()
557 if (mp->m_len < (ehdrlen + ip_hlen)) { in qls_tx_tso_chksum()
561 tx_mac->opcode = Q81_IOCB_TX_TSO; in qls_tx_tso_chksum()
562 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV4 ; in qls_tx_tso_chksum()
564 tx_mac->phdr_offsets = ehdrlen; in qls_tx_tso_chksum()
566 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) << in qls_tx_tso_chksum()
569 ip->ip_sum = 0; in qls_tx_tso_chksum()
571 if (mp->m_pkthdr.csum_flags & CSUM_TSO) { in qls_tx_tso_chksum()
572 tx_mac->flags |= Q81_TX_TSO_FLAGS_LSO; in qls_tx_tso_chksum()
576 th->th_sum = in_pseudo(ip->ip_src.s_addr, in qls_tx_tso_chksum()
577 ip->ip_dst.s_addr, in qls_tx_tso_chksum()
579 tx_mac->mss = mp->m_pkthdr.tso_segsz; in qls_tx_tso_chksum()
580 tx_mac->phdr_length = ip_hlen + ehdrlen + in qls_tx_tso_chksum()
581 (th->th_off << 2); in qls_tx_tso_chksum()
584 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ; in qls_tx_tso_chksum()
586 if (ip->ip_p == IPPROTO_TCP) { in qls_tx_tso_chksum()
587 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC; in qls_tx_tso_chksum()
588 } else if (ip->ip_p == IPPROTO_UDP) { in qls_tx_tso_chksum()
589 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC; in qls_tx_tso_chksum()
596 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); in qls_tx_tso_chksum()
600 if (mp->m_len < (ehdrlen + ip_hlen)) { in qls_tx_tso_chksum()
606 tx_mac->opcode = Q81_IOCB_TX_TSO; in qls_tx_tso_chksum()
607 tx_mac->flags |= Q81_TX_TSO_FLAGS_IPV6 ; in qls_tx_tso_chksum()
608 tx_mac->vlan_off |= Q81_TX_TSO_VLAN_OFF_IC ; in qls_tx_tso_chksum()
610 tx_mac->phdr_offsets = ehdrlen; in qls_tx_tso_chksum()
611 tx_mac->phdr_offsets |= ((ehdrlen + ip_hlen) << in qls_tx_tso_chksum()
614 if (ip6->ip6_nxt == IPPROTO_TCP) { in qls_tx_tso_chksum()
615 tx_mac->flags |= Q81_TX_TSO_FLAGS_TC; in qls_tx_tso_chksum()
616 } else if (ip6->ip6_nxt == IPPROTO_UDP) { in qls_tx_tso_chksum()
617 tx_mac->flags |= Q81_TX_TSO_FLAGS_UC; in qls_tx_tso_chksum()
623 ret = -1; in qls_tx_tso_chksum()
629 return (-1); in qls_tx_tso_chksum()
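In the TSO path (file lines 571-581) the driver reseeds th_sum with the ones-complement pseudo-header sum via in_pseudo() — the continuation line carrying the third argument is not among the hits, but in this pattern it is typically the protocol word — and packs the header offsets into phdr_offsets so the hardware can restart the checksum per segment. in_pseudo(9) is just a folded ones-complement sum of three 32-bit words; a portable sketch:

    #include <stdint.h>

    /* Folded ones-complement sum of three 32-bit words: the value
     * in_pseudo(9) returns, used to seed th_sum for TSO (sketch). */
    static uint16_t
    pseudo_sum(uint32_t a, uint32_t b, uint32_t c)
    {
            uint64_t sum;

            sum = (uint64_t)a + b + c;
            sum = (sum & 0xffffffff) + (sum >> 32);
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return ((uint16_t)sum);
    }

The error paths at file lines 623-629 return -1 when the frame is neither IPv4 nor IPv6, apparently letting the caller fall back to a plain Q81_IOCB_TX_MAC IOCB (file line 710).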
639 txr_done = ha->tx_ring[txr_idx].txr_done; in qls_hw_tx_done()
640 txr_next = ha->tx_ring[txr_idx].txr_next; in qls_hw_tx_done()
643 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS; in qls_hw_tx_done()
645 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next; in qls_hw_tx_done()
647 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS + in qls_hw_tx_done()
648 txr_done - txr_next; in qls_hw_tx_done()
651 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE) in qls_hw_tx_done()
652 return (-1); in qls_hw_tx_done()
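qls_hw_tx_done() (file lines 639-652) derives the free-descriptor count from the consumer index (txr_done) and producer index (txr_next), treating equality as an empty ring, and reports failure once the count falls to the QLA_TX_MIN_FREE reserve. The arithmetic as a standalone helper:

    #include <stdint.h>

    #define NUM_TX_DESCRIPTORS 1024         /* assumption: power-of-two ring */

    /* Free entries given consumer (done) and producer (next) indices. */
    static uint32_t
    txr_free_count(uint32_t done, uint32_t next)
    {
            if (done == next)
                    return (NUM_TX_DESCRIPTORS);               /* ring empty */
            else if (done > next)
                    return (done - next);
            else
                    return (NUM_TX_DESCRIPTORS + done - next); /* wrapped */
    }

Keeping the ring size a power of two is what lets the producer advance with a mask instead of a modulo, as file line 760 shows.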
675 dev = ha->pci_dev; in qls_hw_send()
677 total_length = mp->m_pkthdr.len; in qls_hw_send()
682 return (-1); in qls_hw_send()
685 if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) { in qls_hw_send()
689 ha->tx_ring[txr_idx].txr_free); in qls_hw_send()
690 return (-1); in qls_hw_send()
694 tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next]; in qls_hw_send()
698 if ((mp->m_pkthdr.csum_flags & in qls_hw_send()
704 if (mp->m_pkthdr.csum_flags & CSUM_TSO) in qls_hw_send()
705 ha->tx_ring[txr_idx].tx_tso_frames++; in qls_hw_send()
707 ha->tx_ring[txr_idx].tx_frames++; in qls_hw_send()
710 tx_mac->opcode = Q81_IOCB_TX_MAC; in qls_hw_send()
713 if (mp->m_flags & M_VLANTAG) { in qls_hw_send()
714 tx_mac->vlan_tci = mp->m_pkthdr.ether_vtag; in qls_hw_send()
715 tx_mac->vlan_off |= Q81_TX_MAC_VLAN_OFF_V; in qls_hw_send()
717 ha->tx_ring[txr_idx].tx_vlan_frames++; in qls_hw_send()
720 tx_mac->frame_length = total_length; in qls_hw_send()
722 tx_mac->tid_lo = txr_next; in qls_hw_send()
726 tx_mac->tid_lo)); in qls_hw_send()
729 tx_mac->txd[i].baddr = segs->ds_addr; in qls_hw_send()
730 tx_mac->txd[i].length = segs->ds_len; in qls_hw_send()
733 tx_mac->txd[(nsegs - 1)].flags = Q81_RXB_DESC_FLAGS_E; in qls_hw_send()
737 tx_mac->tid_lo)); in qls_hw_send()
739 tx_mac->txd[0].baddr = in qls_hw_send()
740 ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr; in qls_hw_send()
741 tx_mac->txd[0].length = in qls_hw_send()
743 tx_mac->txd[0].flags = Q81_RXB_DESC_FLAGS_C; in qls_hw_send()
745 tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr; in qls_hw_send()
748 tx_desc->baddr = segs->ds_addr; in qls_hw_send()
749 tx_desc->length = segs->ds_len; in qls_hw_send()
751 if (i == (nsegs -1)) in qls_hw_send()
752 tx_desc->flags = Q81_RXB_DESC_FLAGS_E; in qls_hw_send()
754 tx_desc->flags = 0; in qls_hw_send()
760 txr_next = (txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); in qls_hw_send()
761 ha->tx_ring[txr_idx].txr_next = txr_next; in qls_hw_send()
763 ha->tx_ring[txr_idx].txr_free--; in qls_hw_send()
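In qls_hw_send() (file lines 726-754), a frame whose DMA segment count fits the IOCB's embedded descriptors is described inline, with the last entry flagged E (end of list); longer chains set flag C (continuation) on txd[0] and point it at a per-buffer outbound address list (OAL). A condensed sketch, with the types, flag values, and MAX_INLINE limit as hypothetical stand-ins:

    #include <stdint.h>

    struct txd { uint64_t baddr; uint32_t length; uint32_t flags; };

    #define FLAG_E     0x1      /* hypothetical: end of descriptor list */
    #define FLAG_C     0x2      /* hypothetical: baddr points at an OAL */
    #define MAX_INLINE 8        /* assumption: embedded descriptors per IOCB */

    /* Describe nsegs DMA segments either inline in the IOCB or via an
     * external outbound address list (condensed sketch of the hits). */
    static void
    fill_tx_descs(struct txd *iocb_txd, struct txd *oal, uint64_t oal_paddr,
        const uint64_t *seg_addr, const uint32_t *seg_len, int nsegs)
    {
            int i;

            if (nsegs <= MAX_INLINE) {
                    for (i = 0; i < nsegs; i++) {
                            iocb_txd[i].baddr  = seg_addr[i];
                            iocb_txd[i].length = seg_len[i];
                            iocb_txd[i].flags  = (i == nsegs - 1) ? FLAG_E : 0;
                    }
            } else {
                    iocb_txd[0].baddr  = oal_paddr;
                    iocb_txd[0].length = nsegs * sizeof(struct txd); /* assumption */
                    iocb_txd[0].flags  = FLAG_C;
                    for (i = 0; i < nsegs; i++) {
                            oal[i].baddr  = seg_addr[i];
                            oal[i].length = seg_len[i];
                            oal[i].flags  = (i == nsegs - 1) ? FLAG_E : 0;
                    }
            }
    }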
782 if (ha->hw_init == 0) { in qls_del_hw_if()
787 for (i = 0; i < ha->num_tx_rings; i++) { in qls_del_hw_if()
790 for (i = 0; i < ha->num_rx_rings; i++) { in qls_del_hw_if()
794 for (i = 0; i < ha->num_rx_rings; i++) { in qls_del_hw_if()
795 Q81_DISABLE_INTR(ha, i); /* MSI-x i */ in qls_del_hw_if()
803 ha->flags.intr_enable = 0; in qls_del_hw_if()
813 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
823 QL_DPRINT2((ha->pci_dev, "%s:enter\n", __func__)); in qls_init_hw_if()
829 ha->vm_pgsize = 4096; in qls_init_hw_if()
841 /* Function Specific Control Register - Set Page Size and Enable NIC */ in qls_init_hw_if()
863 for (i = 0; i < ha->num_rx_rings; i++) { in qls_init_hw_if()
869 if (ha->num_rx_rings > 1 ) { in qls_init_hw_if()
877 for (i = 0; i < ha->num_tx_rings; i++) { in qls_init_hw_if()
901 ha->rev_id = READ_REG32(ha, Q81_CTL_REV_ID); in qls_init_hw_if()
917 ha->flags.intr_enable = 1; in qls_init_hw_if()
919 for (i = 0; i < ha->num_rx_rings; i++) { in qls_init_hw_if()
920 Q81_ENABLE_INTR(ha, i); /* MSI-x i */ in qls_init_hw_if()
923 ha->hw_init = 1; in qls_init_hw_if()
927 QL_DPRINT2((ha->pci_dev, "%s:rxr [0x%08x]\n", __func__, in qls_init_hw_if()
928 ha->rx_ring[0].cq_db_offset)); in qls_init_hw_if()
929 QL_DPRINT2((ha->pci_dev, "%s:txr [0x%08x]\n", __func__, in qls_init_hw_if()
930 ha->tx_ring[0].wq_db_offset)); in qls_init_hw_if()
932 for (i = 0; i < ha->num_rx_rings; i++) { in qls_init_hw_if()
934 Q81_WR_LBQ_PROD_IDX(i, ha->rx_ring[i].lbq_in); in qls_init_hw_if()
935 Q81_WR_SBQ_PROD_IDX(i, ha->rx_ring[i].sbq_in); in qls_init_hw_if()
937 QL_DPRINT2((ha->pci_dev, in qls_init_hw_if()
944 for (i = 0; i < ha->num_rx_rings; i++) { in qls_init_hw_if()
949 QL_DPRINT2((ha->pci_dev, "%s:exit\n", __func__)); in qls_init_hw_if()
959 while (count--) { in qls_wait_for_config_reg_bits()
967 ha->qla_initiate_recovery = 1; in qls_wait_for_config_reg_bits()
968 device_printf(ha->pci_dev, "%s: failed\n", __func__); in qls_wait_for_config_reg_bits()
969 return (-1); in qls_wait_for_config_reg_bits()
992 rss_icb = ha->rss_dma.dma_b; in qls_init_rss()
996 rss_icb->flags_base_cq_num = Q81_RSS_ICB_FLAGS_L4K | in qls_init_rss()
1001 rss_icb->mask = 0x3FF; in qls_init_rss()
1004 rss_icb->cq_id[i] = (i & (ha->num_rx_rings - 1)); in qls_init_rss()
1007 memcpy(rss_icb->ipv6_rss_hash_key, q81_hash_key, 40); in qls_init_rss()
1008 memcpy(rss_icb->ipv4_rss_hash_key, q81_hash_key, 16); in qls_init_rss()
1018 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_init_rss()
1022 value = (uint32_t)ha->rss_dma.dma_addr; in qls_init_rss()
1025 value = (uint32_t)(ha->rss_dma.dma_addr >> 32); in qls_init_rss()
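qls_init_rss() (file lines 992-1008) fills in the RSS ICB: a 10-bit hash mask (0x3FF) selecting into a 1024-entry CQ indirection table, plus the 40-byte IPv6 and 16-byte IPv4 hash keys. The round-robin fill relies on the ring count being a power of two; a sketch:

    #include <stdint.h>

    /* Fill a 1024-entry RSS indirection table round-robin; valid only
     * when nrings is a power of two (masking replaces a modulo). */
    static void
    fill_rss_table(uint8_t *cq_id, uint32_t nrings)
    {
            uint32_t i;

            for (i = 0; i < 1024; i++)
                    cq_id[i] = (uint8_t)(i & (nrings - 1));
    }

Masking with (nrings - 1) equals i % nrings only for power-of-two ring counts, which the driver evidently assumes in its MAX_RX_RINGS sizing.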
1049 rxr = &ha->rx_ring[cid]; in qls_init_comp_queue()
1051 rxr->cq_db_offset = ha->vm_pgsize * (128 + cid); in qls_init_comp_queue()
1053 cq_icb = rxr->cq_icb_vaddr; in qls_init_comp_queue()
1057 cq_icb->msix_vector = cid; in qls_init_comp_queue()
1058 cq_icb->flags = Q81_CQ_ICB_FLAGS_LC | in qls_init_comp_queue()
1064 cq_icb->length_v = NUM_CQ_ENTRIES; in qls_init_comp_queue()
1066 cq_icb->cq_baddr_lo = (rxr->cq_base_paddr & 0xFFFFFFFF); in qls_init_comp_queue()
1067 cq_icb->cq_baddr_hi = (rxr->cq_base_paddr >> 32) & 0xFFFFFFFF; in qls_init_comp_queue()
1069 cq_icb->cqi_addr_lo = (rxr->cqi_paddr & 0xFFFFFFFF); in qls_init_comp_queue()
1070 cq_icb->cqi_addr_hi = (rxr->cqi_paddr >> 32) & 0xFFFFFFFF; in qls_init_comp_queue()
1072 cq_icb->pkt_idelay = 10; in qls_init_comp_queue()
1073 cq_icb->idelay = 100; in qls_init_comp_queue()
1075 cq_icb->lbq_baddr_lo = (rxr->lbq_addr_tbl_paddr & 0xFFFFFFFF); in qls_init_comp_queue()
1076 cq_icb->lbq_baddr_hi = (rxr->lbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF; in qls_init_comp_queue()
1078 cq_icb->lbq_bsize = QLA_LGB_SIZE; in qls_init_comp_queue()
1079 cq_icb->lbq_length = QLA_NUM_LGB_ENTRIES; in qls_init_comp_queue()
1081 cq_icb->sbq_baddr_lo = (rxr->sbq_addr_tbl_paddr & 0xFFFFFFFF); in qls_init_comp_queue()
1082 cq_icb->sbq_baddr_hi = (rxr->sbq_addr_tbl_paddr >> 32) & 0xFFFFFFFF; in qls_init_comp_queue()
1084 cq_icb->sbq_bsize = (uint16_t)ha->msize; in qls_init_comp_queue()
1085 cq_icb->sbq_length = QLA_NUM_SMB_ENTRIES; in qls_init_comp_queue()
1097 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_init_comp_queue()
1101 value = (uint32_t)rxr->cq_icb_paddr; in qls_init_comp_queue()
1104 value = (uint32_t)(rxr->cq_icb_paddr >> 32); in qls_init_comp_queue()
1116 rxr->cq_next = 0; in qls_init_comp_queue()
1117 rxr->lbq_next = rxr->lbq_free = 0; in qls_init_comp_queue()
1118 rxr->sbq_next = rxr->sbq_free = 0; in qls_init_comp_queue()
1119 rxr->rx_free = rxr->rx_next = 0; in qls_init_comp_queue()
1120 rxr->lbq_in = (QLA_NUM_LGB_ENTRIES - 1) & ~0xF; in qls_init_comp_queue()
1121 rxr->sbq_in = (QLA_NUM_SMB_ENTRIES - 1) & ~0xF; in qls_init_comp_queue()
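qls_init_comp_queue() (file lines 1066-1085) writes every 64-bit DMA address into the completion-queue ICB as a lo/hi pair of 32-bit words. A sketch of the split:

    #include <stdint.h>

    /* Split a 64-bit bus address into the lo/hi pair an ICB expects. */
    static void
    set_addr64(uint32_t *lo, uint32_t *hi, uint64_t paddr)
    {
            *lo = (uint32_t)(paddr & 0xFFFFFFFF);
            *hi = (uint32_t)((paddr >> 32) & 0xFFFFFFFF);
    }

The initial producer indices at file lines 1120-1121 are rounded down to a multiple of 16 with & ~0xF, presumably because the chip consumes buffer-queue entries in groups of 16.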
1135 txr = &ha->tx_ring[wid]; in qls_init_work_queue()
1137 txr->wq_db_addr = (struct resource *)((uint8_t *)ha->pci_reg1 in qls_init_work_queue()
1138 + (ha->vm_pgsize * wid)); in qls_init_work_queue()
1140 txr->wq_db_offset = (ha->vm_pgsize * wid); in qls_init_work_queue()
1142 wq_icb = txr->wq_icb_vaddr; in qls_init_work_queue()
1145 wq_icb->length_v = NUM_TX_DESCRIPTORS | in qls_init_work_queue()
1148 wq_icb->flags = Q81_WQ_ICB_FLAGS_LO | Q81_WQ_ICB_FLAGS_LI | in qls_init_work_queue()
1151 wq_icb->wqcqid_rss = wid; in qls_init_work_queue()
1153 wq_icb->baddr_lo = txr->wq_paddr & 0xFFFFFFFF; in qls_init_work_queue()
1154 wq_icb->baddr_hi = (txr->wq_paddr >> 32)& 0xFFFFFFFF; in qls_init_work_queue()
1156 wq_icb->ci_addr_lo = txr->txr_cons_paddr & 0xFFFFFFFF; in qls_init_work_queue()
1157 wq_icb->ci_addr_hi = (txr->txr_cons_paddr >> 32)& 0xFFFFFFFF; in qls_init_work_queue()
1167 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_init_work_queue()
1171 value = (uint32_t)txr->wq_icb_paddr; in qls_init_work_queue()
1174 value = (uint32_t)(txr->wq_icb_paddr >> 32); in qls_init_work_queue()
1186 txr->txr_free = NUM_TX_DESCRIPTORS; in qls_init_work_queue()
1187 txr->txr_next = 0; in qls_init_work_queue()
1188 txr->txr_done = 0; in qls_init_work_queue()
1199 nmcast = ha->nmcast; in qls_hw_add_all_mcast()
1202 if ((ha->mcast[i].addr[0] != 0) || in qls_hw_add_all_mcast()
1203 (ha->mcast[i].addr[1] != 0) || in qls_hw_add_all_mcast()
1204 (ha->mcast[i].addr[2] != 0) || in qls_hw_add_all_mcast()
1205 (ha->mcast[i].addr[3] != 0) || in qls_hw_add_all_mcast()
1206 (ha->mcast[i].addr[4] != 0) || in qls_hw_add_all_mcast()
1207 (ha->mcast[i].addr[5] != 0)) { in qls_hw_add_all_mcast()
1208 if (qls_config_mcast_mac_addr(ha, ha->mcast[i].addr, in qls_hw_add_all_mcast()
1210 device_printf(ha->pci_dev, "%s: failed\n", in qls_hw_add_all_mcast()
1212 return (-1); in qls_hw_add_all_mcast()
1215 nmcast--; in qls_hw_add_all_mcast()
1227 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) in qls_hw_add_mcast()
1232 if ((ha->mcast[i].addr[0] == 0) && in qls_hw_add_mcast()
1233 (ha->mcast[i].addr[1] == 0) && in qls_hw_add_mcast()
1234 (ha->mcast[i].addr[2] == 0) && in qls_hw_add_mcast()
1235 (ha->mcast[i].addr[3] == 0) && in qls_hw_add_mcast()
1236 (ha->mcast[i].addr[4] == 0) && in qls_hw_add_mcast()
1237 (ha->mcast[i].addr[5] == 0)) { in qls_hw_add_mcast()
1239 return (-1); in qls_hw_add_mcast()
1241 bcopy(mta, ha->mcast[i].addr, Q8_MAC_ADDR_LEN); in qls_hw_add_mcast()
1242 ha->nmcast++; in qls_hw_add_mcast()
1256 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { in qls_hw_del_mcast()
1258 return (-1); in qls_hw_del_mcast()
1260 ha->mcast[i].addr[0] = 0; in qls_hw_del_mcast()
1261 ha->mcast[i].addr[1] = 0; in qls_hw_del_mcast()
1262 ha->mcast[i].addr[2] = 0; in qls_hw_del_mcast()
1263 ha->mcast[i].addr[3] = 0; in qls_hw_del_mcast()
1264 ha->mcast[i].addr[4] = 0; in qls_hw_del_mcast()
1265 ha->mcast[i].addr[5] = 0; in qls_hw_del_mcast()
1267 ha->nmcast--; in qls_hw_del_mcast()
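qls_hw_add_mcast() and qls_hw_del_mcast() (file lines 1227-1267) keep a host-side shadow of the multicast filter, using an all-zero MAC address as the free-slot sentinel: add scans for a zero slot, delete zeroes the matching entry and decrements nmcast. A compact sketch of the same bookkeeping (table size is an assumption):

    #include <stdint.h>
    #include <string.h>

    #define MAC_LEN   6
    #define MAX_MCAST 128                   /* assumption: table size */

    struct mcast_tbl {
            uint8_t addr[MAX_MCAST][MAC_LEN];
            int     nmcast;
    };

    static const uint8_t zero_mac[MAC_LEN];

    /* Add mta to the first free (all-zero) slot; -1 when full. */
    static int
    mcast_add(struct mcast_tbl *t, const uint8_t *mta)
    {
            int i;

            for (i = 0; i < MAX_MCAST; i++) {
                    if (memcmp(t->addr[i], zero_mac, MAC_LEN) == 0) {
                            memcpy(t->addr[i], mta, MAC_LEN);
                            t->nmcast++;
                            return (0);
                    }
            }
            return (-1);
    }

An inherent quirk of the sentinel scheme is that a genuinely all-zero address cannot be stored, which is harmless for multicast filtering.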
1306 if (!(if_getdrvflags(ha->ifp) & IFF_DRV_RUNNING)) { in qls_update_link_state()
1307 ha->link_up = 0; in qls_update_link_state()
1312 prev_link_state = ha->link_up; in qls_update_link_state()
1314 if ((ha->pci_func & 0x1) == 0) in qls_update_link_state()
1315 ha->link_up = ((link_state & Q81_CTL_STATUS_PL0)? 1 : 0); in qls_update_link_state()
1317 ha->link_up = ((link_state & Q81_CTL_STATUS_PL1)? 1 : 0); in qls_update_link_state()
1319 if (prev_link_state != ha->link_up) { in qls_update_link_state()
1320 if (ha->link_up) { in qls_update_link_state()
1321 if_link_state_change(ha->ifp, LINK_STATE_UP); in qls_update_link_state()
1323 if_link_state_change(ha->ifp, LINK_STATE_DOWN); in qls_update_link_state()
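qls_update_link_state() (file lines 1312-1323) picks the port's link bit by PCI function (PL0 for the even function, PL1 for the odd one) and notifies the stack only on a transition. A kernel-context sketch assuming <net/if.h>/<net/if_var.h>, with hypothetical bit values standing in for Q81_CTL_STATUS_PL0/PL1:

    #define PL0_BIT 0x1             /* hypothetical values standing in for */
    #define PL1_BIT 0x2             /* Q81_CTL_STATUS_PL0/PL1              */

    /* Notify the stack only when the link state actually changes. */
    static void
    update_link(if_t ifp, int pci_func, uint32_t status, int *link_up)
    {
            int up;

            up = (status & ((pci_func & 1) ? PL1_BIT : PL0_BIT)) != 0;
            if (*link_up != up) {
                    *link_up = up;
                    if_link_state_change(ifp,
                        up ? LINK_STATE_UP : LINK_STATE_DOWN);
            }
    }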
1332 if (ha->tx_ring[r_idx].flags.wq_dma) { in qls_free_tx_ring_dma()
1333 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma); in qls_free_tx_ring_dma()
1334 ha->tx_ring[r_idx].flags.wq_dma = 0; in qls_free_tx_ring_dma()
1337 if (ha->tx_ring[r_idx].flags.privb_dma) { in qls_free_tx_ring_dma()
1338 qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma); in qls_free_tx_ring_dma()
1339 ha->tx_ring[r_idx].flags.privb_dma = 0; in qls_free_tx_ring_dma()
1350 for (i = 0; i < ha->num_tx_rings; i++) { in qls_free_tx_dma()
1354 txb = &ha->tx_ring[i].tx_buf[j]; in qls_free_tx_dma()
1356 if (txb->map) { in qls_free_tx_dma()
1357 bus_dmamap_destroy(ha->tx_tag, txb->map); in qls_free_tx_dma()
1362 if (ha->tx_tag != NULL) { in qls_free_tx_dma()
1363 bus_dma_tag_destroy(ha->tx_tag); in qls_free_tx_dma()
1364 ha->tx_tag = NULL; in qls_free_tx_dma()
1377 device_t dev = ha->pci_dev; in qls_alloc_tx_ring_dma()
1379 ha->tx_ring[ridx].wq_dma.alignment = 8; in qls_alloc_tx_ring_dma()
1380 ha->tx_ring[ridx].wq_dma.size = in qls_alloc_tx_ring_dma()
1383 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma); in qls_alloc_tx_ring_dma()
1389 ha->tx_ring[ridx].flags.wq_dma = 1; in qls_alloc_tx_ring_dma()
1391 ha->tx_ring[ridx].privb_dma.alignment = 8; in qls_alloc_tx_ring_dma()
1392 ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE; in qls_alloc_tx_ring_dma()
1394 ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma); in qls_alloc_tx_ring_dma()
1401 ha->tx_ring[ridx].flags.privb_dma = 1; in qls_alloc_tx_ring_dma()
1403 ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b; in qls_alloc_tx_ring_dma()
1404 ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr; in qls_alloc_tx_ring_dma()
1406 v_addr = ha->tx_ring[ridx].privb_dma.dma_b; in qls_alloc_tx_ring_dma()
1407 p_addr = ha->tx_ring[ridx].privb_dma.dma_addr; in qls_alloc_tx_ring_dma()
1409 ha->tx_ring[ridx].wq_icb_vaddr = v_addr; in qls_alloc_tx_ring_dma()
1410 ha->tx_ring[ridx].wq_icb_paddr = p_addr; in qls_alloc_tx_ring_dma()
1412 ha->tx_ring[ridx].txr_cons_vaddr = in qls_alloc_tx_ring_dma()
1414 ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1); in qls_alloc_tx_ring_dma()
1419 txb = ha->tx_ring[ridx].tx_buf; in qls_alloc_tx_ring_dma()
1451 &ha->tx_tag)) { in qls_alloc_tx_dma()
1452 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", in qls_alloc_tx_dma()
1457 for (i = 0; i < ha->num_tx_rings; i++) { in qls_alloc_tx_dma()
1466 txb = &ha->tx_ring[i].tx_buf[j]; in qls_alloc_tx_dma()
1468 ret = bus_dmamap_create(ha->tx_tag, in qls_alloc_tx_dma()
1469 BUS_DMA_NOWAIT, &txb->map); in qls_alloc_tx_dma()
1471 ha->err_tx_dmamap_create++; in qls_alloc_tx_dma()
1472 device_printf(ha->pci_dev, in qls_alloc_tx_dma()
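qls_alloc_tx_dma() (file lines 1451-1472) creates one parent DMA tag and then a bus_dmamap_t per transmit buffer, bumping err_tx_dmamap_create on failure; qls_free_tx_dma() (file lines 1356-1364) undoes this in reverse, destroying each map before the tag. The per-buffer step, condensed from the hits:

    /* One DMA map per TX buffer (mirrors file lines 1468-1471). */
    ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &txb->map);
    if (ret != 0)
            ha->err_tx_dmamap_create++;     /* counted, then unwound */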
1489 qls_free_dmabuf(ha, &ha->rss_dma); in qls_free_rss_dma()
1490 ha->flags.rss_dma = 0; in qls_free_rss_dma()
1498 ha->rss_dma.alignment = 4; in qls_alloc_rss_dma()
1499 ha->rss_dma.size = PAGE_SIZE; in qls_alloc_rss_dma()
1501 ret = qls_alloc_dmabuf(ha, &ha->rss_dma); in qls_alloc_rss_dma()
1504 device_printf(ha->pci_dev, "%s: failed\n", __func__); in qls_alloc_rss_dma()
1506 ha->flags.rss_dma = 1; in qls_alloc_rss_dma()
1514 qls_free_dmabuf(ha, &ha->mpi_dma); in qls_free_mpi_dma()
1515 ha->flags.mpi_dma = 0; in qls_free_mpi_dma()
1523 ha->mpi_dma.alignment = 4; in qls_alloc_mpi_dma()
1524 ha->mpi_dma.size = (0x4000 * 4); in qls_alloc_mpi_dma()
1526 ret = qls_alloc_dmabuf(ha, &ha->mpi_dma); in qls_alloc_mpi_dma()
1528 device_printf(ha->pci_dev, "%s: failed\n", __func__); in qls_alloc_mpi_dma()
1530 ha->flags.mpi_dma = 1; in qls_alloc_mpi_dma()
1538 if (ha->rx_ring[ridx].flags.cq_dma) { in qls_free_rx_ring_dma()
1539 qls_free_dmabuf(ha, &ha->rx_ring[ridx].cq_dma); in qls_free_rx_ring_dma()
1540 ha->rx_ring[ridx].flags.cq_dma = 0; in qls_free_rx_ring_dma()
1543 if (ha->rx_ring[ridx].flags.lbq_dma) { in qls_free_rx_ring_dma()
1544 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma); in qls_free_rx_ring_dma()
1545 ha->rx_ring[ridx].flags.lbq_dma = 0; in qls_free_rx_ring_dma()
1548 if (ha->rx_ring[ridx].flags.sbq_dma) { in qls_free_rx_ring_dma()
1549 qls_free_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma); in qls_free_rx_ring_dma()
1550 ha->rx_ring[ridx].flags.sbq_dma = 0; in qls_free_rx_ring_dma()
1553 if (ha->rx_ring[ridx].flags.lb_dma) { in qls_free_rx_ring_dma()
1554 qls_free_dmabuf(ha, &ha->rx_ring[ridx].lb_dma); in qls_free_rx_ring_dma()
1555 ha->rx_ring[ridx].flags.lb_dma = 0; in qls_free_rx_ring_dma()
1565 for (i = 0; i < ha->num_rx_rings; i++) { in qls_free_rx_dma()
1569 if (ha->rx_tag != NULL) { in qls_free_rx_dma()
1570 bus_dma_tag_destroy(ha->rx_tag); in qls_free_rx_dma()
1571 ha->rx_tag = NULL; in qls_free_rx_dma()
1584 device_t dev = ha->pci_dev; in qls_alloc_rx_ring_dma()
1586 ha->rx_ring[ridx].cq_dma.alignment = 128; in qls_alloc_rx_ring_dma()
1587 ha->rx_ring[ridx].cq_dma.size = in qls_alloc_rx_ring_dma()
1590 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].cq_dma); in qls_alloc_rx_ring_dma()
1596 ha->rx_ring[ridx].flags.cq_dma = 1; in qls_alloc_rx_ring_dma()
1598 ha->rx_ring[ridx].lbq_dma.alignment = 8; in qls_alloc_rx_ring_dma()
1599 ha->rx_ring[ridx].lbq_dma.size = QLA_LGBQ_AND_TABLE_SIZE; in qls_alloc_rx_ring_dma()
1601 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lbq_dma); in qls_alloc_rx_ring_dma()
1607 ha->rx_ring[ridx].flags.lbq_dma = 1; in qls_alloc_rx_ring_dma()
1609 ha->rx_ring[ridx].sbq_dma.alignment = 8; in qls_alloc_rx_ring_dma()
1610 ha->rx_ring[ridx].sbq_dma.size = QLA_SMBQ_AND_TABLE_SIZE; in qls_alloc_rx_ring_dma()
1612 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].sbq_dma); in qls_alloc_rx_ring_dma()
1618 ha->rx_ring[ridx].flags.sbq_dma = 1; in qls_alloc_rx_ring_dma()
1620 ha->rx_ring[ridx].lb_dma.alignment = 8; in qls_alloc_rx_ring_dma()
1621 ha->rx_ring[ridx].lb_dma.size = (QLA_LGB_SIZE * QLA_NUM_LGB_ENTRIES); in qls_alloc_rx_ring_dma()
1623 ret = qls_alloc_dmabuf(ha, &ha->rx_ring[ridx].lb_dma); in qls_alloc_rx_ring_dma()
1628 ha->rx_ring[ridx].flags.lb_dma = 1; in qls_alloc_rx_ring_dma()
1630 bzero(ha->rx_ring[ridx].cq_dma.dma_b, ha->rx_ring[ridx].cq_dma.size); in qls_alloc_rx_ring_dma()
1631 bzero(ha->rx_ring[ridx].lbq_dma.dma_b, ha->rx_ring[ridx].lbq_dma.size); in qls_alloc_rx_ring_dma()
1632 bzero(ha->rx_ring[ridx].sbq_dma.dma_b, ha->rx_ring[ridx].sbq_dma.size); in qls_alloc_rx_ring_dma()
1633 bzero(ha->rx_ring[ridx].lb_dma.dma_b, ha->rx_ring[ridx].lb_dma.size); in qls_alloc_rx_ring_dma()
1636 ha->rx_ring[ridx].cq_base_vaddr = ha->rx_ring[ridx].cq_dma.dma_b; in qls_alloc_rx_ring_dma()
1637 ha->rx_ring[ridx].cq_base_paddr = ha->rx_ring[ridx].cq_dma.dma_addr; in qls_alloc_rx_ring_dma()
1639 v_addr = ha->rx_ring[ridx].cq_dma.dma_b; in qls_alloc_rx_ring_dma()
1640 p_addr = ha->rx_ring[ridx].cq_dma.dma_addr; in qls_alloc_rx_ring_dma()
1646 ha->rx_ring[ridx].cq_icb_vaddr = v_addr; in qls_alloc_rx_ring_dma()
1647 ha->rx_ring[ridx].cq_icb_paddr = p_addr; in qls_alloc_rx_ring_dma()
1653 ha->rx_ring[ridx].cqi_vaddr = (uint32_t *)v_addr; in qls_alloc_rx_ring_dma()
1654 ha->rx_ring[ridx].cqi_paddr = p_addr; in qls_alloc_rx_ring_dma()
1656 v_addr = ha->rx_ring[ridx].lbq_dma.dma_b; in qls_alloc_rx_ring_dma()
1657 p_addr = ha->rx_ring[ridx].lbq_dma.dma_addr; in qls_alloc_rx_ring_dma()
1660 ha->rx_ring[ridx].lbq_addr_tbl_vaddr = v_addr; in qls_alloc_rx_ring_dma()
1661 ha->rx_ring[ridx].lbq_addr_tbl_paddr = p_addr; in qls_alloc_rx_ring_dma()
1664 ha->rx_ring[ridx].lbq_vaddr = v_addr + PAGE_SIZE; in qls_alloc_rx_ring_dma()
1665 ha->rx_ring[ridx].lbq_paddr = p_addr + PAGE_SIZE; in qls_alloc_rx_ring_dma()
1667 v_addr = ha->rx_ring[ridx].sbq_dma.dma_b; in qls_alloc_rx_ring_dma()
1668 p_addr = ha->rx_ring[ridx].sbq_dma.dma_addr; in qls_alloc_rx_ring_dma()
1671 ha->rx_ring[ridx].sbq_addr_tbl_vaddr = v_addr; in qls_alloc_rx_ring_dma()
1672 ha->rx_ring[ridx].sbq_addr_tbl_paddr = p_addr; in qls_alloc_rx_ring_dma()
1675 ha->rx_ring[ridx].sbq_vaddr = v_addr + PAGE_SIZE; in qls_alloc_rx_ring_dma()
1676 ha->rx_ring[ridx].sbq_paddr = p_addr + PAGE_SIZE; in qls_alloc_rx_ring_dma()
1678 ha->rx_ring[ridx].lb_vaddr = ha->rx_ring[ridx].lb_dma.dma_b; in qls_alloc_rx_ring_dma()
1679 ha->rx_ring[ridx].lb_paddr = ha->rx_ring[ridx].lb_dma.dma_addr; in qls_alloc_rx_ring_dma()
1683 p_addr = ha->rx_ring[ridx].lbq_paddr; in qls_alloc_rx_ring_dma()
1684 bq_e = ha->rx_ring[ridx].lbq_addr_tbl_vaddr; in qls_alloc_rx_ring_dma()
1686 bq_e->addr_lo = p_addr & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
1687 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
1689 p_addr = ha->rx_ring[ridx].lb_paddr; in qls_alloc_rx_ring_dma()
1690 bq_e = ha->rx_ring[ridx].lbq_vaddr; in qls_alloc_rx_ring_dma()
1693 bq_e->addr_lo = p_addr & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
1694 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
1702 p_addr = ha->rx_ring[ridx].sbq_paddr; in qls_alloc_rx_ring_dma()
1703 bq_e = ha->rx_ring[ridx].sbq_addr_tbl_vaddr; in qls_alloc_rx_ring_dma()
1706 bq_e->addr_lo = p_addr & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
1707 bq_e->addr_hi = (p_addr >> 32) & 0xFFFFFFFF; in qls_alloc_rx_ring_dma()
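qls_alloc_rx_ring_dma() (file lines 1660-1707) builds a two-level structure for each buffer queue: the first page of the allocation is an address table whose entries point at the queue pages, and the queue entries in turn point at the actual data buffers, every pointer stored as a lo/hi pair. A sketch of filling one level:

    #include <stdint.h>

    struct bq_entry { uint32_t addr_lo, addr_hi; }; /* as at file lines 1686-1687 */

    /* Point n buffer-queue entries at consecutive stride-byte buffers
     * starting at bus address paddr (sketch). */
    static void
    fill_bq(struct bq_entry *bq_e, uint64_t paddr, int n, uint32_t stride)
    {
            int i;

            for (i = 0; i < n; i++, bq_e++, paddr += stride) {
                    bq_e->addr_lo = (uint32_t)(paddr & 0xFFFFFFFF);
                    bq_e->addr_hi = (uint32_t)((paddr >> 32) & 0xFFFFFFFF);
            }
    }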
1734 &ha->rx_tag)) { in qls_alloc_rx_dma()
1735 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", in qls_alloc_rx_dma()
1741 for (i = 0; i < ha->num_rx_rings; i++) { in qls_alloc_rx_dma()
1759 while (count--) { in qls_wait_for_flash_ready()
1772 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__)); in qls_wait_for_flash_ready()
1774 return (-1); in qls_wait_for_flash_ready()
1810 if (bcmp(ha->flash.id, signature, 4)) { in qls_flash_validate()
1811 QL_DPRINT1((ha->pci_dev, "%s: invalid signature " in qls_flash_validate()
1812 "%x:%x:%x:%x %s\n", __func__, ha->flash.id[0], in qls_flash_validate()
1813 ha->flash.id[1], ha->flash.id[2], ha->flash.id[3], in qls_flash_validate()
1815 return(-1); in qls_flash_validate()
1818 data16 = (uint16_t *)&ha->flash; in qls_flash_validate()
1825 QL_DPRINT1((ha->pci_dev, "%s: invalid checksum\n", __func__)); in qls_flash_validate()
1826 return(-1); in qls_flash_validate()
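qls_flash_validate() (file lines 1810-1826) checks a 4-byte signature and then a checksum computed over the flash block as 16-bit words (file line 1818). The usual convention, which the "invalid checksum" path suggests but the hits do not show outright, is that the words must sum to zero modulo 2^16; a sketch under that assumption:

    #include <stdint.h>

    /* Valid when the block's 16-bit words sum to zero mod 2^16
     * (assumption; the hits show only the word cast and the
     * failure message). */
    static int
    flash_csum_ok(const uint16_t *p, int nwords)
    {
            uint16_t sum = 0;

            while (nwords--)
                    sum += *p++;
            return (sum == 0);
    }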
1839 QL_DPRINT1((ha->pci_dev, "%s: semlock failed\n", __func__)); in qls_rd_nic_params()
1840 return(-1); in qls_rd_nic_params()
1843 if ((ha->pci_func & 0x1) == 0) in qls_rd_nic_params()
1848 qflash = (uint32_t *)&ha->flash; in qls_rd_nic_params()
1860 QL_DUMP_BUFFER8(ha, __func__, (&ha->flash), (sizeof (q81_flash_t))); in qls_rd_nic_params()
1867 bcopy(ha->flash.mac_addr0, ha->mac_addr, ETHER_ADDR_LEN); in qls_rd_nic_params()
1869 QL_DPRINT1((ha->pci_dev, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n", in qls_rd_nic_params()
1870 __func__, ha->mac_addr[0], ha->mac_addr[1], ha->mac_addr[2], in qls_rd_nic_params()
1871 ha->mac_addr[3], ha->mac_addr[4], ha->mac_addr[5])); in qls_rd_nic_params()
1886 while (count--) { in qls_sem_lock()
1897 ha->qla_initiate_recovery = 1; in qls_sem_lock()
1898 return (-1); in qls_sem_lock()
1913 while (count--) { in qls_wait_for_proc_addr_ready()
1926 QL_DPRINT1((ha->pci_dev, "%s: failed\n", __func__)); in qls_wait_for_proc_addr_ready()
1928 ha->qla_initiate_recovery = 1; in qls_wait_for_proc_addr_ready()
1929 return (-1); in qls_wait_for_proc_addr_ready()
1988 device_t dev = ha->pci_dev; in qls_hw_nic_reset()
1990 ha->hw_init = 0; in qls_hw_nic_reset()
1997 while (count--) { in qls_hw_nic_reset()
2006 return (-1); in qls_hw_nic_reset()
2014 device_t dev = ha->pci_dev; in qls_hw_reset()
2019 QL_DPRINT2((ha->pci_dev, "%s:enter[%d]\n", __func__, ha->hw_init)); in qls_hw_reset()
2021 if (ha->hw_init == 0) { in qls_hw_reset()
2038 while (count--) { in qls_hw_reset()
2050 while (count--) { in qls_hw_reset()
2105 if ((ha->pci_func & 0x1) == 0) in qls_mbx_rd_reg()
2120 if ((ha->pci_func & 0x1) == 0) in qls_mbx_wr_reg()
2134 int i, ret = -1; in qls_mbx_cmd()
2138 QL_DPRINT2((ha->pci_dev, "%s: enter[0x%08x 0x%08x 0x%08x]\n", in qls_mbx_cmd()
2144 device_printf(ha->pci_dev, "%s: cmd_status[0x%08x]\n", in qls_mbx_cmd()
2151 device_printf(ha->pci_dev, "%s: semlock failed\n", __func__); in qls_mbx_cmd()
2155 ha->mbx_done = 0; in qls_mbx_cmd()
2161 device_printf(ha->pci_dev, in qls_mbx_cmd()
2174 ret = -1; in qls_mbx_cmd()
2175 ha->mbx_done = 0; in qls_mbx_cmd()
2177 while (count--) { in qls_mbx_cmd()
2178 if (ha->flags.intr_enable == 0) { in qls_mbx_cmd()
2197 ha->pci_dev, in qls_mbx_cmd()
2214 if (ha->mbx_done) { in qls_mbx_cmd()
2216 out_mbx[i] = ha->mbox[i]; in qls_mbx_cmd()
2227 if (ha->flags.intr_enable == 0) { in qls_mbx_cmd()
2233 ha->qla_initiate_recovery = 1; in qls_mbx_cmd()
2236 QL_DPRINT2((ha->pci_dev, "%s: exit[%d]\n", __func__, ret)); in qls_mbx_cmd()
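qls_mbx_cmd() (file lines 2134-2236) implements the mailbox handshake: load the inbound mailbox registers, ring the MPI firmware, then either poll by hand when interrupts are off (file line 2178) or wait for the ISR to set mbx_done and copy the reply out of ha->mbox[]; a timeout sets qla_initiate_recovery. The control flow in miniature, with the helpers and softc fields as hypothetical stand-ins:

    #include <stdint.h>

    struct mbx_sc {                     /* minimal softc stand-in */
            volatile int mbx_done;      /* set by the completion ISR */
            int intr_enable;
            int initiate_recovery;
            uint32_t mbox[16];          /* reply captured by the ISR */
    };

    /* Hypothetical helpers standing in for the driver's accessors. */
    extern void mbx_write(struct mbx_sc *, int idx, uint32_t val);
    extern void ring_mpi(struct mbx_sc *);      /* host->MPI doorbell */
    extern void poll_isr(struct mbx_sc *);      /* run ISR work by hand */
    extern void delay_ms(int);

    /* Sketch of the qls_mbx_cmd() handshake. */
    static int
    mbx_cmd(struct mbx_sc *sc, const uint32_t *in, int nin,
        uint32_t *out, int nout, int count)
    {
            int i;

            sc->mbx_done = 0;
            for (i = 0; i < nin; i++)
                    mbx_write(sc, i, in[i]);    /* load inbound mailboxes */
            ring_mpi(sc);                       /* tell the MPI firmware */

            while (count--) {
                    if (sc->intr_enable == 0)
                            poll_isr(sc);       /* no interrupts: poll */
                    if (sc->mbx_done) {         /* ISR captured the reply */
                            for (i = 0; i < nout; i++)
                                    out[i] = sc->mbox[i];
                            return (0);
                    }
                    delay_ms(10);
            }
            sc->initiate_recovery = 1;          /* timed out: reset the MPI */
            return (-1);
    }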
2243 uint32_t *mbox; in qls_mbx_set_mgmt_ctrl() local
2244 device_t dev = ha->pci_dev; in qls_mbx_set_mgmt_ctrl()
2246 mbox = ha->mbox; in qls_mbx_set_mgmt_ctrl()
2247 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); in qls_mbx_set_mgmt_ctrl()
2249 mbox[0] = Q81_MBX_SET_MGMT_CTL; in qls_mbx_set_mgmt_ctrl()
2250 mbox[1] = t_ctrl; in qls_mbx_set_mgmt_ctrl()
2252 if (qls_mbx_cmd(ha, mbox, 2, mbox, 1)) { in qls_mbx_set_mgmt_ctrl()
2254 return (-1); in qls_mbx_set_mgmt_ctrl()
2257 if ((mbox[0] == Q81_MBX_CMD_COMPLETE) || in qls_mbx_set_mgmt_ctrl()
2259 (mbox[0] == Q81_MBX_CMD_ERROR))){ in qls_mbx_set_mgmt_ctrl()
2262 device_printf(dev, "%s failed [0x%08x]\n", __func__, mbox[0]); in qls_mbx_set_mgmt_ctrl()
2263 return (-1); in qls_mbx_set_mgmt_ctrl()
2270 uint32_t *mbox; in qls_mbx_get_mgmt_ctrl() local
2271 device_t dev = ha->pci_dev; in qls_mbx_get_mgmt_ctrl()
2275 mbox = ha->mbox; in qls_mbx_get_mgmt_ctrl()
2276 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); in qls_mbx_get_mgmt_ctrl()
2278 mbox[0] = Q81_MBX_GET_MGMT_CTL; in qls_mbx_get_mgmt_ctrl()
2280 if (qls_mbx_cmd(ha, mbox, 1, mbox, 2)) { in qls_mbx_get_mgmt_ctrl()
2282 return (-1); in qls_mbx_get_mgmt_ctrl()
2285 *t_status = mbox[1]; in qls_mbx_get_mgmt_ctrl()
2293 uint32_t *mbox; in qls_mbx_get_link_status() local
2294 device_t dev = ha->pci_dev; in qls_mbx_get_link_status()
2296 mbox = ha->mbox; in qls_mbx_get_link_status()
2297 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); in qls_mbx_get_link_status()
2299 mbox[0] = Q81_MBX_GET_LNK_STATUS; in qls_mbx_get_link_status()
2301 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) { in qls_mbx_get_link_status()
2306 ha->link_status = mbox[1]; in qls_mbx_get_link_status()
2307 ha->link_down_info = mbox[2]; in qls_mbx_get_link_status()
2308 ha->link_hw_info = mbox[3]; in qls_mbx_get_link_status()
2309 ha->link_dcbx_counters = mbox[4]; in qls_mbx_get_link_status()
2310 ha->link_change_counters = mbox[5]; in qls_mbx_get_link_status()
2313 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]); in qls_mbx_get_link_status()
2321 uint32_t *mbox; in qls_mbx_about_fw() local
2322 device_t dev = ha->pci_dev; in qls_mbx_about_fw()
2324 mbox = ha->mbox; in qls_mbx_about_fw()
2325 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); in qls_mbx_about_fw()
2327 mbox[0] = Q81_MBX_ABOUT_FW; in qls_mbx_about_fw()
2329 if (qls_mbx_cmd(ha, mbox, 1, mbox, 6)) { in qls_mbx_about_fw()
2335 __func__, mbox[0],mbox[1],mbox[2],mbox[3],mbox[4],mbox[5]); in qls_mbx_about_fw()
2343 uint32_t *mbox; in qls_mbx_dump_risc_ram() local
2344 device_t dev = ha->pci_dev; in qls_mbx_dump_risc_ram()
2346 mbox = ha->mbox; in qls_mbx_dump_risc_ram()
2347 bzero(mbox, (sizeof (uint32_t) * Q81_NUM_MBX_REGISTERS)); in qls_mbx_dump_risc_ram()
2349 bzero(ha->mpi_dma.dma_b,(r_size << 2)); in qls_mbx_dump_risc_ram()
2350 b_paddr = ha->mpi_dma.dma_addr; in qls_mbx_dump_risc_ram()
2352 mbox[0] = Q81_MBX_DUMP_RISC_RAM; in qls_mbx_dump_risc_ram()
2353 mbox[1] = r_addr & 0xFFFF; in qls_mbx_dump_risc_ram()
2354 mbox[2] = ((uint32_t)(b_paddr >> 16)) & 0xFFFF; in qls_mbx_dump_risc_ram()
2355 mbox[3] = ((uint32_t)b_paddr) & 0xFFFF; in qls_mbx_dump_risc_ram()
2356 mbox[4] = (r_size >> 16) & 0xFFFF; in qls_mbx_dump_risc_ram()
2357 mbox[5] = r_size & 0xFFFF; in qls_mbx_dump_risc_ram()
2358 mbox[6] = ((uint32_t)(b_paddr >> 48)) & 0xFFFF; in qls_mbx_dump_risc_ram()
2359 mbox[7] = ((uint32_t)(b_paddr >> 32)) & 0xFFFF; in qls_mbx_dump_risc_ram()
2360 mbox[8] = (r_addr >> 16) & 0xFFFF; in qls_mbx_dump_risc_ram()
2362 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map, in qls_mbx_dump_risc_ram()
2365 if (qls_mbx_cmd(ha, mbox, 9, mbox, 1)) { in qls_mbx_dump_risc_ram()
2367 return (-1); in qls_mbx_dump_risc_ram()
2369 if (mbox[0] != 0x4000) { in qls_mbx_dump_risc_ram()
2370 device_printf(ha->pci_dev, "%s: failed!\n", __func__); in qls_mbx_dump_risc_ram()
2371 return (-1); in qls_mbx_dump_risc_ram()
2373 bus_dmamap_sync(ha->mpi_dma.dma_tag, ha->mpi_dma.dma_map, in qls_mbx_dump_risc_ram()
2375 bcopy(ha->mpi_dma.dma_b, buf, (r_size << 2)); in qls_mbx_dump_risc_ram()
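qls_mbx_dump_risc_ram() (file lines 2352-2360) spreads the 64-bit DMA buffer address and the 32-bit word count across 16-bit mailbox registers, with a non-obvious ordering: bits 31:16 land in mbox[2] but bits 63:48 in mbox[6]. The address packing in isolation:

    #include <stdint.h>

    /* Spread a 64-bit bus address across four 16-bit mailboxes in the
     * order file lines 2354-2359 show. */
    static void
    pack_addr64(uint32_t mbox[9], uint64_t paddr)
    {
            mbox[3] = ((uint32_t)paddr) & 0xFFFF;           /* bits 15:0  */
            mbox[2] = ((uint32_t)(paddr >> 16)) & 0xFFFF;   /* bits 31:16 */
            mbox[7] = ((uint32_t)(paddr >> 32)) & 0xFFFF;   /* bits 47:32 */
            mbox[6] = ((uint32_t)(paddr >> 48)) & 0xFFFF;   /* bits 63:48 */
    }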
2386 device_t dev = ha->pci_dev; in qls_mpi_reset()
2392 while (count--) { in qls_mpi_reset()
2403 return (-1); in qls_mpi_reset()