/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

/*
 * qls_tx_comp
 *	Reclaims the transmit buffer named by a completed transmit IOCB:
 *	syncs and unloads the DMA map, frees the mbuf chain, and advances
 *	the ring's done counter.
 */
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
	qla_tx_buf_t *txb;
	uint32_t tx_idx = tx_comp->tid_lo;

	if (tx_idx >= NUM_TX_DESCRIPTORS) {
		/* Completion index out of range; schedule chip recovery. */
		ha->qla_initiate_recovery = 1;
		return;
	}

	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

	if (txb->m_head) {
		ha->ifp->if_opackets++;
		bus_dmamap_sync(ha->tx_tag, txb->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, txb->map);
		m_freem(txb->m_head);

		txb->m_head = NULL;
	}

	ha->tx_ring[txr_idx].txr_done++;

	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
		ha->tx_ring[txr_idx].txr_done = 0;
}

/*
 * qls_replenish_rx
 *	Refills free slots in the receive small buffer queue (SBQ) with
 *	fresh mbufs and publishes them to the hardware in batches of 16 by
 *	updating the SBQ producer index.
 */
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	qla_rx_ring_t *rxr;
	int count;
	volatile q81_bq_addr_e_t *sbq_e;

	rxr = &ha->rx_ring[r_idx];

	count = rxr->rx_free;
	sbq_e = rxr->sbq_vaddr;

	while (count--) {
		rxb = &rxr->rx_buf[rxr->sbq_next];

		if (rxb->m_head == NULL) {
			if (qls_get_mbuf(ha, rxb, NULL) != 0) {
				device_printf(ha->pci_dev,
				    "%s: qls_get_mbuf [0,%d,%d] failed\n",
				    __func__, rxr->sbq_next, r_idx);
				rxb->m_head = NULL;
				break;
			}
		}

		if (rxb->m_head != NULL) {
			sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
			sbq_e[rxr->sbq_next].addr_hi =
			    (uint32_t)(rxb->paddr >> 32);

			rxr->sbq_next++;
			if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
				rxr->sbq_next = 0;

			rxr->sbq_free++;
			rxr->rx_free--;
		}

		/* Hand replenished buffers to the chip 16 at a time. */
		if (rxr->sbq_free == 16) {
			rxr->sbq_in += 16;
			rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
			rxr->sbq_free = 0;

			Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
		}
	}
}
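/*
 * qls_rx_comp
 *	Handles a single receive completion: verifies the completion refers
 *	to the buffer the driver expects, strips any inline 802.1Q tag, sets
 *	checksum-offload and RSS flow hints on the mbuf, and passes it up the
 *	stack (through LRO when a session can accept it). Returns 0 on
 *	success and -1 when the completion is malformed or does not match
 *	the expected buffer.
 */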
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
	qla_rx_buf_t *rxb;
	qla_rx_ring_t *rxr;
	device_t dev = ha->pci_dev;
	struct mbuf *mp = NULL;
	struct ifnet *ifp = ha->ifp;
	struct lro_ctrl *lro;
	struct ether_vlan_header *eh;

	rxr = &ha->rx_ring[rxr_idx];

	lro = &rxr->lro;

	rxb = &rxr->rx_buf[rxr->rx_next];

	if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
		device_printf(dev, "%s: DS bit not set\n", __func__);
		return -1;
	}
	if (rxb->paddr != cq_e->b_paddr) {
		device_printf(dev,
		    "%s: (rxb->paddr != cq_e->b_paddr)[%p, %p]\n",
		    __func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

		Q81_SET_CQ_INVALID(cq_idx);

		ha->qla_initiate_recovery = 1;

		return(-1);
	}

	rxr->rx_int++;

	if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
		mp = rxb->m_head;
		rxb->m_head = NULL;

		if (mp == NULL) {
			device_printf(dev, "%s: mp == NULL\n", __func__);
		} else {
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = cq_e->length;
			mp->m_pkthdr.rcvif = ifp;
			mp->m_len = cq_e->length;

			eh = mtod(mp, struct ether_vlan_header *);

			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
				uint32_t *data = (uint32_t *)eh;

				mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
				mp->m_flags |= M_VLANTAG;

				/*
				 * Shift the MAC addresses up by 4 bytes and
				 * trim the front of the frame so the 802.1Q
				 * tag is removed from the packet data.
				 */
				*(data + 3) = *(data + 2);
				*(data + 2) = *(data + 1);
				*(data + 1) = *data;

				m_adj(mp, ETHER_VLAN_ENCAP_LEN);
			}

			if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
				rxr->rss_int++;
				mp->m_pkthdr.flowid = cq_e->rss;
				mp->m_flags |= M_FLOWID;
			}
			if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
			    Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
				/*
				 * Hardware flagged an error; let the stack
				 * verify the checksums itself.
				 */
				mp->m_pkthdr.csum_flags = 0;
			} else {
				mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
				    CSUM_IP_VALID | CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				mp->m_pkthdr.csum_data = 0xFFFF;
			}
			ifp->if_ipackets++;

			if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
				/* LRO packet has been successfully queued */
			} else {
				(*ifp->if_input)(ifp, mp);
			}
		}
	} else {
		device_printf(dev, "%s: err [0x%08x]\n", __func__,
		    cq_e->flags1);
	}

	rxr->rx_free++;
	rxr->rx_next++;

	if (rxr->rx_next == NUM_RX_DESCRIPTORS)
		rxr->rx_next = 0;

	if ((rxr->rx_free + rxr->sbq_free) >= 16)
		qls_replenish_rx(ha, rxr_idx);

	return 0;
}
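/*
 * qls_cq_isr
 *	Drains the completion queue for the given index: transmit completions
 *	are reaped via qls_tx_comp() and the transmit taskqueue is kicked,
 *	receive completions go through qls_rx_comp(), active LRO sessions are
 *	flushed, and the updated consumer index is written back to the chip.
 */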
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
	q81_cq_e_t *cq_e, *cq_b;
	uint32_t i, cq_comp_idx;
	int ret = 0, tx_comp_done = 0;
	struct lro_ctrl *lro;
	struct lro_entry *queued;

	cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
	lro = &ha->rx_ring[cq_idx].lro;

	cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

	i = ha->rx_ring[cq_idx].cq_next;

	while (i != cq_comp_idx) {
		cq_e = &cq_b[i];

		switch (cq_e->opcode) {
		case Q81_IOCB_TX_MAC:
		case Q81_IOCB_TX_TSO:
			qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
			tx_comp_done++;
			break;

		case Q81_IOCB_RX:
			ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
			break;

		case Q81_IOCB_MPI:
		case Q81_IOCB_SYS:
		default:
			device_printf(ha->pci_dev,
			    "%s[%d %d 0x%x]: illegal opcode\n",
			    __func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
			    cq_e->opcode);
			qls_dump_buf32(ha, __func__, cq_e,
			    (sizeof (q81_cq_e_t) >> 2));
			break;
		}

		i++;
		if (i == NUM_CQ_ENTRIES)
			i = 0;

		if (ret) {
			break;
		}

		/* Caught up; re-read the index in case more work arrived. */
		if (i == cq_comp_idx) {
			cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
		}

		if (tx_comp_done) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			tx_comp_done = 0;
		}
	}

	/* Flush any TCP segments still held by active LRO sessions. */
	while ((!SLIST_EMPTY(&lro->lro_active))) {
		queued = SLIST_FIRST(&lro->lro_active);
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

	if (!ret) {
		Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
	}
	if (tx_comp_done)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	return;
}

/*
 * qls_mbx_isr
 *	Services a firmware interrupt: reads back either a mailbox command
 *	completion (0x4xxx in mailbox register 0) or an asynchronous event
 *	notification (0x8xxx), updates link state accordingly, and clears
 *	the interrupt.
 */
static void
qls_mbx_isr(qla_host_t *ha)
{
	uint32_t data;
	int i;
	device_t dev = ha->pci_dev;

	if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
		if ((data & 0xF000) == 0x4000) {
			/* Mailbox command completion. */
			ha->mbox[0] = data;
			for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->mbox[i] = data;
			}
			ha->mbx_done = 1;
		} else if ((data & 0xF000) == 0x8000) {
			/* We have an AEN (asynchronous event notification). */
			ha->aen[0] = data;
			for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->aen[i] = data;
			}
			device_printf(dev, "%s: AEN "
			    "[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
			    " 0x%08x 0x%08x 0x%08x 0x%08x]\n",
			    __func__,
			    ha->aen[0], ha->aen[1], ha->aen[2],
			    ha->aen[3], ha->aen[4], ha->aen[5],
			    ha->aen[6], ha->aen[7], ha->aen[8]);

			switch ((ha->aen[0] & 0xFFFF)) {
			case 0x8011:	/* link up */
				ha->link_up = 1;
				break;

			case 0x8012:	/* link down */
				ha->link_up = 0;
				break;

			case 0x8130:	/* link hardware info */
				ha->link_hw_info = ha->aen[1];
				break;

			case 0x8131:
				ha->link_hw_info = 0;
				break;
			}
		}
	}
	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

	return;
}

void
qls_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t status;
	uint32_t cq_idx;
	device_t dev;

	ha = ivec->ha;
	cq_idx = ivec->cq_idx;
	dev = ha->pci_dev;

	status = READ_REG32(ha, Q81_CTL_STATUS);

	if (status & Q81_CTL_STATUS_FE) {
		device_printf(dev, "%s fatal error\n", __func__);
		return;
	}

	/* Vector 0 also services firmware (mailbox/AEN) interrupts. */
	if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
		qls_mbx_isr(ha);
	}

	status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

	if (status & (0x1 << cq_idx))
		qls_cq_isr(ha, cq_idx);

	Q81_ENABLE_INTR(ha, cq_idx);

	return;
}
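/*
 * Note: qls_isr() is the per-vector interrupt handler; it expects a
 * qla_ivec_t carrying the softc pointer and the completion queue index this
 * vector services. A minimal sketch of how it is presumably registered at
 * attach time, one MSI-X vector per completion queue (the irq_vec and
 * num_rx_rings names and the interrupt resources are assumptions, not taken
 * from this file):
 *
 *	for (i = 0; i < ha->num_rx_rings; i++) {
 *		ha->irq_vec[i].ha = ha;
 *		ha->irq_vec[i].cq_idx = i;
 *		(void) bus_setup_intr(ha->pci_dev, ha->irq_vec[i].irq,
 *		    (INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
 *		    &ha->irq_vec[i], &ha->irq_vec[i].handle);
 *	}
 */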