/*
 * Copyright (c) 2012-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif


static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

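/*
 * Create an event queue with an MC_CMD_INIT_EVQ MCDI request. The
 * request carries the DMA addresses of the pages backing the queue;
 * the buffer table entries for the queue are managed by the MC.
 */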
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__out_opt	uint32_t *irqp)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int supports_rx_batching;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching).
	 * Cut through is incompatible with RX batching and so enabling cut
	 * through disables RX batching (but it does not affect TX batching).
	 *
	 * So always enable RX and TX event batching, and enable cut through
	 * if RX event batching isn't supported (i.e. on low latency firmware).
	 */
	supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0;
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
	    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	if (irqp != NULL)
		*irqp = MCDI_OUT_DWORD(req, INIT_EVQ_OUT_IRQ);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

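/*
 * Tear down an event queue with an MC_CMD_FINI_EVQ MCDI request,
 * releasing the MC-managed resources for the given EVQ instance.
 */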
static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	/* Set up the handler table */
	eep->ee_rx = ef10_ev_rx;
	eep->ee_tx = ef10_ev_tx;
	eep->ee_driver = ef10_ev_driver;
	eep->ee_drv_gen = ef10_ev_drv_gen;
	eep->ee_mcdi = ef10_ev_mcdi;

	/*
	 * Set up the event queue.
	 * NOTE: ignore the returned IRQ param as firmware does not set it.
	 */
	irq = index;	/* INIT_EVQ expects function-relative vector number */
	if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, NULL)) != 0)
		goto fail3;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * The bug35388 workaround uses the indirect EVQ register,
		 * whose RPTR field is too narrow to hold a full read
		 * pointer, so the pointer is written in two halves.
		 */
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	/* Zero the request buffer before filling it in */
	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

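/*
 * Post a driver-generated software event to an event queue. EF10
 * drivers do not write events directly; the event is delivered by
 * the firmware in response to an MC_CMD_DRIVER_EVENT MCDI request.
 */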
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t timer_val, mode;
	efx_rc_t rc;

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		timer_val = 0;
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		/* Calculate the timer value in quanta */
		timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns;

		/* Moderation value is base 0 so we need to deduct 1 */
		if (timer_val > 0)
			timer_val--;

		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug35388_workaround) {
		EFX_POPULATE_DWORD_3(dword,
		    ERF_DD_EVQ_IND_TIMER_FLAGS,
		    EFE_DD_EVQ_IND_TIMER_FLAGS,
		    ERF_DD_EVQ_IND_TIMER_MODE, mode,
		    ERF_DD_EVQ_IND_TIMER_VAL, timer_val);
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
		    eep->ee_index, &dword, 0);
	} else {
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DZ_TC_TIMER_MODE, mode,
		    ERF_DZ_TC_TIMER_VAL, timer_val);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
		    eep->ee_index, &dword, 0);
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

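/*
 * Decode an RX completion event: extract the packet length, queue
 * label and protocol classes, accumulate the EFX_PKT_* / EFX_CKSUM_*
 * flags, and pass the result to the driver's eec_rx callback.
 */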
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

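/*
 * Decode a TX completion event and pass the index of the last
 * completed descriptor to the driver's eec_tx callback.
 */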
static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

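/*
 * Handle an MCDI event: command completions, link and sensor changes,
 * MC reboot notifications, and RXQ/TXQ error and flush events are
 * dispatched to the appropriate driver callbacks.
 */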
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE1(tx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE1(rx_descq_err, uint32_t, MCDI_EV_FIELD(eqp, DATA));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = erp->er_mask;
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */