/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "efx.h" 35 #include "efx_impl.h" 36 #if EFSYS_OPT_MON_STATS 37 #include "mcdi_mon.h" 38 #endif 39 40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD 41 42 #if EFSYS_OPT_QSTATS 43 #define EFX_EV_QSTAT_INCR(_eep, _stat) \ 44 do { \ 45 (_eep)->ee_stat[_stat]++; \ 46 _NOTE(CONSTANTCONDITION) \ 47 } while (B_FALSE) 48 #else 49 #define EFX_EV_QSTAT_INCR(_eep, _stat) 50 #endif 51 52 53 static __checkReturn boolean_t 54 ef10_ev_rx( 55 __in efx_evq_t *eep, 56 __in efx_qword_t *eqp, 57 __in const efx_ev_callbacks_t *eecp, 58 __in_opt void *arg); 59 60 static __checkReturn boolean_t 61 ef10_ev_tx( 62 __in efx_evq_t *eep, 63 __in efx_qword_t *eqp, 64 __in const efx_ev_callbacks_t *eecp, 65 __in_opt void *arg); 66 67 static __checkReturn boolean_t 68 ef10_ev_driver( 69 __in efx_evq_t *eep, 70 __in efx_qword_t *eqp, 71 __in const efx_ev_callbacks_t *eecp, 72 __in_opt void *arg); 73 74 static __checkReturn boolean_t 75 ef10_ev_drv_gen( 76 __in efx_evq_t *eep, 77 __in efx_qword_t *eqp, 78 __in const efx_ev_callbacks_t *eecp, 79 __in_opt void *arg); 80 81 static __checkReturn boolean_t 82 ef10_ev_mcdi( 83 __in efx_evq_t *eep, 84 __in efx_qword_t *eqp, 85 __in const efx_ev_callbacks_t *eecp, 86 __in_opt void *arg); 87 88 89 static __checkReturn efx_rc_t 90 efx_mcdi_init_evq( 91 __in efx_nic_t *enp, 92 __in unsigned int instance, 93 __in efsys_mem_t *esmp, 94 __in size_t nevs, 95 __in uint32_t irq) 96 { 97 efx_mcdi_req_t req; 98 uint8_t payload[ 99 MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), 100 MC_CMD_INIT_EVQ_OUT_LEN)]; 101 efx_qword_t *dma_addr; 102 uint64_t addr; 103 int npages; 104 int i; 105 int supports_rx_batching; 106 efx_rc_t rc; 107 108 npages = EFX_EVQ_NBUFS(nevs); 109 if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { 110 rc = EINVAL; 111 goto fail1; 112 } 113 114 (void) memset(payload, 0, sizeof (payload)); 115 req.emr_cmd = MC_CMD_INIT_EVQ; 116 req.emr_in_buf = 
payload; 117 req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); 118 req.emr_out_buf = payload; 119 req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; 120 121 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); 122 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); 123 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); 124 125 /* 126 * On Huntington RX and TX event batching can only be requested 127 * together (even if the datapath firmware doesn't actually support RX 128 * batching). 129 * Cut through is incompatible with RX batching and so enabling cut 130 * through disables RX batching (but it does not affect TX batching). 131 * 132 * So always enable RX and TX event batching, and enable cut through 133 * if RX event batching isn't supported (i.e. on low latency firmware). 134 */ 135 supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0; 136 MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, 137 INIT_EVQ_IN_FLAG_INTERRUPTING, 1, 138 INIT_EVQ_IN_FLAG_RPTR_DOS, 0, 139 INIT_EVQ_IN_FLAG_INT_ARMD, 0, 140 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching, 141 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 142 INIT_EVQ_IN_FLAG_TX_MERGE, 1); 143 144 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, 145 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 146 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); 147 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); 148 149 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, 150 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 151 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); 152 153 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); 154 addr = EFSYS_MEM_ADDR(esmp); 155 156 for (i = 0; i < npages; i++) { 157 EFX_POPULATE_QWORD_2(*dma_addr, 158 EFX_DWORD_1, (uint32_t)(addr >> 32), 159 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 160 161 dma_addr++; 162 addr += EFX_BUF_SIZE; 163 } 164 165 efx_mcdi_execute(enp, &req); 166 167 if (req.emr_rc != 0) { 168 rc = req.emr_rc; 169 goto fail2; 170 } 171 172 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 173 
rc = EMSGSIZE; 174 goto fail3; 175 } 176 177 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 178 179 return (0); 180 181 fail3: 182 EFSYS_PROBE(fail3); 183 fail2: 184 EFSYS_PROBE(fail2); 185 fail1: 186 EFSYS_PROBE1(fail1, efx_rc_t, rc); 187 188 return (rc); 189 } 190 191 static __checkReturn efx_rc_t 192 efx_mcdi_fini_evq( 193 __in efx_nic_t *enp, 194 __in uint32_t instance) 195 { 196 efx_mcdi_req_t req; 197 uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, 198 MC_CMD_FINI_EVQ_OUT_LEN)]; 199 efx_rc_t rc; 200 201 (void) memset(payload, 0, sizeof (payload)); 202 req.emr_cmd = MC_CMD_FINI_EVQ; 203 req.emr_in_buf = payload; 204 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 205 req.emr_out_buf = payload; 206 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 207 208 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 209 210 efx_mcdi_execute_quiet(enp, &req); 211 212 if (req.emr_rc != 0) { 213 rc = req.emr_rc; 214 goto fail1; 215 } 216 217 return (0); 218 219 fail1: 220 EFSYS_PROBE1(fail1, efx_rc_t, rc); 221 222 return (rc); 223 } 224 225 226 227 __checkReturn efx_rc_t 228 ef10_ev_init( 229 __in efx_nic_t *enp) 230 { 231 _NOTE(ARGUNUSED(enp)) 232 return (0); 233 } 234 235 void 236 ef10_ev_fini( 237 __in efx_nic_t *enp) 238 { 239 _NOTE(ARGUNUSED(enp)) 240 } 241 242 __checkReturn efx_rc_t 243 ef10_ev_qcreate( 244 __in efx_nic_t *enp, 245 __in unsigned int index, 246 __in efsys_mem_t *esmp, 247 __in size_t n, 248 __in uint32_t id, 249 __in efx_evq_t *eep) 250 { 251 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 252 uint32_t irq; 253 efx_rc_t rc; 254 255 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ 256 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); 257 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); 258 259 if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { 260 rc = EINVAL; 261 goto fail1; 262 } 263 264 if (index >= encp->enc_evq_limit) { 265 rc = EINVAL; 266 goto fail2; 267 } 268 269 /* Set up the handler table */ 270 eep->ee_rx = ef10_ev_rx; 271 
eep->ee_tx = ef10_ev_tx; 272 eep->ee_driver = ef10_ev_driver; 273 eep->ee_drv_gen = ef10_ev_drv_gen; 274 eep->ee_mcdi = ef10_ev_mcdi; 275 276 /* Set up the event queue */ 277 irq = index; /* INIT_EVQ expects function-relative vector number */ 278 279 /* 280 * Interrupts may be raised for events immediately after the queue is 281 * created. See bug58606. 282 */ 283 if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq)) != 0) 284 goto fail3; 285 286 return (0); 287 288 fail3: 289 EFSYS_PROBE(fail3); 290 fail2: 291 EFSYS_PROBE(fail2); 292 fail1: 293 EFSYS_PROBE1(fail1, efx_rc_t, rc); 294 295 return (rc); 296 } 297 298 void 299 ef10_ev_qdestroy( 300 __in efx_evq_t *eep) 301 { 302 efx_nic_t *enp = eep->ee_enp; 303 304 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || 305 enp->en_family == EFX_FAMILY_MEDFORD); 306 307 (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); 308 } 309 310 __checkReturn efx_rc_t 311 ef10_ev_qprime( 312 __in efx_evq_t *eep, 313 __in unsigned int count) 314 { 315 efx_nic_t *enp = eep->ee_enp; 316 uint32_t rptr; 317 efx_dword_t dword; 318 319 rptr = count & eep->ee_mask; 320 321 if (enp->en_nic_cfg.enc_bug35388_workaround) { 322 EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > 323 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 324 EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < 325 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 326 327 EFX_POPULATE_DWORD_2(dword, 328 ERF_DD_EVQ_IND_RPTR_FLAGS, 329 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 330 ERF_DD_EVQ_IND_RPTR, 331 (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); 332 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, 333 &dword, B_FALSE); 334 335 EFX_POPULATE_DWORD_2(dword, 336 ERF_DD_EVQ_IND_RPTR_FLAGS, 337 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 338 ERF_DD_EVQ_IND_RPTR, 339 rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 340 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, 341 &dword, B_FALSE); 342 } else { 343 EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); 344 EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, 345 &dword, 
B_FALSE); 346 } 347 348 return (0); 349 } 350 351 static __checkReturn efx_rc_t 352 efx_mcdi_driver_event( 353 __in efx_nic_t *enp, 354 __in uint32_t evq, 355 __in efx_qword_t data) 356 { 357 efx_mcdi_req_t req; 358 uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, 359 MC_CMD_DRIVER_EVENT_OUT_LEN)]; 360 efx_rc_t rc; 361 362 req.emr_cmd = MC_CMD_DRIVER_EVENT; 363 req.emr_in_buf = payload; 364 req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; 365 req.emr_out_buf = payload; 366 req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; 367 368 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); 369 370 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, 371 EFX_QWORD_FIELD(data, EFX_DWORD_0)); 372 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, 373 EFX_QWORD_FIELD(data, EFX_DWORD_1)); 374 375 efx_mcdi_execute(enp, &req); 376 377 if (req.emr_rc != 0) { 378 rc = req.emr_rc; 379 goto fail1; 380 } 381 382 return (0); 383 384 fail1: 385 EFSYS_PROBE1(fail1, efx_rc_t, rc); 386 387 return (rc); 388 } 389 390 void 391 ef10_ev_qpost( 392 __in efx_evq_t *eep, 393 __in uint16_t data) 394 { 395 efx_nic_t *enp = eep->ee_enp; 396 efx_qword_t event; 397 398 EFX_POPULATE_QWORD_3(event, 399 ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, 400 ESF_DZ_DRV_SUB_CODE, 0, 401 ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); 402 403 (void) efx_mcdi_driver_event(enp, eep->ee_index, event); 404 } 405 406 __checkReturn efx_rc_t 407 ef10_ev_qmoderate( 408 __in efx_evq_t *eep, 409 __in unsigned int us) 410 { 411 efx_nic_t *enp = eep->ee_enp; 412 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 413 efx_dword_t dword; 414 uint32_t timer_val, mode; 415 efx_rc_t rc; 416 417 if (us > encp->enc_evq_timer_max_us) { 418 rc = EINVAL; 419 goto fail1; 420 } 421 422 /* If the value is zero then disable the timer */ 423 if (us == 0) { 424 timer_val = 0; 425 mode = FFE_CZ_TIMER_MODE_DIS; 426 } else { 427 /* Calculate the timer value in quanta */ 428 timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns; 429 430 /* Moderation value is base 0 so we 
need to deduct 1 */ 431 if (timer_val > 0) 432 timer_val--; 433 434 mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; 435 } 436 437 if (encp->enc_bug35388_workaround) { 438 EFX_POPULATE_DWORD_3(dword, 439 ERF_DD_EVQ_IND_TIMER_FLAGS, 440 EFE_DD_EVQ_IND_TIMER_FLAGS, 441 ERF_DD_EVQ_IND_TIMER_MODE, mode, 442 ERF_DD_EVQ_IND_TIMER_VAL, timer_val); 443 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, 444 eep->ee_index, &dword, 0); 445 } else { 446 EFX_POPULATE_DWORD_2(dword, 447 ERF_DZ_TC_TIMER_MODE, mode, 448 ERF_DZ_TC_TIMER_VAL, timer_val); 449 EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, 450 eep->ee_index, &dword, 0); 451 } 452 453 return (0); 454 455 fail1: 456 EFSYS_PROBE1(fail1, efx_rc_t, rc); 457 458 return (rc); 459 } 460 461 462 #if EFSYS_OPT_QSTATS 463 void 464 ef10_ev_qstats_update( 465 __in efx_evq_t *eep, 466 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) 467 { 468 unsigned int id; 469 470 for (id = 0; id < EV_NQSTATS; id++) { 471 efsys_stat_t *essp = &stat[id]; 472 473 EFSYS_STAT_INCR(essp, eep->ee_stat[id]); 474 eep->ee_stat[id] = 0; 475 } 476 } 477 #endif /* EFSYS_OPT_QSTATS */ 478 479 480 static __checkReturn boolean_t 481 ef10_ev_rx( 482 __in efx_evq_t *eep, 483 __in efx_qword_t *eqp, 484 __in const efx_ev_callbacks_t *eecp, 485 __in_opt void *arg) 486 { 487 efx_nic_t *enp = eep->ee_enp; 488 uint32_t size; 489 uint32_t label; 490 uint32_t mac_class; 491 uint32_t eth_tag_class; 492 uint32_t l3_class; 493 uint32_t l4_class; 494 uint32_t next_read_lbits; 495 uint16_t flags; 496 boolean_t cont; 497 boolean_t should_abort; 498 efx_evq_rxq_state_t *eersp; 499 unsigned int desc_count; 500 unsigned int last_used_id; 501 502 EFX_EV_QSTAT_INCR(eep, EV_RX); 503 504 /* Discard events after RXQ/TXQ errors */ 505 if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) 506 return (B_FALSE); 507 508 /* Basic packet information */ 509 size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); 510 next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); 511 label = 
EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); 512 eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); 513 mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); 514 l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); 515 l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); 516 cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); 517 518 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { 519 /* Drop this event */ 520 return (B_FALSE); 521 } 522 flags = 0; 523 524 if (cont != 0) { 525 /* 526 * This may be part of a scattered frame, or it may be a 527 * truncated frame if scatter is disabled on this RXQ. 528 * Overlength frames can be received if e.g. a VF is configured 529 * for 1500 MTU but connected to a port set to 9000 MTU 530 * (see bug56567). 531 * FIXME: There is not yet any driver that supports scatter on 532 * Huntington. Scatter support is required for OSX. 533 */ 534 flags |= EFX_PKT_CONT; 535 } 536 537 if (mac_class == ESE_DZ_MAC_CLASS_UCAST) 538 flags |= EFX_PKT_UNICAST; 539 540 /* Increment the count of descriptors read */ 541 eersp = &eep->ee_rxq_state[label]; 542 desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & 543 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); 544 eersp->eers_rx_read_ptr += desc_count; 545 546 /* 547 * FIXME: add error checking to make sure this a batched event. 548 * This could also be an aborted scatter, see Bug36629. 
549 */ 550 if (desc_count > 1) { 551 EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); 552 flags |= EFX_PKT_PREFIX_LEN; 553 } 554 555 /* Calculate the index of the last descriptor consumed */ 556 last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; 557 558 /* Check for errors that invalidate checksum and L3/L4 fields */ 559 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { 560 /* RX frame truncated (error flag is misnamed) */ 561 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); 562 flags |= EFX_DISCARD; 563 goto deliver; 564 } 565 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { 566 /* Bad Ethernet frame CRC */ 567 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); 568 flags |= EFX_DISCARD; 569 goto deliver; 570 } 571 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { 572 /* 573 * Hardware parse failed, due to malformed headers 574 * or headers that are too long for the parser. 575 * Headers and checksums must be validated by the host. 576 */ 577 // TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); 578 goto deliver; 579 } 580 581 if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || 582 (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { 583 flags |= EFX_PKT_VLAN_TAGGED; 584 } 585 586 switch (l3_class) { 587 case ESE_DZ_L3_CLASS_IP4: 588 case ESE_DZ_L3_CLASS_IP4_FRAG: 589 flags |= EFX_PKT_IPV4; 590 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { 591 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); 592 } else { 593 flags |= EFX_CKSUM_IPV4; 594 } 595 596 if (l4_class == ESE_DZ_L4_CLASS_TCP) { 597 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); 598 flags |= EFX_PKT_TCP; 599 } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { 600 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); 601 flags |= EFX_PKT_UDP; 602 } else { 603 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); 604 } 605 break; 606 607 case ESE_DZ_L3_CLASS_IP6: 608 case ESE_DZ_L3_CLASS_IP6_FRAG: 609 flags |= EFX_PKT_IPV6; 610 611 if (l4_class == ESE_DZ_L4_CLASS_TCP) { 612 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); 613 flags |= 
EFX_PKT_TCP; 614 } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { 615 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); 616 flags |= EFX_PKT_UDP; 617 } else { 618 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); 619 } 620 break; 621 622 default: 623 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); 624 break; 625 } 626 627 if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { 628 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 629 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); 630 } else { 631 flags |= EFX_CKSUM_TCPUDP; 632 } 633 } 634 635 deliver: 636 /* If we're not discarding the packet then it is ok */ 637 if (~flags & EFX_DISCARD) 638 EFX_EV_QSTAT_INCR(eep, EV_RX_OK); 639 640 EFSYS_ASSERT(eecp->eec_rx != NULL); 641 should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); 642 643 return (should_abort); 644 } 645 646 static __checkReturn boolean_t 647 ef10_ev_tx( 648 __in efx_evq_t *eep, 649 __in efx_qword_t *eqp, 650 __in const efx_ev_callbacks_t *eecp, 651 __in_opt void *arg) 652 { 653 efx_nic_t *enp = eep->ee_enp; 654 uint32_t id; 655 uint32_t label; 656 boolean_t should_abort; 657 658 EFX_EV_QSTAT_INCR(eep, EV_TX); 659 660 /* Discard events after RXQ/TXQ errors */ 661 if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) 662 return (B_FALSE); 663 664 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { 665 /* Drop this event */ 666 return (B_FALSE); 667 } 668 669 /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ 670 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); 671 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); 672 673 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); 674 675 EFSYS_ASSERT(eecp->eec_tx != NULL); 676 should_abort = eecp->eec_tx(arg, label, id); 677 678 return (should_abort); 679 } 680 681 static __checkReturn boolean_t 682 ef10_ev_driver( 683 __in efx_evq_t *eep, 684 __in efx_qword_t *eqp, 685 __in const efx_ev_callbacks_t *eecp, 686 __in_opt void *arg) 687 { 688 unsigned int code; 689 boolean_t should_abort; 690 
691 EFX_EV_QSTAT_INCR(eep, EV_DRIVER); 692 should_abort = B_FALSE; 693 694 code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); 695 switch (code) { 696 case ESE_DZ_DRV_TIMER_EV: { 697 uint32_t id; 698 699 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); 700 701 EFSYS_ASSERT(eecp->eec_timer != NULL); 702 should_abort = eecp->eec_timer(arg, id); 703 break; 704 } 705 706 case ESE_DZ_DRV_WAKE_UP_EV: { 707 uint32_t id; 708 709 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); 710 711 EFSYS_ASSERT(eecp->eec_wake_up != NULL); 712 should_abort = eecp->eec_wake_up(arg, id); 713 break; 714 } 715 716 case ESE_DZ_DRV_START_UP_EV: 717 EFSYS_ASSERT(eecp->eec_initialized != NULL); 718 should_abort = eecp->eec_initialized(arg); 719 break; 720 721 default: 722 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 723 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 724 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 725 break; 726 } 727 728 return (should_abort); 729 } 730 731 static __checkReturn boolean_t 732 ef10_ev_drv_gen( 733 __in efx_evq_t *eep, 734 __in efx_qword_t *eqp, 735 __in const efx_ev_callbacks_t *eecp, 736 __in_opt void *arg) 737 { 738 uint32_t data; 739 boolean_t should_abort; 740 741 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); 742 should_abort = B_FALSE; 743 744 data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); 745 if (data >= ((uint32_t)1 << 16)) { 746 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 747 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 748 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 749 750 return (B_TRUE); 751 } 752 753 EFSYS_ASSERT(eecp->eec_software != NULL); 754 should_abort = eecp->eec_software(arg, (uint16_t)data); 755 756 return (should_abort); 757 } 758 759 static __checkReturn boolean_t 760 ef10_ev_mcdi( 761 __in efx_evq_t *eep, 762 __in efx_qword_t *eqp, 763 __in const efx_ev_callbacks_t *eecp, 764 __in_opt void *arg) 765 { 766 efx_nic_t *enp = eep->ee_enp; 767 unsigned code; 768 boolean_t should_abort = B_FALSE; 769 770 EFX_EV_QSTAT_INCR(eep, 
EV_MCDI_RESPONSE); 771 772 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); 773 switch (code) { 774 case MCDI_EVENT_CODE_BADSSERT: 775 efx_mcdi_ev_death(enp, EINTR); 776 break; 777 778 case MCDI_EVENT_CODE_CMDDONE: 779 efx_mcdi_ev_cpl(enp, 780 MCDI_EV_FIELD(eqp, CMDDONE_SEQ), 781 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), 782 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); 783 break; 784 785 #if EFSYS_OPT_MCDI_PROXY_AUTH 786 case MCDI_EVENT_CODE_PROXY_RESPONSE: 787 /* 788 * This event notifies a function that an authorization request 789 * has been processed. If the request was authorized then the 790 * function can now re-send the original MCDI request. 791 * See SF-113652-SW "SR-IOV Proxied Network Access Control". 792 */ 793 efx_mcdi_ev_proxy_response(enp, 794 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), 795 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); 796 break; 797 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 798 799 case MCDI_EVENT_CODE_LINKCHANGE: { 800 efx_link_mode_t link_mode; 801 802 ef10_phy_link_ev(enp, eqp, &link_mode); 803 should_abort = eecp->eec_link_change(arg, link_mode); 804 break; 805 } 806 807 case MCDI_EVENT_CODE_SENSOREVT: { 808 #if EFSYS_OPT_MON_STATS 809 efx_mon_stat_t id; 810 efx_mon_stat_value_t value; 811 efx_rc_t rc; 812 813 /* Decode monitor stat for MCDI sensor (if supported) */ 814 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { 815 /* Report monitor stat change */ 816 should_abort = eecp->eec_monitor(arg, id, value); 817 } else if (rc == ENOTSUP) { 818 should_abort = eecp->eec_exception(arg, 819 EFX_EXCEPTION_UNKNOWN_SENSOREVT, 820 MCDI_EV_FIELD(eqp, DATA)); 821 } else { 822 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ 823 } 824 #endif 825 break; 826 } 827 828 case MCDI_EVENT_CODE_SCHEDERR: 829 /* Informational only */ 830 break; 831 832 case MCDI_EVENT_CODE_REBOOT: 833 /* Falcon/Siena only (should not been seen with Huntington). 
*/ 834 efx_mcdi_ev_death(enp, EIO); 835 break; 836 837 case MCDI_EVENT_CODE_MC_REBOOT: 838 /* MC_REBOOT event is used for Huntington (EF10) and later. */ 839 efx_mcdi_ev_death(enp, EIO); 840 break; 841 842 case MCDI_EVENT_CODE_MAC_STATS_DMA: 843 #if EFSYS_OPT_MAC_STATS 844 if (eecp->eec_mac_stats != NULL) { 845 eecp->eec_mac_stats(arg, 846 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); 847 } 848 #endif 849 break; 850 851 case MCDI_EVENT_CODE_FWALERT: { 852 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); 853 854 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) 855 should_abort = eecp->eec_exception(arg, 856 EFX_EXCEPTION_FWALERT_SRAM, 857 MCDI_EV_FIELD(eqp, FWALERT_DATA)); 858 else 859 should_abort = eecp->eec_exception(arg, 860 EFX_EXCEPTION_UNKNOWN_FWALERT, 861 MCDI_EV_FIELD(eqp, DATA)); 862 break; 863 } 864 865 case MCDI_EVENT_CODE_TX_ERR: { 866 /* 867 * After a TXQ error is detected, firmware sends a TX_ERR event. 868 * This may be followed by TX completions (which we discard), 869 * and then finally by a TX_FLUSH event. Firmware destroys the 870 * TXQ automatically after sending the TX_FLUSH event. 871 */ 872 enp->en_reset_flags |= EFX_RESET_TXQ_ERR; 873 874 EFSYS_PROBE2(tx_descq_err, 875 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 876 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 877 878 /* Inform the driver that a reset is required. */ 879 eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, 880 MCDI_EV_FIELD(eqp, TX_ERR_DATA)); 881 break; 882 } 883 884 case MCDI_EVENT_CODE_TX_FLUSH: { 885 uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); 886 887 /* 888 * EF10 firmware sends two TX_FLUSH events: one to the txq's 889 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). 890 * We want to wait for all completions, so ignore the events 891 * with TX_FLUSH_TO_DRIVER. 
892 */ 893 if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { 894 should_abort = B_FALSE; 895 break; 896 } 897 898 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); 899 900 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); 901 902 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); 903 should_abort = eecp->eec_txq_flush_done(arg, txq_index); 904 break; 905 } 906 907 case MCDI_EVENT_CODE_RX_ERR: { 908 /* 909 * After an RXQ error is detected, firmware sends an RX_ERR 910 * event. This may be followed by RX events (which we discard), 911 * and then finally by an RX_FLUSH event. Firmware destroys the 912 * RXQ automatically after sending the RX_FLUSH event. 913 */ 914 enp->en_reset_flags |= EFX_RESET_RXQ_ERR; 915 916 EFSYS_PROBE2(rx_descq_err, 917 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 918 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 919 920 /* Inform the driver that a reset is required. */ 921 eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, 922 MCDI_EV_FIELD(eqp, RX_ERR_DATA)); 923 break; 924 } 925 926 case MCDI_EVENT_CODE_RX_FLUSH: { 927 uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); 928 929 /* 930 * EF10 firmware sends two RX_FLUSH events: one to the rxq's 931 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). 932 * We want to wait for all completions, so ignore the events 933 * with RX_FLUSH_TO_DRIVER. 
934 */ 935 if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { 936 should_abort = B_FALSE; 937 break; 938 } 939 940 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); 941 942 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); 943 944 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); 945 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); 946 break; 947 } 948 949 default: 950 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 951 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 952 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 953 break; 954 } 955 956 return (should_abort); 957 } 958 959 void 960 ef10_ev_rxlabel_init( 961 __in efx_evq_t *eep, 962 __in efx_rxq_t *erp, 963 __in unsigned int label) 964 { 965 efx_evq_rxq_state_t *eersp; 966 967 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); 968 eersp = &eep->ee_rxq_state[label]; 969 970 EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); 971 972 eersp->eers_rx_read_ptr = 0; 973 eersp->eers_rx_mask = erp->er_mask; 974 } 975 976 void 977 ef10_ev_rxlabel_fini( 978 __in efx_evq_t *eep, 979 __in unsigned int label) 980 { 981 efx_evq_rxq_state_t *eersp; 982 983 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); 984 eersp = &eep->ee_rxq_state[label]; 985 986 EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); 987 988 eersp->eers_rx_read_ptr = 0; 989 eersp->eers_rx_mask = 0; 990 } 991 992 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ 993