1 /*- 2 * Copyright (c) 2012-2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright notice, 11 * this list of conditions and the following disclaimer in the documentation 12 * and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * The views and conclusions contained in the software and documentation are 27 * those of the authors and should not be interpreted as representing official 28 * policies, either expressed or implied, of the FreeBSD Project. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "efx.h" 35 #include "efx_impl.h" 36 #if EFSYS_OPT_MON_STATS 37 #include "mcdi_mon.h" 38 #endif 39 40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 41 42 #if EFSYS_OPT_QSTATS 43 #define EFX_EV_QSTAT_INCR(_eep, _stat) \ 44 do { \ 45 (_eep)->ee_stat[_stat]++; \ 46 _NOTE(CONSTANTCONDITION) \ 47 } while (B_FALSE) 48 #else 49 #define EFX_EV_QSTAT_INCR(_eep, _stat) 50 #endif 51 52 /* 53 * Non-interrupting event queue requires interrrupting event queue to 54 * refer to for wake-up events even if wake ups are never used. 55 * It could be even non-allocated event queue. 56 */ 57 #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0) 58 59 static __checkReturn boolean_t 60 ef10_ev_rx( 61 __in efx_evq_t *eep, 62 __in efx_qword_t *eqp, 63 __in const efx_ev_callbacks_t *eecp, 64 __in_opt void *arg); 65 66 static __checkReturn boolean_t 67 ef10_ev_tx( 68 __in efx_evq_t *eep, 69 __in efx_qword_t *eqp, 70 __in const efx_ev_callbacks_t *eecp, 71 __in_opt void *arg); 72 73 static __checkReturn boolean_t 74 ef10_ev_driver( 75 __in efx_evq_t *eep, 76 __in efx_qword_t *eqp, 77 __in const efx_ev_callbacks_t *eecp, 78 __in_opt void *arg); 79 80 static __checkReturn boolean_t 81 ef10_ev_drv_gen( 82 __in efx_evq_t *eep, 83 __in efx_qword_t *eqp, 84 __in const efx_ev_callbacks_t *eecp, 85 __in_opt void *arg); 86 87 static __checkReturn boolean_t 88 ef10_ev_mcdi( 89 __in efx_evq_t *eep, 90 __in efx_qword_t *eqp, 91 __in const efx_ev_callbacks_t *eecp, 92 __in_opt void *arg); 93 94 static __checkReturn efx_rc_t 95 efx_mcdi_set_evq_tmr( 96 __in efx_nic_t *enp, 97 __in uint32_t instance, 98 __in uint32_t mode, 99 __in uint32_t timer_ns) 100 { 101 efx_mcdi_req_t req; 102 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN, 103 MC_CMD_SET_EVQ_TMR_OUT_LEN); 104 efx_rc_t rc; 105 106 req.emr_cmd = MC_CMD_SET_EVQ_TMR; 107 req.emr_in_buf = payload; 108 req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; 109 req.emr_out_buf = payload; 110 req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN; 111 112 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance); 113 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns); 114 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns); 115 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode); 116 117 efx_mcdi_execute(enp, &req); 118 119 if (req.emr_rc != 0) { 120 rc = req.emr_rc; 121 goto fail1; 122 } 123 124 if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) { 125 rc = EMSGSIZE; 126 goto fail2; 127 } 128 129 return (0); 130 131 fail2: 132 EFSYS_PROBE(fail2); 133 fail1: 134 EFSYS_PROBE1(fail1, efx_rc_t, rc); 135 136 return (rc); 137 } 138 139 static __checkReturn efx_rc_t 140 efx_mcdi_init_evq( 141 __in efx_nic_t *enp, 142 __in unsigned int instance, 143 __in efsys_mem_t *esmp, 144 __in size_t nevs, 145 __in uint32_t irq, 146 __in uint32_t us, 147 __in uint32_t flags, 148 __in boolean_t low_latency) 149 { 150 efx_mcdi_req_t req; 151 EFX_MCDI_DECLARE_BUF(payload, 152 MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), 153 MC_CMD_INIT_EVQ_OUT_LEN); 154 efx_qword_t *dma_addr; 155 uint64_t addr; 156 int npages; 157 int i; 158 boolean_t interrupting; 159 int ev_cut_through; 160 efx_rc_t rc; 161 162 npages = EFX_EVQ_NBUFS(nevs); 163 if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { 164 rc = EINVAL; 165 goto fail1; 166 } 167 168 req.emr_cmd = MC_CMD_INIT_EVQ; 169 req.emr_in_buf = payload; 170 req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); 171 req.emr_out_buf = payload; 172 req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; 173 174 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); 175 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); 176 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); 177 178 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 179 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 180 181 /* 182 * On Huntington RX and TX event batching can only be requested together 183 * (even if the datapath firmware doesn't actually support RX 184 * batching). If event cut through is enabled no RX batching will occur. 185 * 186 * So always enable RX and TX event batching, and enable event cut 187 * through if we want low latency operation. 188 */ 189 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 190 case EFX_EVQ_FLAGS_TYPE_AUTO: 191 ev_cut_through = low_latency ? 1 : 0; 192 break; 193 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 194 ev_cut_through = 0; 195 break; 196 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 197 ev_cut_through = 1; 198 break; 199 default: 200 rc = EINVAL; 201 goto fail2; 202 } 203 MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, 204 INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting, 205 INIT_EVQ_IN_FLAG_RPTR_DOS, 0, 206 INIT_EVQ_IN_FLAG_INT_ARMD, 0, 207 INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, 208 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 209 INIT_EVQ_IN_FLAG_TX_MERGE, 1); 210 211 /* If the value is zero then disable the timer */ 212 if (us == 0) { 213 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, 214 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 215 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); 216 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); 217 } else { 218 unsigned int ticks; 219 220 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 221 goto fail3; 222 223 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, 224 MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); 225 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); 226 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); 227 } 228 229 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, 230 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 231 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); 232 233 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); 234 addr = EFSYS_MEM_ADDR(esmp); 235 236 for (i = 0; i < npages; i++) { 237 EFX_POPULATE_QWORD_2(*dma_addr, 238 EFX_DWORD_1, (uint32_t)(addr >> 32), 239 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 240 241 dma_addr++; 242 addr += EFX_BUF_SIZE; 243 } 244 245 efx_mcdi_execute(enp, &req); 246 247 if (req.emr_rc != 0) { 248 rc = req.emr_rc; 249 goto fail4; 250 } 251 252 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 253 rc = EMSGSIZE; 254 goto fail5; 255 } 256 257 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 258 259 return (0); 260 261 fail5: 262 EFSYS_PROBE(fail5); 263 fail4: 264 EFSYS_PROBE(fail4); 265 fail3: 266 EFSYS_PROBE(fail3); 267 fail2: 268 EFSYS_PROBE(fail2); 269 fail1: 270 EFSYS_PROBE1(fail1, efx_rc_t, rc); 271 272 return (rc); 273 } 274 275 static __checkReturn efx_rc_t 276 efx_mcdi_init_evq_v2( 277 __in efx_nic_t *enp, 278 __in unsigned int instance, 279 __in efsys_mem_t *esmp, 280 __in size_t nevs, 281 __in uint32_t irq, 282 __in uint32_t us, 283 __in uint32_t flags) 284 { 285 efx_mcdi_req_t req; 286 EFX_MCDI_DECLARE_BUF(payload, 287 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), 288 MC_CMD_INIT_EVQ_V2_OUT_LEN); 289 boolean_t interrupting; 290 unsigned int evq_type; 291 efx_qword_t *dma_addr; 292 uint64_t addr; 293 int npages; 294 int i; 295 efx_rc_t rc; 296 297 npages = EFX_EVQ_NBUFS(nevs); 298 if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { 299 rc = EINVAL; 300 goto fail1; 301 } 302 303 req.emr_cmd = MC_CMD_INIT_EVQ; 304 req.emr_in_buf = payload; 305 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); 306 req.emr_out_buf = payload; 307 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; 308 309 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); 310 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); 311 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); 312 313 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 314 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 315 316 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 317 case EFX_EVQ_FLAGS_TYPE_AUTO: 318 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; 319 break; 320 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 321 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; 322 break; 323 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 324 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; 325 break; 326 default: 327 rc = EINVAL; 328 goto fail2; 329 } 330 MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, 331 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, 332 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, 333 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, 334 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type); 335 336 /* If the value is zero then disable the timer */ 337 if (us == 0) { 338 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 339 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); 340 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); 341 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); 342 } else { 343 unsigned int ticks; 344 345 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 346 goto fail3; 347 348 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 349 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); 350 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); 351 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); 352 } 353 354 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, 355 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); 356 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); 357 358 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); 359 addr = EFSYS_MEM_ADDR(esmp); 360 361 for (i = 0; i < npages; i++) { 362 EFX_POPULATE_QWORD_2(*dma_addr, 363 EFX_DWORD_1, (uint32_t)(addr >> 32), 364 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 365 366 dma_addr++; 367 addr += EFX_BUF_SIZE; 368 } 369 370 efx_mcdi_execute(enp, &req); 371 372 if (req.emr_rc != 0) { 373 rc = req.emr_rc; 374 goto fail4; 375 } 376 377 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { 378 rc = EMSGSIZE; 379 goto fail5; 380 } 381 382 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 383 384 EFSYS_PROBE1(mcdi_evq_flags, uint32_t, 385 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); 386 387 return (0); 388 389 fail5: 390 EFSYS_PROBE(fail5); 391 fail4: 392 EFSYS_PROBE(fail4); 393 fail3: 394 EFSYS_PROBE(fail3); 395 fail2: 396 EFSYS_PROBE(fail2); 397 fail1: 398 EFSYS_PROBE1(fail1, efx_rc_t, rc); 399 400 return (rc); 401 } 402 403 static __checkReturn efx_rc_t 404 efx_mcdi_fini_evq( 405 __in efx_nic_t *enp, 406 __in uint32_t instance) 407 { 408 efx_mcdi_req_t req; 409 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, 410 MC_CMD_FINI_EVQ_OUT_LEN); 411 efx_rc_t rc; 412 413 req.emr_cmd = MC_CMD_FINI_EVQ; 414 req.emr_in_buf = payload; 415 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 416 req.emr_out_buf = payload; 417 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 418 419 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 420 421 efx_mcdi_execute_quiet(enp, &req); 422 423 if (req.emr_rc != 0) { 424 rc = req.emr_rc; 425 goto fail1; 426 } 427 428 return (0); 429 430 fail1: 431 /* 432 * EALREADY is not an error, but indicates that the MC has rebooted and 433 * that the EVQ has already been destroyed. 434 */ 435 if (rc != EALREADY) 436 EFSYS_PROBE1(fail1, efx_rc_t, rc); 437 438 return (rc); 439 } 440 441 __checkReturn efx_rc_t 442 ef10_ev_init( 443 __in efx_nic_t *enp) 444 { 445 _NOTE(ARGUNUSED(enp)) 446 return (0); 447 } 448 449 void 450 ef10_ev_fini( 451 __in efx_nic_t *enp) 452 { 453 _NOTE(ARGUNUSED(enp)) 454 } 455 456 __checkReturn efx_rc_t 457 ef10_ev_qcreate( 458 __in efx_nic_t *enp, 459 __in unsigned int index, 460 __in efsys_mem_t *esmp, 461 __in size_t ndescs, 462 __in uint32_t id, 463 __in uint32_t us, 464 __in uint32_t flags, 465 __in efx_evq_t *eep) 466 { 467 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 468 uint32_t irq; 469 efx_rc_t rc; 470 471 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ 472 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); 473 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); 474 475 if (!ISP2(ndescs) || 476 (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) { 477 rc = EINVAL; 478 goto fail1; 479 } 480 481 if (index >= encp->enc_evq_limit) { 482 rc = EINVAL; 483 goto fail2; 484 } 485 486 if (us > encp->enc_evq_timer_max_us) { 487 rc = EINVAL; 488 goto fail3; 489 } 490 491 /* Set up the handler table */ 492 eep->ee_rx = ef10_ev_rx; 493 eep->ee_tx = ef10_ev_tx; 494 eep->ee_driver = ef10_ev_driver; 495 eep->ee_drv_gen = ef10_ev_drv_gen; 496 eep->ee_mcdi = ef10_ev_mcdi; 497 498 /* Set up the event queue */ 499 /* INIT_EVQ expects function-relative vector number */ 500 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 501 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) { 502 irq = index; 503 } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) { 504 irq = index; 505 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) | 506 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; 507 } else { 508 irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX; 509 } 510 511 /* 512 * Interrupts may be raised for events immediately after the queue is 513 * created. See bug58606. 514 */ 515 516 if (encp->enc_init_evq_v2_supported) { 517 /* 518 * On Medford the low latency license is required to enable RX 519 * and event cut through and to disable RX batching. If event 520 * queue type in flags is auto, we let the firmware decide the 521 * settings to use. If the adapter has a low latency license, 522 * it will choose the best settings for low latency, otherwise 523 * it will choose the best settings for throughput. 524 */ 525 rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us, 526 flags); 527 if (rc != 0) 528 goto fail4; 529 } else { 530 /* 531 * On Huntington we need to specify the settings to use. 532 * If event queue type in flags is auto, we favour throughput 533 * if the adapter is running virtualization supporting firmware 534 * (i.e. the full featured firmware variant) 535 * and latency otherwise. The Ethernet Virtual Bridging 536 * capability is used to make this decision. (Note though that 537 * the low latency firmware variant is also best for 538 * throughput and corresponding type should be specified 539 * to choose it.) 540 */ 541 boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1; 542 rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags, 543 low_latency); 544 if (rc != 0) 545 goto fail5; 546 } 547 548 return (0); 549 550 fail5: 551 EFSYS_PROBE(fail5); 552 fail4: 553 EFSYS_PROBE(fail4); 554 fail3: 555 EFSYS_PROBE(fail3); 556 fail2: 557 EFSYS_PROBE(fail2); 558 fail1: 559 EFSYS_PROBE1(fail1, efx_rc_t, rc); 560 561 return (rc); 562 } 563 564 void 565 ef10_ev_qdestroy( 566 __in efx_evq_t *eep) 567 { 568 efx_nic_t *enp = eep->ee_enp; 569 570 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || 571 enp->en_family == EFX_FAMILY_MEDFORD || 572 enp->en_family == EFX_FAMILY_MEDFORD2); 573 574 (void) efx_mcdi_fini_evq(enp, eep->ee_index); 575 } 576 577 __checkReturn efx_rc_t 578 ef10_ev_qprime( 579 __in efx_evq_t *eep, 580 __in unsigned int count) 581 { 582 efx_nic_t *enp = eep->ee_enp; 583 uint32_t rptr; 584 efx_dword_t dword; 585 586 rptr = count & eep->ee_mask; 587 588 if (enp->en_nic_cfg.enc_bug35388_workaround) { 589 EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > 590 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 591 EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < 592 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 593 594 EFX_POPULATE_DWORD_2(dword, 595 ERF_DD_EVQ_IND_RPTR_FLAGS, 596 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 597 ERF_DD_EVQ_IND_RPTR, 598 (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); 599 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, 600 &dword, B_FALSE); 601 602 EFX_POPULATE_DWORD_2(dword, 603 ERF_DD_EVQ_IND_RPTR_FLAGS, 604 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 605 ERF_DD_EVQ_IND_RPTR, 606 rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 607 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, 608 &dword, B_FALSE); 609 } else { 610 EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); 611 EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, 612 &dword, B_FALSE); 613 } 614 615 return (0); 616 } 617 618 static __checkReturn efx_rc_t 619 efx_mcdi_driver_event( 620 __in efx_nic_t *enp, 621 __in uint32_t evq, 622 __in efx_qword_t data) 623 { 624 efx_mcdi_req_t req; 625 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN, 626 MC_CMD_DRIVER_EVENT_OUT_LEN); 627 efx_rc_t rc; 628 629 req.emr_cmd = MC_CMD_DRIVER_EVENT; 630 req.emr_in_buf = payload; 631 req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; 632 req.emr_out_buf = payload; 633 req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; 634 635 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); 636 637 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, 638 EFX_QWORD_FIELD(data, EFX_DWORD_0)); 639 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, 640 EFX_QWORD_FIELD(data, EFX_DWORD_1)); 641 642 efx_mcdi_execute(enp, &req); 643 644 if (req.emr_rc != 0) { 645 rc = req.emr_rc; 646 goto fail1; 647 } 648 649 return (0); 650 651 fail1: 652 EFSYS_PROBE1(fail1, efx_rc_t, rc); 653 654 return (rc); 655 } 656 657 void 658 ef10_ev_qpost( 659 __in efx_evq_t *eep, 660 __in uint16_t data) 661 { 662 efx_nic_t *enp = eep->ee_enp; 663 efx_qword_t event; 664 665 EFX_POPULATE_QWORD_3(event, 666 ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, 667 ESF_DZ_DRV_SUB_CODE, 0, 668 ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); 669 670 (void) efx_mcdi_driver_event(enp, eep->ee_index, event); 671 } 672 673 __checkReturn efx_rc_t 674 ef10_ev_qmoderate( 675 __in efx_evq_t *eep, 676 __in unsigned int us) 677 { 678 efx_nic_t *enp = eep->ee_enp; 679 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 680 efx_dword_t dword; 681 uint32_t mode; 682 efx_rc_t rc; 683 684 /* Check that hardware and MCDI use the same timer MODE values */ 685 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == 686 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); 687 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == 688 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); 689 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == 690 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); 691 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == 692 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); 693 694 if (us > encp->enc_evq_timer_max_us) { 695 rc = EINVAL; 696 goto fail1; 697 } 698 699 /* If the value is zero then disable the timer */ 700 if (us == 0) { 701 mode = FFE_CZ_TIMER_MODE_DIS; 702 } else { 703 mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; 704 } 705 706 if (encp->enc_bug61265_workaround) { 707 uint32_t ns = us * 1000; 708 709 rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns); 710 if (rc != 0) 711 goto fail2; 712 } else { 713 unsigned int ticks; 714 715 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 716 goto fail3; 717 718 if (encp->enc_bug35388_workaround) { 719 EFX_POPULATE_DWORD_3(dword, 720 ERF_DD_EVQ_IND_TIMER_FLAGS, 721 EFE_DD_EVQ_IND_TIMER_FLAGS, 722 ERF_DD_EVQ_IND_TIMER_MODE, mode, 723 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 724 EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, 725 eep->ee_index, &dword, 0); 726 } else { 727 /* 728 * NOTE: The TMR_REL field introduced in Medford2 is 729 * ignored on earlier EF10 controllers. See bug66418 730 * comment 9 for details. 731 */ 732 EFX_POPULATE_DWORD_3(dword, 733 ERF_DZ_TC_TIMER_MODE, mode, 734 ERF_DZ_TC_TIMER_VAL, ticks, 735 ERF_FZ_TC_TMR_REL_VAL, ticks); 736 EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG, 737 eep->ee_index, &dword, 0); 738 } 739 } 740 741 return (0); 742 743 fail3: 744 EFSYS_PROBE(fail3); 745 fail2: 746 EFSYS_PROBE(fail2); 747 fail1: 748 EFSYS_PROBE1(fail1, efx_rc_t, rc); 749 750 return (rc); 751 } 752 753 #if EFSYS_OPT_QSTATS 754 void 755 ef10_ev_qstats_update( 756 __in efx_evq_t *eep, 757 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) 758 { 759 unsigned int id; 760 761 for (id = 0; id < EV_NQSTATS; id++) { 762 efsys_stat_t *essp = &stat[id]; 763 764 EFSYS_STAT_INCR(essp, eep->ee_stat[id]); 765 eep->ee_stat[id] = 0; 766 } 767 } 768 #endif /* EFSYS_OPT_QSTATS */ 769 770 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER 771 772 static __checkReturn boolean_t 773 ef10_ev_rx_packed_stream( 774 __in efx_evq_t *eep, 775 __in efx_qword_t *eqp, 776 __in const efx_ev_callbacks_t *eecp, 777 __in_opt void *arg) 778 { 779 uint32_t label; 780 uint32_t pkt_count_lbits; 781 uint16_t flags; 782 boolean_t should_abort; 783 efx_evq_rxq_state_t *eersp; 784 unsigned int pkt_count; 785 unsigned int current_id; 786 boolean_t new_buffer; 787 788 pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); 789 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); 790 new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE); 791 792 flags = 0; 793 794 eersp = &eep->ee_rxq_state[label]; 795 796 /* 797 * RX_DSC_PTR_LBITS has least significant bits of the global 798 * (not per-buffer) packet counter. It is guaranteed that 799 * maximum number of completed packets fits in lbits-mask. 800 * So, modulo lbits-mask arithmetic should be used to calculate 801 * packet counter increment. 802 */ 803 pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) & 804 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); 805 eersp->eers_rx_stream_npackets += pkt_count; 806 807 if (new_buffer) { 808 flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; 809 #if EFSYS_OPT_RX_PACKED_STREAM 810 /* 811 * If both packed stream and equal stride super-buffer 812 * modes are compiled in, in theory credits should be 813 * be maintained for packed stream only, but right now 814 * these modes are not distinguished in the event queue 815 * Rx queue state and it is OK to increment the counter 816 * regardless (it might be event cheaper than branching 817 * since neighbour structure member are updated as well). 818 */ 819 eersp->eers_rx_packed_stream_credits++; 820 #endif 821 eersp->eers_rx_read_ptr++; 822 } 823 current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; 824 825 /* Check for errors that invalidate checksum and L3/L4 fields */ 826 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { 827 /* RX frame truncated */ 828 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); 829 flags |= EFX_DISCARD; 830 goto deliver; 831 } 832 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { 833 /* Bad Ethernet frame CRC */ 834 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); 835 flags |= EFX_DISCARD; 836 goto deliver; 837 } 838 839 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { 840 flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE; 841 goto deliver; 842 } 843 844 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) 845 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); 846 847 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) 848 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); 849 850 deliver: 851 /* If we're not discarding the packet then it is ok */ 852 if (~flags & EFX_DISCARD) 853 EFX_EV_QSTAT_INCR(eep, EV_RX_OK); 854 855 EFSYS_ASSERT(eecp->eec_rx_ps != NULL); 856 should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count, 857 flags); 858 859 return (should_abort); 860 } 861 862 #endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */ 863 864 static __checkReturn boolean_t 865 ef10_ev_rx( 866 __in efx_evq_t *eep, 867 __in efx_qword_t *eqp, 868 __in const efx_ev_callbacks_t *eecp, 869 __in_opt void *arg) 870 { 871 efx_nic_t *enp = eep->ee_enp; 872 uint32_t size; 873 uint32_t label; 874 uint32_t mac_class; 875 uint32_t eth_tag_class; 876 uint32_t l3_class; 877 uint32_t l4_class; 878 uint32_t next_read_lbits; 879 uint16_t flags; 880 boolean_t cont; 881 boolean_t should_abort; 882 efx_evq_rxq_state_t *eersp; 883 unsigned int desc_count; 884 unsigned int last_used_id; 885 886 EFX_EV_QSTAT_INCR(eep, EV_RX); 887 888 /* Discard events after RXQ/TXQ errors, or hardware not available */ 889 if (enp->en_reset_flags & 890 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL)) 891 return (B_FALSE); 892 893 /* Basic packet information */ 894 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); 895 eersp = &eep->ee_rxq_state[label]; 896 897 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER 898 /* 899 * Packed stream events are very different, 900 * so handle them separately 901 */ 902 if (eersp->eers_rx_packed_stream) 903 return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg)); 904 #endif 905 906 size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); 907 cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); 908 next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); 909 eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); 910 mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); 911 l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); 912 913 /* 914 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only 915 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field 916 * and values for all EF10 controllers. 917 */ 918 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN); 919 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); 920 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); 921 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN); 922 923 l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS); 924 925 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { 926 /* Drop this event */ 927 return (B_FALSE); 928 } 929 flags = 0; 930 931 if (cont != 0) { 932 /* 933 * This may be part of a scattered frame, or it may be a 934 * truncated frame if scatter is disabled on this RXQ. 935 * Overlength frames can be received if e.g. a VF is configured 936 * for 1500 MTU but connected to a port set to 9000 MTU 937 * (see bug56567). 938 * FIXME: There is not yet any driver that supports scatter on 939 * Huntington. Scatter support is required for OSX. 940 */ 941 flags |= EFX_PKT_CONT; 942 } 943 944 if (mac_class == ESE_DZ_MAC_CLASS_UCAST) 945 flags |= EFX_PKT_UNICAST; 946 947 /* Increment the count of descriptors read */ 948 desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & 949 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); 950 eersp->eers_rx_read_ptr += desc_count; 951 952 /* 953 * FIXME: add error checking to make sure this a batched event. 954 * This could also be an aborted scatter, see Bug36629. 955 */ 956 if (desc_count > 1) { 957 EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); 958 flags |= EFX_PKT_PREFIX_LEN; 959 } 960 961 /* Calculate the index of the last descriptor consumed */ 962 last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; 963 964 /* Check for errors that invalidate checksum and L3/L4 fields */ 965 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { 966 /* RX frame truncated */ 967 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); 968 flags |= EFX_DISCARD; 969 goto deliver; 970 } 971 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { 972 /* Bad Ethernet frame CRC */ 973 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); 974 flags |= EFX_DISCARD; 975 goto deliver; 976 } 977 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { 978 /* 979 * Hardware parse failed, due to malformed headers 980 * or headers that are too long for the parser. 981 * Headers and checksums must be validated by the host. 982 */ 983 /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ 984 goto deliver; 985 } 986 987 if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || 988 (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { 989 flags |= EFX_PKT_VLAN_TAGGED; 990 } 991 992 switch (l3_class) { 993 case ESE_DZ_L3_CLASS_IP4: 994 case ESE_DZ_L3_CLASS_IP4_FRAG: 995 flags |= EFX_PKT_IPV4; 996 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { 997 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); 998 } else { 999 flags |= EFX_CKSUM_IPV4; 1000 } 1001 1002 /* 1003 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is 1004 * only 2 bits wide on Medford2. Check it is safe to use the 1005 * Medford2 field and values for all EF10 controllers. 1006 */ 1007 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == 1008 ESF_DE_RX_L4_CLASS_LBN); 1009 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); 1010 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); 1011 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == 1012 ESE_DE_L4_CLASS_UNKNOWN); 1013 1014 if (l4_class == ESE_FZ_L4_CLASS_TCP) { 1015 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); 1016 flags |= EFX_PKT_TCP; 1017 } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { 1018 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); 1019 flags |= EFX_PKT_UDP; 1020 } else { 1021 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); 1022 } 1023 break; 1024 1025 case ESE_DZ_L3_CLASS_IP6: 1026 case ESE_DZ_L3_CLASS_IP6_FRAG: 1027 flags |= EFX_PKT_IPV6; 1028 1029 /* 1030 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is 1031 * only 2 bits wide on Medford2. Check it is safe to use the 1032 * Medford2 field and values for all EF10 controllers. 1033 */ 1034 EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == 1035 ESF_DE_RX_L4_CLASS_LBN); 1036 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); 1037 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); 1038 EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == 1039 ESE_DE_L4_CLASS_UNKNOWN); 1040 1041 if (l4_class == ESE_FZ_L4_CLASS_TCP) { 1042 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); 1043 flags |= EFX_PKT_TCP; 1044 } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { 1045 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); 1046 flags |= EFX_PKT_UDP; 1047 } else { 1048 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); 1049 } 1050 break; 1051 1052 default: 1053 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); 1054 break; 1055 } 1056 1057 if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { 1058 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 1059 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); 1060 } else { 1061 flags |= EFX_CKSUM_TCPUDP; 1062 } 1063 } 1064 1065 deliver: 1066 /* If we're not discarding the packet then it is ok */ 1067 if (~flags & EFX_DISCARD) 1068 EFX_EV_QSTAT_INCR(eep, EV_RX_OK); 1069 1070 EFSYS_ASSERT(eecp->eec_rx != NULL); 1071 should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); 1072 1073 return (should_abort); 1074 } 1075 1076 static __checkReturn boolean_t 1077 ef10_ev_tx( 1078 __in efx_evq_t *eep, 1079 __in efx_qword_t *eqp, 1080 __in const efx_ev_callbacks_t *eecp, 1081 __in_opt void *arg) 1082 { 1083 efx_nic_t *enp = eep->ee_enp; 1084 uint32_t id; 1085 uint32_t label; 1086 boolean_t should_abort; 1087 1088 EFX_EV_QSTAT_INCR(eep, EV_TX); 1089 1090 /* Discard events after RXQ/TXQ errors, or hardware not available */ 1091 if (enp->en_reset_flags & 1092 (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL)) 1093 return (B_FALSE); 1094 1095 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { 1096 /* Drop this event */ 1097 return (B_FALSE); 1098 } 1099 1100 /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ 1101 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); 1102 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); 1103 1104 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); 1105 1106 EFSYS_ASSERT(eecp->eec_tx != NULL); 1107 should_abort = eecp->eec_tx(arg, label, id); 1108 1109 return (should_abort); 1110 } 1111 1112 static __checkReturn boolean_t 1113 ef10_ev_driver( 1114 __in efx_evq_t *eep, 1115 __in efx_qword_t *eqp, 1116 __in const efx_ev_callbacks_t *eecp, 1117 __in_opt void *arg) 1118 { 1119 unsigned int code; 1120 boolean_t should_abort; 1121 1122 EFX_EV_QSTAT_INCR(eep, EV_DRIVER); 1123 should_abort = B_FALSE; 1124 1125 code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); 1126 switch (code) { 1127 case ESE_DZ_DRV_TIMER_EV: { 1128 uint32_t id; 1129 1130 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); 1131 1132 EFSYS_ASSERT(eecp->eec_timer != NULL); 1133 should_abort = eecp->eec_timer(arg, id); 1134 break; 1135 } 1136 1137 case ESE_DZ_DRV_WAKE_UP_EV: { 1138 uint32_t id; 1139 1140 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); 1141 1142 EFSYS_ASSERT(eecp->eec_wake_up != NULL); 1143 should_abort = eecp->eec_wake_up(arg, id); 1144 break; 1145 } 1146 1147 case ESE_DZ_DRV_START_UP_EV: 1148 EFSYS_ASSERT(eecp->eec_initialized != NULL); 1149 should_abort = eecp->eec_initialized(arg); 1150 break; 1151 1152 default: 1153 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 1154 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1155 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1156 break; 1157 } 1158 1159 return (should_abort); 1160 } 1161 1162 static __checkReturn boolean_t 1163 ef10_ev_drv_gen( 1164 __in efx_evq_t *eep, 1165 __in efx_qword_t *eqp, 1166 __in const efx_ev_callbacks_t *eecp, 1167 __in_opt void *arg) 1168 { 1169 uint32_t data; 1170 boolean_t should_abort; 1171 1172 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); 1173 should_abort = B_FALSE; 1174 1175 data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); 1176 if (data >= ((uint32_t)1 << 16)) { 1177 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 1178 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1179 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1180 1181 return (B_TRUE); 1182 } 1183 1184 EFSYS_ASSERT(eecp->eec_software != NULL); 1185 should_abort = eecp->eec_software(arg, (uint16_t)data); 1186 1187 return (should_abort); 1188 } 1189 1190 static __checkReturn boolean_t 1191 ef10_ev_mcdi( 1192 __in efx_evq_t *eep, 1193 __in efx_qword_t *eqp, 1194 __in const efx_ev_callbacks_t *eecp, 1195 __in_opt void *arg) 1196 { 1197 efx_nic_t *enp = eep->ee_enp; 1198 unsigned int code; 1199 boolean_t should_abort = B_FALSE; 1200 1201 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); 1202 1203 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); 1204 switch (code) { 1205 case MCDI_EVENT_CODE_BADSSERT: 1206 efx_mcdi_ev_death(enp, EINTR); 1207 break; 1208 1209 case MCDI_EVENT_CODE_CMDDONE: 1210 efx_mcdi_ev_cpl(enp, 1211 MCDI_EV_FIELD(eqp, CMDDONE_SEQ), 1212 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), 1213 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); 1214 break; 1215 1216 #if EFSYS_OPT_MCDI_PROXY_AUTH 1217 case MCDI_EVENT_CODE_PROXY_RESPONSE: 1218 /* 1219 * This event notifies a function that an authorization request 1220 * has been processed. If the request was authorized then the 1221 * function can now re-send the original MCDI request. 1222 * See SF-113652-SW "SR-IOV Proxied Network Access Control". 1223 */ 1224 efx_mcdi_ev_proxy_response(enp, 1225 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), 1226 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); 1227 break; 1228 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 1229 1230 case MCDI_EVENT_CODE_LINKCHANGE: { 1231 efx_link_mode_t link_mode; 1232 1233 ef10_phy_link_ev(enp, eqp, &link_mode); 1234 should_abort = eecp->eec_link_change(arg, link_mode); 1235 break; 1236 } 1237 1238 case MCDI_EVENT_CODE_SENSOREVT: { 1239 #if EFSYS_OPT_MON_STATS 1240 efx_mon_stat_t id; 1241 efx_mon_stat_value_t value; 1242 efx_rc_t rc; 1243 1244 /* Decode monitor stat for MCDI sensor (if supported) */ 1245 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { 1246 /* Report monitor stat change */ 1247 should_abort = eecp->eec_monitor(arg, id, value); 1248 } else if (rc == ENOTSUP) { 1249 should_abort = eecp->eec_exception(arg, 1250 EFX_EXCEPTION_UNKNOWN_SENSOREVT, 1251 MCDI_EV_FIELD(eqp, DATA)); 1252 } else { 1253 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ 1254 } 1255 #endif 1256 break; 1257 } 1258 1259 case MCDI_EVENT_CODE_SCHEDERR: 1260 /* Informational only */ 1261 break; 1262 1263 case MCDI_EVENT_CODE_REBOOT: 1264 /* Falcon/Siena only (should not been seen with Huntington). */ 1265 efx_mcdi_ev_death(enp, EIO); 1266 break; 1267 1268 case MCDI_EVENT_CODE_MC_REBOOT: 1269 /* MC_REBOOT event is used for Huntington (EF10) and later. */ 1270 efx_mcdi_ev_death(enp, EIO); 1271 break; 1272 1273 case MCDI_EVENT_CODE_MAC_STATS_DMA: 1274 #if EFSYS_OPT_MAC_STATS 1275 if (eecp->eec_mac_stats != NULL) { 1276 eecp->eec_mac_stats(arg, 1277 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); 1278 } 1279 #endif 1280 break; 1281 1282 case MCDI_EVENT_CODE_FWALERT: { 1283 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); 1284 1285 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) 1286 should_abort = eecp->eec_exception(arg, 1287 EFX_EXCEPTION_FWALERT_SRAM, 1288 MCDI_EV_FIELD(eqp, FWALERT_DATA)); 1289 else 1290 should_abort = eecp->eec_exception(arg, 1291 EFX_EXCEPTION_UNKNOWN_FWALERT, 1292 MCDI_EV_FIELD(eqp, DATA)); 1293 break; 1294 } 1295 1296 case MCDI_EVENT_CODE_TX_ERR: { 1297 /* 1298 * After a TXQ error is detected, firmware sends a TX_ERR event. 1299 * This may be followed by TX completions (which we discard), 1300 * and then finally by a TX_FLUSH event. Firmware destroys the 1301 * TXQ automatically after sending the TX_FLUSH event. 1302 */ 1303 enp->en_reset_flags |= EFX_RESET_TXQ_ERR; 1304 1305 EFSYS_PROBE2(tx_descq_err, 1306 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1307 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1308 1309 /* Inform the driver that a reset is required. */ 1310 eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, 1311 MCDI_EV_FIELD(eqp, TX_ERR_DATA)); 1312 break; 1313 } 1314 1315 case MCDI_EVENT_CODE_TX_FLUSH: { 1316 uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); 1317 1318 /* 1319 * EF10 firmware sends two TX_FLUSH events: one to the txq's 1320 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). 1321 * We want to wait for all completions, so ignore the events 1322 * with TX_FLUSH_TO_DRIVER. 1323 */ 1324 if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { 1325 should_abort = B_FALSE; 1326 break; 1327 } 1328 1329 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); 1330 1331 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); 1332 1333 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); 1334 should_abort = eecp->eec_txq_flush_done(arg, txq_index); 1335 break; 1336 } 1337 1338 case MCDI_EVENT_CODE_RX_ERR: { 1339 /* 1340 * After an RXQ error is detected, firmware sends an RX_ERR 1341 * event. This may be followed by RX events (which we discard), 1342 * and then finally by an RX_FLUSH event. Firmware destroys the 1343 * RXQ automatically after sending the RX_FLUSH event. 1344 */ 1345 enp->en_reset_flags |= EFX_RESET_RXQ_ERR; 1346 1347 EFSYS_PROBE2(rx_descq_err, 1348 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1349 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1350 1351 /* Inform the driver that a reset is required. */ 1352 eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, 1353 MCDI_EV_FIELD(eqp, RX_ERR_DATA)); 1354 break; 1355 } 1356 1357 case MCDI_EVENT_CODE_RX_FLUSH: { 1358 uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); 1359 1360 /* 1361 * EF10 firmware sends two RX_FLUSH events: one to the rxq's 1362 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). 1363 * We want to wait for all completions, so ignore the events 1364 * with RX_FLUSH_TO_DRIVER. 1365 */ 1366 if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { 1367 should_abort = B_FALSE; 1368 break; 1369 } 1370 1371 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); 1372 1373 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); 1374 1375 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); 1376 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); 1377 break; 1378 } 1379 1380 default: 1381 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 1382 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1383 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1384 break; 1385 } 1386 1387 return (should_abort); 1388 } 1389 1390 void 1391 ef10_ev_rxlabel_init( 1392 __in efx_evq_t *eep, 1393 __in efx_rxq_t *erp, 1394 __in unsigned int label, 1395 __in efx_rxq_type_t type) 1396 { 1397 efx_evq_rxq_state_t *eersp; 1398 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER 1399 boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); 1400 boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER); 1401 #endif 1402 1403 _NOTE(ARGUNUSED(type)) 1404 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); 1405 eersp = &eep->ee_rxq_state[label]; 1406 1407 EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); 1408 1409 #if EFSYS_OPT_RX_PACKED_STREAM 1410 /* 1411 * For packed stream modes, the very first event will 1412 * have a new buffer flag set, so it will be incremented, 1413 * yielding the correct pointer. That results in a simpler 1414 * code than trying to detect start-of-the-world condition 1415 * in the event handler. 1416 */ 1417 eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0; 1418 #else 1419 eersp->eers_rx_read_ptr = 0; 1420 #endif 1421 eersp->eers_rx_mask = erp->er_mask; 1422 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER 1423 eersp->eers_rx_stream_npackets = 0; 1424 eersp->eers_rx_packed_stream = packed_stream || es_super_buffer; 1425 #endif 1426 #if EFSYS_OPT_RX_PACKED_STREAM 1427 if (packed_stream) { 1428 eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / 1429 EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, 1430 EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE); 1431 EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0); 1432 /* 1433 * A single credit is allocated to the queue when it is started. 1434 * It is immediately spent by the first packet which has NEW 1435 * BUFFER flag set, though, but still we shall take into 1436 * account, as to not wrap around the maximum number of credits 1437 * accidentally 1438 */ 1439 eersp->eers_rx_packed_stream_credits--; 1440 EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=, 1441 EFX_RX_PACKED_STREAM_MAX_CREDITS); 1442 } 1443 #endif 1444 } 1445 1446 void 1447 ef10_ev_rxlabel_fini( 1448 __in efx_evq_t *eep, 1449 __in unsigned int label) 1450 { 1451 efx_evq_rxq_state_t *eersp; 1452 1453 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); 1454 eersp = &eep->ee_rxq_state[label]; 1455 1456 EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); 1457 1458 eersp->eers_rx_read_ptr = 0; 1459 eersp->eers_rx_mask = 0; 1460 #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER 1461 eersp->eers_rx_stream_npackets = 0; 1462 eersp->eers_rx_packed_stream = B_FALSE; 1463 #endif 1464 #if EFSYS_OPT_RX_PACKED_STREAM 1465 eersp->eers_rx_packed_stream_credits = 0; 1466 #endif 1467 } 1468 1469 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ 1470