/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

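/*
 * Illustrative note (added commentary, not from the original authors): a
 * driver that polls its event queues and creates them with
 * EFX_EVQ_FLAGS_NOTIFY_DISABLED still needs event queue 0 to exist as an
 * interrupting queue, since ef10_ev_qcreate() below points the IRQ_NUM of
 * every non-interrupting queue at EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX.
 */
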
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

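/*
 * Note (added commentary): MC_CMD_INIT_EVQ is issued in two encodings
 * below. With the original encoding (efx_mcdi_init_evq) the driver must
 * choose the cut-through and event merging settings itself; with the V2
 * encoding (efx_mcdi_init_evq_v2) the driver passes an event queue type
 * and the firmware picks the settings, reporting its choice in
 * INIT_EVQ_V2_OUT_FLAGS.
 */
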
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support
	 * RX batching). If event cut-through is enabled no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

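/*
 * Note on the DMA layout above (added commentary, an assumption based on
 * the address loop rather than a statement from the original authors):
 * the event ring in esmp is a single contiguous allocation of
 * EFX_EVQ_SIZE(nevs) bytes, which the request describes to firmware as
 * npages == EFX_EVQ_NBUFS(nevs) buffers of EFX_BUF_SIZE bytes each.
 * The same layout is used by the V2 encoding below.
 */
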
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted
	 * and that the EVQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}



	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

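/*
 * Illustrative usage (added commentary; an assumption about callers, not
 * part of this file): drivers do not call ef10_ev_qcreate() directly, but
 * reach it through the common efx_ev_qcreate() entry point, e.g.
 *
 *	efx_evq_t *eep;
 *
 *	rc = efx_ev_qcreate(enp, index, esmp, ndescs, 0, timer_us,
 *	    EFX_EVQ_FLAGS_TYPE_AUTO | EFX_EVQ_FLAGS_NOTIFY_INTERRUPT,
 *	    &eep);
 */
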
	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable
		 * RX and event cut-through and to disable RX batching. If
		 * the event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization
		 * supporting firmware (i.e. the full featured firmware
		 * variant), and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

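/*
 * Note (added commentary): EF10 hardware has no direct register interface
 * for posting driver-generated events, so ef10_ev_qpost() below asks the
 * MC to post the event on the driver's behalf via MC_CMD_DRIVER_EVENT.
 */
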
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

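/*
 * Packed stream overview (added commentary): in this mode a single RX
 * event completes a batch of packets laid out back-to-back in large
 * (64KB to 1MB) buffers. ESF_DZ_RX_DSC_PTR_LBITS carries the low bits of
 * a global packet counter and ESF_DZ_RX_EV_ROTATE signals rotation to the
 * next buffer; the handler below turns these into a packet count and a
 * buffer credit.
 */
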
#if EFSYS_OPT_RX_PACKED_STREAM

static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS has the least significant bits of the global
	 * (not per-buffer) packet counter. The maximum number of packets
	 * completed at once is guaranteed to fit in the lbits mask, so
	 * modulo arithmetic over that mask yields the packet counter
	 * increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
		eersp->eers_rx_packed_stream_credits++;
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM */

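/*
 * Note (added commentary): when a single RX event completes several
 * descriptors (desc_count > 1), the event's byte count does not describe
 * the individual packets, so ef10_ev_rx() below sets EFX_PKT_PREFIX_LEN
 * and the driver must take each packet's length from its RX prefix.
 */
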
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * Packed stream events are very different,
	 * so handle them separately.
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is
		 * configured for 1500 MTU but connected to a port set to
		 * 9000 MTU (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter
		 * on Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization
		 * request has been processed. If the request was authorized
		 * then the function can now re-send the original MCDI
		 * request. See SF-113652-SW "SR-IOV Proxied Network Access
		 * Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR
		 * event. This may be followed by TX completions (which we
		 * discard), and then finally by a TX_FLUSH event. Firmware
		 * destroys the TXQ automatically after sending the TX_FLUSH
		 * event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER
		 * set). We want to wait for all completions, so ignore the
		 * events with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we
		 * discard), and then finally by an RX_FLUSH event. Firmware
		 * destroys the RXQ automatically after sending the RX_FLUSH
		 * event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER
		 * set). We want to wait for all completions, so ignore the
		 * events with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

			void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
	boolean_t packed_stream = (type >= EFX_RXQ_TYPE_PACKED_STREAM_1M) &&
	    (type <= EFX_RXQ_TYPE_PACKED_STREAM_64K);

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * In packed stream modes, the very first event has the new buffer
	 * flag set, so the read pointer is incremented immediately, yielding
	 * the correct pointer. This results in simpler code than trying to
	 * detect the start-of-the-world condition in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream;
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW BUFFER flag set, but it must still be
		 * accounted for here so that the credit count does not
		 * accidentally wrap past the maximum.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#else
	EFSYS_ASSERT(!packed_stream);
#endif
}

			void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */