/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t ndescs,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
		MC_CMD_INIT_TXQ_OUT_LEN);
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));

	if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
		rc = EINVAL;
		goto fail1;
	}

	npages = EFX_TXQ_NBUFS(ndescs);
	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
		rc = EINVAL;
		goto fail2;
	}

	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}
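
	/*
	 * Worked example (assuming the standard efx.h definitions of
	 * EFX_TXQ_SIZE, EFX_TXQ_NBUFS and EFX_BUF_SIZE, i.e. 8-byte
	 * descriptors and 4KB buffer pages): a ring of 1024 descriptors
	 * occupies 8192 bytes, so npages is 2 and the loop above programs
	 * two DMA_ADDR entries, each carrying the 64-bit bus address of one
	 * 4KB page split across EFX_DWORD_1 (high) and EFX_DWORD_0 (low).
	 */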

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
		MC_CMD_FINI_TXQ_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	uint16_t inner_csum;
	efx_desc_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
	if (((flags & inner_csum) != 0) &&
	    (encp->enc_tunnel_encapsulations_supported == 0)) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
	    flags, esmp)) != 0)
		goto fail2;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	ef10_tx_qdesc_checksum_create(etp, flags, &desc);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
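
#if 0
/*
 * Illustrative sketch only (not built): one possible create/flush/destroy
 * sequence using the entry points in this file. Drivers normally reach these
 * through the generic efx_tx_*() wrappers; the function below and its error
 * handling are assumptions made for illustration.
 */
static	__checkReturn	efx_rc_t
example_txq_lifecycle(
	__in		efx_nic_t *enp,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs)
{
	unsigned int added = 0;
	efx_rc_t rc;

	/* Create TXQ 0 with outer IPv4 and TCP/UDP checksum offload. */
	if ((rc = ef10_tx_qcreate(enp, 0, 0, esmp, ndescs, 0,
	    EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, eep, etp,
	    &added)) != 0)
		return (rc);

	/* ... post and push descriptors, reap completions ... */

	/* EALREADY means the MC rebooted and the queue is already gone. */
	rc = ef10_tx_qflush(etp);
	if ((rc != 0) && (rc != EALREADY))
		return (rc);

	/* Wait for the TXQ flush done event on the bound EVQ, then: */
	ef10_tx_qdestroy(etp);

	return (0);
}
#endif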

			void
ef10_tx_qdestroy(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
	EFSYS_PROBE(fail2);
	etp->et_pio_size = 0;
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
			return;

		/* Free the sub-allocated PIO block */
		(void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
		    etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size) {
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in		efx_txq_t *etp,
	__in		size_t pkt_length,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;

	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
	    unsigned int, id, uint32_t, etp->et_pio_offset,
	    size_t, pkt_length);

	EFX_POPULATE_QWORD_5(pio_desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, 1,
	    ESF_DZ_TX_PIO_CONT, 0,
	    ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
	    ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
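
#if 0
/*
 * Illustrative sketch only (not built): how the PIO entry points above fit
 * together for a small packet. The caller-supplied buffer, the rounding of
 * the copy length and the doorbell bookkeeping are assumptions made for
 * illustration; "pkt" must be padded so that whole 64-bit words can be read
 * from it.
 */
static	__checkReturn	efx_rc_t
example_pio_send(
	__in		efx_txq_t *etp,
	__in		uint8_t *pkt,
	__in		size_t pkt_length,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	/* Copy length must be a whole number of 64-bit words. */
	size_t write_length = (pkt_length + sizeof (efx_qword_t) - 1) &
	    ~(sizeof (efx_qword_t) - 1);
	efx_rc_t rc;

	/* One-time setup: sub-allocate and link a PIO block to this TXQ. */
	if (etp->et_pio_size == 0 && (rc = ef10_tx_qpio_enable(etp)) != 0)
		return (rc);

	/* Copy the packet into the write-combined PIO mapping. */
	if ((rc = ef10_tx_qpio_write(etp, pkt, write_length, 0)) != 0)
		return (rc);

	/* Queue a PIO option descriptor, then push it with the doorbell. */
	if ((rc = ef10_tx_qpio_post(etp, pkt_length, completed, addedp)) != 0)
		return (rc);
	ef10_tx_qpush(etp, *addedp, *addedp - 1);

	return (0);
}
#endif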

	__checkReturn	efx_rc_t
ef10_tx_qpost(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_buffer_t *eb,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < ndescs; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* No limitations on boundary crossing */
		EFSYS_ASSERT(size <=
		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by, when possible, pushing a TX descriptor at the
 * same time as the doorbell. The descriptor must be added to the TXQ, so that
 * it can be used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
	 * is enabled on the event queue this transmit queue is attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
	 * descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;

		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
		 */
		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
		dword = oword.eo_dword[2];

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &dword, B_FALSE);
	}
}
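
#if 0
/*
 * Illustrative sketch only (not built): the usual "post all fragments, then
 * ring the doorbell once" pattern built from ef10_tx_qpost() and
 * ef10_tx_qpush() above. The fragment array and completion index are
 * assumptions made for illustration.
 */
static	__checkReturn	efx_rc_t
example_dma_send(
	__in			efx_txq_t *etp,
	__in_ecount(nfrags)	efx_buffer_t *frags,
	__in			unsigned int nfrags,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int old_added = *addedp;
	efx_rc_t rc;

	/* frags[nfrags - 1].eb_eop must be set to terminate the packet. */
	if ((rc = ef10_tx_qpost(etp, frags, nfrags, completed, addedp)) != 0)
		return (rc);	/* ENOSPC: ring full until more are reaped */

	/* Push the first newly added descriptor along with the doorbell. */
	ef10_tx_qpush(etp, *addedp, old_added);

	return (0);
}
#endif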

	__checkReturn	efx_rc_t
ef10_tx_qdesc_post(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_desc_t *ed,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
		return (ENOSPC);

	for (i = 0; i < ndescs; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
	    unsigned int, added, unsigned int, ndescs);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);
}

			void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	/* No limitations on boundary crossing */
	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
	    efsys_dma_addr_t, addr,
	    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_KER_TYPE, 0,
	    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
	    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
	    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
	    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

			void
ef10_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t tcp_flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

			void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint16_t outer_ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	_NOTE(ARGUNUSED(etp, count))

	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_5(edp[1].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
	    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
	    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
}
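
#if 0
/*
 * Illustrative sketch only (not built): queueing the two FATSOv2 option
 * descriptors built by ef10_tx_qdesc_tso2_create() above through
 * ef10_tx_qdesc_post(), ahead of the packet's DMA descriptors. The header
 * field values are assumptions made for illustration.
 */
static	__checkReturn	efx_rc_t
example_tso2_post(
	__in		efx_txq_t *etp,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	efx_desc_t descs[EFX_TX_FATSOV2_OPT_NDESCS];

	ef10_tx_qdesc_tso2_create(etp,
	    0x1234,			/* inner IPv4 ID */
	    0x5678,			/* outer IPv4 ID (encapsulated) */
	    0x01000000,			/* TCP sequence number */
	    1448,			/* TCP MSS */
	    descs, EFX_TX_FATSOV2_OPT_NDESCS);

	/* DMA descriptors for the headers and payload would follow. */
	return (ef10_tx_qdesc_post(etp, descs, EFX_TX_FATSOV2_OPT_NDESCS,
	    completed, addedp));
}
#endif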

			void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t tci,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
	    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_VLAN,
	    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
	    ESF_DZ_TX_VLAN_TAG1, tci);
}

			void
ef10_tx_qdesc_checksum_create(
	__in	efx_txq_t *etp,
	__in	uint16_t flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
	    uint32_t, flags);

	EFX_POPULATE_QWORD_6(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
}

	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed. Callers need to know that
	 * the TXQ flush has completed to avoid waiting until timeout for a
	 * flush done event that will not be delivered.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif	/* EFSYS_OPT_QSTATS */

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */