/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t size,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));

	npages = EFX_TXQ_NBUFS(size);
	if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
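
/*
 * Tear down a hardware TX queue with the MC_CMD_FINI_TXQ MCDI request.
 * An MC_CMD_ERR_EALREADY response is not treated as an error, as it
 * indicates the queue has already been finalised.
 */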
static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
			    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_qword_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
	    esmp)) != 0)
		goto fail1;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	EFX_POPULATE_QWORD_4(desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qdestroy(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}
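
/*
 * Enable PIO for this TX queue: sub-allocate a block from one of the
 * adapter's PIO buffers and link that buffer to the queue. On success
 * et_pio_size is non-zero and the block can be written through the
 * write-combined BAR mapping (see ef10_tx_qpio_write()).
 */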
	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
	etp->et_pio_size = 0;
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}
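
/*
 * Copy packet data into this queue's sub-allocated PIO block. The data is
 * written through the write-combined BAR mapping in 64-bit units; a PIO
 * option descriptor referencing the block is posted separately with
 * ef10_tx_qpio_post().
 */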
	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size) {
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in			efx_txq_t *etp,
	__in			size_t pkt_length,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;

	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
	    unsigned int, id, uint32_t, etp->et_pio_offset,
	    size_t, pkt_length);

	EFX_POPULATE_QWORD_5(pio_desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, 1,
	    ESF_DZ_TX_PIO_CONT, 0,
	    ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
	    ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
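
/*
 * Post DMA descriptors for the buffer fragments in eb[], one descriptor per
 * fragment. Fails with ENOSPC if the queue does not have room for all n
 * descriptors. The descriptors are written to the queue memory only; the
 * doorbell is rung separately by ef10_tx_qpush().
 */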
	__checkReturn	efx_rc_t
ef10_tx_qpost(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_buffer_t *eb,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < n; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* No limitations on boundary crossing */
		EFSYS_ASSERT(size <=
		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by, when possible, pushing a TX descriptor at the
 * same time as the doorbell. The descriptor must be added to the TXQ, so that
 * it can be used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass
	 * is enabled on the event queue this transmit queue is attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
	 * descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;

		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
		 */
		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
		dword = oword.eo_dword[2];

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &dword, B_FALSE);
	}
}

	__checkReturn	efx_rc_t
ef10_tx_qdesc_post(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_desc_t *ed,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < n; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
	    unsigned int, added, unsigned int, n);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
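
/*
 * Initialise a single DMA (fragment) descriptor in *edp for the given
 * buffer. The descriptor is written to the queue later, e.g. by
 * ef10_tx_qdesc_post().
 */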
	void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	/* No limitations on boundary crossing */
	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
	    efsys_dma_addr_t, addr,
	    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_KER_TYPE, 0,
	    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
	    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
	    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
	    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

	void
ef10_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t tcp_flags,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

	void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
	    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

	void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t tci,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
	    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_VLAN,
	    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
	    ESF_DZ_TX_VLAN_TAG1, tci);
}


	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */