/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)	\
	do {	\
		(_etp)->et_stat[_stat]++;	\
	_NOTE(CONSTANTCONDITION)	\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

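/*
 * Issue MC_CMD_INIT_TXQ to create a hardware TX queue on the MC. The
 * descriptor ring memory (esmp) is passed to the MC as a list of
 * EFX_BUF_SIZE chunks (one 64-bit DMA address per chunk); checksum offload
 * and FATSOv2 behaviour are selected from the EFX_TXQ_* flags.
 */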
static __checkReturn efx_rc_t
efx_mcdi_init_txq(
	__in efx_nic_t *enp,
	__in uint32_t ndescs,
	__in uint32_t target_evq,
	__in uint32_t label,
	__in uint32_t instance,
	__in uint16_t flags,
	__in efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
	    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));

	npages = EFX_TXQ_NBUFS(ndescs);
	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_fini_txq(
	__in efx_nic_t *enp,
	__in uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
	    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

__checkReturn efx_rc_t
ef10_tx_init(
	__in efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

void
ef10_tx_fini(
	__in efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

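/*
 * Create a TX queue: inner (tunnel) checksum offload flags are only accepted
 * if the adapter reports tunnel encapsulation support, the hardware queue is
 * created via MC_CMD_INIT_TXQ, and a no-op option descriptor is then written
 * (reflected in *addedp) to clear any stale entry in the TX push collector.
 */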
__checkReturn efx_rc_t
ef10_tx_qcreate(
	__in efx_nic_t *enp,
	__in unsigned int index,
	__in unsigned int label,
	__in efsys_mem_t *esmp,
	__in size_t ndescs,
	__in uint32_t id,
	__in uint16_t flags,
	__in efx_evq_t *eep,
	__in efx_txq_t *etp,
	__out unsigned int *addedp)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	uint16_t inner_csum;
	efx_qword_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
	if (((flags & inner_csum) != 0) &&
	    (encp->enc_tunnel_encapsulations_supported == 0)) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
	    flags, esmp)) != 0)
		goto fail2;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	EFX_POPULATE_QWORD_6(desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

void
ef10_tx_qdestroy(
	__in efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

__checkReturn efx_rc_t
ef10_tx_qpio_enable(
	__in efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
	    &etp->et_pio_bufnum,
	    &handle,
	    &etp->et_pio_blknum,
	    &etp->et_pio_offset,
	    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
	EFSYS_PROBE(fail2);
	etp->et_pio_size = 0;
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

void
ef10_tx_qpio_disable(
	__in efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}

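/*
 * PIO transmit path: once ef10_tx_qpio_enable() has sub-allocated and linked
 * a PIO block for this TXQ, a small packet would typically be sent by copying
 * it into the block with ef10_tx_qpio_write(), posting a PIO option
 * descriptor with ef10_tx_qpio_post(), and ringing the doorbell with
 * ef10_tx_qpush(). The exact sequence is up to the caller.
 */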
__checkReturn efx_rc_t
ef10_tx_qpio_write(
	__in efx_txq_t *etp,
	__in_ecount(length) uint8_t *buffer,
	__in size_t length,
	__in size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size) {
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

__checkReturn efx_rc_t
ef10_tx_qpio_post(
	__in efx_txq_t *etp,
	__in size_t pkt_length,
	__in unsigned int completed,
	__inout unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;

	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
	    unsigned int, id, uint32_t, etp->et_pio_offset,
	    size_t, pkt_length);

	EFX_POPULATE_QWORD_5(pio_desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, 1,
	    ESF_DZ_TX_PIO_CONT, 0,
	    ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
	    ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

__checkReturn efx_rc_t
ef10_tx_qpost(
	__in efx_txq_t *etp,
	__in_ecount(ndescs) efx_buffer_t *eb,
	__in unsigned int ndescs,
	__in unsigned int completed,
	__inout unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < ndescs; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* No limitations on boundary crossing */
		EFSYS_ASSERT(size <=
		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by, when possible, pushing a TX descriptor at
 * the same time as the doorbell. The descriptor must be added to the TXQ,
 * so that it can be used if the hardware decides not to use the pushed
 * descriptor.
 */
void
ef10_tx_qpush(
	__in efx_txq_t *etp,
	__in unsigned int added,
	__in unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * SF Bug 65776: TSO option descriptors cannot be pushed if pacer
	 * bypass is enabled on the event queue this transmit queue is
	 * attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type
	 * of the descriptor to push, and only push it if it is not a TSO
	 * option descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;

		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
		 */
		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
		dword = oword.eo_dword[2];

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &dword, B_FALSE);
	}
}

__checkReturn efx_rc_t
ef10_tx_qdesc_post(
	__in efx_txq_t *etp,
	__in_ecount(ndescs) efx_desc_t *ed,
	__in unsigned int ndescs,
	__in unsigned int completed,
	__inout unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < ndescs; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
	    unsigned int, added, unsigned int, ndescs);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

void
ef10_tx_qdesc_dma_create(
	__in efx_txq_t *etp,
	__in efsys_dma_addr_t addr,
	__in size_t size,
	__in boolean_t eop,
	__out efx_desc_t *edp)
{
	/* No limitations on boundary crossing */
	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
	    efsys_dma_addr_t, addr,
	    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_KER_TYPE, 0,
	    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
	    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
	    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
	    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

void
ef10_tx_qdesc_tso_create(
	__in efx_txq_t *etp,
	__in uint16_t ipv4_id,
	__in uint32_t tcp_seq,
	__in uint8_t tcp_flags,
	__out efx_desc_t *edp)
{
	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

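/*
 * FATSOv2 uses a pair of TSO option descriptors (hence the requirement that
 * count >= EFX_TX_FATSOV2_OPT_NDESCS): FATSO2A carries the IP ID and TCP
 * sequence number, and FATSO2B carries the TCP MSS.
 */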
void
ef10_tx_qdesc_tso2_create(
	__in efx_txq_t *etp,
	__in uint16_t ipv4_id,
	__in uint32_t tcp_seq,
	__in uint16_t tcp_mss,
	__out_ecount(count) efx_desc_t *edp,
	__in int count)
{
	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
	    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

void
ef10_tx_qdesc_vlantci_create(
	__in efx_txq_t *etp,
	__in uint16_t tci,
	__out efx_desc_t *edp)
{
	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
	    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_VLAN,
	    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
	    ESF_DZ_TX_VLAN_TAG1, tci);
}


__checkReturn efx_rc_t
ef10_tx_qpace(
	__in efx_txq_t *etp,
	__in unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

__checkReturn efx_rc_t
ef10_tx_qflush(
	__in efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted
	 * and that the TXQ has already been destroyed. Callers need to know
	 * that the TXQ flush has completed to avoid waiting until timeout for
	 * a flush done event that will not be delivered.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

void
ef10_tx_qenable(
	__in efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
void
ef10_tx_qstats_update(
	__in efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */