/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"
#include <machine/atomic.h>

static inline bool
mlx5e_do_send_cqe(struct mlx5e_sq *sq)
{
	sq->cev_counter++;
	/* interleave the CQEs */
	if (sq->cev_counter >= sq->cev_factor) {
		sq->cev_counter = 0;
		return (1);
	}
	return (0);
}

void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
{
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

static void
mlx5e_hash_init(void *arg)
{
	mlx5e_hash_value = m_ether_tcpip_hash_init();
}

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
#endif

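/*
 * Select the send queue for an outgoing mbuf. A rate-limit send tag,
 * the mbuf's flow ID (or a software L3/L4 hash) and the VLAN priority
 * bits are used to pick a channel and traffic class. Returns NULL when
 * no usable SQ is available, e.g. while the channels are closed.
 */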
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	struct mlx5e_channel * volatile *ppch;
	struct mlx5e_channel *pch;
	u32 ch;
	u32 tc;

	ppch = priv->channel;

	/* check if channels are successfully opened */
	if (unlikely(ppch == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

#ifdef RATELIMIT
	if (mb->m_pkthdr.snd_tag != NULL) {
		struct mlx5e_sq *sq;

		/* check for route change */
		if (mb->m_pkthdr.snd_tag->ifp != ifp)
			return (NULL);

		/* get pointer to sendqueue */
		sq = container_of(mb->m_pkthdr.snd_tag,
		    struct mlx5e_rl_channel, m_snd_tag)->sq;

		/* check if valid */
		if (sq != NULL && sq->stopped == 0)
			return (sq);

		/* FALLTHROUGH */
	}
#endif
	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
#ifdef RSS
		u32 temp;

		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
			ch = temp % ch;
		else
#endif
			ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated and not stopped */
	pch = ppch[ch];
	if (likely(pch != NULL && pch->sq[tc].stopped == 0))
		return (&pch->sq[tc]);
	return (NULL);
}

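/*
 * Return the number of packet header bytes to copy inline into the
 * WQE, depending on the send queue's minimum inline mode.
 */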
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{

	switch (sq->min_inline_mode) {
	case MLX5_INLINE_MODE_NONE:
		/*
		 * When inline mode is NONE, we do not need to copy
		 * headers into WQEs, except when vlan tag framing is
		 * requested. Hardware might offload vlan tagging on
		 * transmit. This is a separate capability, which is
		 * known to be disabled on ConnectX-5 due to a hardware
		 * bug RM 931383. If vlan_inline_cap is not present and
		 * the packet has vlan tag, fall back to inlining.
		 */
		if ((mb->m_flags & M_VLANTAG) != 0 &&
		    sq->vlan_inline_cap == 0)
			break;
		return (0);
	case MLX5_INLINE_MODE_L2:
		/*
		 * Due to hardware limitations, when trust mode is
		 * DSCP, the hardware may request MLX5_INLINE_MODE_L2
		 * while it really needs all L2 headers and the 4 first
		 * bytes of the IP header (which include the
		 * TOS/traffic-class).
		 *
		 * To avoid doing a firmware command for querying the
		 * trust state and parsing the mbuf for doing
		 * unnecessary checks (VLAN/eth_type) in the fast path,
		 * we are going for the worst case (22 bytes = 14-byte
		 * Ethernet header + 4-byte VLAN tag + first 4 bytes of
		 * the IP header) if the mb->m_pkthdr.len allows it.
		 */
		if (mb->m_pkthdr.len > ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + 4)
			return (MIN(sq->max_inline, ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN + 4));
		break;
	}
	return (MIN(sq->max_inline, mb->m_pkthdr.len));
}

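/*
 * Return the combined length of the Ethernet, IP/IPv6 and TCP headers,
 * or zero when the headers are not contiguous in the first mbuf or the
 * packet is not TCP. Used to compute the inline header size for TSO.
 */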
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

/*
 * The return value is not going back to the stack because of
 * the drbr
 */
static int
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
{
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;
	struct ifnet *ifp;
	int nsegs;
	int err;
	int x;
	struct mbuf *mb = *mbp;
	u16 ds_cnt;
	u16 ihs;
	u16 pi;
	u8 opcode;

	/*
	 * Return ENOBUFS if the queue is full, this may trigger reinsertion
	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
	 */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
		return (ENOBUFS);
	}

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
			return (ENOMEM);
	}

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	ifp = sq->ifp;

	memset(wqe, 0, sizeof(*wqe));

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	}
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	}
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;
	}
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_get_header_size(mb);
		payload_len = mb->m_pkthdr.len - ihs;
		if (payload_len == 0)
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);

		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		opcode = MLX5_OPCODE_SEND;
		ihs = mlx5e_get_inline_hdr_size(sq, mb);
		sq->mbuf[pi].num_bytes = max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
	}
	if (ihs == 0) {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
			wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
		} else {
			wqe->eth.inline_hdr_sz = 0;
		}
	} else {
		if ((mb->m_flags & M_VLANTAG) != 0) {
			struct ether_vlan_header *eh = (struct ether_vlan_header
			    *)wqe->eth.inline_hdr_start;

			/* Range checks */
			if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
				ihs = (MLX5E_MAX_TX_INLINE -
				    ETHER_VLAN_ENCAP_LEN);
			else if (ihs < ETHER_HDR_LEN) {
				err = EINVAL;
				goto tx_drop;
			}
			m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
			m_adj(mb, ETHER_HDR_LEN);
			/* Insert 4 bytes VLAN tag into data stream */
			eh->evl_proto = eh->evl_encap_proto;
			eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
			eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
			/* Copy rest of header data, if any */
			m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh +
			    1));
			m_adj(mb, ihs - ETHER_HDR_LEN);
			/* Extend header by 4 bytes */
			ihs += ETHER_VLAN_ENCAP_LEN;
		} else {
			m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
			m_adj(mb, ihs);
		}
		wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	}
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			mb = *mbp;
			goto tx_drop;
		}
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	/* Catch errors */
	if (err != 0)
		goto tx_drop;

	/* Make sure all mbuf data, if any, is written to RAM */
	if (nsegs != 0) {
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		m_freem(mb);
		mb = NULL;
	}

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
			continue;
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
		dseg++;
	}

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	else
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	sq->stats.packets++;
	*mbp = NULL;	/* safety clear */
	return (0);

tx_drop:
	sq->stats.dropped++;
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

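/*
 * Poll the send completion queue: reclaim completed WQEs, unload their
 * DMA maps and free the transmitted mbufs. When the SQ state changes
 * from FULL back to READY, the transmit taskqueue is kicked to resume
 * sending.
 */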
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget > 0) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 x;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;

		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;	/* Safety clear */

			if (mb == NULL) {
				if (sq->mbuf[ci].num_bytes == 0) {
					/* NOP */
					sq->stats.nop++;
				}
			} else {
				bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

				/* Free transmitted mbuf */
				m_freem(mb);
			}
			sqcc += sq->mbuf[ci].num_wqebbs;
		}
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

	sq->cc = sqcc;

	if (sq->sq_tq != NULL &&
	    atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}

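/*
 * Transmit with the SQ lock held, using the buf_ring (drbr). The new
 * mbuf, if any, is enqueued first and the ring is then drained until it
 * is empty or the SQ runs out of space. The doorbell is written once at
 * the end so that posted WQEs are batched.
 */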
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if (likely(mb != NULL)) {
		/*
		 * If we can't insert the mbuf into the drbr, try to xmit
		 * anyway. Keep the error we got so we can return it after
		 * the xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);
	}

	/*
	 * Check if the network interface is closed or if the SQ is
	 * being stopped:
	 */
	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0))
		return (err);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next != NULL) {
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
				break;
			}
		}
		drbr_advance(ifp, sq->br);
	}
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

static int
mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	int err = 0;

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sq->stopped != 0)) {
		m_freem(mb);
		return (ENETDOWN);
	}

	/* Do transmit */
	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */
		m_freem(mb);
		err = ENOBUFS;
	}

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}

	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	 */
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);
	} else {
		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	}
	return (err);
}

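/*
 * Driver transmit entry point. Selects a send queue for the mbuf and
 * either transmits directly while holding the SQ lock or, when the lock
 * is contended, enqueues the mbuf on the buf_ring and defers the work
 * to the SQ taskqueue. Rate-limited SQs have no buf_ring and are always
 * handled under the lock.
 */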
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
#ifdef RATELIMIT
		/* Check for route change */
		if (mb->m_pkthdr.snd_tag != NULL &&
		    mb->m_pkthdr.snd_tag->ifp != ifp) {
			/* Free mbuf */
			m_freem(mb);

			/*
			 * Tell upper layers about route change and to
			 * re-transmit this packet:
			 */
			return (EAGAIN);
		}
#endif
		/* Free mbuf */
		m_freem(mb);

		/* Invalid send queue */
		return (ENXIO);
	}

	if (unlikely(sq->br == NULL)) {
		/* rate limited traffic */
		mtx_lock(&sq->lock);
		ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}

void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);
}

void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}