/*-
 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>
#include <machine/in_cksum.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>

static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
	bus_dma_segment_t segs[MLX5E_MAX_BUSDMA_RX_SEGS];
	struct mbuf *mb;
	int nsegs;
	int err;
	struct mbuf *mb_head;
	int i;

	if (rq->mbuf[ix].mbuf != NULL)
		return (0);

	mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
	    MLX5E_MAX_RX_BYTES);
	if (unlikely(mb == NULL))
		return (-ENOMEM);

	mb->m_len = MLX5E_MAX_RX_BYTES;
	mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;

	for (i = 1; i < rq->nsegs; i++) {
		if (mb_head->m_pkthdr.len >= rq->wqe_sz)
			break;
		mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
		    MLX5E_MAX_RX_BYTES);
		if (unlikely(mb == NULL)) {
			m_freem(mb_head);
			return (-ENOMEM);
		}
		mb->m_len = MLX5E_MAX_RX_BYTES;
		mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* get IP header aligned */
	m_adj(mb, MLX5E_NET_IP_ALIGN);

	err = mlx5_accel_ipsec_rx_tag_add(rq->ifp, mb);
	if (err)
		goto err_free_mbuf;
	err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0)
		goto err_free_mbuf;
	if (unlikely(nsegs == 0)) {
		bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
		err = -ENOMEM;
		goto err_free_mbuf;
	}
	wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
	wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
	    MLX5_HW_START_PADDING);
	for (i = 1; i != nsegs; i++) {
		wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
		wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
	}
	for (; i < rq->nsegs; i++) {
		wqe->data[i].addr = 0;
		wqe->data[i].byte_count = 0;
	}

	rq->mbuf[ix].mbuf = mb;
	rq->mbuf[ix].data = mb->m_data;

	bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
	    BUS_DMASYNC_PREREAD);
	return (0);

err_free_mbuf:
	m_freem(mb);
	return (err);
}

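/*
 * Refill the receive queue with freshly allocated mbufs until the
 * hardware queue is full. If an allocation fails, a retry is scheduled
 * on the watchdog callout instead of leaving the queue short. The
 * release fence below makes the new WQEs visible to the device before
 * the doorbell record is updated.
 */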
static void
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	if (unlikely(rq->enabled == 0))
		return;

	while (!mlx5_wq_ll_is_full(&rq->wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
			callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq);
			break;
		}
		mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	atomic_thread_fence_rel();

	mlx5_wq_ll_update_db_record(&rq->wq);
}

static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
	/* TODO: consider vlans, ip options, ... */
	struct ether_header *eh;
	uint16_t eh_type;
	uint16_t tot_len;
	struct ip6_hdr *ip6 = NULL;
	struct ip *ip4 = NULL;
	struct tcphdr *th;
	uint32_t *ts_ptr;
	uint8_t l4_hdr_type;
	int tcp_ack;

	eh = mtod(mb, struct ether_header *);
	eh_type = ntohs(eh->ether_type);

	l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
	    (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	/* TODO: consider vlan */
	tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;

	switch (eh_type) {
	case ETHERTYPE_IP:
		ip4 = (struct ip *)(eh + 1);
		th = (struct tcphdr *)(ip4 + 1);
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(eh + 1);
		th = (struct tcphdr *)(ip6 + 1);
		break;
	default:
		return;
	}

	ts_ptr = (uint32_t *)(th + 1);

	if (get_cqe_lro_tcppsh(cqe))
		th->th_flags |= TH_PUSH;

	if (tcp_ack) {
		th->th_flags |= TH_ACK;
		th->th_ack = cqe->lro_ack_seq_num;
		th->th_win = cqe->lro_tcp_win;

		/*
		 * FreeBSD handles only a 32-bit aligned timestamp option
		 * placed right after the TCP header:
		 * +--------+--------+--------+--------+
		 * |  NOP   |  NOP   | TSopt  |   10   |
		 * +--------+--------+--------+--------+
		 * |          TSval timestamp          |
		 * +--------+--------+--------+--------+
		 * |          TSecr timestamp          |
		 * +--------+--------+--------+--------+
		 */
		if (get_cqe_lro_timestamp_valid(cqe) &&
		    (__predict_true(*ts_ptr == ntohl(TCPOPT_NOP << 24 |
		    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
		    TCPOLEN_TIMESTAMP)))) {
			/*
			 * cqe->timestamp is 64 bits wide:
			 * [0-31]  - timestamp.
			 * [32-63] - timestamp echo reply.
			 */
			ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
			ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
		}
	}
	if (ip4) {
		ip4->ip_ttl = cqe->lro_min_ttl;
		ip4->ip_len = cpu_to_be16(tot_len);
		ip4->ip_sum = 0;
		ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
	} else {
		ip6->ip6_hlim = cqe->lro_min_ttl;
		ip6->ip6_plen = cpu_to_be16(tot_len -
		    sizeof(struct ip6_hdr));
	}
	/* TODO: handle tcp checksum */
}

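/*
 * Convert a raw hardware timestamp into a host timestamp by linear
 * interpolation between the previous and current samples of the active
 * clock calibration point. Returns zero when no calibration data is
 * available yet. See the block comment inside for how the arithmetic
 * stays within 64 bits without floating point.
 */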
static uint64_t
mlx5e_mbuf_tstmp(struct mlx5e_priv *priv, uint64_t hw_tstmp)
{
	struct mlx5e_clbr_point *cp, dcp;
	uint64_t tstmp_sec, tstmp_nsec;
	uint64_t hw_clocks;
	uint64_t rt_cur_to_prev, res_s, res_n, res_s_modulo, res;
	uint64_t hw_clk_div;
	u_int gen;

	do {
		cp = &priv->clbr_points[priv->clbr_curr];
		gen = atomic_load_acq_int(&cp->clbr_gen);
		if (gen == 0)
			return (0);
		dcp = *cp;
		atomic_thread_fence_acq();
	} while (gen != dcp.clbr_gen);
	/*
	 * Our goal here is to have a result that is:
	 *
	 *                          (cur_time - prev_time)
	 * ((hw_tstmp - hw_prev) * ------------------------) + prev_time
	 *                           (hw_cur - hw_prev)
	 *
	 * With the constraints that we cannot use float and we
	 * don't want to overflow the uint64_t numbers we are using.
	 *
	 * The plan is to take the clocking value of the hw timestamps
	 * and split them into seconds and nanosecond equivalent portions.
	 * Then we operate on the two portions separately, making sure to
	 * bring back the carry over from the seconds when we divide.
	 *
	 * First up, let's split the value into the two separate entities,
	 * i.e. the seconds. We use the clock frequency for this. Note
	 * that priv->cclk was set up with the clock frequency in hz, so
	 * we are all set to go.
	 */
	hw_clocks = hw_tstmp - dcp.clbr_hw_prev;
	tstmp_sec = hw_clocks / priv->cclk;
	tstmp_nsec = hw_clocks % priv->cclk;
	/* Now work with them separately */
	rt_cur_to_prev = (dcp.base_curr - dcp.base_prev);
	res_s = tstmp_sec * rt_cur_to_prev;
	res_n = tstmp_nsec * rt_cur_to_prev;
	/* Now let's get our divider */
	hw_clk_div = dcp.clbr_hw_curr - dcp.clbr_hw_prev;
	/* Make sure to save the remainder from the seconds divide */
	res_s_modulo = res_s % hw_clk_div;
	res_s /= hw_clk_div;
	/* scale the remainder to where it should be */
	res_s_modulo *= priv->cclk;
	/* Now add in the remainder */
	res_n += res_s_modulo;
	/* Now do the divide */
	res_n /= hw_clk_div;
	res_s *= priv->cclk;
	/* Recombine the two */
	res = res_s + res_n;
	/* And now add in the base time to get to the real timestamp */
	res += dcp.base_prev;
	return (res);
}

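/*
 * Fill in the mbuf packet header from the CQE: rewrite the headers of
 * HW LRO aggregates, trim the mbuf chain to the received length, set
 * the flow ID and RSS hash type, and apply the hardware checksum,
 * VLAN, timestamp, TLS and IPSEC offload results.
 */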
static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
	if_t ifp = rq->ifp;
	struct mlx5e_channel *c;
	struct mbuf *mb_head;
	int lro_num_seg;	/* HW LRO session aggregated packets counter */
	uint64_t tstmp;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(mb, cqe);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	mb->m_pkthdr.len = cqe_bcnt;
	for (mb_head = mb; mb != NULL; mb = mb->m_next) {
		if (mb->m_len > cqe_bcnt)
			mb->m_len = cqe_bcnt;
		cqe_bcnt -= mb->m_len;
		if (likely(cqe_bcnt == 0)) {
			if (likely(mb->m_next != NULL)) {
				/* trim off empty mbufs */
				m_freem(mb->m_next);
				mb->m_next = NULL;
			}
			break;
		}
	}
	/* rewind to first mbuf in chain */
	mb = mb_head;

	/* check if a Toeplitz hash was computed */
	if (cqe->rss_hash_type != 0) {
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
#ifdef RSS
		/* decode the RSS hash type */
		switch (cqe->rss_hash_type &
		    (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
		/* IPv4 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
			break;
		case CQE_RSS_DST_HTYPE_IPV4:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
			break;
		/* IPv6 */
		case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
			break;
		case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
			break;
		case CQE_RSS_DST_HTYPE_IPV6:
			M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
			break;
		default:	/* Other */
			M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
			break;
		}
#else
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
#endif
#ifdef M_HASHTYPE_SETINNER
		if (cqe_is_tunneled(cqe))
			M_HASHTYPE_SETINNER(mb);
#endif
	} else {
		mb->m_pkthdr.flowid = rq->ix;
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
	}
	mb->m_pkthdr.rcvif = ifp;
	mb->m_pkthdr.leaf_rcvif = ifp;

	if (cqe_is_tunneled(cqe)) {
		/*
		 * CQE can be tunneled only if TIR is configured to
		 * enable parsing of tunneled payload, so no need to
		 * check for capabilities.
		 */
		if (((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK)) ==
		    (CQE_L2_OK | CQE_L3_OK))) {
			mb->m_pkthdr.csum_flags |=
			    CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);

			if (likely((cqe->hds_ip_ext & CQE_L4_OK) == CQE_L4_OK)) {
				mb->m_pkthdr.csum_flags |=
				    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
			}
		} else {
			rq->stats.csum_none++;
		}
	} else if (likely((if_getcapenable(ifp) & (IFCAP_RXCSUM |
	    IFCAP_RXCSUM_IPV6)) != 0) &&
	    ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
		mb->m_pkthdr.csum_flags =
		    CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mb->m_pkthdr.csum_data = htons(0xffff);
	} else {
		rq->stats.csum_none++;
	}

	if (cqe_has_vlan(cqe)) {
		mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
		mb->m_flags |= M_VLANTAG;
	}

	c = container_of(rq, struct mlx5e_channel, rq);
	if (c->priv->clbr_done >= 2) {
		tstmp = mlx5e_mbuf_tstmp(c->priv, be64_to_cpu(cqe->timestamp));
		if ((tstmp & MLX5_CQE_TSTMP_PTP) != 0) {
			/*
			 * Timestamp was taken on the packet entrance,
			 * instead of the cqe generation.
			 */
			tstmp &= ~MLX5_CQE_TSTMP_PTP;
			mb->m_flags |= M_TSTMP_HPREC;
		}
		if (tstmp != 0) {
			mb->m_pkthdr.rcv_tstmp = tstmp;
			mb->m_flags |= M_TSTMP;
		}
	}
	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		/* set proper checksum flag for decrypted packets */
		mb->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
		rq->stats.decrypted_ok_packets++;
		break;
	case CQE_TLS_OFFLOAD_ERROR:
		rq->stats.decrypted_error_packets++;
		break;
	default:
		break;
	}

	mlx5e_accel_ipsec_handle_rx(mb, cqe);
}

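/*
 * Helpers for compressed CQE handling: a compressed session consists of
 * a "title" CQE followed by groups of eight 8-byte mini CQEs packed
 * into single 64-byte slots. The session is expanded in place back into
 * full 64-byte CQEs before normal completion processing.
 */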
static inline void
mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
	    sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
{
	memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
	    data, sizeof(struct mlx5_cqe64));
}

static inline void
mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
    struct mlx5_mini_cqe8 *mini,
    u16 wqe_counter, int i)
{
	/*
	 * NOTE: The fields which are not set here are copied from the
	 * initial and common title. See memcpy() in
	 * mlx5e_write_cqe_slot().
	 */
	title->byte_cnt = mini->byte_cnt;
	title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
	title->rss_hash_result = mini->rx_hash_result;
	/*
	 * Since we use MLX5_CQE_FORMAT_HASH when creating the RX CQ,
	 * the value of the checksum should be ignored.
	 */
	title->check_sum = 0;
	title->op_own = (title->op_own & 0xf0) |
	    (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
}

#define	MLX5E_MINI_ARRAY_SZ 8
/* Make sure the structs are not packed differently */
CTASSERT(sizeof(struct mlx5_cqe64) ==
    sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);

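/*
 * Expand one compressed CQE session starting at the current consumer
 * index. The title CQE carries the number of session entries in its
 * byte_cnt field; the mini CQE array is re-read from the ring every
 * MLX5E_MINI_ARRAY_SZ entries.
 */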
static void
mlx5e_decompress_cqes(struct mlx5e_cq *cq)
{
	struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
	struct mlx5_cqe64 title;
	u32 cqe_count;
	u32 i = 0;
	u16 title_wqe_counter;

	mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
	title_wqe_counter = be16_to_cpu(title.wqe_counter);
	cqe_count = be32_to_cpu(title.byte_cnt);

	/* Make sure we won't overflow */
	KASSERT(cqe_count <= cq->wq.sz_m1,
	    ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
	    cqe_count, cq->wq.sz_m1));

	mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
	while (true) {
		mlx5e_decompress_cqe(cq, &title,
		    &mini_array[i % MLX5E_MINI_ARRAY_SZ],
		    title_wqe_counter, i);
		mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
		i++;

		if (i == cqe_count)
			break;
		if (i % MLX5E_MINI_ARRAY_SZ == 0)
			mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
	}
}

static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
	struct pfil_head *pfil;
	int i, rv;

	CURVNET_SET_QUIET(if_getvnet(rq->ifp));
	pfil = rq->channel->priv->pfil;
	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		__be16 wqe_counter_be;
		u16 wqe_counter;
		u32 byte_cnt, seglen;

		cqe = mlx5e_get_cqe(&rq->cq);
		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
			mlx5e_decompress_cqes(&rq->cq);

		mlx5_cqwq_pop(&rq->cq.wq);

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		byte_cnt = be32_to_cpu(cqe->byte_cnt);

		bus_dmamap_sync(rq->dma_tag,
		    rq->mbuf[wqe_counter].dma_map,
		    BUS_DMASYNC_POSTREAD);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
			rq->stats.wqe_err++;
			goto wq_ll_pop;
		}
		if (pfil != NULL && PFIL_HOOKED_IN(pfil)) {
			seglen = MIN(byte_cnt, MLX5E_MAX_RX_BYTES);
			rv = pfil_mem_in(rq->channel->priv->pfil,
			    rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb);

			switch (rv) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				/*
				 * Filter dropped or consumed it. In
				 * either case, we can just recycle the
				 * buffer; there is no more work to do.
				 */
				rq->stats.packets++;
				goto wq_ll_pop;
			case PFIL_REALLOCED:
				/*
				 * Filter copied it; recycle the buffer
				 * and receive the new mbuf allocated
				 * by the filter.
				 */
				goto rx_common;
			default:
				/*
				 * The filter said it was OK, so
				 * receive like normal.
				 */
				KASSERT(rv == PFIL_PASS,
				    ("Filter returned %d!\n", rv));
			}
		}
		if (!mlx5e_accel_ipsec_flow(cqe) /* tag is already assigned
		    to rq->mbuf */ &&
		    MHLEN - MLX5E_NET_IP_ALIGN >= byte_cnt &&
		    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
			/* set maximum mbuf length */
			mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
			/* get IP header aligned */
			mb->m_data += MLX5E_NET_IP_ALIGN;

			bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
			    byte_cnt);
		} else {
			mb = rq->mbuf[wqe_counter].mbuf;
			rq->mbuf[wqe_counter].mbuf = NULL;	/* safety clear */

			bus_dmamap_unload(rq->dma_tag,
			    rq->mbuf[wqe_counter].dma_map);
		}
rx_common:
		mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
		rq->stats.bytes += byte_cnt;
		rq->stats.packets++;
#ifdef NUMA
		mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
#endif

#if !defined(HAVE_TCP_LRO_RX)
		tcp_lro_queue_mbuf(&rq->lro, mb);
#else
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
		    rq->lro.lro_cnt == 0 ||
		    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
			if_input(rq->ifp, mb);
		}
#endif
wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		    &wqe->next.next_wqe_index);
	}
	CURVNET_RESTORE();

	mlx5_cqwq_update_db_record(&rq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();
	return (i);
}

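/*
 * RX completion event handler. While the CQ is being drained, doorbell
 * writes on the transmit and internal queues are inhibited so that they
 * can be batched into a single notification per queue once polling
 * completes. Polling is interleaved with reposting of receive WQEs to
 * avoid starving the queue under heavy traffic.
 */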
void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
{
	struct mlx5e_channel *c = container_of(mcq, struct mlx5e_channel, rq.cq.mcq);
	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
	int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
#if (MHLEN < 15)
#error "MHLEN is too small"
#endif
	struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA);

	if (mb != NULL) {
		/* this code is used for debugging purposes only */
		mb->m_pkthdr.len = mb->m_len = 15;
		memset(mb->m_data, 255, 14);
		mb->m_data[14] = rq->ix;
		mb->m_pkthdr.rcvif = rq->ifp;
		mb->m_pkthdr.leaf_rcvif = rq->ifp;
		if_input(rq->ifp, mb);
	}
#endif
	for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
		mtx_lock(&c->sq[j].lock);
		c->sq[j].db_inhibit++;
		mtx_unlock(&c->sq[j].lock);
	}

	mtx_lock(&c->iq.lock);
	c->iq.db_inhibit++;
	mtx_unlock(&c->iq.lock);

	mtx_lock(&rq->mtx);

	/*
	 * Polling the entire CQ without posting new WQEs results in
	 * lack of receive WQEs during heavy traffic scenarios.
	 */
	while (1) {
		if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
		    MLX5E_RX_BUDGET_MAX)
			break;
		i += MLX5E_RX_BUDGET_MAX;
		if (i >= MLX5E_BUDGET_MAX)
			break;
		mlx5e_post_rx_wqes(rq);
	}
	mlx5e_post_rx_wqes(rq);
	/* check for dynamic interrupt moderation callback */
	if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
		net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
	mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
	tcp_lro_flush_all(&rq->lro);
	mtx_unlock(&rq->mtx);

	for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
		mtx_lock(&c->sq[j].lock);
		c->sq[j].db_inhibit--;
		/* Update the doorbell record, if any. */
		mlx5e_tx_notify_hw(c->sq + j, true);
		mtx_unlock(&c->sq[j].lock);
	}

	mtx_lock(&c->iq.lock);
	c->iq.db_inhibit--;
	mlx5e_iq_notify_hw(&c->iq);
	mtx_unlock(&c->iq.lock);
}