/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
#include <linux/bitfield.h>
#include <net/page_pool/helpers.h>

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
	int hr = mlx5e_get_linear_rq_headroom(params, xsk);

	/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
	 * The condition checked in mlx5e_rx_is_linear_skb is:
	 *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE   (1)
	 * (Note that hw_mtu == sw_mtu + hard_mtu.)
	 * What is returned from this function is:
	 *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                   (2)
	 * After assigning sw_mtu := max_mtu, the left side of (1) turns to
	 * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
	 * because both PAGE_SIZE and S are already aligned. Any number greater
	 * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
	 * so max_mtu is the maximum MTU allowed.
	 */

	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		    struct xdp_buff *xdp)
{
	struct page *page = virt_to_page(xdp->data);
	struct mlx5e_xmit_data_frags xdptxdf = {};
	struct mlx5e_xmit_data *xdptxd;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	int i;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return false;

	xdptxd = &xdptxdf.xd;
	xdptxd->data = xdpf->data;
	xdptxd->len = xdpf->len;
	xdptxd->has_frags = xdp_frame_has_frags(xdpf);

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		/* The xdp_buff was in the UMEM and was copied into a newly
		 * allocated page. The UMEM page was returned via the ZCA, and
		 * this new page has to be mapped at this point and has to be
		 * unmapped and returned via xdp_return_frame on completion.
		 */

		/* Prevent double recycling of the UMEM page. Even in case this
		 * function returns false, the xdp_buff shouldn't be recycled,
		 * as it was already done in xdp_convert_zc_to_xdp_frame.
		 */
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

		if (unlikely(xdptxd->has_frags))
			return false;

		dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(sq->pdev, dma_addr)) {
			xdp_return_frame(xdpf);
			return false;
		}

		xdptxd->dma_addr = dma_addr;

		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
					      mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
			return false;

		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr });
		return true;
	}

	/* The driver assumes that xdp_convert_buff_to_frame returns an
	 * xdp_frame that points to the same memory region as the original
	 * xdp_buff. This allows mapping the memory only once and using the
	 * DMA_BIDIRECTIONAL mode.
	 */

	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);

	if (xdptxd->has_frags) {
		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
		xdptxdf.dma_arr = NULL;

		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
			dma_addr_t addr;
			u32 len;

			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);
			len = skb_frag_size(frag);
			dma_sync_single_for_device(sq->pdev, addr, len,
						   DMA_BIDIRECTIONAL);
		}
	}

	xdptxd->dma_addr = dma_addr;

	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
		return false;

	/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
			     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE });

	if (xdptxd->has_frags) {
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info)
				     { .page.num = 1 + xdptxdf.sinfo->nr_frags });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.page = page });
		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];

			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
					     (union mlx5e_xdp_info)
					     { .page.page = skb_frag_page(frag) });
		}
	} else {
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.num = 1 });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .page.page = page });
	}

	return true;
}

static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;

	if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config)))
		return -ENODATA;

	*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
					_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
	return 0;
}

/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4 bits */
#define RSS_TYPE_MAX_TABLE	16 /* 4 bits, max 16 entries */
#define RSS_L4			GENMASK(1, 0)
#define RSS_L3			GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */

/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4, sorted numerically */
enum mlx5_rss_hash_type {
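	/* Each enumerator packs the CQE_RSS_HTYPE_IP value into bits 3:2
	 * (RSS_L3) and the L4 hash type into bits 1:0 (RSS_L4), matching the
	 * lookup index built in mlx5e_xdp_rx_hash() below.
	 */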
	RSS_TYPE_NO_HASH	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L3_IPV4	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L4_IPV4_TCP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
	RSS_TYPE_L4_IPV4_UDP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
	RSS_TYPE_L4_IPV4_IPSEC	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
	RSS_TYPE_L3_IPV6	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
	RSS_TYPE_L4_IPV6_TCP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
	RSS_TYPE_L4_IPV6_UDP	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
	RSS_TYPE_L4_IPV6_IPSEC	 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
				    FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
};

/* Invalid combinations simply map to zero, so no boundary checks are needed */
static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
	[RSS_TYPE_NO_HASH]	 = XDP_RSS_TYPE_NONE,
	[1]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[2]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[3]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[RSS_TYPE_L3_IPV4]	 = XDP_RSS_TYPE_L3_IPV4,
	[RSS_TYPE_L4_IPV4_TCP]	 = XDP_RSS_TYPE_L4_IPV4_TCP,
	[RSS_TYPE_L4_IPV4_UDP]	 = XDP_RSS_TYPE_L4_IPV4_UDP,
	[RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
	[RSS_TYPE_L3_IPV6]	 = XDP_RSS_TYPE_L3_IPV6,
	[RSS_TYPE_L4_IPV6_TCP]	 = XDP_RSS_TYPE_L4_IPV6_TCP,
	[RSS_TYPE_L4_IPV6_UDP]	 = XDP_RSS_TYPE_L4_IPV6_UDP,
	[RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
	[12]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[13]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[14]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
	[15]			 = XDP_RSS_TYPE_NONE, /* Implicit zero */
};

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			     enum xdp_rss_hash_type *rss_type)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
	const struct mlx5_cqe64 *cqe = _ctx->cqe;
	u32 hash_type, l4_type, ip_type, lookup;

	if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
		return -ENODATA;

	*hash = be32_to_cpu(cqe->rss_hash_result);

	hash_type = cqe->rss_hash_type;
	BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
	ip_type = hash_type & CQE_RSS_HTYPE_IP;
	l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
	lookup = ip_type | l4_type;
	*rss_type = mlx5_xdp_rss_type[lookup];

	return 0;
}

static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
				 u16 *vlan_tci)
{
	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
	const struct mlx5_cqe64 *cqe = _ctx->cqe;

	if (!cqe_has_vlan(cqe))
		return -ENODATA;

	*vlan_proto = htons(ETH_P_8021Q);
	*vlan_tci = be16_to_cpu(cqe->vlan_info);
	return 0;
}

const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
	.xmo_rx_timestamp	= mlx5e_xdp_rx_timestamp,
	.xmo_rx_hash		= mlx5e_xdp_rx_hash,
	.xmo_rx_vlan_tag	= mlx5e_xdp_rx_vlan_tag,
};

struct mlx5e_xsk_tx_complete {
	struct mlx5_cqe64 *cqe;
	struct mlx5e_cq *cq;
};

static u64 mlx5e_xsk_fill_timestamp(void *_priv)
{
	struct mlx5e_xsk_tx_complete *priv = _priv;
	u64 ts;

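	/* Translate the raw CQE timestamp to nanoseconds: devices running in
	 * real-time mode use the real-time clock conversion, all others go
	 * through the free-running timecounter.
	 */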
	ts = get_cqe_ts(priv->cqe);

	if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
		return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts);

	return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts);
}

static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
{
	struct mlx5_wqe_eth_seg *eseg = priv;

	/* HW/FW is doing parsing, so offsets are largely ignored. */
	eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
}

const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
	.tmo_fill_timestamp	= mlx5e_xsk_fill_timestamp,
	.tmo_request_checksum	= mlx5e_xsk_request_checksum,
};

/* Returns true if the packet was consumed by XDP */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
		      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
	struct xdp_buff *xdp = &mxbuf->xdp;
	u32 act;
	int err;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP is enabled, the page refcount is 1 here */
		err = xdp_do_redirect(rq->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}

static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_xdp_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
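		/* Each NOP occupies a single WQEBB, so posting contig_wqebbs
		 * of them advances the producer counter to the start of the
		 * next fragment, where the requested WQE fits contiguously.
		 */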
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts = 0,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nops += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
		.pkt_count = 0,
		.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
	};

	stats->mpwqe++;
}

void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
	u16 ds_count = session->ds_count;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
	wi->num_pkts = session->pkt_count;

	sq->pc += wi->num_wqebbs;

	sq->doorbell_cseg = cseg;

	session->wqe = NULL; /* Close session */
}

enum {
	MLX5E_XDP_CHECK_OK = 1,
	MLX5E_XDP_CHECK_START_MPWQE = 2,
};

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
	if (unlikely(!sq->mpwqe.wqe)) {
		if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
						     sq->stop_room))) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->stats->full++;
			return -EBUSY;
		}

		return MLX5E_XDP_CHECK_START_MPWQE;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     int check_result, struct xsk_tx_metadata *meta);

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
			   int check_result, struct xsk_tx_metadata *meta)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;
	struct mlx5e_xmit_data *p = xdptxd;
	struct mlx5e_xmit_data tmp;

	if (xdptxd->has_frags) {
		struct mlx5e_xmit_data_frags *xdptxdf =
			container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);

		if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) {
			/* MPWQE is enabled, but a multi-buffer packet is queued for
			 * transmission. MPWQE can't send fragmented packets, so close
			 * the current session and fall back to a regular WQE.
			 */
			if (unlikely(sq->mpwqe.wqe))
				mlx5e_xdp_mpwqe_complete(sq);
			return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta);
		}
		if (!xdptxd->len) {
			skb_frag_t *frag = &xdptxdf->sinfo->frags[0];

			tmp.data = skb_frag_address(frag);
			tmp.len = skb_frag_size(frag);
			tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] :
				page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);
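			/* The linear part is empty: point p at the temporary
			 * descriptor, so the single fragment is sent as if it
			 * were a linear packet.
			 */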
			p = &tmp;
		}
	}

	if (unlikely(p->len > sq->hw_mtu)) {
		stats->err++;
		return false;
	}

	if (!check_result)
		check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
	if (unlikely(check_result < 0))
		return false;

	if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
		/* Start the session when nothing can fail, so it's guaranteed
		 * that if there is an active session, it has at least one dseg,
		 * and it's safe to complete it at any time.
		 */
		mlx5e_xdp_mpwqe_session_start(sq);
		xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth);
	}

	mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);

	if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
		mlx5e_xdp_mpwqe_complete(sq);

	stats->xmit++;
	return true;
}

static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
		/* SQ is full, ring doorbell */
		mlx5e_xmit_xdp_doorbell(sq);
		sq->stats->full++;
		return -EBUSY;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
	return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     int check_result, struct xsk_tx_metadata *meta)
{
	struct mlx5e_xmit_data_frags *xdptxdf =
		container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5e_tx_wqe *wqe;

	dma_addr_t dma_addr = xdptxd->dma_addr;
	u32 dma_len = xdptxd->len;
	u16 ds_cnt, inline_hdr_sz;
	unsigned int frags_size;
	u8 num_wqebbs = 1;
	int num_frags = 0;
	bool inline_ok;
	bool linear;
	u16 pi;
	int i;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
		dma_len >= MLX5E_XDP_MIN_INLINE;
	frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;

	if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
		stats->err++;
		return false;
	}

	inline_hdr_sz = 0;
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;

	linear = !!(dma_len - inline_hdr_sz);
	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;

	/* check_result must be 0 if xdptxd->has_frags is true. */
	if (!check_result) {
		int stop_room = 1;

		if (xdptxd->has_frags) {
			ds_cnt += xdptxdf->sinfo->nr_frags;
			num_frags = xdptxdf->sinfo->nr_frags;
			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
			/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
			 * enough to hold all fragments.
			 */
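			/* Each WQEBB carries MLX5_SEND_WQEBB_NUM_DS data
			 * segments, so a fragmented frame may need stop room
			 * for several WQEBBs rather than a single one.
			 */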
			stop_room = MLX5E_STOP_ROOM(num_wqebbs);
		}

		check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
	}
	if (unlikely(check_result < 0))
		return false;

	pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	net_prefetchw(wqe);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	/* copy the inline part if required */
	if (inline_hdr_sz) {
		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
		       inline_hdr_sz - sizeof(eseg->inline_hdr.start));
		dma_len  -= inline_hdr_sz;
		dma_addr += inline_hdr_sz;
		dseg++;
	}

	/* write the dma part */
	if (linear) {
		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->byte_count = cpu_to_be32(dma_len);
		dseg->lkey       = sq->mkey_be;
		dseg++;
	}

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	memset(&cseg->trailer, 0, sizeof(cseg->trailer));
	memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));

	eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
		dma_addr_t addr;

		addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
			page_pool_get_dma_addr(skb_frag_page(frag)) +
			skb_frag_off(frag);

		dseg->addr = cpu_to_be64(addr);
		dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
		dseg->lkey = sq->mkey_be;
		dseg++;
	}

	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_pkts = 1,
	};

	sq->pc += num_wqebbs;

	xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);

	sq->doorbell_cseg = cseg;

	stats->xmit++;
	return true;
}

static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_wqe_info *wi,
				  u32 *xsk_frames,
				  struct xdp_frame_bulk *bq,
				  struct mlx5e_cq *cq,
				  struct mlx5_cqe64 *cqe)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	u16 i;

	for (i = 0; i < wi->num_pkts; i++) {
		union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

		switch (xdpi.mode) {
		case MLX5E_XDP_XMIT_MODE_FRAME: {
			/* XDP_TX from the XSK RQ and XDP_REDIRECT */
			struct xdp_frame *xdpf;
			dma_addr_t dma_addr;

			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			xdpf = xdpi.frame.xdpf;
			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			dma_addr = xdpi.frame.dma_addr;

			dma_unmap_single(sq->pdev, dma_addr,
					 xdpf->len, DMA_TO_DEVICE);
			if (xdp_frame_has_frags(xdpf)) {
				struct skb_shared_info *sinfo;
				int j;

				sinfo = xdp_get_shared_info_from_frame(xdpf);
				for (j = 0; j < sinfo->nr_frags; j++) {
					skb_frag_t *frag = &sinfo->frags[j];

					xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
					dma_addr = xdpi.frame.dma_addr;

					dma_unmap_single(sq->pdev, dma_addr,
							 skb_frag_size(frag), DMA_TO_DEVICE);
				}
			}
			xdp_return_frame_bulk(xdpf, bq);
			break;
		}
		case MLX5E_XDP_XMIT_MODE_PAGE: {
			/* XDP_TX from the regular RQ */
			u8 num, n = 0;

			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
			num = xdpi.page.num;

			do {
				struct page *page;

				xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
				page = xdpi.page.page;

				/* No need to check page_pool_page_is_pp() as we
				 * know this is a page_pool page.
				 */
				page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
							 page);
			} while (++n < num);

			break;
		}
		case MLX5E_XDP_XMIT_MODE_XSK: {
			/* AF_XDP send */
			struct xsk_tx_metadata_compl *compl = NULL;
			struct mlx5e_xsk_tx_complete priv = {
				.cqe = cqe,
				.cq = cq,
			};

			if (xp_tx_metadata_enabled(sq->xsk_pool)) {
				xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
				compl = &xdpi.xsk_meta;

				xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);
			}

			(*xsk_frames)++;
			break;
		}
		default:
			WARN_ON_ONCE(true);
		}
	}
}

bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct xdp_frame_bulk bq;
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 xsk_frames = 0;
	u16 sqcc;
	int i;

	xdp_frame_bulk_init(&bq);

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		struct mlx5e_xdp_wqe_info *wi;
		u16 wqe_counter, ci;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad OP in XDPSQ CQE: 0x%x\n",
					 get_cqe_opcode(cqe));
			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
					     (struct mlx5_err_cqe *)cqe);
			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct xdp_frame_bulk bq;
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (sq->cc != sq->pc) {
		struct mlx5e_xdp_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];

		sq->cc += wi->num_wqebbs;

		mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);
}

int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int nxmit = 0;
	int sq_num;
	int i;

	/* this flag is sufficient, no need to test internal sq state */
	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = priv->channels.c[sq_num]->xdpsq;
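
	/* For each frame, DMA-map the linear part and any fragments, then post
	 * it to the XDP SQ; on the first mapping or posting failure, unmap
	 * what was mapped and stop, returning the number of frames actually
	 * queued.
	 */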
	for (i = 0; i < n; i++) {
		struct mlx5e_xmit_data_frags xdptxdf = {};
		struct xdp_frame *xdpf = frames[i];
		dma_addr_t dma_arr[MAX_SKB_FRAGS];
		struct mlx5e_xmit_data *xdptxd;
		bool ret;

		xdptxd = &xdptxdf.xd;
		xdptxd->data = xdpf->data;
		xdptxd->len = xdpf->len;
		xdptxd->has_frags = xdp_frame_has_frags(xdpf);
		xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
						  xdptxd->len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
			break;

		if (xdptxd->has_frags) {
			int j;

			xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
			xdptxdf.dma_arr = dma_arr;
			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
				skb_frag_t *frag = &xdptxdf.sinfo->frags[j];

				dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
							    skb_frag_size(frag), DMA_TO_DEVICE);

				if (!dma_mapping_error(sq->pdev, dma_arr[j]))
					continue;
				/* mapping error */
				while (--j >= 0)
					dma_unmap_single(sq->pdev, dma_arr[j],
							 skb_frag_size(&xdptxdf.sinfo->frags[j]),
							 DMA_TO_DEVICE);
				goto out;
			}
		}

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL);
		if (unlikely(!ret)) {
			int j;

			dma_unmap_single(sq->pdev, xdptxd->dma_addr,
					 xdptxd->len, DMA_TO_DEVICE);
			if (!xdptxd->has_frags)
				break;
			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
				dma_unmap_single(sq->pdev, dma_arr[j],
						 skb_frag_size(&xdptxdf.sinfo->frags[j]),
						 DMA_TO_DEVICE);
			break;
		}

		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
		if (xdptxd->has_frags) {
			int j;

			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
				mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
						     (union mlx5e_xdp_info)
						     { .frame.dma_addr = dma_arr[j] });
		}
		nxmit++;
	}

out:
	if (sq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(sq);

	if (flags & XDP_XMIT_FLUSH)
		mlx5e_xmit_xdp_doorbell(sq);

	return nxmit;
}

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

	if (xdpsq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(xdpsq);

	mlx5e_xmit_xdp_doorbell(xdpsq);

	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
		xdp_do_flush();
		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
	}
}

void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
	sq->xmit_xdp_frame_check = is_mpw ?
		mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
	sq->xmit_xdp_frame = is_mpw ?
		mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}