/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

#define NETDEV_XDP_ACT_XSK	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_tailroom(bool mbuf)
{
	return mbuf ? SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 __xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	u32 frame_size = __xsk_pool_get_rx_frame_size(pool);
	struct xdp_umem *umem = pool->umem;
	bool mbuf;

	/* Reserve tailroom only for zero-copy pools that opted into
	 * multi-buffer. The reserved area is used for skb_shared_info,
	 * matching the XDP core's xdp_data_hard_end() layout.
	 */
	mbuf = pool->dev && (umem->flags & XDP_UMEM_SG_FLAG);
	frame_size -= xsk_pool_get_tailroom(mbuf);

	return ALIGN_DOWN(frame_size, 128);
}
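
/* Illustrative sketch (not part of the API): how a zero-copy driver
 * might derive its RX buffer layout from the helpers above when
 * programming an RX queue. The output names (buf_len, data_off) are
 * hypothetical; a real driver maps these onto its ring descriptors.
 */
static inline void xsk_example_rx_layout(struct xsk_buff_pool *pool,
					 u32 *buf_len, u32 *data_off)
{
	/* Largest contiguous area the device may DMA into per buffer. */
	*buf_len = xsk_pool_get_rx_frame_size(pool);
	/* Offset of packet data from the start of each chunk. */
	*data_off = xsk_pool_get_headroom(pool);
}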

static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
{
	return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del_init(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del_init(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_del_init(&xskb->list_node);
}
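
/* Illustrative sketch (not part of the API): linking one RX fragment of
 * a multi-buffer frame to its head. A hypothetical driver would call
 * this for each completed non-first buffer; when no more fragments fit
 * in the shared info array, the whole frame is dropped.
 */
static inline bool xsk_example_rx_link_frag(struct xdp_buff *head,
					    struct xdp_buff *frag)
{
	if (likely(xsk_buff_add_frag(head, frag)))
		return true;

	/* @frag was never linked, so free it separately; freeing the
	 * head also frees every fragment already linked to it.
	 */
	xsk_buff_free(frag);
	xsk_buff_free(head);
	return false;
}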

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_first_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
				list_node);
	return &frag->xdp;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

/**
 * xsk_buff_raw_get_ctx - get &xdp_desc context
 * @pool: XSk buff pool desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
 * details.
 *
 * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
 * pointer, if it is present and valid (initialized to %NULL otherwise).
 */
static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_ctx(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		XDP_TXMD_FLAGS_LAUNCH_TIME | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = data - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
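
/* Illustrative sketch (not part of the API): the core of a zero-copy TX
 * path built on the helpers above. Posting @dma and @desc.len to the
 * hardware ring is driver specific and therefore elided. Returns the
 * number of descriptors consumed, at most @budget.
 */
static inline u32 xsk_example_xmit(struct xsk_buff_pool *pool, u32 budget)
{
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma = xsk_buff_raw_get_dma(pool, desc.addr);

		/* Make the frame written by user space visible to the
		 * device before it is posted to the TX ring.
		 */
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
		/* <driver specific: post dma/desc.len to the TX ring> */
		sent++;
	}

	if (sent) {
		/* Hand the consumed TX ring slots back to user space. */
		xsk_tx_release(pool);
		if (xsk_uses_need_wakeup(pool))
			xsk_set_tx_need_wakeup(pool);
	}

	return sent;
}

/* Once the device later signals completion for these frames, the driver
 * reports them back to the completion ring with xsk_tx_completed().
 */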

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_tailroom(bool mbuf)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	return NULL;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return (struct xdp_desc_ctx){ };
}

static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	return NULL;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void
xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
				 dma_addr_t dma,
				 size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */
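
/* Illustrative sketch (not part of the API): typical pool setup when a
 * driver enables zero copy on one queue, usually from its
 * XDP_SETUP_XSK_POOL handler. Ring reconfiguration and error unwinding
 * are elided; @dma_dev and @rxq stand for the driver's DMA device and
 * registered RX queue info, and the DMA attributes are left at zero for
 * simplicity.
 */
static inline int xsk_example_enable_pool(struct net_device *netdev,
					  struct device *dma_dev,
					  struct xdp_rxq_info *rxq,
					  u16 queue_id)
{
	struct xsk_buff_pool *pool;
	int err;

	pool = xsk_get_pool_from_qid(netdev, queue_id);
	if (!pool)
		return -EINVAL;

	/* Map the whole UMEM for the device once, up front. */
	err = xsk_pool_dma_map(pool, dma_dev, 0);
	if (err)
		return err;

	/* Stamp the pool's buffers with this queue's rxq info. */
	xsk_pool_set_rxq_info(pool, rxq);

	return 0;
}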

#endif /* _LINUX_XDP_SOCK_DRV_H */