/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

/* Return a pointer to the fill ring slot for index @idx. @idx is masked
 * with the ring mask, so a monotonically increasing index may be passed.
 */
static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

/* Return a read-only pointer to the completion ring entry at index @idx. */
static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

/* Return a pointer to the Tx descriptor at index @idx, for the application
 * to fill in before submitting.
 */
static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

/* Return a read-only pointer to the Rx descriptor at index @idx. */
static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

/* Return non-zero if the kernel has set XDP_RING_NEED_WAKEUP on this ring,
 * i.e. the application needs to kick the kernel to make it process the ring.
 */
static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}

/* Return the number of free entries in producer ring @r. The cached
 * consumer pointer is only refreshed from the kernel when the cached view
 * shows fewer than @nb free entries, keeping the common case cheap.
 */
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries in the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}

/* Return the number of entries available in consumer ring @r, capped at
 * @nb. The cached producer pointer is only refreshed from the kernel when
 * the cached view is empty.
 */
static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

/* Reserve @nb slots in producer ring @prod. All-or-nothing: returns @nb and
 * stores the index of the first reserved slot in *idx on success, or
 * returns 0 (leaving *idx untouched) if fewer than @nb slots are free.
 */
static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

/* Make @nb previously reserved (and filled-in) entries visible to the
 * kernel.
 */
static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}

/* Undo the reservation of the last @nb slots. Only valid for slots that
 * have not yet been submitted.
 */
static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb)
{
	prod->cached_prod -= nb;
}

/* Peek at up to @nb entries in consumer ring @cons. Returns the number of
 * entries available (possibly 0) and, when non-zero, stores the index of
 * the first one in *idx. The slots are not handed back to the kernel until
 * xsk_ring_cons__release() is called.
 */
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

/* Undo the last peek of @nb entries so they can be peeked again. */
static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

/* Hand @nb consumed entries back to the kernel. */
static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	__atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE);
}

/* Translate a umem offset @addr into a pointer inside @umem_area. */
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

/* Extract the base address carried in the lower bits of an unaligned-mode
 * umem address.
 */
static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

/* Extract the offset carried in the upper bits of an unaligned-mode umem
 * address.
 */
static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

/* Flatten an unaligned-mode umem address into a plain offset: base address
 * plus the embedded offset.
 */
static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
};

int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags);
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags);
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
void xsk_clear_xskmap(struct bpf_map *map);
bool xsk_is_in_mode(u32 ifindex, int mode);

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
		     void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
		       int ifindex, __u32 queue_id,
		       struct xsk_umem *umem,
		       struct xsk_ring_cons *rx,
		       struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *config);

/* Returns 0 for success and -EBUSY if the umem is still in use. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __XSK_H */