/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */
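/* Illustrative note: seen from user space, the fill and TX rings are produced
 * by the application (struct xsk_ring_prod), while the completion and RX
 * rings are consumed by it (struct xsk_ring_cons). A typical single-queue
 * application therefore keeps one ring of each kind:
 *
 *	struct xsk_ring_prod fill;	// buffer addresses handed to the kernel for RX
 *	struct xsk_ring_cons comp;	// addresses of completed TX frames
 *	struct xsk_ring_prod tx;	// descriptors queued for transmission
 *	struct xsk_ring_cons rx;	// descriptors of received frames
 */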
struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}
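/* Usage sketch, for illustration only: when the socket was bound with the
 * XDP_USE_NEED_WAKEUP bind flag, the kernel may request an explicit kick from
 * user space. A TX path would typically check the flag after submitting
 * descriptors and, if it is set, poke the kernel with a dummy sendto() on the
 * AF_XDP socket (needs <sys/socket.h>); "tx" and "xsk" below are assumed to
 * be the application's TX ring and socket handle:
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */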
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}

static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	__atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE);
}
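/* Usage sketch, for illustration only: a minimal RX path built from the ring
 * helpers above, assuming an aligned-chunk UMEM. "rx" is the RX ring, "fill"
 * the fill ring and "umem_area" the start of the UMEM buffer;
 * xsk_umem__get_data() is declared below.
 *
 *	__u32 idx_rx, idx_fill, i, rcvd;
 *
 *	rcvd = xsk_ring_cons__peek(&rx, 64, &idx_rx);
 *	if (!rcvd)
 *		return;
 *	if (xsk_ring_prod__reserve(&fill, rcvd, &idx_fill) != rcvd) {
 *		xsk_ring_cons__cancel(&rx, rcvd);	// no room to recycle, try again later
 *		return;
 *	}
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx_rx + i);
 *		void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *		// ... process desc->len bytes at pkt, then recycle the buffer ...
 *		*xsk_ring_prod__fill_addr(&fill, idx_fill + i) = desc->addr;
 *	}
 *	xsk_ring_prod__submit(&fill, rcvd);
 *	xsk_ring_cons__release(&rx, rcvd);
 */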
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
};

int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd);
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

int xsk_load_xdp_program(int *xsk_map_fd, int *prog_fd);
int xsk_attach_xdp_program(int ifindex, int prog_fd, __u32 xdp_flags);

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u32 libbpf_flags;
	__u32 xdp_flags;
	__u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
		     void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
		       int ifindex, __u32 queue_id,
		       struct xsk_umem *umem,
		       struct xsk_ring_cons *rx,
		       struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *config);
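/* Usage sketch, for illustration only: bringing up one socket on a single
 * queue with the default configurations (config pointers set to NULL).
 * "ifindex" and "queue_id" are placeholders supplied by the application;
 * posix_memalign() needs <stdlib.h> and getpagesize() needs <unistd.h>.
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	__u64 nframes = 2 * XSK_RING_PROD__DEFAULT_NUM_DESCS;
 *	__u64 size = nframes * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *area;
 *	int err;
 *
 *	err = posix_memalign(&area, getpagesize(), size);
 *	if (err)
 *		return -err;
 *	err = xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
 *	if (err)
 *		return err;
 *	err = xsk_socket__create(&xsk, ifindex, queue_id, umem, &rx, &tx, NULL);
 *	if (err)
 *		return err;
 */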
/* Returns 0 for success and -EBUSY if the umem is still in use. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __XSK_H */