/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Properties of a registered umem that the ring/queue code needs:
 * @chunk_mask: mask applied to a 64-bit umem address to round it down to
 *		the start of its chunk (presumably chunks are power-of-two
 *		sized — confirm against the umem registration code)
 * @size:	total size in bytes of the registered umem region
 */
struct xdp_umem_props {
	u64 chunk_mask;
	u64 size;
};

/* A region of userspace memory registered for AF_XDP packet buffers.
 * Shared by one or more sockets; freed when @users drops to zero.
 */
struct xdp_umem {
	struct xsk_queue *fq;		/* fill queue ring */
	struct xsk_queue *cq;		/* completion queue ring */
	struct page **pgs;		/* pages backing the umem region (npgs entries) */
	struct xdp_umem_props props;
	u32 headroom;			/* per-chunk headroom reserved before packet data */
	u32 chunk_size_nohr;		/* chunk size with headroom subtracted (per its name) */
	struct user_struct *user;	/* NOTE(review): presumably for pinned-memory (RLIMIT_MEMLOCK) accounting — confirm */
	struct pid *pid;		/* pid of the registering process — purpose not visible here */
	unsigned long address;		/* userspace start address of the region */
	refcount_t users;		/* sockets/maps holding a reference to this umem */
	struct work_struct work;	/* NOTE(review): presumably deferred teardown — confirm against umem release path */
	u32 npgs;			/* number of entries in @pgs */
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;		/* Rx ring */
	struct net_device *dev;		/* device the socket is bound to */
	struct xdp_umem *umem;		/* umem this socket uses for buffers */
	struct list_head flush_node;	/* links socket into a per-CPU(?) flush list used with xsk_flush() — confirm */
	u16 queue_id;			/* device queue the socket is bound to */
	/* Tx state starts on its own cache line to avoid false sharing
	 * with the Rx-side fields above on SMP.
	 */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	u64 rx_dropped;			/* count of packets dropped on the Rx path (name-based) */
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
/* Receive an xdp_buff on @xs via the generic (skb/copy) XDP path. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Receive an xdp_buff on @xs from the driver (zero-copy/native) path;
 * completion is deferred until xsk_flush().
 */
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Make previously received buffers visible to userspace on the Rx ring. */
void xsk_flush(struct xdp_sock *xs);
/* True if @xs is fully set up for use from a BPF (XSKMAP) redirect. */
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
#else
/* Stubs for builds without AF_XDP socket support.
 *
 * NOTE(review): -ENOTSUPP is a kernel-internal (NFSv3) errno that has no
 * userspace definition; checkpatch recommends -EOPNOTSUPP where the value
 * can reach userspace. Left as-is since in-kernel callers may compare
 * against it — confirm before changing.
 */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */