/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	struct pid *pid;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
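/* Illustrative sketch only, not part of this header's API: one way a
 * zero-copy capable driver might refill its RX ring with the reuse-queue
 * aware helpers above. The function name alloc_rx_buf is hypothetical;
 * only xsk_umem_peek_addr_rq(), xsk_umem_discard_addr_rq(),
 * xsk_umem_fq_reuse() and xdp_umem_get_dma() come from this header.
 *
 *	static int alloc_rx_buf(struct xdp_umem *umem, dma_addr_t *dma)
 *	{
 *		u64 addr;
 *
 *		if (!xsk_umem_peek_addr_rq(umem, &addr))
 *			return -ENOMEM;         (FILL ring and reuse queue empty)
 *
 *		*dma = xdp_umem_get_dma(umem, addr);
 *		xsk_umem_discard_addr_rq(umem); (consume the peeked entry)
 *		return 0;
 *	}
 *
 * On ring teardown, addresses the driver still holds can be handed back
 * with xsk_umem_fq_reuse(umem, addr) so they are not lost to user space.
 */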
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */