/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}
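/* xsk_umem_peek_addr_rq() above and xsk_umem_discard_addr_rq() below
 * serve addresses from the reuse queue (rq->handles, in LIFO order)
 * first, and only fall back to the real FILL queue once the reuse
 * queue is empty. A peek must therefore be paired with the matching
 * _rq discard so that the entry is consumed from the correct queue.
 */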
static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */
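/* Illustrative usage sketch (not part of this header, guarded out with
 * "#if 0"): a minimal example of how a zero-copy driver's RX buffer
 * allocation path might combine the reuse-queue aware helpers above.
 * The function name example_alloc_rx_buffer is hypothetical and exists
 * only for this sketch; the xsk_umem_*_rq() and xdp_umem_get_*() calls
 * it makes are the ones declared in this file.
 */
#if 0
static bool example_alloc_rx_buffer(struct xdp_umem *umem,
				    dma_addr_t *dma, char **data)
{
	u64 addr;

	/* Peek the next buffer address; entries parked in the reuse
	 * queue are served before the real FILL queue is consulted.
	 */
	if (!xsk_umem_peek_addr_rq(umem, &addr))
		return false;	/* both queues are empty */

	/* Translate the umem-relative address into the DMA address
	 * and kernel virtual address of the chunk.
	 */
	*dma = xdp_umem_get_dma(umem, addr);
	*data = xdp_umem_get_data(umem, addr);

	/* Commit the peek: removes the entry from whichever queue
	 * (reuse queue or FILL queue) actually supplied it.
	 */
	xsk_umem_discard_addr_rq(umem);
	return true;
}
#endif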