/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

#define XDP_UMEM_SG_FLAG (1 << 1)

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	u8 tx_metadata_len;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	atomic_t count;
	struct xdp_sock __rcu *xsk_map[];
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	bool sg;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Record the number of tx descriptors sent by this xsk; when it
	 * exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs to be given to
	 * other xsks for sending tx descriptors, thereby preventing them from
	 * being starved.
	 */
	u32 tx_budget_spent;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	/* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current
	 * packet, the partially built skb is saved here so that packet building can resume in the
	 * next call of __xsk_generic_xmit().
	 */
	struct sk_buff *skb;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	u32 max_tx_budget;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

/*
 * AF_XDP TX metadata hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * void (*tmo_request_timestamp)(void *priv)
 *     Called when an AF_XDP frame requests an egress timestamp.
 *
 * u64 (*tmo_fill_timestamp)(void *priv)
 *     Called when an AF_XDP frame that requested an egress timestamp
 *     receives a completion. The hook needs to return the actual HW
 *     timestamp.
 *
 * void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv)
 *     Called when an AF_XDP frame requests HW checksum offload. csum_start
 *     indicates the position where checksumming should start.
 *     csum_offset indicates the position where the checksum should be stored.
 *
 * void (*tmo_request_launch_time)(u64 launch_time, void *priv)
 *     Called when an AF_XDP frame requests launch time HW offload support.
 *     launch_time indicates the PTP time at which the device can schedule
 *     the packet for transmission.
 */
struct xsk_tx_metadata_ops {
	void (*tmo_request_timestamp)(void *priv);
	u64 (*tmo_fill_timestamp)(void *priv);
	void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
	void (*tmo_request_launch_time)(u64 launch_time, void *priv);
};
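
/* Illustrative sketch, not part of this header: how a driver might fill in
 * struct xsk_tx_metadata_ops. All hypo_* names, the descriptor layout and
 * the flag values below are hypothetical and only show the expected shape of
 * the callbacks; @priv is whatever pointer the driver later hands to
 * xsk_tx_metadata_request() and xsk_tx_metadata_complete().
 *
 *	struct hypo_tx_desc {
 *		u32 flags;
 *		u16 csum_start;
 *		u16 csum_offset;
 *		u64 launch_time;
 *	};
 *
 *	static void hypo_request_timestamp(void *priv)
 *	{
 *		struct hypo_tx_desc *desc = priv;
 *
 *		desc->flags |= HYPO_DESC_REPORT_TS;
 *	}
 *
 *	static u64 hypo_fill_timestamp(void *priv)
 *	{
 *		struct hypo_tx_desc *desc = priv;
 *
 *		return hypo_read_hw_tx_timestamp(desc);
 *	}
 *
 *	static void hypo_request_checksum(u16 csum_start, u16 csum_offset,
 *					  void *priv)
 *	{
 *		struct hypo_tx_desc *desc = priv;
 *
 *		desc->flags |= HYPO_DESC_CSUM;
 *		desc->csum_start = csum_start;
 *		desc->csum_offset = csum_offset;
 *	}
 *
 *	static const struct xsk_tx_metadata_ops hypo_xsk_tmo = {
 *		.tmo_request_timestamp	= hypo_request_timestamp,
 *		.tmo_fill_timestamp	= hypo_fill_timestamp,
 *		.tmo_request_checksum	= hypo_request_checksum,
 *	};
 *
 * A driver typically publishes such an ops table through the
 * xsk_tx_metadata_ops pointer of its struct net_device so that the helpers
 * below can dispatch into it.
 */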

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(struct list_head *flush_list);

/**
 * xsk_tx_metadata_to_compl - Save enough relevant metadata information
 * to perform tx completion in the future.
 * @meta: pointer to AF_XDP metadata area
 * @compl: pointer to output struct xsk_tx_metadata_compl
 *
 * This function should be called by the networking device when
 * it prepares an AF_XDP egress packet. The value of @compl should be stored
 * and passed to xsk_tx_metadata_complete upon TX completion.
 */
static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
					    struct xsk_tx_metadata_compl *compl)
{
	if (!meta)
		return;

	if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
		compl->tx_timestamp = &meta->completion.tx_timestamp;
	else
		compl->tx_timestamp = NULL;
}

/**
 * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
 * and call the appropriate xsk_tx_metadata_ops operation.
 * @meta: pointer to AF_XDP metadata area
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-private area
 *
 * This function should be called by the networking device when
 * it prepares an AF_XDP egress packet.
 */
static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
					   const struct xsk_tx_metadata_ops *ops,
					   void *priv)
{
	if (!meta)
		return;

	if (ops->tmo_request_launch_time)
		if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
			ops->tmo_request_launch_time(meta->request.launch_time,
						     priv);

	if (ops->tmo_request_timestamp)
		if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
			ops->tmo_request_timestamp(priv);

	if (ops->tmo_request_checksum)
		if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM)
			ops->tmo_request_checksum(meta->request.csum_start,
						  meta->request.csum_offset, priv);
}

/**
 * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
 * and call the appropriate xsk_tx_metadata_ops operation.
 * @compl: pointer to completion metadata produced by xsk_tx_metadata_to_compl
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-private area
 *
 * This function should be called by the networking device upon
 * AF_XDP egress completion.
 */
static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
					    const struct xsk_tx_metadata_ops *ops,
					    void *priv)
{
	if (!compl)
		return;
	if (!compl->tx_timestamp)
		return;

	*compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}
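
/* Illustrative call flow, not part of this header: how a driver's zero-copy
 * TX path might use the three helpers above. hypo_xmit_xsk_desc(),
 * hypo_clean_xsk_desc(), struct hypo_tx_buf and hypo_xsk_tmo are assumptions
 * made for the sketch; the actual hook-up points are driver specific. The
 * pattern is: xsk_tx_metadata_request() and xsk_tx_metadata_to_compl() when
 * the descriptor is submitted, xsk_tx_metadata_complete() once the HW
 * reports it done.
 *
 *	struct hypo_tx_buf {
 *		struct xsk_tx_metadata_compl compl;
 *	};
 *
 *	static void hypo_xmit_xsk_desc(struct hypo_tx_desc *desc,
 *				       struct xsk_tx_metadata *meta,
 *				       struct hypo_tx_buf *buf)
 *	{
 *		xsk_tx_metadata_request(meta, &hypo_xsk_tmo, desc);
 *		xsk_tx_metadata_to_compl(meta, &buf->compl);
 *	}
 *
 *	static void hypo_clean_xsk_desc(struct hypo_tx_desc *desc,
 *					struct hypo_tx_buf *buf)
 *	{
 *		xsk_tx_metadata_complete(&buf->compl, &hypo_xsk_tmo, desc);
 *	}
 *
 * @meta would typically be looked up from the frame's umem address (e.g. via
 * xsk_buff_get_metadata() in <net/xdp_sock_drv.h>) before submission.
 */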

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct list_head *flush_list)
{
}

static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
					    struct xsk_tx_metadata_compl *compl)
{
}

static inline void xsk_tx_metadata_request(struct xsk_tx_metadata *meta,
					   const struct xsk_tx_metadata_ops *ops,
					   void *priv)
{
}

static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
					    const struct xsk_tx_metadata_ops *ops,
					    void *priv)
{
}

#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */