/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
};

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
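
/* Illustrative sketch, not part of the kernel API: how the flag bits defined
 * above are used. Every entry of pool->dma_pages[] is a page-aligned DMA
 * address, so its low 12 bits are free; bit 0 (XSK_NEXT_PG_CONTIG_MASK) marks
 * that the next umem page is physically contiguous with this one. The helper
 * name below is hypothetical.
 */
static inline bool xp_example_next_pg_contig(struct xsk_buff_pool *pool, u64 addr)
{
	dma_addr_t entry = pool->dma_pages[addr >> PAGE_SHIFT];

	/* The page's real DMA address, if needed, is recovered by masking the
	 * flag off: (entry & ~XSK_NEXT_PG_CONTIG_MASK).
	 */
	return !!(entry & XSK_NEXT_PG_CONTIG_MASK);
}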

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			(addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
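
/* Illustrative sketch, not part of the kernel API: the intended RX buffer
 * lifecycle built from the helpers above. The function below is hypothetical;
 * real zero-copy drivers go through the xsk_buff_* wrappers in
 * include/net/xdp_sock_drv.h rather than calling xp_* directly.
 */
static inline void xp_example_rx_one(struct xsk_buff_pool *pool)
{
	struct xdp_buff *xdp = xp_alloc(pool);	/* buffer backed by a fill ring entry */
	struct xdp_buff_xsk *xskb;

	if (!xdp)
		return;
	xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	/* The device DMAs the frame to xp_get_dma(xskb). Once the frame is
	 * complete, make it visible to the CPU before touching the data:
	 */
	xp_dma_sync_for_cpu(xskb);

	/* ... run the XDP program / hand the descriptor to the RX ring ... */

	/* Drop path: return the buffer to the pool's free list. */
	xp_free(xskb);
}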

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

#endif /* XSK_BUFF_POOL_H_ */
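
/* Worked example (illustrative, not taken from the kernel sources) of the
 * handle layout xp_get_handle() produces, assuming the uapi values from
 * <linux/if_xdp.h> (XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48):
 *
 *   orig_addr = 0x12000, pool->headroom = 0, and the packet data starts
 *   0x100 bytes past xdp.data_hard_start, so offset = 0x100.
 *
 *   aligned pool:   handle = 0x12000 + 0x100         = 0x12100
 *   unaligned pool: handle = 0x12000 + (0x100 << 48) = 0x0100000000012000
 *
 * In the unaligned case, xp_unaligned_extract_addr() recovers 0x12000,
 * xp_unaligned_extract_offset() recovers 0x100, and
 * xp_unaligned_add_offset_to_addr() yields 0x12100, the start of the frame.
 */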