/* SPDX-License-Identifier: GPL-2.0
 *
 *	Network memory
 *
 *	Author:	Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/net_debug.h>

/* These fields in struct page are used by the page_pool and net stack:
 *
 *	struct {
 *		unsigned long pp_magic;
 *		struct page_pool *pp;
 *		unsigned long _pp_mapping_pad;
 *		unsigned long dma_addr;
 *		atomic_long_t pp_ref_count;
 *	};
 *
 * We mirror the page_pool fields here so the page_pool can access these
 * fields without worrying whether the underlying fields belong to a
 * page or netmem_desc.
 *
 * CAUTION: Do not update the fields in netmem_desc without also
 * updating the anonymous aliasing union in struct net_iov.
 */
struct netmem_desc {
	unsigned long _flags;
	unsigned long pp_magic;
	struct page_pool *pp;
	unsigned long _pp_mapping_pad;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

/* Build-time guarantee that each netmem_desc field sits at the same
 * offset as the struct page field it aliases.
 */
#define NETMEM_DESC_ASSERT_OFFSET(pg, desc)        \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct netmem_desc, desc))
NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
NETMEM_DESC_ASSERT_OFFSET(pp, pp);
NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NETMEM_DESC_ASSERT_OFFSET

/*
 * Since struct netmem_desc uses the space in struct page, the size
 * should be checked, until struct netmem_desc has its own instance from
 * slab, to avoid conflicting with other members within struct page.
 */
static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL

enum net_iov_type {
	NET_IOV_DMABUF,
	NET_IOV_IOURING,
};

/* A memory descriptor representing abstract networking I/O vectors,
 * generally for non-pages memory that doesn't have its corresponding
 * struct page and needs to be explicitly allocated through slab.
 *
 * net_iovs are allocated and used by networking code, and the size of
 * the chunk is PAGE_SIZE.
 *
 * This memory can be any form of non-struct paged memory. Examples
 * include imported dmabuf memory and imported io_uring memory. See
 * net_iov_type for all the supported types.
 *
 * @pp_magic: pp field, similar to the one in struct page/struct
 *	      netmem_desc.
 * @pp: the pp this net_iov belongs to, if any.
 * @dma_addr: the dma addrs of the net_iov. Needed for the network
 *	      card to send/receive this net_iov.
 * @pp_ref_count: the pp ref count of this net_iov, exactly the same
 *		  usage as struct page/struct netmem_desc.
 * @owner: the net_iov_area this net_iov belongs to, if any.
 * @type: the type of the memory. Different types of net_iovs are
 *	  supported.
 */
struct net_iov {
	struct netmem_desc desc;
	unsigned int page_type;
	enum net_iov_type type;
	struct net_iov_area *owner;
};

/* Make sure 'the offset of page_type in struct page == the offset of
 * page_type in struct net_iov'.
 */
#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(page_type, page_type);
#undef NET_IOV_ASSERT_OFFSET

struct net_iov_area {
	/* Array of net_iovs for this area. */
	struct net_iov *niovs;
	size_t num_niovs;

	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;
};

/* Return the net_iov_area that owns @niov, if any. */
static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

/* Return the index of @niov within its owning area's niovs array. */
static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref can be a struct page* or a struct net_iov* underneath.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;

/* True when the LSB tag marks @netmem as a net_iov rather than a page. */
static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}

/**
 * __netmem_to_page - unsafely get pointer to the &page backing @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB, no WARN). When @netmem points to IOV,
 * provokes undefined behaviour.
 *
 * Return: pointer to the &page (garbage if @netmem is not page-backed).
 */
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
	return (__force struct page *)netmem;
}

/* Checked conversion: returns NULL (and warns once) when @netmem is a
 * net_iov instead of a page.
 */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return __netmem_to_page(netmem);
}

/* Checked conversion: strips the NET_IOV tag bit and returns the net_iov
 * pointer; warns (on debug builds) and returns NULL when @netmem is
 * page-backed.
 */
static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

/* Tag @niov's address with NET_IOV in the LSB to form a netmem_ref. */
static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

/* Convert a page pointer to a netmem_ref (pages carry a clear LSB, so no
 * tag is added); _Generic preserves constness of the argument.
 */
#define page_to_netmem(p) (_Generic((p),				\
	const struct page * :	(__force const netmem_ref)(p),		\
	struct page * :		(__force netmem_ref)(p)))

/**
 * virt_to_netmem - convert virtual memory pointer to a netmem reference
 * @data: host memory pointer to convert
 *
 * Return: netmem reference to the &page backing this virtual address.
 */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of net_iov is always 1. On net_iov, we only
	 * support pp refcounting which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

/* Return the pfn backing @netmem, or 0 when it is not page-backed. */
static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

/* XXX: How to extract netmem_desc from page must be changed, once
 * netmem_desc no longer overlays on page and will be allocated through
 * slab.
 */
#define __pp_page_to_nmdesc(p) (_Generic((p),				\
	const struct page * :	(const struct netmem_desc *)(p),	\
	struct page * :		(struct netmem_desc *)(p)))

/* CAUTION: Check if the page is a pp page before calling this helper or
 * know it's a pp page.
 */
#define pp_page_to_nmdesc(p)						\
({									\
	DEBUG_NET_WARN_ON_ONCE(!PageNetpp(p));				\
	__pp_page_to_nmdesc(p);						\
})

/**
 * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
 * @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version that can be used only when @netmem is always backed by
 * system memory, performs faster and generates smaller object code (no
 * check for the LSB, no WARN). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
 * by system memory).
 */
static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
{
	return (__force struct netmem_desc *)netmem;
}

/* netmem_to_nmdesc - convert netmem_ref to struct netmem_desc * for
 * access to common fields.
 * @netmem: netmem reference to get netmem_desc.
 *
 * All the sub types of netmem_ref (netmem_desc, net_iov) have the same
 * pp, pp_magic, dma_addr, and pp_ref_count fields via netmem_desc.
 *
 * Return: the pointer to struct netmem_desc * regardless of its
 * underlying type.
 */
static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
{
	void *p = (void *)((__force unsigned long)netmem & ~NET_IOV);

	if (netmem_is_net_iov(netmem))
		return &((struct net_iov *)p)->desc;

	return __pp_page_to_nmdesc((struct page *)p);
}

/**
 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
 * @netmem: netmem reference to get the pointer from
 *
 * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (avoids clearing the LSB). When @netmem points to IOV,
 * provokes invalid memory access.
 *
 * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
 */
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
	return __netmem_to_nmdesc(netmem)->pp;
}

/* Safe accessor for the pp field, valid for both pages and net_iovs. */
static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->pp;
}

/* Safe accessor for the pp_ref_count field, valid for both backing types. */
static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &netmem_to_nmdesc(netmem)->pp_ref_count;
}

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* niov are never compounded */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}

/**
 * __netmem_address - unsafely get pointer to the memory backing @netmem
 * @netmem: netmem reference to get the pointer for
 *
 * Unsafe version of netmem_address(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the memory (garbage if @netmem is not page-backed).
 */
static inline void *__netmem_address(netmem_ref netmem)
{
	return page_address(__netmem_to_page(netmem));
}

/* Safe variant of __netmem_address(): returns NULL for net_iov. */
static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return __netmem_address(netmem);
}

/**
 * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
 * @netmem: netmem reference to check
 *
 * Return: true if @netmem is page-backed and the page was allocated under
 * memory pressure, false otherwise.
 */
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return false;

	return page_is_pfmemalloc(netmem_to_page(netmem));
}

/* Raw dma_addr field, shared by both backing types via netmem_desc. */
static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->dma_addr;
}

#if defined(CONFIG_NET_DEVMEM)
/* True when @niov is dmabuf-backed devmem. */
static inline bool net_is_devmem_iov(const struct net_iov *niov)
{
	return niov->type == NET_IOV_DMABUF;
}
#else
/* Without CONFIG_NET_DEVMEM no net_iov can be devmem. */
static inline bool net_is_devmem_iov(const struct net_iov *niov)
{
	return false;
}
#endif

/* Out-of-line net_iov refcounting helpers. */
void __get_netmem(netmem_ref netmem);
void __put_netmem(netmem_ref netmem);

/* Take a reference on @netmem, dispatching on its backing type. */
static __always_inline void get_netmem(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		__get_netmem(netmem);
	else
		get_page(netmem_to_page(netmem));
}

/* Drop a reference on @netmem, dispatching on its backing type. */
static __always_inline void put_netmem(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		__put_netmem(netmem);
	else
		put_page(netmem_to_page(netmem));
}

/* Record VAL as the unmap address only for page-backed netmem; net_iovs
 * store 0 so that netmem_dma_unmap_page_attrs() below treats them as a
 * no-op.
 */
#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL)	   \
	do {							   \
		if (!netmem_is_net_iov(NETMEM))			   \
			dma_unmap_addr_set(PTR, ADDR_NAME, VAL);   \
		else						   \
			dma_unmap_addr_set(PTR, ADDR_NAME, 0);	   \
	} while (0)

/* Unmap @addr unless it is 0 — the sentinel stored for net_iovs by
 * netmem_dma_unmap_addr_set() above.
 */
static inline void netmem_dma_unmap_page_attrs(struct device *dev,
					       dma_addr_t addr, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
{
	if (!addr)
		return;

	dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

#endif /* _NET_NETMEM_H */