/* SPDX-License-Identifier: GPL-2.0
 *
 *	Network memory
 *
 *	Author:	Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/net_debug.h>

/* These fields in struct page are used by the page_pool and net stack:
 *
 *        struct {
 *                unsigned long pp_magic;
 *                struct page_pool *pp;
 *                unsigned long _pp_mapping_pad;
 *                unsigned long dma_addr;
 *                atomic_long_t pp_ref_count;
 *        };
 *
 * We mirror the page_pool fields here so the page_pool can access these
 * fields without worrying whether the underlying fields belong to a
 * page or netmem_desc.
 *
 * CAUTION: Do not update the fields in netmem_desc without also
 * updating the anonymous aliasing union in struct net_iov.
 */
struct netmem_desc {
	unsigned long _flags;
	unsigned long pp_magic;
	struct page_pool *pp;
	unsigned long _pp_mapping_pad;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

#define NETMEM_DESC_ASSERT_OFFSET(pg, desc)        \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct netmem_desc, desc))
NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
NETMEM_DESC_ASSERT_OFFSET(pp, pp);
NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NETMEM_DESC_ASSERT_OFFSET

/*
 * Since struct netmem_desc lives in the space of struct page, its size
 * must be checked so that it does not clobber other members of struct
 * page, until struct netmem_desc gets its own instance from slab.
 */
static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL
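
/* Example: how the LSB tag works (illustrative sketch, not part of this
 * API). struct page and struct net_iov pointers are at least
 * word-aligned, so bit 0 of the pointer value is always zero and is
 * free to carry the type tag:
 *
 *	unsigned long v = (unsigned long)niov | NET_IOV;
 *
 * Testing bit 0 (v & NET_IOV) then tells page and net_iov apart, and
 * masking it off (v & ~NET_IOV) recovers the original pointer. The
 * helpers further down (netmem_is_net_iov(), net_iov_to_netmem(),
 * netmem_to_net_iov()) wrap exactly this arithmetic.
 */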

enum net_iov_type {
	NET_IOV_DMABUF,
	NET_IOV_IOURING,
};

/* A memory descriptor representing abstract networking I/O vectors,
 * generally for memory that doesn't have a corresponding struct page
 * and needs to be explicitly allocated through slab.
 *
 * net_iovs are allocated and used by networking code, and the size of
 * the chunk is PAGE_SIZE.
 *
 * This memory can be any form of memory not backed by struct page.
 * Examples include imported dmabuf memory and imported io_uring memory.
 * See net_iov_type for all the supported types.
 *
 * @pp_magic:	pp field, similar to the one in struct page/struct
 *		netmem_desc.
 * @pp:		the pp this net_iov belongs to, if any.
 * @dma_addr:	the dma address of the net_iov. Needed for the network
 *		card to send/receive this net_iov.
 * @pp_ref_count: the pp ref count of this net_iov, exactly the same
 *		usage as struct page/struct netmem_desc.
 * @owner:	the net_iov_area this net_iov belongs to, if any.
 * @type:	the type of the memory.  Different types of net_iovs are
 *		supported.
 */
struct net_iov {
	struct netmem_desc desc;
	struct net_iov_area *owner;
	enum net_iov_type type;
};

struct net_iov_area {
	/* Array of net_iovs for this area. */
	struct net_iov *niovs;
	size_t num_niovs;

	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;
};

static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}
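
/* Example: locating a net_iov within its backing area (illustrative
 * sketch; "area" is a hypothetical, already populated net_iov_area).
 * Since an area holds a flat array of net_iovs and each net_iov covers
 * a PAGE_SIZE chunk, the index maps a net_iov back to its chunk:
 *
 *	unsigned int idx = net_iov_idx(niov);
 *	unsigned long off = area->base_virtual + idx * PAGE_SIZE;
 *
 * where off would be the chunk's offset into the underlying dma-buf.
 */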

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref can be a struct page* or a struct net_iov* underneath.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}

/**
 * __netmem_to_page - unsafely get pointer to the &page backing @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB, no WARN). When @netmem points to IOV,
 * provokes undefined behaviour.
 *
 * Return: pointer to the &page (garbage if @netmem is not page-backed).
 */
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
	return (__force struct page *)netmem;
}

static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return __netmem_to_page(netmem);
}
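
/* Example: choosing the safe vs. the unsafe conversion (illustrative
 * sketch; hdr_netmem is a hypothetical header-buffer reference). A
 * driver that fills its header buffers exclusively from page-backed
 * memory may use the unchecked variant on that path:
 *
 *	struct page *hdr_page = __netmem_to_page(hdr_netmem);
 *
 * whereas a generic path that may also see net_iov-backed payload
 * should use netmem_to_page() and handle the NULL return:
 *
 *	struct page *page = netmem_to_page(netmem);
 *	if (!page)
 *		return -EINVAL;
 */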

static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

#define page_to_netmem(p)	(_Generic((p),			\
	const struct page * :	(__force const netmem_ref)(p),	\
	struct page * :		(__force netmem_ref)(p)))

/**
 * virt_to_netmem - convert virtual memory pointer to a netmem reference
 * @data: host memory pointer to convert
 *
 * Return: netmem reference to the &page backing this virtual address.
 */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}
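
/* Example: wrapping a linearly-mapped buffer as netmem (illustrative
 * sketch). Given a buffer that came from the page allocator, the
 * kernel virtual address converts straight back to a page-backed
 * netmem_ref:
 *
 *	void *va = page_address(page);
 *	netmem_ref netmem = virt_to_netmem(va);
 *
 * Like virt_to_page(), this is only valid for directly-mapped memory,
 * not for vmalloc()ed addresses.
 */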

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of net_iov is always 1. On net_iov, we only
	 * support pp refcounting, which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

/* XXX: How netmem_desc is extracted from a page must be changed once
 * netmem_desc no longer overlays struct page and is allocated through
 * slab.
 */
#define __pp_page_to_nmdesc(p)	(_Generic((p),				\
	const struct page * :	(const struct netmem_desc *)(p),	\
	struct page * :		(struct netmem_desc *)(p)))

/* CAUTION: Only call this helper on a page that is known to be a pp
 * page; the DEBUG_NET_WARN_ON_ONCE() below only catches misuse on
 * CONFIG_DEBUG_NET builds.
 */
#define pp_page_to_nmdesc(p)						\
({									\
	DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p));		\
	__pp_page_to_nmdesc(p);						\
})

/**
 * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
 * @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version that can be used only when @netmem is always backed by
 * system memory, performs faster and generates smaller object code (no
 * check for the LSB, no WARN). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
 * by system memory).
 */
static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
{
	return (__force struct netmem_desc *)netmem;
}

/**
 * netmem_to_nmdesc - convert netmem_ref to struct netmem_desc * for
 * access to common fields.
 * @netmem: netmem reference to get netmem_desc.
 *
 * All the sub types of netmem_ref (netmem_desc, net_iov) have the same
 * pp, pp_magic, dma_addr, and pp_ref_count fields via netmem_desc.
 *
 * Return: pointer to the struct netmem_desc, regardless of the
 * underlying type.
 */
static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
{
	void *p = (void *)((__force unsigned long)netmem & ~NET_IOV);

	if (netmem_is_net_iov(netmem))
		return &((struct net_iov *)p)->desc;

	return __pp_page_to_nmdesc((struct page *)p);
}
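
/* Example: type-agnostic access to the shared pp fields (illustrative
 * sketch). Because both backings expose the mirrored netmem_desc
 * layout, a caller can read common state without branching on the
 * underlying type itself:
 *
 *	struct netmem_desc *desc = netmem_to_nmdesc(netmem);
 *	struct page_pool *pool = desc->pp;
 *	long refs = atomic_long_read(&desc->pp_ref_count);
 *
 * This is what the netmem_get_pp()/netmem_get_pp_ref_count_ref()
 * helpers below do.
 */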

/**
 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
 * @netmem: netmem reference to get the pointer from
 *
 * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (avoids clearing the LSB). When @netmem points to IOV,
 * provokes invalid memory access.
 *
 * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
 */
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
	return __netmem_to_nmdesc(netmem)->pp;
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &netmem_to_nmdesc(netmem)->pp_ref_count;
}

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* net_iovs are never compound */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}
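
/* Example: normalizing a fragment to its head netmem (illustrative
 * sketch). Payload netmem may point into a tail page of a compound
 * allocation; reference counting and pool bookkeeping operate on the
 * head, so callers normalize first:
 *
 *	netmem_ref head = netmem_compound_head(netmem);
 *
 * For net_iov-backed netmem this is a no-op, since net_iovs are never
 * compound.
 */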

/**
 * __netmem_address - unsafely get pointer to the memory backing @netmem
 * @netmem: netmem reference to get the pointer for
 *
 * Unsafe version of netmem_address(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the memory (garbage if @netmem is not page-backed).
 */
static inline void *__netmem_address(netmem_ref netmem)
{
	return page_address(__netmem_to_page(netmem));
}

static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return __netmem_address(netmem);
}
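
/* Example: CPU access to netmem contents (illustrative sketch). Since
 * net_iov-backed memory (e.g. device memory imported from a dmabuf)
 * may not be CPU-addressable, netmem_address() returns NULL for it and
 * callers must be prepared to bail out or fall back:
 *
 *	void *va = netmem_address(netmem);
 *	if (!va)
 *		return -EFAULT;
 *	memcpy(buf, va + offset, len);
 *
 * where buf, offset and len are hypothetical copy parameters.
 */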

/**
 * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
 * @netmem: netmem reference to check
 *
 * Return: true if @netmem is page-backed and the page was allocated under
 * memory pressure, false otherwise.
 */
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return false;

	return page_is_pfmemalloc(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->dma_addr;
}
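
/* Example: reading the recorded DMA address (illustrative sketch).
 * Both page- and net_iov-backed netmem carry dma_addr in the shared
 * netmem_desc, so the raw field can be read uniformly:
 *
 *	unsigned long dma = netmem_get_dma_addr(netmem);
 *
 * Note this returns the field as stored; drivers normally go through
 * the page_pool DMA helpers rather than interpreting it directly.
 */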

#if defined(CONFIG_NET_DEVMEM)
static inline bool net_is_devmem_iov(const struct net_iov *niov)
{
	return niov->type == NET_IOV_DMABUF;
}
#else
static inline bool net_is_devmem_iov(const struct net_iov *niov)
{
	return false;
}
#endif

void __get_netmem(netmem_ref netmem);
void __put_netmem(netmem_ref netmem);

static __always_inline void get_netmem(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		__get_netmem(netmem);
	else
		get_page(netmem_to_page(netmem));
}

static __always_inline void put_netmem(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		__put_netmem(netmem);
	else
		put_page(netmem_to_page(netmem));
}
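
/* Example: holding netmem across an asynchronous operation
 * (illustrative sketch). As with get_page()/put_page(), a reference
 * must be held while the memory is in flight and dropped on
 * completion:
 *
 *	get_netmem(netmem);
 *	... hand the buffer to the device or defer processing ...
 *	put_netmem(netmem);
 *
 * The helpers dispatch to page or net_iov refcounting as appropriate.
 */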

#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL)   \
	do {                                                     \
		if (!netmem_is_net_iov(NETMEM))                  \
			dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
		else                                             \
			dma_unmap_addr_set(PTR, ADDR_NAME, 0);   \
	} while (0)

static inline void netmem_dma_unmap_page_attrs(struct device *dev,
					       dma_addr_t addr, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
{
	/* net_iov dma addresses are recorded as 0 by
	 * netmem_dma_unmap_addr_set() above, so a zero addr means there
	 * is nothing for the driver to unmap.
	 */
	if (!addr)
		return;

	dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
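
/* Example: driver unmap path (illustrative sketch). A driver records
 * the mapping with netmem_dma_unmap_addr_set() at map time and later
 * tears it down unconditionally; net_iov-backed buffers recorded a 0
 * address and are skipped automatically:
 *
 *	netmem_dma_unmap_addr_set(netmem, &buf_info, dma, mapping);
 *	...
 *	netmem_dma_unmap_page_attrs(dev,
 *				    dma_unmap_addr(&buf_info, dma),
 *				    len, DMA_FROM_DEVICE, 0);
 *
 * where buf_info is a hypothetical per-buffer struct with a
 * DEFINE_DMA_UNMAP_ADDR(dma) member, and mapping/len are the results
 * of the original dma_map call.
 */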

#endif /* _NET_NETMEM_H */