xref: /linux/include/net/netmem.h (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
/* SPDX-License-Identifier: GPL-2.0
 *
 *	Network memory
 *
 *	Author:	Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <net/net_debug.h>

/* These fields in struct page are used by the page_pool and net stack:
 *
 *        struct {
 *                unsigned long pp_magic;
 *                struct page_pool *pp;
 *                unsigned long _pp_mapping_pad;
 *                unsigned long dma_addr;
 *                atomic_long_t pp_ref_count;
 *        };
 *
 * We mirror the page_pool fields here so the page_pool can access these
 * fields without worrying whether the underlying fields belong to a
 * page or netmem_desc.
 *
 * CAUTION: Do not update the fields in netmem_desc without also
 * updating the anonymous aliasing union in struct net_iov.
 */
struct netmem_desc {
	unsigned long _flags;
	unsigned long pp_magic;
	struct page_pool *pp;
	unsigned long _pp_mapping_pad;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

#define NETMEM_DESC_ASSERT_OFFSET(pg, desc)        \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct netmem_desc, desc))
NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
NETMEM_DESC_ASSERT_OFFSET(pp, pp);
NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NETMEM_DESC_ASSERT_OFFSET

/*
 * Since struct netmem_desc overlays space inside struct page, its size
 * must be checked so that it does not conflict with other members of
 * struct page, until struct netmem_desc gets its own instance allocated
 * from slab.
 */
static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
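
/*
 * Example (illustrative sketch, not part of this header): because the
 * offsets above are asserted to match, page_pool code can view a
 * page_pool-owned page through struct netmem_desc and touch only the
 * mirrored fields. The function below is hypothetical; real users go
 * through the helpers defined later in this file.
 *
 *	static inline struct page_pool *example_page_pp(struct page *page)
 *	{
 *		// Valid only for a page owned by a page_pool.
 *		struct netmem_desc *desc = (struct netmem_desc *)page;
 *
 *		return desc->pp;
 *	}
 */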

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/*  We overload the LSB of the struct page pointer to indicate whether it's
 *  a page or net_iov.
 */
#define NET_IOV 0x01UL

enum net_iov_type {
	NET_IOV_DMABUF,
	NET_IOV_IOURING,
};

/* A memory descriptor representing abstract networking I/O vectors,
 * generally for non-page memory that has no corresponding struct page
 * and needs to be explicitly allocated through slab.
 *
 * net_iovs are allocated and used by networking code, and each one
 * describes a PAGE_SIZE chunk.
 *
 * This memory can be any form of memory not backed by struct page.
 * Examples include imported dmabuf memory and imported io_uring memory.
 * See net_iov_type for all the supported types.
 *
 * @pp_magic:	pp field, similar to the one in struct page/struct
 *		netmem_desc.
 * @pp:		the pp this net_iov belongs to, if any.
 * @dma_addr:	the DMA address of the net_iov. Needed for the network
 *		card to send/receive this net_iov.
 * @pp_ref_count: the pp ref count of this net_iov, exactly the same
 *		usage as struct page/struct netmem_desc.
 * @owner:	the net_iov_area this net_iov belongs to, if any.
 * @type:	the type of the memory.  Different types of net_iovs are
 *		supported.
 */
struct net_iov {
	union {
		struct netmem_desc desc;

		/* XXX: The following part should be removed once all
		 * the references to them are converted so as to be
		 * accessed via netmem_desc e.g. niov->desc.pp instead
		 * of niov->pp.
		 */
		struct {
			unsigned long _flags;
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			atomic_long_t pp_ref_count;
		};
	};
	struct net_iov_area *owner;
	enum net_iov_type type;
};

struct net_iov_area {
	/* Array of net_iovs for this area. */
	struct net_iov *niovs;
	size_t num_niovs;

	/* Offset into the dma-buf where this chunk starts.  */
	unsigned long base_virtual;
};

/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
 * the page_pool can access these fields without worrying whether the
 * underlying fields are accessed via netmem_desc or directly via
 * net_iov, until all the references to them are converted so as to be
 * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
 *
 * The non-net stack fields of struct page are private to the mm stack
 * and must never be mirrored to net_iov.
 */
#define NET_IOV_ASSERT_OFFSET(desc, iov)                    \
	static_assert(offsetof(struct netmem_desc, desc) == \
		      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(_flags, _flags);
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET

static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}
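
/*
 * Example (illustrative sketch): a memory provider typically allocates
 * one net_iov_area covering its chunk of memory and points each net_iov
 * back at that area, so net_iov_idx() can recover the slot. The names
 * below are hypothetical and only show the intended relationship.
 *
 *	struct net_iov_area *area = ...;	// provider-owned
 *	struct net_iov *niov = &area->niovs[7];
 *
 *	niov->owner = area;
 *	// net_iov_idx(niov) now evaluates to 7, and the provider can
 *	// derive the chunk's offset as
 *	// area->base_virtual + net_iov_idx(niov) * PAGE_SIZE.
 */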

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref can be a struct page* or a struct net_iov* underneath.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}

/**
 * __netmem_to_page - unsafely get pointer to the &page backing @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB, no WARN). When @netmem points to IOV,
 * provokes undefined behaviour.
 *
 * Return: pointer to the &page (garbage if @netmem is not page-backed).
 */
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
	return (__force struct page *)netmem;
}

static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return __netmem_to_page(netmem);
}

static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

#define page_to_netmem(p)	(_Generic((p),			\
	const struct page * :	(__force const netmem_ref)(p),	\
	struct page * :		(__force netmem_ref)(p)))
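
/*
 * Example (illustrative sketch): the conversions above are lossless
 * round trips. A page converts to an untagged netmem_ref and back; a
 * net_iov converts to a netmem_ref with the NET_IOV bit set, which
 * netmem_is_net_iov() detects. The snippet only shows the tagging
 * scheme, assuming "page" and "niov" were obtained elsewhere.
 *
 *	netmem_ref a = page_to_netmem(page);	// LSB clear
 *	netmem_ref b = net_iov_to_netmem(niov);	// LSB set
 *
 *	netmem_is_net_iov(a);	// false, netmem_to_page(a) == page
 *	netmem_is_net_iov(b);	// true, netmem_to_net_iov(b) == niov
 */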

/**
 * virt_to_netmem - convert virtual memory pointer to a netmem reference
 * @data: host memory pointer to convert
 *
 * Return: netmem reference to the &page backing this virtual address.
 */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}
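
/*
 * Example (illustrative sketch): callers that build a frame from a
 * directly-mapped buffer, e.g. a page-backed header obtained via
 * page_address(), can convert it with virt_to_netmem(). As with
 * virt_to_page(), the pointer must be direct-mapped memory, not vmalloc.
 *
 *	void *hdr = page_address(page);		// page-backed buffer
 *	netmem_ref netmem = virt_to_netmem(hdr);
 */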

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of net_iov is always 1. On net_iov, we only
	 * support pp refcounting which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

/* XXX: How netmem_desc is extracted from a page must be changed once
 * netmem_desc no longer overlays struct page and is allocated through
 * slab.
 */
#define __pp_page_to_nmdesc(p)	(_Generic((p),				\
	const struct page * :	(const struct netmem_desc *)(p),	\
	struct page * :		(struct netmem_desc *)(p)))

/* CAUTION: Only call this helper on a page that has been checked, or is
 * otherwise known, to be a pp page.
 */
#define pp_page_to_nmdesc(p)						\
({									\
	DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p));		\
	__pp_page_to_nmdesc(p);						\
})

/**
 * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
 * @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version that can be used only when @netmem is always backed by
 * system memory, performs faster and generates smaller object code (no
 * check for the LSB, no WARN). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
 * by system memory).
 */
static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
{
	return (__force struct netmem_desc *)netmem;
}

/* netmem_to_nmdesc - convert netmem_ref to struct netmem_desc * for
 * access to common fields.
 * @netmem: netmem reference to get the netmem_desc from.
 *
 * All the sub types of netmem_ref (netmem_desc, net_iov) have the same
 * pp, pp_magic, dma_addr, and pp_ref_count fields via netmem_desc.
 *
 * Return: pointer to the struct netmem_desc, regardless of the
 * underlying type.
 */
static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
{
	void *p = (void *)((__force unsigned long)netmem & ~NET_IOV);

	if (netmem_is_net_iov(netmem))
		return &((struct net_iov *)p)->desc;

	return __pp_page_to_nmdesc((struct page *)p);
}
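
/*
 * Example (illustrative sketch): netmem_to_nmdesc() lets callers read
 * the mirrored fields without branching on the underlying type
 * themselves. The helpers right below (netmem_get_pp() and friends) are
 * thin wrappers around exactly this pattern.
 *
 *	struct netmem_desc *desc = netmem_to_nmdesc(netmem);
 *	struct page_pool *pp = desc->pp;
 *	unsigned long dma = desc->dma_addr;
 */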

/**
 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
 * @netmem: netmem reference to get the pointer from
 *
 * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (avoids clearing the LSB). When @netmem points to IOV,
 * provokes invalid memory access.
 *
 * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
 */
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
	return __netmem_to_nmdesc(netmem)->pp;
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &netmem_to_nmdesc(netmem)->pp_ref_count;
}

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* niov are never compounded */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}

/**
 * __netmem_address - unsafely get pointer to the memory backing @netmem
 * @netmem: netmem reference to get the pointer for
 *
 * Unsafe version of netmem_address(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the memory (garbage if @netmem is not page-backed).
 */
static inline void *__netmem_address(netmem_ref netmem)
{
	return page_address(__netmem_to_page(netmem));
}

static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return __netmem_address(netmem);
}
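
/*
 * Example (illustrative sketch): net_iov memory (e.g. device memory
 * imported via dmabuf) may not be CPU-addressable, so netmem_address()
 * returns NULL for it. Callers that want to touch the payload must
 * handle that case rather than assume a kernel mapping exists.
 *
 *	void *va = netmem_address(netmem);
 *
 *	if (!va)
 *		return -EFAULT;	// not page-backed, no CPU mapping here
 *	memcpy(buf, va + offset, len);
 */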

/**
 * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
 * @netmem: netmem reference to check
 *
 * Return: true if @netmem is page-backed and the page was allocated under
 * memory pressure, false otherwise.
 */
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return false;

	return page_is_pfmemalloc(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return netmem_to_nmdesc(netmem)->dma_addr;
}

void get_netmem(netmem_ref netmem);
void put_netmem(netmem_ref netmem);

#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL)   \
	do {                                                     \
		if (!netmem_is_net_iov(NETMEM))                  \
			dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
		else                                             \
			dma_unmap_addr_set(PTR, ADDR_NAME, 0);   \
	} while (0)

static inline void netmem_dma_unmap_page_attrs(struct device *dev,
					       dma_addr_t addr, size_t size,
					       enum dma_data_direction dir,
					       unsigned long attrs)
{
	if (!addr)
		return;

	dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
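
/*
 * Example (illustrative sketch): these two helpers pair up in a driver's
 * transmit path. net_iov buffers are managed by their memory provider,
 * so the driver records a DMA address of 0 for them and the unmap call
 * becomes a no-op at completion time. The names below are hypothetical
 * driver bookkeeping, not part of this header.
 *
 *	// when mapping a frag for TX:
 *	netmem_dma_unmap_addr_set(netmem, tx_buf, dma, mapping);
 *
 *	// at TX completion:
 *	netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(tx_buf, dma),
 *				    len, DMA_TO_DEVICE, 0);
 */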

#endif /* _NET_NETMEM_H */