/* SPDX-License-Identifier: GPL-2.0
 *
 * Network memory
 *
 * Author: Mina Almasry <almasrymina@google.com>
 */

#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H

#include <linux/mm.h>
#include <net/net_debug.h>

/* net_iov */

DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);

/* We overload the LSB of the struct page pointer to indicate whether it's
 * a page or net_iov.
 */
#define NET_IOV 0x01UL
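
/* Bit 0 is free to carry the type tag because both struct page and struct
 * net_iov are at least word-aligned, so the low bit of any valid pointer
 * to either is always zero.
 */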

struct net_iov {
	unsigned long __unused_padding;
	unsigned long pp_magic;
	struct page_pool *pp;
	struct net_iov_area *owner;
	unsigned long dma_addr;
	atomic_long_t pp_ref_count;
};

struct net_iov_area {
	/* Array of net_iovs for this area. */
	struct net_iov *niovs;
	size_t num_niovs;

	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;
};

/* These fields in struct page are used by the page_pool and net stack:
 *
 *	struct {
 *		unsigned long pp_magic;
 *		struct page_pool *pp;
 *		unsigned long _pp_mapping_pad;
 *		unsigned long dma_addr;
 *		atomic_long_t pp_ref_count;
 *	};
 *
 * We mirror the page_pool fields here so the page_pool can access these fields
 * without worrying whether the underlying fields belong to a page or net_iov.
 *
 * The non-net stack fields of struct page are private to the mm stack and must
 * never be mirrored to net_iov.
 */
#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
	static_assert(offsetof(struct page, pg) == \
		      offsetof(struct net_iov, iov))
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
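
/* For instance, NET_IOV_ASSERT_OFFSET(pp, pp) above expands to:
 *
 *	static_assert(offsetof(struct page, pp) ==
 *		      offsetof(struct net_iov, pp));
 *
 * so any divergence between the two layouts fails the build instead of
 * corrupting memory at runtime.
 */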

static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}
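
/* Illustrative sketch (not an API in this header): assuming each net_iov
 * covers one PAGE_SIZE chunk of its area, a net_iov's offset into the
 * backing dma-buf can be derived from its index and the area's base;
 * net_iov_offset() here is a hypothetical helper:
 *
 *	static inline unsigned long net_iov_offset(const struct net_iov *niov)
 *	{
 *		struct net_iov_area *owner = net_iov_owner(niov);
 *
 *		return owner->base_virtual +
 *		       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
 *	}
 */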

/* netmem */

/**
 * typedef netmem_ref - a nonexistent type marking a reference to generic
 * network memory.
 *
 * A netmem_ref is a reference either to a struct page (when the LSB is
 * clear) or to a struct net_iov (when the LSB is set). This abstraction
 * lets callers handle both memory types without caring which one backs a
 * given buffer.
 *
 * Use the supplied helpers to obtain the underlying memory pointer and fields.
 */
typedef unsigned long __bitwise netmem_ref;

static inline bool netmem_is_net_iov(const netmem_ref netmem)
{
	return (__force unsigned long)netmem & NET_IOV;
}
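
/* Illustrative sketch: dispatching on the backing type. handle_page() and
 * handle_net_iov() stand in for hypothetical caller-side handlers:
 *
 *	if (netmem_is_net_iov(netmem))
 *		handle_net_iov(netmem_to_net_iov(netmem));
 *	else
 *		handle_page(netmem_to_page(netmem));
 */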

/**
 * __netmem_to_page - unsafely get pointer to the &page backing @netmem
 * @netmem: netmem reference to convert
 *
 * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB, no WARN). When @netmem points to IOV,
 * provokes undefined behaviour.
 *
 * Return: pointer to the &page (garbage if @netmem is not page-backed).
 */
static inline struct page *__netmem_to_page(netmem_ref netmem)
{
	return (__force struct page *)netmem;
}
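
/* Illustrative sketch: a driver that fills its header buffers from a plain
 * page_pool (no memory provider) knows those netmems are page-backed, so
 * the unchecked conversion is safe on that path. hdr_netmem is a
 * hypothetical reference known to be page-backed:
 *
 *	struct page *hdr_page = __netmem_to_page(hdr_netmem);
 *
 *	prefetch(page_address(hdr_page));
 */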

/* This conversion fails (returns NULL) if the netmem_ref is not struct page
 * backed.
 */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return NULL;

	return __netmem_to_page(netmem);
}

static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return (struct net_iov *)((__force unsigned long)netmem &
					  ~NET_IOV);

	DEBUG_NET_WARN_ON_ONCE(true);
	return NULL;
}

static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
{
	return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}

static inline netmem_ref page_to_netmem(struct page *page)
{
	return (__force netmem_ref)page;
}
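
/* Illustrative sketch: both conversions round-trip, since tagging only
 * sets bit 0 and untagging clears it:
 *
 *	netmem_ref nref = net_iov_to_netmem(niov);
 *	WARN_ON_ONCE(netmem_to_net_iov(nref) != niov);
 *
 *	netmem_ref pref = page_to_netmem(page);
 *	WARN_ON_ONCE(netmem_to_page(pref) != page);
 */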

/**
 * virt_to_netmem - convert virtual memory pointer to a netmem reference
 * @data: host memory pointer to convert
 *
 * Return: netmem reference to the &page backing this virtual address.
 */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}
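
/* Illustrative sketch: like virt_to_page(), this is only valid for
 * directly mapped addresses, not vmalloc() or highmem mappings:
 *
 *	void *va = page_address(page);
 *	netmem_ref nref = virt_to_netmem(va);
 *
 * which yields the same reference as page_to_netmem(page).
 */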

static inline int netmem_ref_count(netmem_ref netmem)
{
	/* The non-pp refcount of net_iov is always 1. On net_iov, we only
	 * support pp refcounting which uses the pp_ref_count field.
	 */
	if (netmem_is_net_iov(netmem))
		return 1;

	return page_ref_count(netmem_to_page(netmem));
}

static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return 0;

	return page_to_pfn(netmem_to_page(netmem));
}

static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
	return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}
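
/* Note: __netmem_clear_lsb() deliberately casts page-backed netmems to
 * struct net_iov * as well. Dereferencing the result is safe for the
 * mirrored fields (pp_magic, pp, dma_addr, pp_ref_count) because the
 * static_asserts above pin them to identical offsets in both structs.
 */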

/**
 * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
 * @netmem: netmem reference to get the pointer from
 *
 * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (avoids clearing the LSB). When @netmem points to IOV,
 * provokes invalid memory access.
 *
 * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
 */
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
	return __netmem_to_page(netmem)->pp;
}

static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->pp;
}

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	return &__netmem_clear_lsb(netmem)->pp_ref_count;
}
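
/* Illustrative sketch: the returned counter works with the regular
 * atomic_long_*() helpers for either backing type, e.g. to read the
 * current pp refcount:
 *
 *	long refs = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));
 */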

static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
{
	/* NUMA node preference only makes sense if we're allocating
	 * system memory. Memory providers (which give us net_iovs)
	 * choose for us.
	 */
	if (netmem_is_net_iov(netmem))
		return true;

	return page_to_nid(netmem_to_page(netmem)) == pref_nid;
}

static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	/* net_iovs are never compound pages, so return them as-is. */
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}

/**
 * __netmem_address - unsafely get pointer to the memory backing @netmem
 * @netmem: netmem reference to get the pointer for
 *
 * Unsafe version of netmem_address(). When @netmem is always page-backed,
 * e.g. when it's a header buffer, performs faster and generates smaller
 * object code (no check for the LSB). When @netmem points to IOV, provokes
 * undefined behaviour.
 *
 * Return: pointer to the memory (garbage if @netmem is not page-backed).
 */
static inline void *__netmem_address(netmem_ref netmem)
{
	return page_address(__netmem_to_page(netmem));
}

static inline void *netmem_address(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return NULL;

	return __netmem_address(netmem);
}
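
/* Illustrative sketch: netmem backed by a net_iov (e.g. device memory) has
 * no kernel virtual address, so callers must handle the NULL case. dst,
 * offset and len are hypothetical:
 *
 *	void *va = netmem_address(netmem);
 *
 *	if (!va)
 *		return -EFAULT;
 *	memcpy(dst, va + offset, len);
 */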

/**
 * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
 * @netmem: netmem reference to check
 *
 * Return: true if @netmem is page-backed and the page was allocated under
 * memory pressure, false otherwise.
 */
static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return false;

	return page_is_pfmemalloc(netmem_to_page(netmem));
}

static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
{
	return __netmem_clear_lsb(netmem)->dma_addr;
}
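
/* Note: this returns the raw dma_addr field shared by both backing types;
 * page_pool helpers (e.g. page_pool_get_dma_addr_netmem() in
 * <net/page_pool/helpers.h>) build on it to produce usable DMA addresses.
 */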

#endif /* _NET_NETMEM_H */