xref: /linux/net/core/devmem.h (revision 0d161eb27d69ceb371b3409184a1bb69d3c83de3)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs increments this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 *
	 * An illustrative get/put usage sketch follows this struct definition.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the bind.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all currently active
	 * bindings.
	 */
	u32 id;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array makes it convenient to map virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};
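
/* Illustrative sketch only (not kernel API): the get/put pattern described in
 * the comment above. A hypothetical consumer that stashes a binding pointer
 * would pin the binding first and drop its reference when done, using the
 * helpers declared below under CONFIG_NET_DEVMEM. The control flow and error
 * code here are assumptions made for the example:
 *
 *	// the binding may already be tearing down; -ENODEV is just an example
 *	if (!net_devmem_dmabuf_binding_get(binding))
 *		return -ENODEV;
 *
 *	... use net_iovs backed by this binding ...
 *
 *	net_devmem_dmabuf_binding_put(binding);
 */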

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk. An illustrative address-computation sketch
 * follows the struct below.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};
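
/* Illustrative sketch only (not a declaration in this header): given a
 * net_iov, its chunk owner can yield the DMA address in the same way
 * net_iov_virtual_addr() below yields the virtual address, e.g.:
 *
 *	struct dmabuf_genpool_chunk_owner *owner;
 *
 *	owner = net_devmem_iov_to_chunk_owner(niov);
 *	dma_addr = owner->base_dma_addr +
 *		   ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 */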

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

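/* The final put defers the actual teardown (__net_devmem_dmabuf_binding_free()
 * via unbind_w) to a workqueue instead of freeing inline, presumably because
 * the last reference may be dropped from a context that cannot sleep, while
 * unmapping the underlying dmabuf can.
 */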
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);
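
/* Illustrative sketch only (simplified, not a declaration): how a TX path
 * could resolve a user address inside a bound dmabuf to net_iovs once the
 * socket carries a dmabuf id, using the helpers declared above. Variable
 * names and error handling are assumptions made for the example:
 *
 *	binding = net_devmem_get_binding(sk, dmabuf_id);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	niov = net_devmem_get_niov_at(binding, addr, &off, &size);
 *	if (!niov)
 *		return -EINVAL;
 */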

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */