/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 *
	 * See the lifecycle sketch below this struct.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array is convenient to map the virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};

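/* Typical binding lifecycle (an illustrative sketch; the exact call sequence
 * lives in net/core/devmem.c and the netdev netlink handlers, and the
 * ordering below is only one possible interleaving):
 *
 *	binding = net_devmem_bind_dmabuf(...);	// user's netlink ref
 *	net_devmem_dmabuf_binding_get(binding);	// e.g. a page_pool attaches
 *	...
 *	net_devmem_dmabuf_binding_put(binding);	// page_pool done with its
 *						// net_iovs
 *	net_devmem_unbind_dmabuf(binding);	// user drops the netlink ref;
 *						// the final put schedules
 *						// unbind_w
 */
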
#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

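/* Illustrative sketch (a hypothetical helper, not part of this header's API):
 * a net_iov's DMA address follows from its chunk owner's base_dma_addr plus
 * the net_iov's page-sized index within the area, mirroring
 * net_iov_virtual_addr() below.
 */
static inline dma_addr_t
net_devmem_example_iov_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner =
		net_devmem_iov_to_chunk_owner(niov);

	/* The index within the area scales by PAGE_SIZE, exactly as for
	 * virtual addresses.
	 */
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}
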
static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

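/* Worked example (illustrative): with 4 KiB pages, the net_iov at index 3 of
 * an area whose base_virtual is 0 sits at virtual address 3 << PAGE_SHIFT ==
 * 0x3000. The TX path can invert the mapping with
 * (addr - base_virtual) >> PAGE_SHIFT to index into tx_vec.
 */
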
static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

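	/* Tearing down the binding unmaps the dmabuf, which may sleep, so
	 * the final put defers the teardown to a workqueue rather than
	 * freeing inline.
	 */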
	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */