/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique across all currently active
	 * bindings.
	 */
	u32 id;
};
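
/*
 * Illustrative ref lifecycle (a sketch, not part of the API): the netlink
 * bind request creates the binding with the user's initial ref; page pools
 * and allocated net_iovs take and drop further refs through
 * net_devmem_dmabuf_binding_get()/net_devmem_dmabuf_binding_put() below,
 * e.g.:
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	... use the binding ...
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * Once the user drops the bind via netlink and every other ref is gone,
 * the final put calls __net_devmem_dmabuf_binding_free() to unmap and
 * release the underlying dmabuf.
 */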

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk.  */
	dma_addr_t base_dma_addr;
};
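
/*
 * Sketch of how the chunk owner is used (illustrative, assuming each
 * net_iov covers one PAGE_SIZE slice of the chunk, as net_iov_virtual_addr()
 * below implies): the DMA address backing a net_iov can be derived from the
 * owner's base_dma_addr plus the iov's index within the chunk:
 *
 *	owner = net_devmem_iov_to_chunk_owner(niov);
 *	addr  = owner->base_dma_addr +
 *		((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 */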

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
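
/*
 * Example use of the accessors above (a sketch only): given a devmem
 * net_iov, a caller can recover the binding it came from, the binding's
 * globally unique ID, and the iov's virtual address:
 *
 *	struct net_devmem_dmabuf_binding *binding;
 *
 *	binding = net_devmem_iov_binding(niov);
 *	id = net_devmem_iov_binding_id(niov);
 *	addr = net_iov_virtual_addr(niov);
 */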

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);
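
/*
 * Allocate/free pairing, sketched (illustrative; the allocator itself is
 * implemented out of line): a page pool backed by this binding obtains
 * net_iovs with net_devmem_alloc_dmabuf() and hands them back with
 * net_devmem_free_dmabuf(); per the refcount comment on
 * struct net_devmem_dmabuf_binding, each allocated net_iov also holds a
 * ref on the binding while it is outstanding.
 *
 *	niov = net_devmem_alloc_dmabuf(binding);
 *	if (niov) {
 *		... hand the net_iov to the rx path ...
 *		net_devmem_free_dmabuf(niov);
 *	}
 */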

bool net_is_devmem_iov(struct net_iov *niov);

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}
#endif

#endif /* _NET_DEVMEM_H */