/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all currently active
	 * bindings.
	 */
	u32 id;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of the metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;
	size_t num_niovs;

	struct net_devmem_dmabuf_binding *binding;
};
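
/*
 * Illustrative sketch (not part of this header; names such as "sg",
 * "virtual" and "err" are assumptions): each scatterlist entry is
 * typically added to the genpool with its owner struct attached as the
 * chunk's private data, so later allocations can be mapped back to
 * this metadata:
 *
 *	owner = kzalloc(sizeof(*owner), GFP_KERNEL);
 *	if (!owner)
 *		return -ENOMEM;
 *	owner->base_virtual = virtual;
 *	owner->base_dma_addr = sg_dma_address(sg);
 *	owner->binding = binding;
 *	err = gen_pool_add_owner(binding->chunk_pool, virtual,
 *				 (phys_addr_t)virtual, sg_dma_len(sg),
 *				 NUMA_NO_NODE, owner);
 */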

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);
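
/*
 * Typical control-path flow, as a sketch (error handling elided; the
 * caller and local variable names are illustrative, not part of this
 * API). Binding a dmabuf yields the refcounted binding object, which
 * can then be attached to an rx queue and later torn down:
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	...
 *	net_devmem_unbind_dmabuf(binding);
 */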

static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
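
/* A hypothetical companion to net_iov_virtual_addr(), not declared by
 * this header; it is shown only to illustrate how the chunk owner's two
 * base addresses mirror each other: the same index-to-byte-offset
 * arithmetic applies to base_dma_addr as to base_virtual.
 */
static inline dma_addr_t net_iov_example_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	/* Each net_iov covers one page; scale the index to a byte offset. */
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}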

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding->id;
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}
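
/*
 * Usage sketch (an illustrative caller, not code from the tree): a
 * consumer that needs the binding alive across some window pairs a get
 * with a put; the final put frees the binding and unmaps the dmabuf via
 * __net_devmem_dmabuf_binding_free():
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	... use binding->chunk_pool, binding->dev, etc. ...
 *	net_devmem_dmabuf_binding_put(binding);
 */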

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */