/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
	u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}
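
/* The packet header lives in the headroom reserved at allocation time,
 * so virtio_vsock_hdr() is just a cast of skb->head. A minimal sketch
 * of filling it in on the TX path (src_cid, dst_cid and payload_len
 * are illustrative caller variables, not part of this header):
 *
 *	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 *
 *	hdr->src_cid = cpu_to_le64(src_cid);
 *	hdr->dst_cid = cpu_to_le64(dst_cid);
 *	hdr->len     = cpu_to_le32(payload_len);
 */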

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len)
{
	DEBUG_NET_WARN_ON_ONCE(skb->len);

	if (skb_is_nonlinear(skb))
		skb->len = len;
	else
		skb_put(skb, len);
}
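
/* For a linear skb the payload sits in the linear area, so skb_put()
 * advances the tail as usual; for a nonlinear skb the payload already
 * lives in the frags set up at allocation time, so only skb->len needs
 * updating. A hedged sketch of the RX completion path, where buf_len
 * and rx_len stand in for the buffer size and the length reported by
 * the device (illustrative only):
 *
 *	skb = virtio_vsock_alloc_linear_skb(buf_len, GFP_KERNEL);
 *	if (skb)
 *		virtio_vsock_skb_put(skb, rx_len);
 */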

static inline struct sk_buff *
__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
				    unsigned int data_len,
				    gfp_t mask)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(header_len, data_len,
				   PAGE_ALLOC_COSTLY_ORDER, &err, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	skb->data_len = data_len;
	return skb;
}

static inline struct sk_buff *
virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
{
	return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		return virtio_vsock_alloc_linear_skb(size, mask);

	size -= VIRTIO_VSOCK_SKB_HEADROOM;
	return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
						   size, mask);
}
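
/* Allocation sketch: a request that fits in a costly-order linear
 * buffer comes back fully linear; anything larger gets a small linear
 * area for the header plus page frags for the payload. Either way the
 * caller sees VIRTIO_VSOCK_SKB_HEADROOM already reserved (pkt_len is
 * an assumed caller variable):
 *
 *	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + pkt_len,
 *				     GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 */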

static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}
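
/* These helpers wrap the unlocked __skb_queue_*() primitives with
 * list->lock and BH protection, so a softirq producer and a process
 * context consumer can safely share one sk_buff_head. A minimal usage
 * sketch (send_q is a hypothetical example queue, not part of this
 * header):
 *
 *	static struct sk_buff_head send_q;
 *
 *	skb_queue_head_init(&send_q);
 *	virtio_vsock_skb_queue_tail(&send_q, skb);
 *
 *	while ((skb = virtio_vsock_skb_dequeue(&send_q)))
 *		kfree_skb(skb);
 */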

static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}

/* Dimension the RX SKB so that the entire thing fits exactly into
 * a single 4KiB page. This avoids wasting memory due to alloc_skb()
 * rounding up to the next page order and also means that we
 * don't leave higher-order pages sitting around in the RX queue.
 */
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	SKB_WITH_OVERHEAD(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)
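
/* SKB_WITH_OVERHEAD(X) subtracts the (aligned) struct skb_shared_info
 * that trails the skb data area, so the default RX buffer and its
 * shared info together consume exactly one 4KiB page:
 *
 *	VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE
 *		== 4096 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 */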

enum {
	VSOCK_VQ_RX     = 0, /* for host to guest data */
	VSOCK_VQ_TX     = 1, /* for guest to host data */
	VSOCK_VQ_EVENT  = 2,
	VSOCK_VQ_MAX    = 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
	struct vsock_sock *vsk;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	/* Protected by tx_lock */
	u32 tx_cnt;
	u32 peer_fwd_cnt;
	u32 peer_buf_alloc;
	size_t bytes_unsent;

	/* Protected by rx_lock */
	u32 fwd_cnt;
	u32 last_fwd_cnt;
	u32 rx_bytes;
	u32 buf_alloc;
	u32 buf_used;
	struct sk_buff_head rx_queue;
	u32 msg_count;
};
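
/* Credit sketch: each packet header advertises the local buf_alloc and
 * fwd_cnt, and a sender may keep at most
 * peer_buf_alloc - (tx_cnt - peer_fwd_cnt) bytes in flight. A hedged,
 * simplified version of the free-space calculation done under tx_lock
 * in the common transport code (u32 arithmetic absorbs counter wrap):
 *
 *	static u32 tx_space_free(struct virtio_vsock_sock *vvs)
 *	{
 *		return vvs->peer_buf_alloc -
 *		       (vvs->tx_cnt - vvs->peer_fwd_cnt);
 *	}
 */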

struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	struct net *net;
	u32 pkt_len;
	u16 type;
	u16 op;
	u32 flags;
	bool reply;
};

struct virtio_transport {
	/* This must be the first field */
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct sk_buff *skb, struct net *net);

	/* Used in MSG_ZEROCOPY mode. Checks whether the provided data
	 * (number of buffers) can be transmitted in zerocopy mode. If
	 * this callback is not implemented for a transport, that
	 * transport needs no extra checks and can perform zerocopy
	 * transmission by default.
	 */
	bool (*can_msgzerocopy)(int bufs_num);
};
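
/* A transport embeds struct vsock_transport as its first field so the
 * core can cast between the two. A hedged sketch of wiring up the
 * callbacks (my_send_pkt and my_can_msgzerocopy are hypothetical):
 *
 *	static struct virtio_transport my_transport = {
 *		.transport = {
 *			.module = THIS_MODULE,
 *		},
 *		.send_pkt = my_send_pkt,
 *		.can_msgzerocopy = my_can_msgzerocopy,
 *	};
 */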

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len,
				int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk);

void virtio_transport_consume_skb_sent(struct sk_buff *skb,
				       bool consume);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(struct vsock_sock *vsk, u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb, struct net *net);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */