/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

struct vhost_worker {
	struct vhost_task	*vtsk;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex		mutex;
	struct llist_head	work_list;
	u64			kcov_handle;
	u32			id;
	int			attachment_cnt;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
	struct vhost_virtqueue	*vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
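
/*
 * Illustrative sketch (not part of the upstream API docs): a backend that
 * wants its handler run whenever a file becomes readable pairs
 * vhost_poll_init() with vhost_poll_start()/vhost_poll_stop(). The handler
 * my_handle_rx() and the file sock_file are placeholders chosen here:
 *
 *	static void my_handle_rx(struct vhost_work *work)
 *	{
 *		struct vhost_poll *poll;
 *
 *		poll = container_of(work, struct vhost_poll, work);
 *		... process data that became readable on poll->vq ...
 *	}
 *
 *	vhost_poll_init(&poll, my_handle_rx, EPOLLIN, dev, vq);
 *	if (vhost_poll_start(&poll, sock_file))
 *		... bail out ...
 *	...
 *	vhost_poll_stop(&poll);
 */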

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
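
/*
 * Illustrative sketch: deferring a function call to the vhost worker
 * thread. my_deferred_fn() and my_work are placeholders;
 * vhost_vq_work_queue() is declared further down in this header:
 *
 *	static void my_deferred_fn(struct vhost_work *work)
 *	{
 *		... runs in the worker's context, not the caller's ...
 *	}
 *
 *	vhost_work_init(&my_work, my_deferred_fn);
 *	vhost_vq_work_queue(vq, &my_work);
 *	...
 *	vhost_dev_flush(dev);	... waits for queued work to complete ...
 */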

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used holds a valid value */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
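
/*
 * Illustrative sketch: per the index comments above, packed rings
 * (VIRTIO_F_RING_PACKED) fold the position and the wrap counter into one
 * 16-bit value, so a caller would split last_avail_idx roughly like this:
 *
 *	u16 idx = vq->last_avail_idx & 0x7fff;		... ring position ...
 *	bool wrap = vq->last_avail_idx & 0x8000;	... wrap counter ...
 */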

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
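
/*
 * Illustrative sketch: a backend usually embeds its virtqueues, builds an
 * array of pointers to them (the array must outlive the device, since
 * vhost_dev_init() keeps the pointer), and picks its own limits. The
 * struct layout and the weight values below are placeholders:
 *
 *	struct my_backend {
 *		struct vhost_dev dev;
 *		struct vhost_virtqueue vqs[2];
 *		struct vhost_virtqueue *vq_ptrs[2];
 *	};
 *
 *	b->vq_ptrs[0] = &b->vqs[0];
 *	b->vq_ptrs[1] = &b->vqs[1];
 *	vhost_dev_init(&b->dev, b->vq_ptrs, 2, UIO_MAXIOV,
 *		       64, 64 * 1024, true, NULL);
 */
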
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

void vhost_vq_flush(struct vhost_virtqueue *vq);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
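
/*
 * Illustrative sketch of the canonical kick-handler loop (simplified;
 * real backends do their I/O between fetching and completing a buffer):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;				... error ...
 *		if (head == vq->num) {			... ring empty ...
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	... buffers raced in ...
 *			}
 *			break;
 *		}
 *		... consume the out/in iovecs, compute len ...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *		if (vhost_exceeds_weight(vq, ++pkts, total_len))
 *			break;	... requeues the poll, lets others run ...
 *	}
 */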

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
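
/*
 * Illustrative sketch: reporting an IOTLB miss to userspace, closely
 * following what the core does internally (field setup abbreviated):
 *
 *	struct vhost_msg_node *node;
 *
 *	node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
 *	if (!node)
 *		return -ENOMEM;
 *	node->msg.iotlb.type = VHOST_IOTLB_MISS;
 *	node->msg.iotlb.iova = iova;
 *	node->msg.iotlb.perm = access;
 *	vhost_enqueue_msg(dev, &dev->read_list, node);
 */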

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx);	\
	} while (0)
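
/*
 * Illustrative sketch: handlers report malformed rings through vq_err(),
 * which logs the message and signals the error eventfd, if userspace
 * registered one:
 *
 *	if (unlikely(head >= vq->num)) {
 *		vq_err(vq, "Invalid head %d (max %u)\n", head, vq->num);
 *		return;
 *	}
 */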

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:            Virtqueue.
 * @private_data:  The private data.
 *
 * Context: Needs to be called with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:            Virtqueue.
 *
 * Context: Needs to be called with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
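
/*
 * Illustrative sketch: a backend stashes its per-queue state when the
 * queue is started and fetches it in the kick handler; the socket here
 * stands in for whatever the backend hangs off the queue:
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, sock);
 *	mutex_unlock(&vq->mutex);
 *
 *	... later, in the handler, with vq->mutex held ...
 *	struct socket *sock = vhost_vq_get_backend(vq);
 *	if (!sock)
 *		return;		... queue not started ...
 */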

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
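
/*
 * Illustrative sketch: feature bits gate optional behaviour per queue,
 * e.g. only trusting the ring's event index fields once the guest has
 * negotiated VIRTIO_RING_F_EVENT_IDX:
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		... use used_event/avail_event to decide when to signal ...
 */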

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
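
/*
 * Illustrative sketch: ring fields live in guest-negotiated byte order,
 * so every load of a __virtioNN value goes through these helpers. A
 * simplified read of the available index (the core actually uses its
 * IOTLB-aware userspace accessors, and error handling is elided):
 *
 *	__virtio16 raw;
 *
 *	if (get_user(raw, &vq->avail->idx))
 *		... fault ...
 *	vq->avail_idx = vhost16_to_cpu(vq, raw);
 */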
#endif