/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>
#include <linux/unroll.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node node;
	vhost_work_fn_t fn;
	unsigned long flags;
};
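
/*
 * Illustrative sketch, not part of this header: a backend's work function
 * recovers its context with container_of() and is queued on the worker
 * currently attached to the virtqueue (my_handle_kick is hypothetical;
 * vhost_work_init() and vhost_vq_work_queue() are declared below):
 *
 *	static void my_handle_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		... process the ring under vq->mutex ...
 *	}
 *
 *	vhost_work_init(&vq->poll.work, my_handle_kick);
 *	vhost_vq_work_queue(vq, &vq->poll.work);
 */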

struct vhost_worker;
struct vhost_dev;

struct vhost_worker_ops {
	int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
		      const char *name);
	void (*stop)(struct vhost_worker *worker);
	void (*wakeup)(struct vhost_worker *worker);
};

struct vhost_worker {
	struct task_struct *kthread_task;
	struct vhost_task *vtsk;
	struct vhost_dev *dev;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex mutex;
	struct llist_head work_list;
	u64 kcov_handle;
	u32 id;
	int attachment_cnt;
	bool killed;
	const struct vhost_worker_ops *ops;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct vhost_work work;
	__poll_t mask;
	struct vhost_dev *dev;
	struct vhost_virtqueue *vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
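
/*
 * Typical lifecycle, sketched (error handling elided; "file" is the backend
 * eventfd or socket supplied by userspace, handle_kick_fn is hypothetical):
 * vhost_poll_init() binds a work function to the poll, vhost_poll_start()
 * registers it on the file's wait queue so readiness events queue the work
 * on the virtqueue's worker, and vhost_poll_stop() plus vhost_dev_flush()
 * tear it down:
 *
 *	vhost_poll_init(&vq->poll, handle_kick_fn, EPOLLIN, dev, vq);
 *	...
 *	if (vhost_poll_start(&vq->poll, file))
 *		goto err;
 *	...
 *	vhost_poll_stop(&vq->poll);
 *	vhost_dev_flush(dev);
 */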

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;
	/* Next avail ring head when VIRTIO_F_IN_ORDER is negotiated */
	u16 next_avail_head;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	u16 *nheads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	VIRTIO_DECLARE_FEATURES(acked_features);
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	/*
	 * If fork_owner is true we use vhost_tasks to create
	 * the worker so all settings/limits like cgroups, NPROC,
	 * scheduler, etc. are inherited from the owner. If false,
	 * we use kthreads and only attach to the same cgroups
	 * as the owner for compat with older kernels.
	 * The default value is taken from fork_from_owner_default.
	 */
	bool fork_owner;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
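
/*
 * Illustrative call (limits are made-up values): a backend with two
 * virtqueues typically initializes its device like this, where iov_limit
 * bounds per-descriptor iovecs and weight/byte_weight bound how much one
 * work cycle may process before rescheduling:
 *
 *	struct vhost_virtqueue *vqs[2] = { &priv->tx_vq, &priv->rx_vq };
 *
 *	vhost_dev_init(&priv->dev, vqs, 2, UIO_MAXIOV,
 *		       MY_VQ_WEIGHT, MY_VQ_BYTE_WEIGHT, true, NULL);
 */
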
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);

int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			unsigned int *ndesc);

void vhost_discard_vq_desc(struct vhost_virtqueue *, int nbuf,
			   unsigned int ndesc);

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     u16 *nheads, unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, u16 *nheads,
				 unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
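
/*
 * Canonical descriptor-processing loop, sketched from the pattern in-tree
 * backends follow (the actual I/O and error handling are elided; "dev",
 * "len", "pkts" and "total_len" belong to the hypothetical caller; runs
 * under vq->mutex):
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;
 *		if (head == vq->num) {
 *			// Ring empty: re-arm guest notifications; retry if
 *			// buffers slipped in meanwhile.
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		// ... consume iov[0..out) (guest-to-host) and fill
 *		// iov[out..out+in) (host-to-guest) ...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *		if (unlikely(vhost_exceeds_weight(vq, ++pkts,
 *						  total_len += len)))
 *			break;
 *	}
 */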

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx);           \
	} while (0)
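
/*
 * Example: vq_err(vq, "unexpected descriptor format: out %d in %d\n",
 * out, in); logs via pr_debug() and, if userspace registered an error
 * eventfd for this queue, signals it so the device can be reset.
 */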

#define VHOST_FEATURES \
	VIRTIO_F_NOTIFY_ON_EMPTY, \
	VIRTIO_RING_F_INDIRECT_DESC, \
	VIRTIO_RING_F_EVENT_IDX, \
	VHOST_F_LOG_ALL, \
	VIRTIO_F_ANY_LAYOUT, \
	VIRTIO_F_VERSION_1

static inline u64 vhost_features_u64(const int *features, int size, int idx)
{
	u64 res = 0;

	unrolled_count(VIRTIO_FEATURES_BITS)
	for (int i = 0; i < size; ++i) {
		int bit = features[i];

		if (virtio_features_chk_bit(bit) && VIRTIO_U64(bit) == idx)
			res |= VIRTIO_BIT(bit);
	}
	return res;
}

#define VHOST_FEATURES_U64(features, idx) \
	vhost_features_u64(features, ARRAY_SIZE(features), idx)

#define DEFINE_VHOST_FEATURES_ARRAY_ENTRY(idx, features) \
	[idx] = VHOST_FEATURES_U64(features, idx),

#define DEFINE_VHOST_FEATURES_ARRAY(array, features)		\
	u64 array[VIRTIO_FEATURES_U64S] = {			\
		UNROLL(VIRTIO_FEATURES_U64S,			\
		       DEFINE_VHOST_FEATURES_ARRAY_ENTRY, features) \
	}
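
/*
 * Sketch of intended use (the backend name and the extra feature bit are
 * hypothetical): a flat list of feature bit numbers is folded into the
 * per-u64 masks, one VHOST_FEATURES_U64() entry per 64-bit word:
 *
 *	static const int my_features[] = { VHOST_FEATURES,
 *					   VIRTIO_F_IN_ORDER };
 *	static const DEFINE_VHOST_FEATURES_ARRAY(my_feature_array,
 *						 my_features);
 */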

/**
 * vhost_vq_set_backend - Set backend.
 * @vq: Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 * @vq: Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
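
/*
 * Sketch (struct my_backend is hypothetical): both helpers run under
 * vq->mutex so the backend pointer cannot change while it is in use:
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, b);
 *	...
 *	struct my_backend *b2 = vhost_vq_get_backend(vq);
 *	mutex_unlock(&vq->mutex);
 */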

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return virtio_features_test_bit(vq->acked_features_array, bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
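
/*
 * Example: gating code paths on negotiated features, e.g. event-index
 * interrupt suppression or the v2 IOTLB message format (the surrounding
 * logic is elided):
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		... honour used_event/avail_event thresholds ...
 *	if (vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2))
 *		... accept struct vhost_msg_v2 on the char device ...
 */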

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
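
/*
 * Sketch: guest-visible ring fields are __virtio16/32/64 and must go
 * through these accessors so legacy cross-endian setups keep working,
 * e.g. when caching the available index:
 *
 *	__virtio16 idx;
 *
 *	if (get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */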
#endif