1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_CONFIG_H
3 #define _LINUX_VIRTIO_CONFIG_H
4
5 #include <linux/err.h>
6 #include <linux/bug.h>
7 #include <linux/virtio.h>
8 #include <linux/virtio_byteorder.h>
9 #include <linux/compiler_types.h>
10 #include <uapi/linux/virtio_config.h>
11
12 struct irq_affinity;
13
/**
 * struct virtio_shm_region - a device shared memory region
 * @addr: base address of the region
 * @len: length of the region
 */
struct virtio_shm_region {
	u64 addr;
	u64 len;
};
18
/* Driver callback invoked on a virtqueue's used-buffer notification. */
typedef void vq_callback_t(struct virtqueue *);
20
/**
 * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
 * @name: virtqueue description. Used mainly for debugging, NULL for
 *        a virtqueue unused by the driver.
 * @callback: A callback to invoke on a used buffer notification.
 *            NULL for a virtqueue that does not need a callback.
 * @ctx: A flag to indicate to maintain an extra context per virtqueue.
 */
struct virtqueue_info {
	const char *name;
	vq_callback_t *callback;
	bool ctx;
};
34
/**
 * struct virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @get: read the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to write the field value into.
 *	len: the length of the buffer
 * @set: write the value of a configuration field
 *	vdev: the virtio_device
 *	offset: the offset of the configuration field
 *	buf: the buffer to read the field value from.
 *	len: the length of the buffer
 * @generation: config generation counter (optional)
 *	vdev: the virtio_device
 *	Returns the config generation counter
 * @get_status: read the status byte
 *	vdev: the virtio_device
 *	Returns the status byte
 * @set_status: write the status byte
 *	vdev: the virtio_device
 *	status: the new status byte
 * @reset: reset the device
 *	vdev: the virtio device
 *	After this, status and feature negotiation must be done again
 *	Device must not be reset from its vq/config callbacks, or in
 *	parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *	vdev: the virtio_device
 *	nvqs: the number of virtqueues to find
 *	vqs: on success, includes new virtqueues
 *	vqs_info: array of virtqueue info structures
 *	Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
 *	The function guarantees that all memory operations on the
 *	queue before it are visible to the vring_interrupt() that is
 *	called after it.
 *	vdev: the virtio_device
 * @get_features: get the array of feature bits for this device.
 *	vdev: the virtio_device
 *	Returns the first 64 feature bits.
 * @get_extended_features: read the full feature-bit array (optional)
 *	vdev: the virtio_device
 *	Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently
 *	need).
 * @finalize_features: confirm what device features we'll be using.
 *	vdev: the virtio_device
 *	This sends the driver feature bits to the device: it can change
 *	the dev->feature bits if it wants.
 *	Note that despite the name this can be called any number of
 *	times.
 *	Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *	vdev: the virtio_device
 *	This returns a pointer to the bus name a la pci_name from which
 *	the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 * @disable_vq_and_reset: reset a queue individually (optional).
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	disable_vq_and_reset will guarantee that the callbacks are disabled and
 *	synchronized.
 *	Except for the callback, the caller should guarantee that the vring is
 *	not accessed by any functions of virtqueue.
 * @enable_vq_after_reset: enable a reset queue
 *	vq: the virtqueue
 *	Returns 0 on success or error status
 *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
 *	set.
 */
struct virtio_config_ops {
	void (*get)(struct virtio_device *vdev, unsigned offset,
		    void *buf, unsigned len);
	void (*set)(struct virtio_device *vdev, unsigned offset,
		    const void *buf, unsigned len);
	u32 (*generation)(struct virtio_device *vdev);
	u8 (*get_status)(struct virtio_device *vdev);
	void (*set_status)(struct virtio_device *vdev, u8 status);
	void (*reset)(struct virtio_device *vdev);
	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
			struct virtqueue *vqs[],
			struct virtqueue_info vqs_info[],
			struct irq_affinity *desc);
	void (*del_vqs)(struct virtio_device *);
	void (*synchronize_cbs)(struct virtio_device *);
	u64 (*get_features)(struct virtio_device *vdev);
	void (*get_extended_features)(struct virtio_device *vdev,
				      u64 *features);
	int (*finalize_features)(struct virtio_device *vdev);
	const char *(*bus_name)(struct virtio_device *vdev);
	int (*set_vq_affinity)(struct virtqueue *vq,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
						 int index);
	bool (*get_shm_region)(struct virtio_device *vdev,
			       struct virtio_shm_region *region, u8 id);
	int (*disable_vq_and_reset)(struct virtqueue *vq);
	int (*enable_vq_after_reset)(struct virtqueue *vq);
};
141
142 /* If driver didn't advertise the feature, it will never appear. */
143 void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
144 unsigned int fbit);
145
/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *		       Devices should normally use virtio_has_feature,
 *		       which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 *
 * Return: true if @fbit is set in the device's feature array.
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
				     unsigned int fbit)
{
	return virtio_features_test_bit(vdev->features_array, fbit);
}
158
/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit to set in the device's feature array
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
				    unsigned int fbit)
{
	virtio_features_set_bit(vdev->features_array, fbit);
}
169
/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit to clear in the device's feature array
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
				      unsigned int fbit)
{
	virtio_features_clear_bit(vdev->features_array, fbit);
}
180
/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 *
 * For driver (non-transport) feature bits, this also sanity-checks that
 * the driver advertised the feature, via
 * virtio_check_driver_offered_feature().
 *
 * Return: true if the device has the feature bit set.
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
				      unsigned int fbit)
{
	if (fbit < VIRTIO_TRANSPORT_F_START)
		virtio_check_driver_offered_feature(vdev, fbit);

	return __virtio_test_bit(vdev, fbit);
}
194
/*
 * virtio_get_features - read this device's feature bits into @features.
 *
 * Uses the transport's extended-features hook when it is implemented;
 * otherwise widens the legacy 64-bit feature word into the array form.
 */
static inline void virtio_get_features(struct virtio_device *vdev,
				       u64 *features)
{
	const struct virtio_config_ops *ops = vdev->config;

	if (!ops->get_extended_features)
		virtio_features_from_u64(features, ops->get_features(vdev));
	else
		ops->get_extended_features(vdev, features);
}
205
/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 *
 * Return: true if the device did NOT negotiate VIRTIO_F_ACCESS_PLATFORM.
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
	/*
	 * Note the reverse polarity of the quirk feature (compared to most
	 * other features), this is for compatibility with legacy systems.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}
218
/**
 * virtio_find_vqs - find and instantiate virtqueues via the transport
 * @vdev: the virtio device
 * @nvqs: the number of virtqueues to find
 * @vqs: on success, filled with the new virtqueues
 * @vqs_info: array of virtqueue info structures, one per queue
 * @desc: irq affinity descriptor passed through to the transport
 *
 * Return: 0 on success or a negative error status.
 */
static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		    struct virtqueue *vqs[],
		    struct virtqueue_info vqs_info[],
		    struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
}
227
228 static inline
virtio_find_single_vq(struct virtio_device * vdev,vq_callback_t * c,const char * n)229 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
230 vq_callback_t *c, const char *n)
231 {
232 struct virtqueue_info vqs_info[] = {
233 { n, c },
234 };
235 struct virtqueue *vq;
236 int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
237
238 if (err < 0)
239 return ERR_PTR(err);
240 return vq;
241 }
242
243 /**
244 * virtio_synchronize_cbs - synchronize with virtqueue callbacks
245 * @dev: the virtio device
246 */
247 static inline
virtio_synchronize_cbs(struct virtio_device * dev)248 void virtio_synchronize_cbs(struct virtio_device *dev)
249 {
250 if (dev->config->synchronize_cbs) {
251 dev->config->synchronize_cbs(dev);
252 } else {
253 /*
254 * A best effort fallback to synchronize with
255 * interrupts, preemption and softirq disabled
256 * regions. See comment above synchronize_rcu().
257 */
258 synchronize_rcu();
259 }
260 }
261
/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the virtio device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	/* Catch callers setting DRIVER_OK twice. */
	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
	 * will see the driver specific setup if it sees vq->broken
	 * as false (even if the notifications come before DRIVER_OK).
	 */
	virtio_synchronize_cbs(dev);
	__virtio_unbreak_device(dev);
#endif
	/*
	 * The transport should ensure the visibility of vq->broken
	 * before setting DRIVER_OK. See the comments for the transport
	 * specific set_status() method.
	 *
	 * A well behaved device will only notify a virtqueue after
	 * DRIVER_OK, this means the device should "see" the coherent
	 * memory write that set vq->broken as false which is done by
	 * the driver when it sees DRIVER_OK, then the following
	 * driver's vring_interrupt() will see vq->broken as false so
	 * we won't lose any notification.
	 */
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
300
301 static inline
virtio_bus_name(struct virtio_device * vdev)302 const char *virtio_bus_name(struct virtio_device *vdev)
303 {
304 if (!vdev->config->bus_name)
305 return "virtio";
306 return vdev->config->bus_name(vdev);
307 }
308
/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be
 * honored, depending on config space support, irq type and irq sharing.
 *
 * Return: 0 when the transport has no set_vq_affinity hook, otherwise
 * the transport's result.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	if (vdev->config->set_vq_affinity)
		return vdev->config->set_vq_affinity(vq, cpu_mask);
	return 0;
}
326
327 static inline
virtio_get_shm_region(struct virtio_device * vdev,struct virtio_shm_region * region,u8 id)328 bool virtio_get_shm_region(struct virtio_device *vdev,
329 struct virtio_shm_region *region, u8 id)
330 {
331 if (!vdev->config->get_shm_region)
332 return false;
333 return vdev->config->get_shm_region(vdev, region, id);
334 }
335
/*
 * True for modern (VIRTIO_F_VERSION_1) devices; for legacy devices it
 * depends on virtio_legacy_is_little_endian().
 */
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
		virtio_legacy_is_little_endian();
}
341
342 /* Memory accessors */
/* Convert a device-endian 16-bit value to CPU byte order. */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}
347
/* Convert a CPU-endian 16-bit value to device byte order. */
static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}
352
/* Convert a device-endian 32-bit value to CPU byte order. */
static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}
357
/* Convert a CPU-endian 32-bit value to device byte order. */
static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}
362
/* Convert a device-endian 64-bit value to CPU byte order. */
static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}
367
/* Convert a CPU-endian 64-bit value to device byte order. */
static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
372
/*
 * Type-generic device-to-CPU conversion, dispatched on the virtio type of
 * @x; single bytes need no byte swapping and pass through unchanged.
 */
#define virtio_to_cpu(vdev, x) \
	_Generic((x), \
		__u8: (x), \
		__virtio16: virtio16_to_cpu((vdev), (x)), \
		__virtio32: virtio32_to_cpu((vdev), (x)), \
		__virtio64: virtio64_to_cpu((vdev), (x)) \
		)
380
/*
 * Type-generic CPU-to-device conversion: @m only selects the target virtio
 * type via _Generic; @x is the value actually converted.
 */
#define cpu_to_virtio(vdev, x, m) \
	_Generic((m), \
		__u8: (x), \
		__virtio16: cpu_to_virtio16((vdev), (x)), \
		__virtio32: cpu_to_virtio32((vdev), (x)), \
		__virtio64: cpu_to_virtio64((vdev), (x)) \
		)
388
/* The native CPU type corresponding to config-space @member's virtio type. */
#define __virtio_native_type(structname, member) \
	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
391
/*
 * Config space read accessor: reads @member of @structname from @vdev's
 * config space into *@ptr, converting device endianness to CPU order.
 * May sleep; 8-byte (and larger) fields go through __virtio_cread_many()
 * so the generation counter protects them from torn reads.
 */
#define virtio_cread(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
	} while(0)
420
/*
 * Config space write accessor: writes *@ptr to @member of @structname in
 * @vdev's config space, converting CPU endianness to device order.
 * May sleep.
 */
#define virtio_cwrite(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
435
/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 *
 * Type-generic little-endian-to-CPU conversion dispatched on the __le type
 * of @x; single bytes pass through unchanged.
 */
#define virtio_le_to_cpu(x) \
	_Generic((x), \
		__u8: (u8)(x), \
		__le16: (u16)le16_to_cpu(x), \
		__le32: (u32)le32_to_cpu(x), \
		__le64: (u64)le64_to_cpu(x) \
		)
447
/*
 * Type-generic CPU-to-little-endian conversion: @m only selects the target
 * __le type via _Generic; @x is the value actually converted.
 */
#define virtio_cpu_to_le(x, m) \
	_Generic((m), \
		__u8: (x), \
		__le16: cpu_to_le16(x), \
		__le32: cpu_to_le32(x), \
		__le64: cpu_to_le64(x) \
		)
455
/*
 * LE (e.g. modern) config space read accessor: like virtio_cread() but the
 * field is always little-endian on the device side. May sleep; 8-byte (and
 * larger) fields go through __virtio_cread_many() to avoid torn reads.
 */
#define virtio_cread_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cread_v;	\
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
									\
		switch (sizeof(virtio_cread_v)) {			\
		case 1:							\
		case 2:							\
		case 4:							\
			vdev->config->get((vdev),			\
					  offsetof(structname, member),	\
					  &virtio_cread_v,		\
					  sizeof(virtio_cread_v));	\
			break;						\
		default:						\
			__virtio_cread_many((vdev),			\
					    offsetof(structname, member), \
					    &virtio_cread_v,		\
					    1,				\
					    sizeof(virtio_cread_v));	\
			break;						\
		}							\
		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
	} while(0)
484
/*
 * LE (e.g. modern) config space write accessor: like virtio_cwrite() but
 * the field is always little-endian on the device side. May sleep.
 */
#define virtio_cwrite_le(vdev, structname, member, ptr)			\
	do {								\
		typeof(((structname*)0)->member) virtio_cwrite_v =	\
			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
									\
		might_sleep();						\
		/* Sanity check: must match the member's type */	\
		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
									\
		vdev->config->set((vdev), offsetof(structname, member),	\
				  &virtio_cwrite_v,			\
				  sizeof(virtio_cwrite_v));		\
	} while(0)
498
499
500 /* Read @count fields, @bytes each. */
__virtio_cread_many(struct virtio_device * vdev,unsigned int offset,void * buf,size_t count,size_t bytes)501 static inline void __virtio_cread_many(struct virtio_device *vdev,
502 unsigned int offset,
503 void *buf, size_t count, size_t bytes)
504 {
505 u32 old, gen = vdev->config->generation ?
506 vdev->config->generation(vdev) : 0;
507 int i;
508
509 might_sleep();
510 do {
511 old = gen;
512
513 for (i = 0; i < count; i++)
514 vdev->config->get(vdev, offset + bytes * i,
515 buf + i * bytes, bytes);
516
517 gen = vdev->config->generation ?
518 vdev->config->generation(vdev) : 0;
519 } while (gen != old);
520 }
521
/* Read @len raw bytes of config space at @offset (generation-safe). */
static inline void virtio_cread_bytes(struct virtio_device *vdev,
				      unsigned int offset,
				      void *buf, size_t len)
{
	__virtio_cread_many(vdev, offset, buf, len, 1);
}
528
/* Read one byte of config space at @offset. May sleep. */
static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
	u8 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return ret;
}
537
/* Write one byte of config space at @offset. May sleep. */
static inline void virtio_cwrite8(struct virtio_device *vdev,
				  unsigned int offset, u8 val)
{
	might_sleep();
	vdev->config->set(vdev, offset, &val, sizeof(val));
}
544
/* Read a 16-bit config field at @offset, converted to CPU order. May sleep. */
static inline u16 virtio_cread16(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio16 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio16_to_cpu(vdev, ret);
}
554
/* Write a 16-bit config field at @offset in device order. May sleep. */
static inline void virtio_cwrite16(struct virtio_device *vdev,
				   unsigned int offset, u16 val)
{
	__virtio16 v;

	might_sleep();
	v = cpu_to_virtio16(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
564
/* Read a 32-bit config field at @offset, converted to CPU order. May sleep. */
static inline u32 virtio_cread32(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio32 ret;

	might_sleep();
	vdev->config->get(vdev, offset, &ret, sizeof(ret));
	return virtio32_to_cpu(vdev, ret);
}
574
/* Write a 32-bit config field at @offset in device order. May sleep. */
static inline void virtio_cwrite32(struct virtio_device *vdev,
				   unsigned int offset, u32 val)
{
	__virtio32 v;

	might_sleep();
	v = cpu_to_virtio32(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
584
/*
 * Read a 64-bit config field at @offset, converted to CPU order.
 * Goes through __virtio_cread_many() so the generation counter guards
 * against torn reads; that helper also does the might_sleep() check.
 */
static inline u64 virtio_cread64(struct virtio_device *vdev,
				 unsigned int offset)
{
	__virtio64 ret;

	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
	return virtio64_to_cpu(vdev, ret);
}
593
/* Write a 64-bit config field at @offset in device order. May sleep. */
static inline void virtio_cwrite64(struct virtio_device *vdev,
				   unsigned int offset, u64 val)
{
	__virtio64 v;

	might_sleep();
	v = cpu_to_virtio64(vdev, val);
	vdev->config->set(vdev, offset, &v, sizeof(v));
}
603
/*
 * Conditional config space read: evaluates to -ENOENT without touching
 * config space unless @fbit was negotiated, else 0 with *@ptr filled in.
 */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread((vdev), structname, member, ptr);	\
		_r;							\
	})
614
/*
 * Conditional LE config space read: like virtio_cread_feature() but for
 * always-little-endian (modern) fields.
 */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
	({								\
		int _r = 0;						\
		if (!virtio_has_feature(vdev, fbit))			\
			_r = -ENOENT;					\
		else							\
			virtio_cread_le((vdev), structname, member, ptr); \
		_r;							\
	})
625
626 #endif /* _LINUX_VIRTIO_CONFIG_H */
627