xref: /linux/include/linux/virtio_config.h (revision bf897d2626abe4559953342e2f7dda05d034c8c7)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_VIRTIO_CONFIG_H
3 #define _LINUX_VIRTIO_CONFIG_H
4 
5 #include <linux/err.h>
6 #include <linux/bug.h>
7 #include <linux/virtio.h>
8 #include <linux/virtio_byteorder.h>
9 #include <linux/compiler_types.h>
10 #include <uapi/linux/virtio_config.h>
11 
12 struct irq_affinity;
13 
14 struct virtio_shm_region {
15 	u64 addr;
16 	u64 len;
17 };
18 
19 typedef void vq_callback_t(struct virtqueue *);
20 
21 /**
22  * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
23  * @name: virtqueue description. Used mainly for debugging, NULL for
24  *        a virtqueue unused by the driver.
25  * @callback: A callback to invoke on a used buffer notification.
26  *            NULL for a virtqueue that does not need a callback.
27  * @ctx: A flag indicating whether to maintain an extra context per virtqueue.
28  */
29 struct virtqueue_info {
30 	const char *name;
31 	vq_callback_t *callback;
32 	bool ctx;
33 };
34 
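/*
 * Example (illustrative only; the queue names and callbacks are
 * hypothetical): a driver with a receive, a transmit and a control
 * virtqueue could describe them as below and hand the array to
 * virtio_find_vqs(), declared later in this header.
 *
 *	static struct virtqueue_info my_vqs_info[] = {
 *		{ "rx",   my_rx_done },
 *		{ "tx",   my_tx_done },
 *		{ "ctrl", NULL },
 *	};
 */
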
35 /**
36  * struct virtio_config_ops - operations for configuring a virtio device
37  * Note: Do not assume that a transport implements all of the get/set style
38  *       operations as simple reads and writes! Generally speaking,
39  *       any of @get/@set, @get_status/@set_status, or @get_features/
40  *       @finalize_features are NOT safe to be called from an atomic
41  *       context.
42  * @get: read the value of a configuration field
43  *	vdev: the virtio_device
44  *	offset: the offset of the configuration field
45  *	buf: the buffer to write the field value into.
46  *	len: the length of the buffer
47  * @set: write the value of a configuration field
48  *	vdev: the virtio_device
49  *	offset: the offset of the configuration field
50  *	buf: the buffer to read the field value from.
51  *	len: the length of the buffer
52  * @generation: config generation counter (optional)
53  *	vdev: the virtio_device
54  *	Returns the config generation counter
55  * @get_status: read the status byte
56  *	vdev: the virtio_device
57  *	Returns the status byte
58  * @set_status: write the status byte
59  *	vdev: the virtio_device
60  *	status: the new status byte
61  * @reset: reset the device
62  *	vdev: the virtio device
63  *	After this, status and feature negotiation must be done again
64  *	Device must not be reset from its vq/config callbacks, or in
65  *	parallel with being added/removed.
66  * @find_vqs: find virtqueues and instantiate them.
67  *	vdev: the virtio_device
68  *	nvqs: the number of virtqueues to find
69  *	vqs: on success, includes new virtqueues
70  *	vqs_info: array of virtqueue info structures
71  *	Returns 0 on success or error status
72  * @del_vqs: free virtqueues found by find_vqs().
73  * @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
74  *      The function guarantees that all memory operations on the
75  *      queue before it are visible to the vring_interrupt() that is
76  *      called after it.
77  *      vdev: the virtio_device
78  * @get_features: get the array of feature bits for this device.
79  *	vdev: the virtio_device
80  *	Returns the first 64 feature bits.
81  * @get_extended_features: get the full array of feature bits for this device.
82  *      vdev: the virtio_device
83  *      Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently
84  *      need).
85  * @finalize_features: confirm what device features we'll be using.
86  *	vdev: the virtio_device
87  *	This sends the driver feature bits to the device: the transport
88  *	can change the dev->features bits if it wants.
89  *	Note that despite the name this can be called any number of
90  *	times.
91  *	Returns 0 on success or error status
92  * @bus_name: return the bus name associated with the device (optional)
93  *	vdev: the virtio_device
94  *      This returns a pointer to the bus name, a la pci_name(), which
95  *      the caller can then copy.
96  * @set_vq_affinity: set the affinity for a virtqueue (optional).
97  * @get_vq_affinity: get the affinity for a virtqueue (optional).
98  * @get_shm_region: get a shared memory region based on the index.
99  * @disable_vq_and_reset: reset a queue individually (optional).
100  *	vq: the virtqueue
101  *	Returns 0 on success or error status
102  *	disable_vq_and_reset will guarantee that the callbacks are disabled and
103  *	synchronized.
104  *	Apart from the callbacks, the caller must guarantee that the vring is
105  *	not accessed by any other virtqueue function.
106  * @enable_vq_after_reset: enable a previously reset queue
107  *	vq: the virtqueue
108  *	Returns 0 on success or error status
109  *	If disable_vq_and_reset is set, then enable_vq_after_reset must also be
110  *	set.
111  */
112 struct virtio_config_ops {
113 	void (*get)(struct virtio_device *vdev, unsigned offset,
114 		    void *buf, unsigned len);
115 	void (*set)(struct virtio_device *vdev, unsigned offset,
116 		    const void *buf, unsigned len);
117 	u32 (*generation)(struct virtio_device *vdev);
118 	u8 (*get_status)(struct virtio_device *vdev);
119 	void (*set_status)(struct virtio_device *vdev, u8 status);
120 	void (*reset)(struct virtio_device *vdev);
121 	int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
122 			struct virtqueue *vqs[],
123 			struct virtqueue_info vqs_info[],
124 			struct irq_affinity *desc);
125 	void (*del_vqs)(struct virtio_device *);
126 	void (*synchronize_cbs)(struct virtio_device *);
127 	u64 (*get_features)(struct virtio_device *vdev);
128 	void (*get_extended_features)(struct virtio_device *vdev,
129 				      u64 *features);
130 	int (*finalize_features)(struct virtio_device *vdev);
131 	const char *(*bus_name)(struct virtio_device *vdev);
132 	int (*set_vq_affinity)(struct virtqueue *vq,
133 			       const struct cpumask *cpu_mask);
134 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
135 						 int index);
136 	bool (*get_shm_region)(struct virtio_device *vdev,
137 			       struct virtio_shm_region *region, u8 id);
138 	int (*disable_vq_and_reset)(struct virtqueue *vq);
139 	int (*enable_vq_after_reset)(struct virtqueue *vq);
140 };
141 
142 /**
143  * struct virtio_map_ops - operations for mapping buffers for a virtio device
144  * Note: a transport that has its own mapping logic must
145  * implement all of the operations
146  * @map_page: map a buffer to the device
147  *      map: metadata for performing mapping
148  *      page: the page that will be mapped by the device
149  *      offset: the offset in the page for a buffer
150  *      size: the buffer size
151  *      dir: mapping direction
152  *      attrs: mapping attributes
153  *      Returns: the mapped address
154  * @unmap_page: unmap a buffer from the device
155  *      map: metadata for performing mapping
156  *      map_handle: the mapped address
157  *      size: the buffer size
158  *      dir: mapping direction
159  *      attrs: unmapping attributes
160  * @sync_single_for_cpu: sync a single buffer from device to cpu
161  *      map: metadata for performing mapping
162  *      map_handle: the mapping address to sync
163  *      size: the size of the buffer
164  *      dir: synchronization direction
165  * @sync_single_for_device: sync a single buffer from cpu to device
166  *      map: metadata for performing mapping
167  *      map_handle: the mapping address to sync
168  *      size: the size of the buffer
169  *      dir: synchronization direction
170  * @alloc: alloc a coherent buffer mapping
171  *      map: metadata for performing mapping
172  *      size: the size of the buffer
173  *      map_handle: on return, the mapping address of the allocated buffer
174  *      gfp: allocation flag (GFP_XXX)
175  *      Returns: virtual address of the allocated buffer
176  * @free: free a coherent buffer mapping
177  *      map: metadata for performing mapping
178  *      size: the size of the buffer
179  *      vaddr: virtual address of the buffer
180  *      map_handle: the mapping address of the buffer
181  *      attrs: unmapping attributes
182  * @need_sync: check whether a buffer needs synchronization
183  *      map: metadata for performing mapping
184  *      map_handle: the mapped address
185  *      Returns: whether the buffer needs synchronization
186  * @mapping_error: check whether the mapped address is an error
187  *      map: metadata for performing mapping
188  *      map_handle: the mapped address
189  * @max_mapping_size: get the maximum buffer size that can be mapped
190  *      map: metadata for performing mapping
191  *      Returns: the maximum buffer size that can be mapped
192  */
193 struct virtio_map_ops {
194 	dma_addr_t (*map_page)(union virtio_map map, struct page *page,
195 			       unsigned long offset, size_t size,
196 			       enum dma_data_direction dir, unsigned long attrs);
197 	void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
198 			   size_t size, enum dma_data_direction dir,
199 			   unsigned long attrs);
200 	void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
201 				    size_t size, enum dma_data_direction dir);
202 	void (*sync_single_for_device)(union virtio_map map,
203 				       dma_addr_t map_handle, size_t size,
204 				       enum dma_data_direction dir);
205 	void *(*alloc)(union virtio_map map, size_t size,
206 		       dma_addr_t *map_handle, gfp_t gfp);
207 	void (*free)(union virtio_map map, size_t size, void *vaddr,
208 		     dma_addr_t map_handle, unsigned long attrs);
209 	bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
210 	int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
211 	size_t (*max_mapping_size)(union virtio_map map);
212 };
213 
214 /* If driver didn't advertise the feature, it will never appear. */
215 void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
216 					 unsigned int fbit);
217 
218 /**
219  * __virtio_test_bit - helper to test feature bits. For use by transports.
220  *                     Devices should normally use virtio_has_feature,
221  *                     which includes more checks.
222  * @vdev: the device
223  * @fbit: the feature bit
224  */
225 static inline bool __virtio_test_bit(const struct virtio_device *vdev,
226 				     unsigned int fbit)
227 {
228 	return virtio_features_test_bit(vdev->features_array, fbit);
229 }
230 
231 /**
232  * __virtio_set_bit - helper to set feature bits. For use by transports.
233  * @vdev: the device
234  * @fbit: the feature bit
235  */
236 static inline void __virtio_set_bit(struct virtio_device *vdev,
237 				    unsigned int fbit)
238 {
239 	virtio_features_set_bit(vdev->features_array, fbit);
240 }
241 
242 /**
243  * __virtio_clear_bit - helper to clear feature bits. For use by transports.
244  * @vdev: the device
245  * @fbit: the feature bit
246  */
247 static inline void __virtio_clear_bit(struct virtio_device *vdev,
248 				      unsigned int fbit)
249 {
250 	virtio_features_clear_bit(vdev->features_array, fbit);
251 }
252 
253 /**
254  * virtio_has_feature - helper to determine if this device has this feature.
255  * @vdev: the device
256  * @fbit: the feature bit
257  */
258 static inline bool virtio_has_feature(const struct virtio_device *vdev,
259 				      unsigned int fbit)
260 {
261 	if (fbit < VIRTIO_TRANSPORT_F_START)
262 		virtio_check_driver_offered_feature(vdev, fbit);
263 
264 	return __virtio_test_bit(vdev, fbit);
265 }
266 
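/*
 * Example (illustrative only; the feature bit and helper are made up):
 * a driver checks whether a device feature was negotiated. The bit must
 * be listed in the driver's feature table, otherwise
 * virtio_check_driver_offered_feature() will complain.
 *
 *	if (virtio_has_feature(vdev, VIRTIO_EXAMPLE_F_FOO))
 *		enable_foo_path(vdev);
 */
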
267 static inline void virtio_get_features(struct virtio_device *vdev,
268 				       u64 *features_out)
269 {
270 	if (vdev->config->get_extended_features) {
271 		vdev->config->get_extended_features(vdev, features_out);
272 		return;
273 	}
274 
275 	virtio_features_from_u64(features_out,
276 		vdev->config->get_features(vdev));
277 }
278 
279 /**
280  * virtio_has_dma_quirk - determine whether this device has the DMA quirk
281  * @vdev: the device
282  */
283 static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
284 {
285 	/*
286 	 * Note the reverse polarity of the quirk feature (compared to most
287 	 * other features); this is for compatibility with legacy systems.
288 	 */
289 	return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
290 }
291 
292 static inline
293 int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
294 		    struct virtqueue *vqs[],
295 		    struct virtqueue_info vqs_info[],
296 		    struct irq_affinity *desc)
297 {
298 	return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
299 }
300 
301 static inline
302 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
303 					vq_callback_t *c, const char *n)
304 {
305 	struct virtqueue_info vqs_info[] = {
306 		{ n, c },
307 	};
308 	struct virtqueue *vq;
309 	int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
310 
311 	if (err < 0)
312 		return ERR_PTR(err);
313 	return vq;
314 }
315 
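/*
 * Example (illustrative sketch; the callback and queue name are
 * hypothetical): requesting a single virtqueue from a probe routine.
 *
 *	struct virtqueue *vq;
 *
 *	vq = virtio_find_single_vq(vdev, my_recv_done, "requests");
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 */
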
316 /**
317  * virtio_synchronize_cbs - synchronize with virtqueue callbacks
318  * @dev: the virtio device
319  */
320 static inline
321 void virtio_synchronize_cbs(struct virtio_device *dev)
322 {
323 	if (dev->config->synchronize_cbs) {
324 		dev->config->synchronize_cbs(dev);
325 	} else {
326 		/*
327 		 * A best effort fallback to synchronize with
328 		 * interrupts, preemption and softirq disabled
329 		 * regions. See comment above synchronize_rcu().
330 		 */
331 		synchronize_rcu();
332 	}
333 }
334 
335 /**
336  * virtio_device_ready - enable vq use in probe function
337  * @dev: the virtio device
338  *
339  * A driver must call this to use vqs in its probe function.
340  *
341  * Note: vqs are enabled automatically after probe returns.
342  */
343 static inline
344 void virtio_device_ready(struct virtio_device *dev)
345 {
346 	unsigned status = dev->config->get_status(dev);
347 
348 	WARN_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
349 
350 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
351 	/*
352 	 * The virtio_synchronize_cbs() makes sure vring_interrupt()
353 	 * will see the driver specific setup if it sees vq->broken
354 	 * as false (even if the notifications come before DRIVER_OK).
355 	 */
356 	virtio_synchronize_cbs(dev);
357 	__virtio_unbreak_device(dev);
358 #endif
359 	/*
360 	 * The transport should ensure the visibility of vq->broken
361 	 * before setting DRIVER_OK. See the comments for the transport
362 	 * specific set_status() method.
363 	 *
364 	 * A well behaved device will only notify a virtqueue after
365 	 * DRIVER_OK. This means the device should "see" the coherent
366 	 * memory write that set vq->broken to false, which the driver
367 	 * does before setting DRIVER_OK; the driver's subsequent
368 	 * vring_interrupt() will then see vq->broken as false, so no
369 	 * notification is lost.
370 	 */
371 	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
372 }
373 
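/*
 * Example (illustrative sketch of a typical probe tail; the helpers are
 * hypothetical): virtqueues are set up first, virtio_device_ready() is
 * called, and only then does the driver start using the vqs from probe,
 * e.g. to post receive buffers.
 *
 *	err = my_setup_vqs(vdev);
 *	if (err)
 *		return err;
 *
 *	virtio_device_ready(vdev);
 *	my_fill_rx_buffers(vdev);
 *	return 0;
 */
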
374 static inline
375 const char *virtio_bus_name(struct virtio_device *vdev)
376 {
377 	if (!vdev->config->bus_name)
378 		return "virtio";
379 	return vdev->config->bus_name(vdev);
380 }
381 
382 /**
383  * virtqueue_set_affinity - set the affinity for a virtqueue
384  * @vq: the virtqueue
385  * @cpu_mask: the cpu mask
386  *
387  * Note that this function is best-effort: the affinity hint may not be set
388  * due to config support, irq type and sharing.
389  *
390  */
391 static inline
392 int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
393 {
394 	struct virtio_device *vdev = vq->vdev;
395 	if (vdev->config->set_vq_affinity)
396 		return vdev->config->set_vq_affinity(vq, cpu_mask);
397 	return 0;
398 }
399 
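/*
 * Example (illustrative only; the vqs[] array and nvqs are hypothetical):
 * spreading per-queue interrupt affinity hints across the online CPUs.
 *
 *	int i = 0, cpu;
 *
 *	for_each_online_cpu(cpu)
 *		virtqueue_set_affinity(vqs[i++ % nvqs], cpumask_of(cpu));
 */
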
400 static inline
401 bool virtio_get_shm_region(struct virtio_device *vdev,
402 			   struct virtio_shm_region *region_out, u8 id)
403 {
404 	if (!vdev->config->get_shm_region)
405 		return false;
406 	return vdev->config->get_shm_region(vdev, region_out, id);
407 }
408 
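/*
 * Example (illustrative only; MY_SHM_REGION_ID is a made-up id, real ids
 * are defined per device type): looking up a shared memory region. On
 * success shm.addr and shm.len describe the region.
 *
 *	struct virtio_shm_region shm;
 *
 *	if (!virtio_get_shm_region(vdev, &shm, MY_SHM_REGION_ID))
 *		return -ENXIO;
 */
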
409 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
410 {
411 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
412 		virtio_legacy_is_little_endian();
413 }
414 
415 /* Memory accessors */
416 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
417 {
418 	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
419 }
420 
421 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
422 {
423 	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
424 }
425 
426 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
427 {
428 	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
429 }
430 
431 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
432 {
433 	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
434 }
435 
436 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
437 {
438 	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
439 }
440 
441 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
442 {
443 	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
444 }
445 
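/*
 * Example (illustrative only; the hdr pointer is hypothetical): converting
 * a __virtio16 value written by the device into CPU endianness.
 *
 *	u16 num = virtio16_to_cpu(vdev, hdr->num_buffers);
 */
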
446 #define virtio_to_cpu(vdev, x) \
447 	_Generic((x), \
448 		__u8: (x), \
449 		__virtio16: virtio16_to_cpu((vdev), (x)), \
450 		__virtio32: virtio32_to_cpu((vdev), (x)), \
451 		__virtio64: virtio64_to_cpu((vdev), (x)) \
452 		)
453 
454 #define cpu_to_virtio(vdev, x, m) \
455 	_Generic((m), \
456 		__u8: (x), \
457 		__virtio16: cpu_to_virtio16((vdev), (x)), \
458 		__virtio32: cpu_to_virtio32((vdev), (x)), \
459 		__virtio64: cpu_to_virtio64((vdev), (x)) \
460 		)
461 
462 #define __virtio_native_type(structname, member) \
463 	typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
464 
465 /* Config space accessors. */
466 #define virtio_cread(vdev, structname, member, ptr)			\
467 	do {								\
468 		typeof(((structname*)0)->member) virtio_cread_v;	\
469 									\
470 		might_sleep();						\
471 		/* Sanity check: must match the member's type */	\
472 		typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
473 									\
474 		switch (sizeof(virtio_cread_v)) {			\
475 		case 1:							\
476 		case 2:							\
477 		case 4:							\
478 			vdev->config->get((vdev), 			\
479 					  offsetof(structname, member), \
480 					  &virtio_cread_v,		\
481 					  sizeof(virtio_cread_v));	\
482 			break;						\
483 		default:						\
484 			__virtio_cread_many((vdev), 			\
485 					  offsetof(structname, member), \
486 					  &virtio_cread_v,		\
487 					  1,				\
488 					  sizeof(virtio_cread_v));	\
489 			break;						\
490 		}							\
491 		*(ptr) = virtio_to_cpu(vdev, virtio_cread_v);		\
492 	} while(0)
493 
494 /* Config space accessors. */
495 #define virtio_cwrite(vdev, structname, member, ptr)			\
496 	do {								\
497 		typeof(((structname*)0)->member) virtio_cwrite_v =	\
498 			cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
499 									\
500 		might_sleep();						\
501 		/* Sanity check: must match the member's type */	\
502 		typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
503 									\
504 		vdev->config->set((vdev), offsetof(structname, member),	\
505 				  &virtio_cwrite_v,			\
506 				  sizeof(virtio_cwrite_v));		\
507 	} while(0)
508 
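/*
 * Example (illustrative only, using a made-up config layout): reading and
 * writing a config field through virtio_cread()/virtio_cwrite(). The
 * struct mirrors the device's config space, so its members use the
 * __virtio* types.
 *
 *	struct virtio_foo_config {
 *		__virtio32 cap;
 *	};
 *	u32 cap;
 *
 *	virtio_cread(vdev, struct virtio_foo_config, cap, &cap);
 *	cap = min(cap, 64U);
 *	virtio_cwrite(vdev, struct virtio_foo_config, cap, &cap);
 */
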
509 /*
510  * Nothing virtio-specific about these, but let's worry about generalizing
511  * these later.
512  */
513 #define virtio_le_to_cpu(x) \
514 	_Generic((x), \
515 		__u8: (u8)(x), \
516 		 __le16: (u16)le16_to_cpu(x), \
517 		 __le32: (u32)le32_to_cpu(x), \
518 		 __le64: (u64)le64_to_cpu(x) \
519 		)
520 
521 #define virtio_cpu_to_le(x, m) \
522 	_Generic((m), \
523 		 __u8: (x), \
524 		 __le16: cpu_to_le16(x), \
525 		 __le32: cpu_to_le32(x), \
526 		 __le64: cpu_to_le64(x) \
527 		)
528 
529 /* LE (e.g. modern) Config space accessors. */
530 #define virtio_cread_le(vdev, structname, member, ptr)			\
531 	do {								\
532 		typeof(((structname*)0)->member) virtio_cread_v;	\
533 									\
534 		might_sleep();						\
535 		/* Sanity check: must match the member's type */	\
536 		typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
537 									\
538 		switch (sizeof(virtio_cread_v)) {			\
539 		case 1:							\
540 		case 2:							\
541 		case 4:							\
542 			vdev->config->get((vdev), 			\
543 					  offsetof(structname, member), \
544 					  &virtio_cread_v,		\
545 					  sizeof(virtio_cread_v));	\
546 			break;						\
547 		default:						\
548 			__virtio_cread_many((vdev), 			\
549 					  offsetof(structname, member), \
550 					  &virtio_cread_v,		\
551 					  1,				\
552 					  sizeof(virtio_cread_v));	\
553 			break;						\
554 		}							\
555 		*(ptr) = virtio_le_to_cpu(virtio_cread_v);		\
556 	} while(0)
557 
558 #define virtio_cwrite_le(vdev, structname, member, ptr)			\
559 	do {								\
560 		typeof(((structname*)0)->member) virtio_cwrite_v =	\
561 			virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
562 									\
563 		might_sleep();						\
564 		/* Sanity check: must match the member's type */	\
565 		typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
566 									\
567 		vdev->config->set((vdev), offsetof(structname, member),	\
568 				  &virtio_cwrite_v,			\
569 				  sizeof(virtio_cwrite_v));		\
570 	} while(0)
571 
572 
573 /* Read @count fields, @bytes each. */
574 static inline void __virtio_cread_many(struct virtio_device *vdev,
575 				       unsigned int offset,
576 				       void *buf, size_t count, size_t bytes)
577 {
578 	u32 old, gen = vdev->config->generation ?
579 		vdev->config->generation(vdev) : 0;
580 	int i;
581 
582 	might_sleep();
583 	do {
584 		old = gen;
585 
586 		for (i = 0; i < count; i++)
587 			vdev->config->get(vdev, offset + bytes * i,
588 					  buf + i * bytes, bytes);
589 
590 		gen = vdev->config->generation ?
591 			vdev->config->generation(vdev) : 0;
592 	} while (gen != old);
593 }
594 
595 static inline void virtio_cread_bytes(struct virtio_device *vdev,
596 				      unsigned int offset,
597 				      void *buf, size_t len)
598 {
599 	__virtio_cread_many(vdev, offset, buf, len, 1);
600 }
601 
602 static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
603 {
604 	u8 ret;
605 
606 	might_sleep();
607 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
608 	return ret;
609 }
610 
611 static inline void virtio_cwrite8(struct virtio_device *vdev,
612 				  unsigned int offset, u8 val)
613 {
614 	might_sleep();
615 	vdev->config->set(vdev, offset, &val, sizeof(val));
616 }
617 
618 static inline u16 virtio_cread16(struct virtio_device *vdev,
619 				 unsigned int offset)
620 {
621 	__virtio16 ret;
622 
623 	might_sleep();
624 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
625 	return virtio16_to_cpu(vdev, ret);
626 }
627 
628 static inline void virtio_cwrite16(struct virtio_device *vdev,
629 				   unsigned int offset, u16 val)
630 {
631 	__virtio16 v;
632 
633 	might_sleep();
634 	v = cpu_to_virtio16(vdev, val);
635 	vdev->config->set(vdev, offset, &v, sizeof(v));
636 }
637 
638 static inline u32 virtio_cread32(struct virtio_device *vdev,
639 				 unsigned int offset)
640 {
641 	__virtio32 ret;
642 
643 	might_sleep();
644 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
645 	return virtio32_to_cpu(vdev, ret);
646 }
647 
648 static inline void virtio_cwrite32(struct virtio_device *vdev,
649 				   unsigned int offset, u32 val)
650 {
651 	__virtio32 v;
652 
653 	might_sleep();
654 	v = cpu_to_virtio32(vdev, val);
655 	vdev->config->set(vdev, offset, &v, sizeof(v));
656 }
657 
658 static inline u64 virtio_cread64(struct virtio_device *vdev,
659 				 unsigned int offset)
660 {
661 	__virtio64 ret;
662 
663 	__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
664 	return virtio64_to_cpu(vdev, ret);
665 }
666 
667 static inline void virtio_cwrite64(struct virtio_device *vdev,
668 				   unsigned int offset, u64 val)
669 {
670 	__virtio64 v;
671 
672 	might_sleep();
673 	v = cpu_to_virtio64(vdev, val);
674 	vdev->config->set(vdev, offset, &v, sizeof(v));
675 }
676 
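/*
 * Example (illustrative only; the offset is made up): the fixed-size
 * helpers above take a raw byte offset into the config space instead of
 * a struct/member pair.
 *
 *	u32 val = virtio_cread32(vdev, 8);
 *
 *	virtio_cwrite32(vdev, 8, val + 1);
 */
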
677 /* Conditional config space accessors. */
678 #define virtio_cread_feature(vdev, fbit, structname, member, ptr)	\
679 	({								\
680 		int _r = 0;						\
681 		if (!virtio_has_feature(vdev, fbit))			\
682 			_r = -ENOENT;					\
683 		else							\
684 			virtio_cread((vdev), structname, member, ptr);	\
685 		_r;							\
686 	})
687 
688 /* Conditional config space accessors. */
689 #define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)	\
690 	({								\
691 		int _r = 0;						\
692 		if (!virtio_has_feature(vdev, fbit))			\
693 			_r = -ENOENT;					\
694 		else							\
695 			virtio_cread_le((vdev), structname, member, ptr); \
696 		_r;							\
697 	})
698 
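/*
 * Example (illustrative only; the feature bit, config struct and fallback
 * value are hypothetical): read a config field only when the matching
 * feature was negotiated, otherwise fall back to a default.
 *
 *	if (virtio_cread_feature(vdev, VIRTIO_FOO_F_CAP,
 *				 struct virtio_foo_config, cap, &cap))
 *		cap = 16;
 */
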
699 #endif /* _LINUX_VIRTIO_CONFIG_H */
700