/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_H
#define _LINUX_VIRTIO_H
/* Everything a virtio driver needs to work with any particular virtio
 * implementation. */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/virtio_features.h>

struct module;

/**
 * struct virtqueue - a queue to register buffers for sending or receiving.
 * @list: the chain of virtqueues for this device
 * @callback: the function to call when buffers are consumed (can be NULL).
 * @name: the name of this virtqueue (mainly for debugging)
 * @vdev: the virtio device this queue was created for.
 * @priv: a pointer for the virtqueue implementation to use.
 * @index: the zero-based ordinal number for this queue.
 * @num_free: number of elements we expect to be able to fit.
 * @num_max: the maximum number of elements supported by the device.
 * @reset: vq is in reset state or not.
 *
 * A note on @num_free: with indirect buffers, each buffer needs one
 * element in the queue, otherwise a buffer will need one element per
 * sg element.
 */
struct virtqueue {
	struct list_head list;
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	unsigned int index;
	unsigned int num_free;
	unsigned int num_max;
	bool reset;
	void *priv;
};
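
/*
 * Illustrative sketch (not part of this header): a driver checking whether
 * its queue still has room before submitting another request.  The
 * MAX_SEGMENTS bound and the too_full() helper are assumptions made up for
 * the example; with indirect descriptors negotiated a request consumes a
 * single ring element, otherwise one element per scatterlist entry.
 *
 *	static bool too_full(struct virtqueue *vq)
 *	{
 *		return vq->num_free < MAX_SEGMENTS;
 *	}
 */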

struct vduse_iova_domain;

union virtio_map {
	/* Device that performs DMA */
	struct device *dma_dev;
	/* VDUSE specific mapping data */
	struct vduse_iova_domain *iova_domain;
};

int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp);
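
/*
 * Usage sketch (illustrative only): queue a driver-owned request for
 * transmission and notify the device.  "vq" and "req" are assumed to
 * exist; a real driver must also cope with -ENOSPC when the ring is full.
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, req->buf, req->len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */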

int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp);

int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist sg[], unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp);

int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
				  struct scatterlist *sg, unsigned int num,
				  void *data,
				  void *ctx,
				  gfp_t gfp);

int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
				   struct scatterlist *sg, unsigned int num,
				   void *data,
				   gfp_t gfp);

int virtqueue_add_sgs(struct virtqueue *vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp);
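
/*
 * Usage sketch (illustrative only): a request with one readable (out) and
 * two writable (in) scatterlists, in the virtio-blk style of header,
 * payload and status.  The "req" layout is an assumption for the example.
 *
 *	struct scatterlist hdr, data, status, *sgs[3];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&data, req->buf, req->len);
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &data;
 *	sgs[2] = &status;
 *	if (virtqueue_add_sgs(vq, sgs, 1, 2, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */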

struct device *virtqueue_dma_dev(struct virtqueue *vq);

bool virtqueue_kick(struct virtqueue *vq);

bool virtqueue_kick_prepare(struct virtqueue *vq);

bool virtqueue_notify(struct virtqueue *vq);

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
			    void **ctx);
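
/*
 * Usage sketch (illustrative only): reap completed buffers from the
 * virtqueue callback.  "len" is the number of bytes the device wrote into
 * the writable part of the buffer; complete_request() is a hypothetical
 * driver helper.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *req;
 *
 *		while ((req = virtqueue_get_buf(vq, &len)))
 *			complete_request(req, len);
 *	}
 */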

void virtqueue_disable_cb(struct virtqueue *vq);

bool virtqueue_enable_cb(struct virtqueue *vq);

unsigned int virtqueue_enable_cb_prepare(struct virtqueue *vq);

bool virtqueue_poll(struct virtqueue *vq, unsigned int last_used_idx);

bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
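
/*
 * Usage sketch (illustrative only): the usual race-free way to drain a
 * virtqueue and then re-enable callbacks.  virtqueue_enable_cb() returns
 * false if more buffers became available while callbacks were disabled,
 * so processing must continue; complete_request() is hypothetical.
 *
 *	unsigned int len;
 *	void *req;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((req = virtqueue_get_buf(vq, &len)))
 *			complete_request(req, len);
 *	} while (!virtqueue_enable_cb(vq));
 */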

void *virtqueue_detach_unused_buf(struct virtqueue *vq);

unsigned int virtqueue_get_vring_size(const struct virtqueue *vq);

bool virtqueue_is_broken(const struct virtqueue *vq);

const struct vring *virtqueue_get_vring(const struct virtqueue *vq);
dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);

int virtqueue_resize(struct virtqueue *vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf),
		     void (*recycle_done)(struct virtqueue *vq));
int virtqueue_reset(struct virtqueue *vq,
		    void (*recycle)(struct virtqueue *vq, void *buf),
		    void (*recycle_done)(struct virtqueue *vq));
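
/*
 * Usage sketch (illustrative only): change the ring size while handing
 * still-queued buffers back to the driver.  free_unused_req() is a
 * hypothetical helper that releases a buffer previously passed as "data"
 * to the virtqueue_add_*() calls; the recycle_done callback is optional.
 *
 *	static void free_unused_req(struct virtqueue *vq, void *buf)
 *	{
 *		kfree(buf);
 *	}
 *
 *	int err = virtqueue_resize(vq, new_ring_size, free_unused_req, NULL);
 */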

struct virtio_admin_cmd {
	__le16 opcode;
	__le16 group_type;
	__le64 group_member_id;
	struct scatterlist *data_sg;
	struct scatterlist *result_sg;
	struct completion completion;
	u32 result_sg_size;
	int ret;
};

/**
 * struct virtio_device - representation of a device using virtio
 * @index: unique position on the virtio bus
 * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
 * @config_core_enabled: configuration change reporting enabled by core
 * @config_driver_disabled: configuration change reporting disabled by
 *                          a driver
 * @config_change_pending: configuration change reported while disabled
 * @config_lock: protects configuration change reporting
 * @vqs_list_lock: protects @vqs.
 * @dev: underlying device.
 * @id: the device type identification (used to match it with a driver).
 * @config: the configuration ops for this device.
 * @vringh_config: configuration ops for host vrings.
 * @map: the mapping operations for this device.
 * @vqs: the list of virtqueues for this device.
 * @features: the 64 lower features supported by both driver and device.
 * @features_array: the full features space supported by both driver and
 *		    device.
 * @priv: private pointer for the driver's use.
 * @vmap: the mapping token (see union virtio_map) used for buffers of
 *        this device.
 * @debugfs_dir: debugfs directory entry.
 * @debugfs_filter_features: features to be filtered set by debugfs.
 */
struct virtio_device {
	int index;
	bool failed;
	bool config_core_enabled;
	bool config_driver_disabled;
	bool config_change_pending;
	spinlock_t config_lock;
	spinlock_t vqs_list_lock;
	struct device dev;
	struct virtio_device_id id;
	const struct virtio_config_ops *config;
	const struct vringh_config_ops *vringh_config;
	const struct virtio_map_ops *map;
	struct list_head vqs;
	VIRTIO_DECLARE_FEATURES(features);
	void *priv;
	union virtio_map vmap;
#ifdef CONFIG_VIRTIO_DEBUG
	struct dentry *debugfs_dir;
	u64 debugfs_filter_features[VIRTIO_FEATURES_U64S];
#endif
};

#define dev_to_virtio(_dev)	container_of_const(_dev, struct virtio_device, dev)
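
/*
 * Usage sketch (illustrative only): recover the virtio device from a bare
 * struct device pointer, e.g. inside a device attribute callback.  The
 * attribute itself is a made-up example.
 *
 *	static ssize_t status_show(struct device *dev,
 *				   struct device_attribute *attr, char *buf)
 *	{
 *		struct virtio_device *vdev = dev_to_virtio(dev);
 *
 *		return sysfs_emit(buf, "0x%x\n", vdev->config->get_status(vdev));
 *	}
 */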

void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
bool is_virtio_device(struct device *dev);

void virtio_break_device(struct virtio_device *dev);
void __virtio_unbreak_device(struct virtio_device *dev);

void __virtqueue_break(struct virtqueue *_vq);
void __virtqueue_unbreak(struct virtqueue *_vq);

void virtio_config_changed(struct virtio_device *dev);

void virtio_config_driver_disable(struct virtio_device *dev);
void virtio_config_driver_enable(struct virtio_device *dev);

#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
int virtio_device_reset_prepare(struct virtio_device *dev);
int virtio_device_reset_done(struct virtio_device *dev);

size_t virtio_max_dma_size(const struct virtio_device *vdev);

#define virtio_device_for_each_vq(vdev, vq) \
	list_for_each_entry(vq, &(vdev)->vqs, list)
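
/*
 * Usage sketch (illustrative only): walk every virtqueue of a device.
 * Take @vqs_list_lock if the list may change concurrently, as the core
 * itself does when breaking a device.
 *
 *	struct virtqueue *vq;
 *
 *	spin_lock(&vdev->vqs_list_lock);
 *	virtio_device_for_each_vq(vdev, vq)
 *		dev_dbg(&vdev->dev, "vq %u: %s\n", vq->index, vq->name);
 *	spin_unlock(&vdev->vqs_list_lock);
 */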

/**
 * struct virtio_driver - operations for a virtio I/O driver
 * @driver: underlying device driver (populate name).
 * @id_table: the ids serviced by this driver.
 * @feature_table: an array of feature numbers supported by this driver.
 * @feature_table_size: number of entries in the feature table array.
 * @feature_table_legacy: same as feature_table but when working in legacy mode.
 * @feature_table_size_legacy: number of entries in feature table legacy array.
 * @validate: the function to call to validate features and config space.
 *            Returns 0 or -errno.
 * @probe: the function to call when a device is found.  Returns 0 or -errno.
 * @scan: optional function to call after successful probe; intended
 *    for virtio-scsi to invoke a scan.
 * @remove: the function to call when a device is removed.
 * @config_changed: optional function to call when the device configuration
 *    changes; may be called in interrupt context.
 * @freeze: optional function to call during suspend/hibernation.
 * @restore: optional function to call on resume.
 * @reset_prepare: optional function to call when a transport specific reset
 *    occurs.
 * @reset_done: optional function to call after transport specific reset
 *    operation has finished.
 * @shutdown: synchronize with the device on shutdown. If provided, replaces
 *    the virtio core implementation.
 */
struct virtio_driver {
	struct device_driver driver;
	const struct virtio_device_id *id_table;
	const unsigned int *feature_table;
	unsigned int feature_table_size;
	const unsigned int *feature_table_legacy;
	unsigned int feature_table_size_legacy;
	int (*validate)(struct virtio_device *dev);
	int (*probe)(struct virtio_device *dev);
	void (*scan)(struct virtio_device *dev);
	void (*remove)(struct virtio_device *dev);
	void (*config_changed)(struct virtio_device *dev);
	int (*freeze)(struct virtio_device *dev);
	int (*restore)(struct virtio_device *dev);
	int (*reset_prepare)(struct virtio_device *dev);
	int (*reset_done)(struct virtio_device *dev);
	void (*shutdown)(struct virtio_device *dev);
};

#define drv_to_virtio(__drv)	container_of_const(__drv, struct virtio_driver, driver)

/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_virtio_driver(drv) \
	__register_virtio_driver(drv, THIS_MODULE)
int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
void unregister_virtio_driver(struct virtio_driver *drv);

/* module_virtio_driver() - Helper macro for drivers that don't do
 * anything special in module init/exit.  This eliminates a lot of
 * boilerplate.  Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_virtio_driver(__virtio_driver) \
	module_driver(__virtio_driver, register_virtio_driver, \
			unregister_virtio_driver)
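
/*
 * Usage sketch (illustrative only): the skeleton of a virtio driver.  The
 * device ID, callbacks and names are made-up placeholders.
 *
 *	static const struct virtio_device_id id_table[] = {
 *		{ VIRTIO_ID_EXAMPLE, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static struct virtio_driver example_driver = {
 *		.driver.name	= KBUILD_MODNAME,
 *		.id_table	= id_table,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_virtio_driver(example_driver);
 *	MODULE_DEVICE_TABLE(virtio, id_table);
 */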

void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
				   union virtio_map mapping_token,
				   size_t size, dma_addr_t *dma_handle,
				   gfp_t gfp);

void virtqueue_map_free_coherent(struct virtio_device *vdev,
				 union virtio_map mapping_token,
				 size_t size, void *vaddr,
				 dma_addr_t dma_handle);
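
/*
 * Usage sketch (illustrative only): allocate and release a coherent area
 * through a mapping token.  Passing the device's @vmap as the token is an
 * assumption made for the example; transports may provide a different
 * union virtio_map.
 *
 *	dma_addr_t handle;
 *	void *area;
 *
 *	area = virtqueue_map_alloc_coherent(vdev, vdev->vmap, PAGE_SIZE,
 *					    &handle, GFP_KERNEL);
 *	if (area)
 *		virtqueue_map_free_coherent(vdev, vdev->vmap, PAGE_SIZE,
 *					    area, handle);
 */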

dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
				    struct page *page,
				    unsigned long offset,
				    size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs);

void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
				dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs);

dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr, size_t size,
				      enum dma_data_direction dir, unsigned long attrs);
void virtqueue_unmap_single_attrs(const struct virtqueue *_vq, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir,
				  unsigned long attrs);
int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr);
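
/*
 * Usage sketch (illustrative only): map a driver buffer for a premapped
 * virtqueue, check for failure, and unmap it once the device is done with
 * it.  "buf" and "len" are assumed to exist.
 *
 *	dma_addr_t addr;
 *
 *	addr = virtqueue_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
 *	if (virtqueue_map_mapping_error(vq, addr))
 *		return -ENOMEM;
 *	...
 *	virtqueue_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);
 */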

bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq, dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir);
void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq, dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir);

#ifdef CONFIG_VIRTIO_DEBUG
void virtio_debug_device_init(struct virtio_device *dev);
void virtio_debug_device_exit(struct virtio_device *dev);
void virtio_debug_device_filter_features(struct virtio_device *dev);
void virtio_debug_init(void);
void virtio_debug_exit(void);
#else
static inline void virtio_debug_device_init(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_exit(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_filter_features(struct virtio_device *dev)
{
}

static inline void virtio_debug_init(void)
{
}

static inline void virtio_debug_exit(void)
{
}
#endif

#endif /* _LINUX_VIRTIO_H */