/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_H
#define _LINUX_VIRTIO_H
/* Everything a virtio driver needs to work with any particular virtio
 * implementation. */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>

/**
 * struct virtqueue - a queue to register buffers for sending or receiving.
 * @list: the chain of virtqueues for this device
 * @callback: the function to call when buffers are consumed (can be NULL).
 * @name: the name of this virtqueue (mainly for debugging)
 * @vdev: the virtio device this queue was created for.
 * @priv: a pointer for the virtqueue implementation to use.
 * @index: the zero-based ordinal number for this queue.
 * @num_free: number of elements we expect to be able to fit.
 * @num_max: the maximum number of elements supported by the device.
 * @reset: vq is in reset state or not.
 *
 * A note on @num_free: with indirect buffers, each buffer needs one
 * element in the queue, otherwise a buffer will need one element per
 * sg element.
 */
struct virtqueue {
	struct list_head list;
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	unsigned int index;
	unsigned int num_free;
	unsigned int num_max;
	bool reset;
	void *priv;
};

int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp);
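
/*
 * Illustrative sketch (not part of this header): a driver typically wraps a
 * buffer in a scatterlist, queues it, and then kicks the device.  The names
 * "buf", "len" and "my_vq" below are hypothetical, and the gfp flag depends
 * on the calling context.
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(my_vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (err)
 *		return err;
 *	virtqueue_kick(my_vq);
 *
 * The "data" token (here the buffer pointer itself) is handed back later by
 * virtqueue_get_buf() once the device has consumed the buffer.
 */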

int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp);

int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist sg[], unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp);

int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
				  struct scatterlist *sg, unsigned int num,
				  void *data,
				  void *ctx,
				  gfp_t gfp);

int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
				   struct scatterlist *sg, unsigned int num,
				   void *data,
				   gfp_t gfp);

int virtqueue_add_sgs(struct virtqueue *vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp);
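
/*
 * Illustrative sketch (not part of this header): a mixed request with an
 * outgoing header, an outgoing data buffer and an incoming status byte, in
 * the style of a block-device driver.  "req", "hdr", "data", "data_len" and
 * "status" are hypothetical driver-side names.
 *
 *	struct scatterlist hdr_sg, data_sg, status_sg, *sgs[3];
 *	int err;
 *
 *	sg_init_one(&hdr_sg, &req->hdr, sizeof(req->hdr));
 *	sgs[0] = &hdr_sg;
 *	sg_init_one(&data_sg, req->data, req->data_len);
 *	sgs[1] = &data_sg;
 *	sg_init_one(&status_sg, &req->status, sizeof(req->status));
 *	sgs[2] = &status_sg;
 *
 *	// two driver-to-device sgs followed by one device-to-driver sg
 *	err = virtqueue_add_sgs(vq, sgs, 2, 1, req, GFP_ATOMIC);
 */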

struct device *virtqueue_dma_dev(struct virtqueue *vq);

bool virtqueue_kick(struct virtqueue *vq);

bool virtqueue_kick_prepare(struct virtqueue *vq);

bool virtqueue_notify(struct virtqueue *vq);
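
/*
 * Illustrative sketch (not part of this header): virtqueue_kick_prepare() and
 * virtqueue_notify() let a driver decide whether a notification is needed
 * while holding its own lock, but issue the (possibly expensive) notification
 * after dropping it.  "dev->lock", "sg" and "buf" are hypothetical.
 *
 *	bool notify;
 *
 *	spin_lock(&dev->lock);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	notify = virtqueue_kick_prepare(vq);
 *	spin_unlock(&dev->lock);
 *
 *	if (notify)
 *		virtqueue_notify(vq);
 */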

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
			    void **ctx);

void virtqueue_disable_cb(struct virtqueue *vq);

bool virtqueue_enable_cb(struct virtqueue *vq);
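
/*
 * Illustrative sketch (not part of this header): the usual race-free pattern
 * for draining a virtqueue from its callback.  virtqueue_enable_cb() reports
 * whether more buffers may have arrived while callbacks were disabled, in
 * which case the loop runs again.  "consume()" is a hypothetical helper.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		for (;;) {
 *			virtqueue_disable_cb(vq);
 *			while ((buf = virtqueue_get_buf(vq, &len)))
 *				consume(buf, len);
 *			if (virtqueue_enable_cb(vq))
 *				break;
 *		}
 *	}
 */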

unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);

bool virtqueue_poll(struct virtqueue *vq, unsigned);

bool virtqueue_enable_cb_delayed(struct virtqueue *vq);

void *virtqueue_detach_unused_buf(struct virtqueue *vq);

unsigned int virtqueue_get_vring_size(const struct virtqueue *vq);

bool virtqueue_is_broken(const struct virtqueue *vq);

const struct vring *virtqueue_get_vring(const struct virtqueue *vq);
dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);

int virtqueue_resize(struct virtqueue *vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf),
		     void (*recycle_done)(struct virtqueue *vq));
int virtqueue_reset(struct virtqueue *vq,
		    void (*recycle)(struct virtqueue *vq, void *buf),
		    void (*recycle_done)(struct virtqueue *vq));
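
/*
 * Illustrative sketch (not part of this header): resizing a ring.  The core
 * hands every still-queued buffer to the recycle callback so the driver can
 * free or repost it; recycle_done, when provided, runs once all buffers have
 * been recycled.  "my_recycle" and "new_num" are hypothetical.
 *
 *	static void my_recycle(struct virtqueue *vq, void *buf)
 *	{
 *		kfree(buf);
 *	}
 *
 *	err = virtqueue_resize(vq, new_num, my_recycle, NULL);
 */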

struct virtio_admin_cmd {
	__le16 opcode;
	__le16 group_type;
	__le64 group_member_id;
	struct scatterlist *data_sg;
	struct scatterlist *result_sg;
	struct completion completion;
	u32 result_sg_size;
	int ret;
};

/**
 * struct virtio_device - representation of a device using virtio
 * @index: unique position on the virtio bus
 * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
 * @config_core_enabled: configuration change reporting enabled by core
 * @config_driver_disabled: configuration change reporting disabled by
 *                          a driver
 * @config_change_pending: configuration change reported while disabled
 * @config_lock: protects configuration change reporting
 * @vqs_list_lock: protects @vqs.
 * @dev: underlying device.
 * @id: the device type identification (used to match it with a driver).
 * @config: the configuration ops for this device.
 * @vringh_config: configuration ops for host vrings.
 * @vqs: the list of virtqueues for this device.
 * @features: the features supported by both driver and device.
 * @priv: private pointer for the driver's use.
 * @debugfs_dir: debugfs directory entry.
 * @debugfs_filter_features: features to be filtered, set via debugfs.
 */
struct virtio_device {
	int index;
	bool failed;
	bool config_core_enabled;
	bool config_driver_disabled;
	bool config_change_pending;
	spinlock_t config_lock;
	spinlock_t vqs_list_lock;
	struct device dev;
	struct virtio_device_id id;
	const struct virtio_config_ops *config;
	const struct vringh_config_ops *vringh_config;
	struct list_head vqs;
	u64 features;
	void *priv;
#ifdef CONFIG_VIRTIO_DEBUG
	struct dentry *debugfs_dir;
	u64 debugfs_filter_features;
#endif
};

#define dev_to_virtio(_dev)	container_of_const(_dev, struct virtio_device, dev)

void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
bool is_virtio_device(struct device *dev);

void virtio_break_device(struct virtio_device *dev);
void __virtio_unbreak_device(struct virtio_device *dev);

void __virtqueue_break(struct virtqueue *_vq);
void __virtqueue_unbreak(struct virtqueue *_vq);

void virtio_config_changed(struct virtio_device *dev);

void virtio_config_driver_disable(struct virtio_device *dev);
void virtio_config_driver_enable(struct virtio_device *dev);

#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
int virtio_device_reset_prepare(struct virtio_device *dev);
int virtio_device_reset_done(struct virtio_device *dev);

size_t virtio_max_dma_size(const struct virtio_device *vdev);

#define virtio_device_for_each_vq(vdev, vq) \
	list_for_each_entry(vq, &vdev->vqs, list)
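
/*
 * Illustrative sketch (not part of this header): walking every virtqueue of a
 * device, e.g. to quiesce callbacks.  @vqs_list_lock protects the @vqs list,
 * so a caller that can race with queue creation or deletion should hold it.
 *
 *	struct virtqueue *vq;
 *
 *	spin_lock(&vdev->vqs_list_lock);
 *	virtio_device_for_each_vq(vdev, vq)
 *		virtqueue_disable_cb(vq);
 *	spin_unlock(&vdev->vqs_list_lock);
 */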

/**
 * struct virtio_driver - operations for a virtio I/O driver
 * @driver: underlying device driver (populate name).
 * @id_table: the ids serviced by this driver.
 * @feature_table: an array of feature numbers supported by this driver.
 * @feature_table_size: number of entries in the feature table array.
 * @feature_table_legacy: same as feature_table but when working in legacy mode.
 * @feature_table_size_legacy: number of entries in feature table legacy array.
 * @validate: the function to call to validate features and config space.
 *            Returns 0 or -errno.
 * @probe: the function to call when a device is found.  Returns 0 or -errno.
 * @scan: optional function to call after successful probe; intended
 *    for virtio-scsi to invoke a scan.
 * @remove: the function to call when a device is removed.
 * @config_changed: optional function to call when the device configuration
 *    changes; may be called in interrupt context.
 * @freeze: optional function to call during suspend/hibernation.
 * @restore: optional function to call on resume.
 * @reset_prepare: optional function to call when a transport specific reset
 *    occurs.
 * @reset_done: optional function to call after transport specific reset
 *    operation has finished.
 */
struct virtio_driver {
	struct device_driver driver;
	const struct virtio_device_id *id_table;
	const unsigned int *feature_table;
	unsigned int feature_table_size;
	const unsigned int *feature_table_legacy;
	unsigned int feature_table_size_legacy;
	int (*validate)(struct virtio_device *dev);
	int (*probe)(struct virtio_device *dev);
	void (*scan)(struct virtio_device *dev);
	void (*remove)(struct virtio_device *dev);
	void (*config_changed)(struct virtio_device *dev);
	int (*freeze)(struct virtio_device *dev);
	int (*restore)(struct virtio_device *dev);
	int (*reset_prepare)(struct virtio_device *dev);
	int (*reset_done)(struct virtio_device *dev);
};

#define drv_to_virtio(__drv)	container_of_const(__drv, struct virtio_driver, driver)

/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_virtio_driver(drv) \
	__register_virtio_driver(drv, THIS_MODULE)
int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
void unregister_virtio_driver(struct virtio_driver *drv);

/* module_virtio_driver() - Helper macro for drivers that don't do
 * anything special in module init/exit.  This eliminates a lot of
 * boilerplate.  Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit()
 */
#define module_virtio_driver(__virtio_driver) \
	module_driver(__virtio_driver, register_virtio_driver, \
			unregister_virtio_driver)
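
/*
 * Illustrative sketch (not part of this header): a minimal driver definition.
 * "VIRTIO_ID_EXAMPLE", "example_probe" and "example_remove" are hypothetical;
 * real device IDs live in include/uapi/linux/virtio_ids.h.
 *
 *	static const struct virtio_device_id id_table[] = {
 *		{ VIRTIO_ID_EXAMPLE, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static struct virtio_driver example_driver = {
 *		.driver.name	= KBUILD_MODNAME,
 *		.id_table	= id_table,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_virtio_driver(example_driver);
 */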

dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
					  enum dma_data_direction dir, unsigned long attrs);
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);

bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir);
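
/*
 * Illustrative sketch (not part of this header): these mapping helpers pair
 * with the *_premapped add variants; the driver maps a buffer against the
 * virtqueue's DMA device, syncs it around each use, and unmaps it when done.
 * "buf" and "len" are hypothetical.
 *
 *	dma_addr_t addr;
 *
 *	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
 *	if (virtqueue_dma_mapping_error(vq, addr))
 *		return -ENOMEM;
 *
 *	if (virtqueue_dma_need_sync(vq, addr))
 *		virtqueue_dma_sync_single_range_for_device(vq, addr, 0, len,
 *							   DMA_TO_DEVICE);
 *	// ... queue via virtqueue_add_outbuf_premapped(), wait for completion ...
 *
 *	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);
 */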

#ifdef CONFIG_VIRTIO_DEBUG
void virtio_debug_device_init(struct virtio_device *dev);
void virtio_debug_device_exit(struct virtio_device *dev);
void virtio_debug_device_filter_features(struct virtio_device *dev);
void virtio_debug_init(void);
void virtio_debug_exit(void);
#else
static inline void virtio_debug_device_init(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_exit(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_filter_features(struct virtio_device *dev)
{
}

static inline void virtio_debug_init(void)
{
}

static inline void virtio_debug_exit(void)
{
}
#endif

#endif /* _LINUX_VIRTIO_H */