/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_H
#define _LINUX_VIRTIO_H
/* Everything a virtio driver needs to work with any particular virtio
 * implementation. */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/virtio_features.h>

/**
 * struct virtqueue - a queue to register buffers for sending or receiving.
 * @list: the chain of virtqueues for this device
 * @callback: the function to call when buffers are consumed (can be NULL).
 * @name: the name of this virtqueue (mainly for debugging)
 * @vdev: the virtio device this queue was created for.
 * @priv: a pointer for the virtqueue implementation to use.
 * @index: the zero-based ordinal number for this queue.
 * @num_free: number of elements we expect to be able to fit.
 * @num_max: the maximum number of elements supported by the device.
 * @reset: vq is in reset state or not.
 *
 * A note on @num_free: with indirect buffers, each buffer needs one
 * element in the queue, otherwise a buffer will need one element per
 * sg element.
 */
struct virtqueue {
	struct list_head list;
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	unsigned int index;
	unsigned int num_free;
	unsigned int num_max;
	bool reset;
	void *priv;
};
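
/*
 * Example (illustrative, not part of this header): a minimal sketch of a
 * @callback handler.  The transport invokes it once buffers have been
 * consumed; it typically runs from the transport's interrupt handling, so
 * heavier work is deferred.  struct my_device, its my_work member and
 * my_vq_done are hypothetical names.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		struct my_device *md = vq->vdev->priv;
 *
 *		schedule_work(&md->my_work);
 *	}
 */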

int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp);

int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp);

int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist sg[], unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp);

int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
				  struct scatterlist *sg, unsigned int num,
				  void *data,
				  void *ctx,
				  gfp_t gfp);

int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
				   struct scatterlist *sg, unsigned int num,
				   void *data,
				   gfp_t gfp);

int virtqueue_add_sgs(struct virtqueue *vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp);
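
/*
 * Example (illustrative, not part of this header): queueing a request
 * buffer followed by a response buffer with virtqueue_add_sgs() and then
 * notifying the device.  my_queue_request, my_vq, req and resp are
 * hypothetical names; error handling is reduced to the essentials.
 *
 *	static int my_queue_request(struct virtqueue *my_vq,
 *				    void *req, unsigned int req_len,
 *				    void *resp, unsigned int resp_len)
 *	{
 *		struct scatterlist out, in, *sgs[2];
 *		int err;
 *
 *		sg_init_one(&out, req, req_len);
 *		sg_init_one(&in, resp, resp_len);
 *		sgs[0] = &out;
 *		sgs[1] = &in;
 *
 *		err = virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC);
 *		if (err)
 *			return err;
 *
 *		virtqueue_kick(my_vq);
 *		return 0;
 *	}
 */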

struct device *virtqueue_dma_dev(struct virtqueue *vq);

bool virtqueue_kick(struct virtqueue *vq);

bool virtqueue_kick_prepare(struct virtqueue *vq);

bool virtqueue_notify(struct virtqueue *vq);
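
/*
 * Example (illustrative, not part of this header): splitting the notify
 * decision from the notification itself so the (often expensive) exit to
 * the host happens outside a driver lock.  my_lock, sg and buf stand in
 * for the driver's own lock and buffers.
 *
 *	unsigned long flags;
 *	bool notify;
 *	int err;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	notify = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 *	if (!err && notify)
 *		virtqueue_notify(vq);
 */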

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
			    void **ctx);

void virtqueue_disable_cb(struct virtqueue *vq);

bool virtqueue_enable_cb(struct virtqueue *vq);

unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);

bool virtqueue_poll(struct virtqueue *vq, unsigned);

bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
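
/*
 * Example (illustrative, not part of this header): reclaiming completed
 * buffers from a callback while racing with new completions.  Callbacks
 * stay disabled while draining and are only left enabled once the queue
 * is seen empty; my_vq_done and my_free are hypothetical names.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		do {
 *			virtqueue_disable_cb(vq);
 *			while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *				my_free(buf, len);
 *		} while (!virtqueue_enable_cb(vq));
 *	}
 */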

void *virtqueue_detach_unused_buf(struct virtqueue *vq);

unsigned int virtqueue_get_vring_size(const struct virtqueue *vq);

bool virtqueue_is_broken(const struct virtqueue *vq);

const struct vring *virtqueue_get_vring(const struct virtqueue *vq);
dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);

int virtqueue_resize(struct virtqueue *vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf),
		     void (*recycle_done)(struct virtqueue *vq));
int virtqueue_reset(struct virtqueue *vq,
		    void (*recycle)(struct virtqueue *vq, void *buf),
		    void (*recycle_done)(struct virtqueue *vq));
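
/*
 * Example (illustrative, not part of this header): a sketch of resizing a
 * virtqueue.  Buffers still held by the queue are handed back through the
 * recycle callback so the driver can release them, and recycle_done runs
 * once recycling is complete.  my_recycle, my_recycle_done, my_free,
 * my_reset_queue_state and new_num are hypothetical names.
 *
 *	static void my_recycle(struct virtqueue *vq, void *buf)
 *	{
 *		my_free(buf);
 *	}
 *
 *	static void my_recycle_done(struct virtqueue *vq)
 *	{
 *		my_reset_queue_state(vq);
 *	}
 *
 *	err = virtqueue_resize(vq, new_num, my_recycle, my_recycle_done);
 */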

struct virtio_admin_cmd {
	__le16 opcode;
	__le16 group_type;
	__le64 group_member_id;
	struct scatterlist *data_sg;
	struct scatterlist *result_sg;
	struct completion completion;
	u32 result_sg_size;
	int ret;
};

/**
 * struct virtio_device - representation of a device using virtio
 * @index: unique position on the virtio bus
 * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
 * @config_core_enabled: configuration change reporting enabled by core
 * @config_driver_disabled: configuration change reporting disabled by
 *                          a driver
 * @config_change_pending: configuration change reported while disabled
 * @config_lock: protects configuration change reporting
 * @vqs_list_lock: protects @vqs.
 * @dev: underlying device.
 * @id: the device type identification (used to match it with a driver).
 * @config: the configuration ops for this device.
 * @vringh_config: configuration ops for host vrings.
 * @vqs: the list of virtqueues for this device.
 * @features: the lower 64 feature bits supported by both driver and device.
 * @features_array: the full feature bit space supported by both driver and
 *                  device.
 * @priv: private pointer for the driver's use.
 * @debugfs_dir: debugfs directory entry.
 * @debugfs_filter_features: features to be filtered out, as set via debugfs.
 */
struct virtio_device {
	int index;
	bool failed;
	bool config_core_enabled;
	bool config_driver_disabled;
	bool config_change_pending;
	spinlock_t config_lock;
	spinlock_t vqs_list_lock;
	struct device dev;
	struct virtio_device_id id;
	const struct virtio_config_ops *config;
	const struct vringh_config_ops *vringh_config;
	struct list_head vqs;
	VIRTIO_DECLARE_FEATURES(features);
	void *priv;
#ifdef CONFIG_VIRTIO_DEBUG
	struct dentry *debugfs_dir;
	u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS];
#endif
};

#define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev)

void virtio_add_status(struct virtio_device *dev, unsigned int status);
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
bool is_virtio_device(struct device *dev);

void virtio_break_device(struct virtio_device *dev);
void __virtio_unbreak_device(struct virtio_device *dev);

void __virtqueue_break(struct virtqueue *_vq);
void __virtqueue_unbreak(struct virtqueue *_vq);

void virtio_config_changed(struct virtio_device *dev);

void virtio_config_driver_disable(struct virtio_device *dev);
void virtio_config_driver_enable(struct virtio_device *dev);

#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
int virtio_device_reset_prepare(struct virtio_device *dev);
int virtio_device_reset_done(struct virtio_device *dev);

size_t virtio_max_dma_size(const struct virtio_device *vdev);

#define virtio_device_for_each_vq(vdev, vq) \
	list_for_each_entry(vq, &(vdev)->vqs, list)
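
/*
 * Example (illustrative, not part of this header): walking all virtqueues
 * of a device, here to log their ring sizes.  The caller is assumed to
 * keep the @vqs list stable (e.g. via @vqs_list_lock or by running before
 * the queues can be torn down); vdev and vq are the usual driver-local
 * variables.
 *
 *	struct virtqueue *vq;
 *
 *	virtio_device_for_each_vq(vdev, vq)
 *		dev_dbg(&vdev->dev, "%s: %u entries\n", vq->name,
 *			virtqueue_get_vring_size(vq));
 */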

/**
 * struct virtio_driver - operations for a virtio I/O driver
 * @driver: underlying device driver (populate name).
 * @id_table: the ids serviced by this driver.
 * @feature_table: an array of feature numbers supported by this driver.
 * @feature_table_size: number of entries in the feature table array.
 * @feature_table_legacy: same as feature_table but when working in legacy mode.
 * @feature_table_size_legacy: number of entries in feature table legacy array.
 * @validate: the function to call to validate features and config space.
 *            Returns 0 or -errno.
 * @probe: the function to call when a device is found.  Returns 0 or -errno.
 * @scan: optional function to call after successful probe; intended
 *        for virtio-scsi to invoke a scan.
 * @remove: the function to call when a device is removed.
 * @config_changed: optional function to call when the device configuration
 *                  changes; may be called in interrupt context.
 * @freeze: optional function to call during suspend/hibernation.
 * @restore: optional function to call on resume.
 * @reset_prepare: optional function to call when a transport specific reset
 *                 occurs.
 * @reset_done: optional function to call after transport specific reset
 *              operation has finished.
 * @shutdown: synchronize with the device on shutdown.  If provided, replaces
 *            the virtio core implementation.
 */
struct virtio_driver {
	struct device_driver driver;
	const struct virtio_device_id *id_table;
	const unsigned int *feature_table;
	unsigned int feature_table_size;
	const unsigned int *feature_table_legacy;
	unsigned int feature_table_size_legacy;
	int (*validate)(struct virtio_device *dev);
	int (*probe)(struct virtio_device *dev);
	void (*scan)(struct virtio_device *dev);
	void (*remove)(struct virtio_device *dev);
	void (*config_changed)(struct virtio_device *dev);
	int (*freeze)(struct virtio_device *dev);
	int (*restore)(struct virtio_device *dev);
	int (*reset_prepare)(struct virtio_device *dev);
	int (*reset_done)(struct virtio_device *dev);
	void (*shutdown)(struct virtio_device *dev);
};
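
/*
 * Example (illustrative, not part of this header): a minimal driver
 * skeleton.  MY_VIRTIO_ID, MY_VIRTIO_F_SOMETHING, my_setup_vqs() and
 * my_teardown_vqs() are hypothetical placeholders standing in for the
 * driver's real device ID, feature bits and virtqueue setup/teardown.
 *
 *	static const struct virtio_device_id my_id_table[] = {
 *		{ MY_VIRTIO_ID, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static const unsigned int my_features[] = {
 *		MY_VIRTIO_F_SOMETHING,
 *	};
 *
 *	static int my_probe(struct virtio_device *vdev)
 *	{
 *		return my_setup_vqs(vdev);
 *	}
 *
 *	static void my_remove(struct virtio_device *vdev)
 *	{
 *		virtio_reset_device(vdev);
 *		my_teardown_vqs(vdev);
 *	}
 *
 *	static struct virtio_driver my_driver = {
 *		.driver.name		= "my-virtio-driver",
 *		.id_table		= my_id_table,
 *		.feature_table		= my_features,
 *		.feature_table_size	= ARRAY_SIZE(my_features),
 *		.probe			= my_probe,
 *		.remove			= my_remove,
 *	};
 */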

#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)

/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_virtio_driver(drv) \
	__register_virtio_driver(drv, THIS_MODULE)
int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
void unregister_virtio_driver(struct virtio_driver *drv);

/* module_virtio_driver() - Helper macro for drivers that don't do
 * anything special in module init/exit.  This eliminates a lot of
 * boilerplate.  Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit()
 */
#define module_virtio_driver(__virtio_driver) \
	module_driver(__virtio_driver, register_virtio_driver, \
			unregister_virtio_driver)
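
/*
 * Example (illustrative, not part of this header): registering the driver
 * from the skeleton above with no extra module init/exit code, and
 * exporting its ID table for module autoloading.  my_driver and
 * my_id_table are the hypothetical names used earlier.
 *
 *	module_virtio_driver(my_driver);
 *	MODULE_DEVICE_TABLE(virtio, my_id_table);
 *	MODULE_LICENSE("GPL");
 */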

dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
					  enum dma_data_direction dir, unsigned long attrs);
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);

bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir);
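
/*
 * Example (illustrative, not part of this header): a sketch of pre-mapping
 * a receive buffer for use with the *_premapped add helpers, checking for
 * mapping errors, and syncing it for the CPU once the device is done with
 * it.  buf, len and the surrounding driver state are hypothetical.
 *
 *	dma_addr_t addr;
 *
 *	addr = virtqueue_dma_map_single_attrs(vq, buf, len,
 *					      DMA_FROM_DEVICE, 0);
 *	if (virtqueue_dma_mapping_error(vq, addr))
 *		return -ENOMEM;
 *
 *	... hand the mapped address to the device via a premapped add ...
 *
 *	if (virtqueue_dma_need_sync(vq, addr))
 *		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
 *							DMA_FROM_DEVICE);
 *	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE, 0);
 */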

#ifdef CONFIG_VIRTIO_DEBUG
void virtio_debug_device_init(struct virtio_device *dev);
void virtio_debug_device_exit(struct virtio_device *dev);
void virtio_debug_device_filter_features(struct virtio_device *dev);
void virtio_debug_init(void);
void virtio_debug_exit(void);
#else
static inline void virtio_debug_device_init(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_exit(struct virtio_device *dev)
{
}

static inline void virtio_debug_device_filter_features(struct virtio_device *dev)
{
}

static inline void virtio_debug_init(void)
{
}

static inline void virtio_debug_exit(void)
{
}
#endif

#endif /* _LINUX_VIRTIO_H */