/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VDPA_H
#define _LINUX_VDPA_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
#include <linux/virtio_net.h>
#include <linux/virtio_blk.h>
#include <linux/if_ether.h>

/**
 * struct vdpa_callback - vDPA callback definition.
 * @callback: interrupt callback function
 * @private: the data passed to the callback function
 * @trigger: the eventfd for the callback (optional).
 *           When it is set, the vDPA driver must guarantee that
 *           signaling it is functionally equivalent to triggering
 *           the callback. The vDPA parent can then signal it directly
 *           instead of triggering the callback.
 */
struct vdpa_callback {
	irqreturn_t (*callback)(void *data);
	void *private;
	struct eventfd_ctx *trigger;
};
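
/*
 * Illustrative sketch (not part of the upstream API): a parent driver that
 * has latched a struct vdpa_callback from .set_vq_cb could deliver a
 * virtqueue interrupt from its own hard irq handler roughly like this;
 * "my_vq" and "my_vq_irq_handler" are made-up names.
 *
 *	static irqreturn_t my_vq_irq_handler(int irq, void *arg)
 *	{
 *		struct my_vq *vq = arg;
 *		struct vdpa_callback *cb = &vq->cb;
 *
 *		if (!cb->callback)
 *			return IRQ_NONE;
 *
 *		return cb->callback(cb->private);
 *	}
 *
 * When cb->trigger is set, signaling that eventfd must be equivalent to the
 * call above, so a parent may instead hand the eventfd to hardware or signal
 * it directly and skip invoking cb->callback.
 */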

/**
 * struct vdpa_notification_area - vDPA notification area
 * @addr: base address of the notification area
 * @size: size of the notification area
 */
struct vdpa_notification_area {
	resource_size_t addr;
	resource_size_t size;
};
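
/*
 * Illustrative sketch (assumption, not from the upstream header): a parent
 * driver whose hardware exposes one page-sized doorbell per virtqueue could
 * implement .get_vq_notification like this; "my_vdpa" and its fields are
 * hypothetical.
 *
 *	static struct vdpa_notification_area
 *	my_get_vq_notification(struct vdpa_device *vdev, u16 idx)
 *	{
 *		struct my_vdpa *mv = container_of(vdev, struct my_vdpa, vdpa);
 *		struct vdpa_notification_area area;
 *
 *		area.addr = mv->doorbell_base + idx * PAGE_SIZE;
 *		area.size = PAGE_SIZE;
 *		return area;
 *	}
 *
 * A bus driver can then let userspace map this area so queues can be kicked
 * by writing to the doorbell directly.
 */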

/**
 * struct vdpa_vq_state_split - vDPA split virtqueue state
 * @avail_index: available index
 */
struct vdpa_vq_state_split {
	u16	avail_index;
};

/**
 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
 * @last_avail_counter: last driver ring wrap counter observed by device
 * @last_avail_idx: device available index
 * @last_used_counter: device ring wrap counter
 * @last_used_idx: used index
 */
struct vdpa_vq_state_packed {
	u16	last_avail_counter:1;
	u16	last_avail_idx:15;
	u16	last_used_counter:1;
	u16	last_used_idx:15;
};

struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};
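
/*
 * Illustrative sketch (assumption, not from the upstream header): restoring a
 * split virtqueue to a saved available index through the config ops; "vdev",
 * "idx" and "saved_avail" are hypothetical.
 *
 *	const struct vdpa_config_ops *ops = vdev->config;
 *	struct vdpa_vq_state state = {
 *		.split.avail_index = saved_avail,
 *	};
 *	int err = ops->set_vq_state(vdev, idx, &state);
 *
 * For a packed virtqueue, the .packed member carries the two ring wrap
 * counters and the 15-bit available/used indices instead.
 */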

struct vdpa_mgmt_dev;

/**
 * struct vdpa_device - representation of a vDPA device
 * @dev: underlying device
 * @dma_dev: the actual device that is performing DMA
 * @driver_override: driver name to force a match; do not set directly,
 *                   because core frees it; use driver_set_override() to
 *                   set or clear it.
 * @config: the configuration ops for this device.
 * @cf_lock: Protects get and set access to configuration layout.
 * @index: device index
 * @features_valid: whether features were initialized, for legacy guests
 * @ngroups: the number of virtqueue groups
 * @nas: the number of address spaces
 * @use_va: indicates whether virtual addresses must be used by this device
 * @nvqs: maximum number of supported virtqueues
 * @mdev: management device pointer; the caller must set this up as part of
 *	  the dev_add() mgmtdev ops callback before invoking
 *	  _vdpa_register_device().
 */
struct vdpa_device {
	struct device dev;
	struct device *dma_dev;
	const char *driver_override;
	const struct vdpa_config_ops *config;
	struct rw_semaphore cf_lock; /* Protects get/set config */
	unsigned int index;
	bool features_valid;
	bool use_va;
	u32 nvqs;
	struct vdpa_mgmt_dev *mdev;
	unsigned int ngroups;
	unsigned int nas;
};

/**
 * struct vdpa_iova_range - the IOVA range supported by the device
 * @first: start of the IOVA range
 * @last: end of the IOVA range
 */
struct vdpa_iova_range {
	u64 first;
	u64 last;
};

struct vdpa_dev_set_config {
	u64 device_features;
	struct {
		u8 mac[ETH_ALEN];
		u16 mtu;
		u16 max_vq_pairs;
	} net;
	u64 mask;
};
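
/*
 * Illustrative sketch (assumption based on how the vDPA core and existing
 * parents consume this structure): in a management device's dev_add()
 * callback, @mask tells the driver which attributes userspace actually
 * provided, as BIT_ULL() of the corresponding VDPA_ATTR_* value; "my_cfg" is
 * a made-up shadow of the parent's device config.
 *
 *	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR))
 *		memcpy(my_cfg.mac, config->net.mac, ETH_ALEN);
 *	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU))
 *		my_cfg.mtu = cpu_to_le16(config->net.mtu);
 *
 * Attributes that were not requested must keep the parent's defaults.
 */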

/**
 * struct vdpa_map_file - file area for device memory mapping
 * @file: vma->vm_file for the mapping
 * @offset: mapping offset in the vm_file
 */
struct vdpa_map_file {
	struct file *file;
	u64 offset;
};

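/*
 * Illustrative sketch (assumption, following how file-backed parents use this
 * structure): the opaque pointer of a vhost_iotlb entry can carry a
 * struct vdpa_map_file, which a .set_map implementation may consume while
 * walking the iotlb; "map" is the struct vhost_iotlb_map being visited and
 * my_map_backing_file() is a made-up helper.
 *
 *	struct vdpa_map_file *map_file = map->opaque;
 *
 *	if (map_file && map_file->file)
 *		my_map_backing_file(map_file->file, map_file->offset,
 *				    map->start, map->size);
 */
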
/**
 * struct vdpa_config_ops - operations for configuring a vDPA device.
 * Note: vDPA device drivers are required to implement all of the
 * operations unless they are marked as optional in the following
 * list.
 *
 * @set_vq_address:		Set the address of virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@desc_area: address of desc area
 *				@driver_area: address of driver area
 *				@device_area: address of device area
 *				Returns integer: success (0) or error (< 0)
 * @set_vq_num:			Set the size of virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@num: the size of virtqueue
 * @kick_vq:			Kick the virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 * @kick_vq_with_data:		Kick the virtqueue and supply extra data
 *				(only if VIRTIO_F_NOTIFICATION_DATA is negotiated)
 *				@vdev: vdpa device
 *				@data for split virtqueue:
 *				16 bits vqn and 16 bits next available index.
 *				@data for packed virtqueue:
 *				16 bits vqn, 15 least significant bits of
 *				next available index and 1 bit next_wrap.
 * @set_vq_cb:			Set the interrupt callback function for
 *				a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@cb: virtio-vdev interrupt callback structure
 * @set_vq_ready:		Set ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@ready: ready (true) or not ready (false)
 * @get_vq_ready:		Get ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns boolean: ready (true) or not (false)
 * @set_vq_state:		Set the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to the virtqueue state to set
 *				(last_avail_idx)
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_state:		Get the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to returned state (last_avail_idx)
 * @get_vendor_vq_stats:	Get the vendor statistics of a device.
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@msg: socket buffer holding stats message
 *				@extack: extack for reporting error messages
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_notification:	Get the notification area for a virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the notification area
 * @get_vq_irq:			Get the irq number of a virtqueue (optional,
 *				but must be implemented if vq irq offloading
 *				is required)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns int: irq number of a virtqueue,
 *				negative number if no irq assigned.
 * @get_vq_size:		Get the size of a specific virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns u16: the size of the virtqueue
 * @get_vq_align:		Get the virtqueue alignment requirement
 *				for the device
 *				@vdev: vdpa device
 *				Returns the virtqueue alignment requirement
 * @get_vq_group:		Get the group id for a specific
 *				virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns u32: group id for this virtqueue
 * @get_vq_desc_group:		Get the group id for the descriptor table of
 *				a specific virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns u32: group id for the descriptor table
 *				portion of this virtqueue. Could be different
 *				than the one from @get_vq_group, in which case
 *				the access to the descriptor table can be
 *				confined to a separate asid, isolating it from
 *				the virtqueue's buffer address access.
 * @get_device_features:	Get virtio features supported by the device
 *				@vdev: vdpa device
 *				Returns the virtio features supported by the
 *				device
 * @get_backend_features:	Get parent-specific backend features (optional)
 *				Returns the vdpa features supported by the
 *				device.
 * @set_driver_features:	Set virtio features supported by the driver
 *				@vdev: vdpa device
 *				@features: features supported by the driver
 *				Returns integer: success (0) or error (< 0)
 * @get_driver_features:	Get the virtio driver features in action
 *				@vdev: vdpa device
 *				Returns the virtio features accepted
 * @set_config_cb:		Set the config interrupt callback
 *				@vdev: vdpa device
 *				@cb: virtio-vdev interrupt callback structure
 * @get_vq_num_max:		Get the max size of virtqueue
 *				@vdev: vdpa device
 *				Returns u16: max size of virtqueue
 * @get_vq_num_min:		Get the min size of virtqueue (optional)
 *				@vdev: vdpa device
 *				Returns u16: min size of virtqueue
 * @get_device_id:		Get virtio device id
 *				@vdev: vdpa device
 *				Returns u32: virtio device id
 * @get_vendor_id:		Get id for the vendor that provides this device
 *				@vdev: vdpa device
 *				Returns u32: virtio vendor id
 * @get_status:			Get the device status
 *				@vdev: vdpa device
 *				Returns u8: virtio device status
 * @set_status:			Set the device status
 *				@vdev: vdpa device
 *				@status: virtio device status
 * @reset:			Reset device
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @compat_reset:		Reset device with compatibility quirks to
 *				accommodate older userspace. Only needed by a
 *				parent driver which used to have bogus reset
 *				behaviour, and has to maintain such behaviour
 *				for compatibility with older userspace.
 *				A historically compliant driver only has to
 *				implement .reset; a historically non-compliant
 *				driver should implement both.
 *				@vdev: vdpa device
 *				@flags: compatibility quirks for reset
 *				Returns integer: success (0) or error (< 0)
 * @suspend:			Suspend the device (optional)
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @resume:			Resume the device (optional)
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @get_config_size:		Get the size of the configuration space,
 *				including fields that are conditional on
 *				feature bits.
 *				@vdev: vdpa device
 *				Returns size_t: configuration size
 * @get_config:			Read from device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to read to
 *				@len: the length to read from
 *				configuration space
 * @set_config:			Write to device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to write from
 *				@len: the length to write to
 *				configuration space
 * @get_generation:		Get device config generation (optional)
 *				@vdev: vdpa device
 *				Returns u32: device generation
 * @get_iova_range:		Get supported iova range (optional)
 *				@vdev: vdpa device
 *				Returns the iova range supported by
 *				the device.
 * @set_vq_affinity:		Set the affinity of virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@cpu_mask: the affinity mask
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_affinity:		Get the affinity of virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the affinity mask
 * @set_group_asid:		Set address space identifier for a
 *				virtqueue group (optional)
 *				@vdev: vdpa device
 *				@group: virtqueue group
 *				@asid: address space id for this group
 *				Returns integer: success (0) or error (< 0)
 * @set_map:			Set device memory mapping (optional)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU)
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iotlb: vhost memory mapping to be
 *				used by the vDPA
 *				Returns integer: success (0) or error (< 0)
 * @dma_map:			Map an area of PA to IOVA (optional)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU)
 *				and prefer incremental map.
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iova: iova to be mapped
 *				@size: size of the area
 *				@pa: physical address for the map
 *				@perm: device access permission (VHOST_MAP_XX)
 *				Returns integer: success (0) or error (< 0)
 * @dma_unmap:			Unmap an area of IOVA (optional, but
 *				must be implemented together with dma_map)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU)
 *				and prefer incremental unmap.
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				@iova: iova to be unmapped
 *				@size: size of the area
 *				Returns integer: success (0) or error (< 0)
 * @reset_map:			Reset device memory mapping to the default
 *				state (optional)
 *				Needed for devices that use device-specific
 *				DMA translation and prefer the mapping to be
 *				decoupled from the virtio life cycle, i.e.
 *				the device .reset op does not reset the mapping
 *				@vdev: vdpa device
 *				@asid: address space identifier
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_dma_dev:		Get the dma device for a specific
 *				virtqueue (optional)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns pointer to structure device or error (NULL)
 * @bind_mm:			Bind the device to a specific address space
 *				so the vDPA framework can use VA when this
 *				callback is implemented. (optional)
 *				@vdev: vdpa device
 *				@mm: address space to bind
 * @unbind_mm:			Unbind the device from the address space
 *				bound using the bind_mm callback. (optional)
 *				@vdev: vdpa device
 * @free:			Free resources that belong to vDPA (optional)
 *				@vdev: vdpa device
 */
struct vdpa_config_ops {
	/* Virtqueue ops */
	int (*set_vq_address)(struct vdpa_device *vdev,
			      u16 idx, u64 desc_area, u64 driver_area,
			      u64 device_area);
	void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
	void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
	void (*kick_vq_with_data)(struct vdpa_device *vdev, u32 data);
	void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
			  struct vdpa_callback *cb);
	void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
	bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
	int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
			    const struct vdpa_vq_state *state);
	int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
			    struct vdpa_vq_state *state);
	int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
				   struct sk_buff *msg,
				   struct netlink_ext_ack *extack);
	struct vdpa_notification_area
	(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
	/* vq irq is not expected to be changed once DRIVER_OK is set */
	int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
	u16 (*get_vq_size)(struct vdpa_device *vdev, u16 idx);

	/* Device ops */
	u32 (*get_vq_align)(struct vdpa_device *vdev);
	u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
	u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
	u64 (*get_device_features)(struct vdpa_device *vdev);
	u64 (*get_backend_features)(const struct vdpa_device *vdev);
	int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
	u64 (*get_driver_features)(struct vdpa_device *vdev);
	void (*set_config_cb)(struct vdpa_device *vdev,
			      struct vdpa_callback *cb);
	u16 (*get_vq_num_max)(struct vdpa_device *vdev);
	u16 (*get_vq_num_min)(struct vdpa_device *vdev);
	u32 (*get_device_id)(struct vdpa_device *vdev);
	u32 (*get_vendor_id)(struct vdpa_device *vdev);
	u8 (*get_status)(struct vdpa_device *vdev);
	void (*set_status)(struct vdpa_device *vdev, u8 status);
	int (*reset)(struct vdpa_device *vdev);
	int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
#define VDPA_RESET_F_CLEAN_MAP 1
	int (*suspend)(struct vdpa_device *vdev);
	int (*resume)(struct vdpa_device *vdev);
	size_t (*get_config_size)(struct vdpa_device *vdev);
	void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
			   void *buf, unsigned int len);
	void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
			   const void *buf, unsigned int len);
	u32 (*get_generation)(struct vdpa_device *vdev);
	struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
	int (*set_vq_affinity)(struct vdpa_device *vdev, u16 idx,
			       const struct cpumask *cpu_mask);
	const struct cpumask *(*get_vq_affinity)(struct vdpa_device *vdev,
						 u16 idx);

	/* DMA ops */
	int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
		       struct vhost_iotlb *iotlb);
	int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
		       u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
	int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
			 u64 iova, u64 size);
	int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
	int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
			      unsigned int asid);
	struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
	int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
	void (*unbind_mm)(struct vdpa_device *vdev);

	/* Free device resources */
	void (*free)(struct vdpa_device *vdev);
};
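
/*
 * Illustrative sketch (hypothetical parent driver, not part of the upstream
 * API): the ops table is typically a static const instance wired to the
 * driver's own handlers; only a subset is shown here, and optional ops may
 * simply be left NULL. All "my_*" names are made up.
 *
 *	static const struct vdpa_config_ops my_vdpa_ops = {
 *		.set_vq_address		= my_set_vq_address,
 *		.set_vq_num		= my_set_vq_num,
 *		.kick_vq		= my_kick_vq,
 *		.set_vq_cb		= my_set_vq_cb,
 *		.set_vq_ready		= my_set_vq_ready,
 *		.get_vq_ready		= my_get_vq_ready,
 *		.set_vq_state		= my_set_vq_state,
 *		.get_vq_state		= my_get_vq_state,
 *		.get_vq_align		= my_get_vq_align,
 *		.get_device_features	= my_get_device_features,
 *		.set_driver_features	= my_set_driver_features,
 *		.get_driver_features	= my_get_driver_features,
 *		.set_config_cb		= my_set_config_cb,
 *		.get_vq_num_max		= my_get_vq_num_max,
 *		.get_device_id		= my_get_device_id,
 *		.get_vendor_id		= my_get_vendor_id,
 *		.get_status		= my_get_status,
 *		.set_status		= my_set_status,
 *		.reset			= my_reset,
 *		.get_config_size	= my_get_config_size,
 *		.get_config		= my_get_config,
 *		.set_config		= my_set_config,
 *		.set_map		= my_set_map,
 *	};
 */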

struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va);

/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @ngroups: the number of virtqueue groups supported by this device
 * @nas: the number of address spaces
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
			  name, use_va) \
			  container_of((__vdpa_alloc_device( \
				       parent, config, ngroups, nas, \
				       (sizeof(dev_struct) + \
				       BUILD_BUG_ON_ZERO(offsetof( \
				       dev_struct, member))), name, use_va)), \
				       dev_struct, member)
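
/*
 * Illustrative sketch (hypothetical driver, not from the upstream header):
 * a parent embeds struct vdpa_device as the *first* member of its private
 * structure (enforced by the BUILD_BUG_ON_ZERO() above), allocates it with
 * vdpa_alloc_device() and then registers it. "my_vdpa", "my_vdpa_ops",
 * "parent_dev" and "MY_NUM_VQS" are made-up names.
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;	// must stay at offset 0
 *		void __iomem *regs;
 *	};
 *
 *	struct my_vdpa *mv;
 *	int err;
 *
 *	mv = vdpa_alloc_device(struct my_vdpa, vdpa, parent_dev,
 *			       &my_vdpa_ops, 1, 1, "my-vdpa-0", false);
 *	if (IS_ERR(mv))
 *		return PTR_ERR(mv);
 *
 *	err = vdpa_register_device(&mv->vdpa, MY_NUM_VQS);
 *	if (err)
 *		put_device(&mv->vdpa.dev);
 *
 * Parents created through a management device use _vdpa_register_device()
 * from their dev_add() callback instead.
 */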

int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);

/**
 * struct vdpa_driver - operations for a vDPA driver
 * @driver: underlying device driver
 * @probe: the function to call when a device is found.  Returns 0 or -errno.
 * @remove: the function to call when a device is removed.
 */
struct vdpa_driver {
	struct device_driver driver;
	int (*probe)(struct vdpa_device *vdev);
	void (*remove)(struct vdpa_device *vdev);
};

#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);

#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver,	\
		      vdpa_unregister_driver)
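
/*
 * Illustrative sketch (hypothetical bus driver, not from the upstream
 * header): a vDPA bus driver registers a struct vdpa_driver, and
 * module_vdpa_driver() expands to the usual module init/exit boilerplate.
 * All "my_*" names are made up.
 *
 *	static int my_vdpa_probe(struct vdpa_device *vdev)
 *	{
 *		// take over the device, e.g. build a virtio device on top
 *		return 0;
 *	}
 *
 *	static void my_vdpa_remove(struct vdpa_device *vdev)
 *	{
 *	}
 *
 *	static struct vdpa_driver my_vdpa_driver = {
 *		.driver = {
 *			.name = "my_vdpa",
 *		},
 *		.probe = my_vdpa_probe,
 *		.remove = my_vdpa_remove,
 *	};
 *	module_vdpa_driver(my_vdpa_driver);
 */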

static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
{
	return container_of(driver, struct vdpa_driver, driver);
}

static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
{
	return container_of(_dev, struct vdpa_device, dev);
}

static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
{
	return dev_get_drvdata(&vdev->dev);
}

static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
{
	dev_set_drvdata(&vdev->dev, data);
}

static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
{
	return vdev->dma_dev;
}

static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
{
	const struct vdpa_config_ops *ops = vdev->config;
	int ret;

	down_write(&vdev->cf_lock);
	vdev->features_valid = false;
	if (ops->compat_reset && flags)
		ret = ops->compat_reset(vdev, flags);
	else
		ret = ops->reset(vdev);
	up_write(&vdev->cf_lock);
	return ret;
}

static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
{
	const struct vdpa_config_ops *ops = vdev->config;
	int ret;

	vdev->features_valid = true;
	ret = ops->set_driver_features(vdev, features);

	return ret;
}

static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	int ret;

	down_write(&vdev->cf_lock);
	ret = vdpa_set_features_unlocked(vdev, features);
	up_write(&vdev->cf_lock);

	return ret;
}

void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len);
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
		     const void *buf, unsigned int length);
void vdpa_set_status(struct vdpa_device *vdev, u8 status);

/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     @config: config attributes to apply to the device under creation
 *	     The driver needs to add the new device using
 *	     _vdpa_register_device() after fully initializing the vdpa device.
 *	     The driver must return 0 on success or an appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     The driver needs to remove the specified device by calling
 *	     _vdpa_unregister_device().
 * @dev_set_attr: Change a vdpa device's attributes after it was created
 *	     @mdev: parent device to use for the device
 *	     @dev: vdpa device structure
 *	     @config: attributes to be set for the device.
 *	     The driver needs to check the mask of the structure and then set
 *	     the related information to the vdpa device. The driver must
 *	     return 0 if set successfully.
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
		       const struct vdpa_dev_set_config *config);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
	int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
			    const struct vdpa_dev_set_config *config);
};
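
/*
 * Illustrative sketch (hypothetical management driver, not from the upstream
 * header): dev_add() allocates and registers a device with the unlocked
 * helpers, and dev_del() tears it down; "my_vdpa", "my_vdpa_ops" and
 * "MY_NUM_VQS" are made-up names.
 *
 *	static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 *			      const struct vdpa_dev_set_config *config)
 *	{
 *		struct my_vdpa *mv;
 *		int err;
 *
 *		mv = vdpa_alloc_device(struct my_vdpa, vdpa, mdev->device,
 *				       &my_vdpa_ops, 1, 1, name, false);
 *		if (IS_ERR(mv))
 *			return PTR_ERR(mv);
 *
 *		mv->vdpa.mdev = mdev;	// required before _vdpa_register_device()
 *		err = _vdpa_register_device(&mv->vdpa, MY_NUM_VQS);
 *		if (err)
 *			put_device(&mv->vdpa.dev);
 *		return err;
 *	}
 *
 *	static void my_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
 *	{
 *		_vdpa_unregister_device(dev);
 *	}
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 */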

/**
 * struct vdpa_mgmt_dev - vdpa management device
 * @device: Management parent device
 * @ops: operations supported by management device
 * @id_table: Pointer to device id table of supported ids
 * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that the
 *		      management device supports during the dev_add callback
 * @list: list entry
 * @supported_features: features supported by device
 * @max_supported_vqs: maximum number of virtqueues supported by device
 */
struct vdpa_mgmt_dev {
	struct device *device;
	const struct vdpa_mgmtdev_ops *ops;
	struct virtio_device_id *id_table;
	u64 config_attr_mask;
	struct list_head list;
	u64 supported_features;
	u32 max_supported_vqs;
};

int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
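
/*
 * Illustrative sketch (hypothetical, not from the upstream header): how a
 * parent driver could describe and register its management device so that
 * "vdpa dev add/del" can create instances. "my_mgmtdev_ops" and the id table
 * are made-up names; VIRTIO_ID_NET and VIRTIO_DEV_ANY_ID are standard virtio
 * constants.
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static struct vdpa_mgmt_dev my_mgmt_dev = {
 *		.ops = &my_mgmtdev_ops,
 *		.id_table = my_id_table,
 *	};
 *
 *	// in probe:  my_mgmt_dev.device = &pdev->dev;
 *	//            err = vdpa_mgmtdev_register(&my_mgmt_dev);
 *	// in remove: vdpa_mgmtdev_unregister(&my_mgmt_dev);
 */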

#endif /* _LINUX_VDPA_H */