/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with VMMs such as QEMU/KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues or slow_virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned int msix_vector;
};

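/*
 * Illustrative sketch, not part of the original header: how a handler might
 * walk one of the per-device virtqueue lists through the node member above.
 * It relies on the lock and virtqueues fields of struct virtio_pci_device
 * defined further down; the helper name is hypothetical.
 */
#if 0	/* example only */
static void vp_example_kick_all_vqs(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node)
		vring_interrupt(0, info->vq);
	spin_unlock_irqrestore(&vp_dev->lock, flags);
}
#endif
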
struct virtio_pci_admin_vq {
	/* Virtqueue info associated with this admin queue. */
	struct virtio_pci_vq_info *info;
	/* Protects virtqueue access. */
	spinlock_t lock;
	u64 supported_cmds;
	/* Name of the admin queue: avq.$vq_index. */
	char name[10];
	u16 vq_index;
};

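/*
 * Illustrative sketch, not part of the original header: the "avq.$vq_index"
 * naming convention fits name[10] for any 16-bit index ("avq." is 4 bytes,
 * a u16 prints as at most 5 digits, plus the terminating NUL).  The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static void vp_example_admin_vq_name(struct virtio_pci_admin_vq *avq)
{
	snprintf(avq->name, sizeof(avq->name), "avq.%u", avq->vq_index);
}
#endif
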
/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;
	union {
		struct virtio_pci_legacy_device ldev;
		struct virtio_pci_modern_device mdev;
	};
	bool is_legacy;

	/* Where to read and clear interrupt */
	u8 __iomem *isr;

	/* Lists of queues and potentially slow path queues
	 * so we can dispatch IRQs.
	 */
	spinlock_t lock;
	struct list_head virtqueues;
	struct list_head slow_virtqueues;

	/* Array of all virtqueues reported in the
	 * PCI common config num_queues field
	 */
	struct virtio_pci_vq_info **vqs;

	struct virtio_pci_admin_vq admin_vq;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned int msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned int msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned int idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      bool ctx,
				      u16 msix_vec);
	void (*del_vq)(struct virtio_pci_vq_info *info);

	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
	int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};

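/*
 * Illustrative sketch, not part of the original header: the isr register
 * above is read-to-clear, so a shared interrupt handler typically reads it
 * once and then services config changes and/or virtqueues based on the bits
 * set.  The handler name and the (simplified) return handling are
 * hypothetical.
 */
#if 0	/* example only */
static irqreturn_t vp_example_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr = ioread8(vp_dev->isr);	/* reading also clears the status */

	if (!isr)
		return IRQ_NONE;

	if (isr & VIRTIO_PCI_ISR_CONFIG)
		virtio_config_changed(&vp_dev->vdev);

	return IRQ_HANDLED;
}
#endif
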
/* Constants for MSI-X */
/* Use the first vector for configuration changes, and the second and the
 * rest for virtqueues.  Thus, we need at least 2 vectors for MSI-X. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};

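/*
 * Illustrative sketch, not part of the original header, of the vector layout
 * described above (simplified: it ignores virtqueues without callbacks):
 * vector 0 carries config changes; with one vector per virtqueue, queue idx
 * uses VP_MSIX_VQ_VECTOR + idx, otherwise all queues share VP_MSIX_VQ_VECTOR.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static u16 vp_example_vector_for_vq(bool per_vq_vectors, unsigned int idx)
{
	return per_vq_vectors ? VP_MSIX_VQ_VECTOR + idx : VP_MSIX_VQ_VECTOR;
}
#endif
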
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_pci_device, vdev);
}

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
		struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);

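/*
 * Illustrative sketch, not part of the original header: a caller pinning a
 * virtqueue's interrupt to a single CPU via the affinity helper above.  With
 * per-vq vectors the mask is applied directly; with a shared vector it is
 * OR-ed into the shared mask; under INTX the request is ignored, as described
 * in the comment above.  The helper name is hypothetical.
 */
#if 0	/* example only */
static int vp_example_pin_vq(struct virtqueue *vq, unsigned int cpu)
{
	return vp_set_vq_affinity(vq, cpumask_of(cpu));
}
#endif
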
#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *);
void virtio_pci_legacy_remove(struct virtio_pci_device *);
#else
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);

struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);

#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
	(BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))

/* Unlike modern drivers which support hardware virtio devices, legacy drivers
 * assume software-based devices: e.g. they don't use proper memory barriers
 * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
 * or less by chance. For now, only support legacy IO on X86.
 */
#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP
#else
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif

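/*
 * Illustrative sketch, not part of the original header: one way a caller
 * could gate legacy admin-command usage, requiring every opcode in
 * VIRTIO_ADMIN_CMD_BITMAP to appear in the device-reported supported_cmds
 * mask of the admin queue.  The helper name is hypothetical.
 */
#if 0	/* example only */
static bool vp_example_legacy_admin_supported(struct virtio_pci_device *vp_dev)
{
	return (vp_dev->admin_vq.supported_cmds & VIRTIO_ADMIN_CMD_BITMAP) ==
	       VIRTIO_ADMIN_CMD_BITMAP;
}
#endif
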
void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
			     struct virtio_admin_cmd *cmd);

#endif