/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/irqbypass.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/notifier.h>

#ifndef VFIO_PCI_CORE_H
#define VFIO_PCI_CORE_H

#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
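
/*
 * Illustrative sketch, not part of this header: the macros above pack a
 * region index into the high bits of the device file offset, leaving the
 * low 40 bits for the offset within that region.  A hypothetical userspace
 * read of 4 bytes at offset 0x1000 of BAR 2 would look like:
 *
 *	loff_t pos = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR2_REGION_INDEX) |
 *		     0x1000;
 *	pread(device_fd, buf, 4, pos);
 *
 * On the kernel side, VFIO_PCI_OFFSET_TO_INDEX() and VFIO_PCI_OFFSET_MASK
 * recover the region index and the in-region offset, respectively.
 */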

struct vfio_pci_core_device;
struct vfio_pci_region;
struct p2pdma_provider;
struct dma_buf_phys_vec;
struct dma_buf_attachment;

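/*
 * Eventfd wrapper for the __rcu err_trigger/req_trigger pointers below;
 * the rcu_head lets a replaced eventfd context be freed only after
 * concurrent RCU readers have drained.
 */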
struct vfio_pci_eventfd {
	struct eventfd_ctx	*ctx;
	struct rcu_head		rcu;
};

struct vfio_pci_regops {
	ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
		      size_t count, loff_t *ppos, bool iswrite);
	void	(*release)(struct vfio_pci_core_device *vdev,
			   struct vfio_pci_region *region);
	int	(*mmap)(struct vfio_pci_core_device *vdev,
			struct vfio_pci_region *region,
			struct vm_area_struct *vma);
	int	(*add_capability)(struct vfio_pci_core_device *vdev,
				  struct vfio_pci_region *region,
				  struct vfio_info_cap *caps);
};
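
/*
 * A minimal sketch of how a variant driver might supply ops for a
 * device-specific region; my_region_rw is a hypothetical handler, not
 * part of this header:
 *
 *	static ssize_t my_region_rw(struct vfio_pci_core_device *vdev,
 *				    char __user *buf, size_t count,
 *				    loff_t *ppos, bool iswrite);
 *
 *	static const struct vfio_pci_regops my_regops = {
 *		.rw = my_region_rw,
 *	};
 *
 * The ops and region size are then registered with
 * vfio_pci_core_register_dev_region(), declared below.
 */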

struct vfio_pci_region {
	u32				type;
	u32				subtype;
	const struct vfio_pci_regops	*ops;
	void				*data;
	size_t				size;
	u32				flags;
};

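/*
 * Device-specific callbacks a variant driver can override.  get_dmabuf_phys
 * translates DMA ranges within a region into a physical vector for dma-buf
 * export; drivers without special needs can use the core helper declared
 * below (a sketch of that wiring follows the #ifdef block).
 */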
struct vfio_pci_device_ops {
	int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
			       struct p2pdma_provider **provider,
			       unsigned int region_index,
			       struct dma_buf_phys_vec *phys_vec,
			       struct vfio_region_dma_range *dma_ranges,
			       size_t nr_ranges);
};

#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
				struct vfio_region_dma_range *dma_ranges,
				size_t nr_ranges, phys_addr_t start,
				phys_addr_t len);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
				  struct p2pdma_provider **provider,
				  unsigned int region_index,
				  struct dma_buf_phys_vec *phys_vec,
				  struct vfio_region_dma_range *dma_ranges,
				  size_t nr_ranges);
#else
static inline int
vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
			    struct vfio_region_dma_range *dma_ranges,
			    size_t nr_ranges, phys_addr_t start,
			    phys_addr_t len)
{
	return -EINVAL;
}
static inline int vfio_pci_core_get_dmabuf_phys(
	struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
	unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
	struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
	return -EOPNOTSUPP;
}
#endif
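
/*
 * Sketch of the wiring mentioned above: a driver with no special dma-buf
 * handling could point its ops at the core helper directly (my_pci_ops is
 * hypothetical; the signatures match by construction).  With
 * CONFIG_VFIO_PCI_DMABUF disabled, the stubs above make the same call fail
 * with -EOPNOTSUPP.
 *
 *	static const struct vfio_pci_device_ops my_pci_ops = {
 *		.get_dmabuf_phys = vfio_pci_core_get_dmabuf_phys,
 *	};
 */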

struct vfio_pci_core_device {
	struct vfio_device	vdev;
	struct pci_dev		*pdev;
	const struct vfio_pci_device_ops *pci_ops;
	void __iomem		*barmap[PCI_STD_NUM_BARS]; /* ioremap'd BARs */
	bool			bar_mmap_supported[PCI_STD_NUM_BARS];
	u8			*pci_config_map;
	u8			*vconfig;	/* virtualized config space */
	struct perm_bits	*msi_perm;
	spinlock_t		irqlock;
	struct mutex		igate;		/* interrupt state mutex */
	struct xarray		ctx;
	int			irq_type;
	int			num_regions;
	struct vfio_pci_region	*region;
	u8			msi_qmax;
	u8			msix_bar;
	u16			msix_size;
	u32			msix_offset;
	u32			rbar[7];
	bool			has_dyn_msix:1;
	bool			pci_2_3:1;
	bool			virq_disabled:1;
	bool			reset_works:1;
	bool			extended_caps:1;
	bool			bardirty:1;
	bool			has_vga:1;
	bool			needs_reset:1;
	bool			nointx:1;
	bool			needs_pm_restore:1;
	bool			pm_intx_masked:1;
	bool			pm_runtime_engaged:1;
	struct pci_saved_state	*pci_saved_state;
	struct pci_saved_state	*pm_save;
	int			ioeventfds_nr;
	struct vfio_pci_eventfd __rcu *err_trigger;
	struct vfio_pci_eventfd __rcu *req_trigger;
	struct eventfd_ctx	*pm_wake_eventfd_ctx;
	struct list_head	dummy_resources_list;
	struct mutex		ioeventfds_lock;
	struct list_head	ioeventfds_list;
	struct vfio_pci_vf_token	*vf_token;
	struct list_head		sriov_pfs_item;
	struct vfio_pci_core_device	*sriov_pf_core_dev;
	struct notifier_block	nb;
	struct rw_semaphore	memory_lock; /* guards memory decode state */
	struct list_head	dmabufs;
};
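
/*
 * Variant drivers embed this struct in their own device state and register
 * it with the core.  In vfio_device callbacks the core device is recovered
 * from the generic handle with container_of(), e.g. (illustrative):
 *
 *	struct vfio_pci_core_device *vdev =
 *		container_of(core_vdev, struct vfio_pci_core_device, vdev);
 */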

/* Exported for use by vfio-pci variant drivers */
int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
				      unsigned int type, unsigned int subtype,
				      const struct vfio_pci_regops *ops,
				      size_t size, u32 flags, void *data);
void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3);
void vfio_pci_core_close_device(struct vfio_device *core_vdev);
int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
				  int nr_virtfn);
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
		unsigned long arg);
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz);
int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
				   struct vfio_region_info *info,
				   struct vfio_info_cap *caps);
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
		size_t count, loff_t *ppos);
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
		size_t count, loff_t *ppos);
vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev,
				   struct vm_fault *vmf, unsigned long pfn,
				   unsigned int order);
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
				   const uuid_t *uuid);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
			       void __iomem *io, char __user *buf,
			       loff_t off, size_t count, size_t x_start,
			       size_t x_end, bool iswrite);
bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
					 loff_t reg_start, size_t reg_cnt,
					 loff_t *buf_offset,
					 size_t *intersect_count,
					 size_t *register_offset);
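
/*
 * Sketch: a variant driver that traps a 4-byte register at device offset
 * 0x10 can use vfio_pci_core_range_intersect_range() to clip a user access
 * to the overlapping bytes (names and values here are illustrative):
 *
 *	loff_t buf_off;
 *	size_t isect, reg_off;
 *
 *	if (vfio_pci_core_range_intersect_range(*ppos, count, 0x10, 4,
 *						&buf_off, &isect, &reg_off))
 *		emulate isect bytes of the register at reg_off, using
 *		buf + buf_off on the user side;
 */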
#define VFIO_IOWRITE_DECLARATION(size) \
int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size val, void __iomem *io);

VFIO_IOWRITE_DECLARATION(8)
VFIO_IOWRITE_DECLARATION(16)
VFIO_IOWRITE_DECLARATION(32)
#ifdef iowrite64
VFIO_IOWRITE_DECLARATION(64)
#endif
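
/*
 * For reference, VFIO_IOWRITE_DECLARATION(32) above expands to:
 *
 *	int vfio_pci_core_iowrite32(struct vfio_pci_core_device *vdev,
 *				    bool test_mem, u32 val, void __iomem *io);
 *
 * The 64-bit variants here and below exist only where the architecture
 * defines iowrite64()/ioread64().
 */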

#define VFIO_IOREAD_DECLARATION(size) \
int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size *val, void __iomem *io);

VFIO_IOREAD_DECLARATION(8)
VFIO_IOREAD_DECLARATION(16)
VFIO_IOREAD_DECLARATION(32)
#ifdef ioread64
VFIO_IOREAD_DECLARATION(64)
#endif

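/*
 * Sanity check for huge-page insertion: a non-zero order is only usable
 * when the whole PAGE_SIZE << order range fits inside the VMA and the
 * target pfn is aligned to that order; order 0 (a single page) always
 * qualifies.
 */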
static inline bool is_aligned_for_order(struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long pfn,
					unsigned int order)
{
	return !(order && (addr < vma->vm_start ||
			   addr + (PAGE_SIZE << order) > vma->vm_end ||
			   !IS_ALIGNED(pfn, 1 << order)));
}

int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				 struct dma_buf_phys_vec *phys);

#endif /* VFIO_PCI_CORE_H */