/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INCLUDE_XEN_OPS_H
#define INCLUDE_XEN_OPS_H

#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
#include <linux/virtio_anchor.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
static inline uint32_t xen_vcpu_nr(int cpu)
{
        return per_cpu(xen_vcpu_id, cpu);
}

#define XEN_VCPU_ID_INVALID U32_MAX

void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);

void xen_reboot(int reason);

void xen_resume_notifier_register(struct notifier_block *nb);

bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
u64 xen_steal_clock(int cpu);

int xen_setup_shutdown_event(void);

extern unsigned long *xen_contiguous_bitmap;

#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
                  unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot, unsigned int domid,
                                bool no_translate)
{
        BUG();
        return 0;
}
#endif

struct vm_area_struct;

#ifdef CONFIG_XEN_AUTO_XLATE
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                              unsigned long addr,
                              xen_pfn_t *gfn, int nr,
                              int *err_ptr, pgprot_t prot,
                              unsigned int domid,
                              struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
                              int nr, struct page **pages);
#else
/*
 * These two functions are called from arch/x86/xen/mmu.c, so stubs are
 * needed for configurations that do not set CONFIG_XEN_AUTO_XLATE.
 */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                                            unsigned long addr,
                                            xen_pfn_t *gfn, int nr,
                                            int *err_ptr, pgprot_t prot,
                                            unsigned int domid,
                                            struct page **pages)
{
        return -EOPNOTSUPP;
}

static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
                                            int nr, struct page **pages)
{
        return -EOPNOTSUPP;
}
#endif

int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long len);

/*
 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: Array of GFNs to map
 * @nr: Number of entries in the GFN array
 * @err_ptr: Returns per-GFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * @gfn and @err_ptr may point to the same buffer; the GFNs will be
 * overwritten by the error codes once the frames are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                                             unsigned long addr,
                                             xen_pfn_t *gfn, int nr,
                                             int *err_ptr, pgprot_t prot,
                                             unsigned int domid,
                                             struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                                 prot, domid, pages);

        /*
         * We BUG_ON() because passing a NULL err_ptr is a programmer error,
         * and without the per-frame status it is very hard to track down
         * why the wrong memory ended up mapped in.
         */
        BUG_ON(err_ptr == NULL);
        return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
                             false);
}
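
/*
 * Illustrative sketch only (not part of the API above): a hypothetical
 * caller, e.g. a privcmd-style mmap handler, might batch-map foreign GFNs
 * as below.  "gfns", "errs", "nr", "fdomid" and "pages" are placeholders
 * provided by that caller; the per-GFN status lands in "errs" and the
 * return value is the count of successfully mapped frames (or a -ve error).
 *
 *	int mapped;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
 *					    errs, vma->vm_page_prot,
 *					    fdomid, pages);
 *	if (mapped < 0)
 *		return mapped;
 *
 * The mfn and range variants below are used analogously.
 */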

/*
 * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @mfn: Array of MFNs to map
 * @nr: Number of entries in the MFN array
 * @err_ptr: Returns per-MFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 *
 * @mfn and @err_ptr may point to the same buffer; the MFNs will be
 * overwritten by the error codes once the frames are mapped.
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                                             unsigned long addr, xen_pfn_t *mfn,
                                             int nr, int *err_ptr,
                                             pgprot_t prot, unsigned int domid)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
                             true);
}

/*
 * xen_remap_domain_gfn_range() - map a range of foreign frames
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: First GFN to map.
 * @nr: Number of frames to map
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * Returns the number of successfully mapped frames, or a -ve error
 * code.
 */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                                             unsigned long addr,
                                             xen_pfn_t gfn, int nr,
                                             pgprot_t prot, unsigned int domid,
                                             struct page **pages)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;

        return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages);

int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
                                  unsigned long nr_grant_frames);

bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);

void xen_efi_runtime_setup(void);

#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)

DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
        __this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
        __this_cpu_write(xen_in_preemptible_hcall, false);
}

#else

static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }

#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
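
/*
 * Illustrative sketch only (hypothetical caller): the preemptible-hypercall
 * markers above are intended to bracket a potentially long-running
 * hypercall, so that on CONFIG_XEN_PV kernels built without
 * CONFIG_PREEMPTION the upcall path can reschedule while the call is in
 * flight.  "domid", "nr_bufs" and "bufs" are caller-provided arguments;
 * the dm_op hypercall is shown as one existing user of this pattern.
 *
 *	xen_preemptible_hcall_begin();
 *	ret = HYPERVISOR_dm_op(domid, nr_bufs, bufs);
 *	xen_preemptible_hcall_end();
 */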

#ifdef CONFIG_XEN_GRANT_DMA_OPS
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
#else
struct virtio_device;

static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
        return false;
}
#endif /* CONFIG_XEN_GRANT_DMA_OPS */

#endif /* INCLUDE_XEN_OPS_H */
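
/*
 * Illustrative sketch only (hypothetical platform setup code): when
 * grant-DMA restricted virtio access is in use, the check above is
 * typically registered with the virtio core via the helper declared in
 * <linux/virtio_anchor.h>, which is already included at the top of this
 * header:
 *
 *	virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 */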