/* SPDX-License-Identifier: GPL-2.0 */
#ifndef XEN_OPS_H
#define XEN_OPS_H

#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/irqreturn.h>
#include <linux/linkage.h>

#include <xen/interface/xenpmu.h>
#include <xen/xen-ops.h>

#include <asm/page.h>

#include <trace/events/xen.h>

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_failsafe_callback[];

void xen_entry_SYSENTER_compat(void);
#ifdef CONFIG_X86_64
void xen_entry_SYSCALL_64(void);
void xen_entry_SYSCALL_compat(void);
#endif

extern void *xen_initial_gdt;

struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);

DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);

extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;

void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
void __init xen_reserve_special_pages(void);
void __init xen_pt_check_e820(void);

void xen_mm_pin_all(void);
void xen_mm_unpin_all(void);
#ifdef CONFIG_X86_64
void __init xen_relocate_p2m(void);
#endif
void __init xen_do_remap_nonram(void);
void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
				 unsigned long size);

void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component);
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
void __init xen_inv_extra_mem(void);
void __init xen_remap_memory(void);
phys_addr_t __init xen_find_free_area(phys_addr_t size);
char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
void xen_banner(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);

void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);

void __init xen_build_dynamic_phys_to_machine(void);
void __init xen_vmalloc_p2m_tree(void);

void xen_init_irq_ops(void);
void xen_setup_timer(int cpu);
void xen_setup_runstate_info(int cpu);
void xen_teardown_timer(int cpu);
void xen_setup_cpu_clockevents(void);
void xen_save_time_memory_area(void);
void xen_restore_time_memory_area(void);
void xen_init_time_ops(void);
void xen_hvm_init_time_ops(void);

bool xen_vcpu_stolen(int vcpu);

void xen_vcpu_setup(int cpu);
void xen_vcpu_info_reset(int cpu);
void xen_setup_vcpu_info_placement(void);

#ifdef CONFIG_SMP
void xen_smp_init(void);
void __init xen_hvm_smp_init(void);

extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
static inline void xen_hvm_smp_init(void) {}
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init xen_init_spinlocks(void);
void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
#else
static inline void xen_init_spinlocks(void)
{
}
static inline void xen_init_lock_cpu(int cpu)
{
}
static inline void xen_uninit_lock_cpu(int cpu)
{
}
#endif

struct dom0_vga_console_info;

#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
			 struct screen_info *);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
				       size_t size, struct screen_info *si)
{
}
#endif

void xen_add_preferred_consoles(void);

void __init xen_init_apic(void);

#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(struct boot_params *boot_params);
#else
static inline void __init xen_efi_init(struct boot_params *boot_params)
{
}
#endif

__visible void xen_irq_enable_direct(void);
__visible void xen_irq_disable_direct(void);
__visible unsigned long xen_save_fl_direct(void);

__visible unsigned long xen_read_cr2(void);
__visible unsigned long xen_read_cr2_direct(void);

/* These are not functions, and cannot be called normally */
__visible void xen_iret(void);

extern int xen_panic_handler_init(void);

int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
		    int (*cpu_dead_cb)(unsigned int));

void xen_pin_vcpu(int cpu);

void xen_emergency_restart(void);
void xen_force_evtchn_callback(void);

#ifdef CONFIG_XEN_PV
void xen_pv_pre_suspend(void);
void xen_pv_post_suspend(int suspend_cancelled);
void xen_start_kernel(struct start_info *si);
#else
static inline void xen_pv_pre_suspend(void) {}
static inline void xen_pv_post_suspend(int suspend_cancelled) {}
#endif

#ifdef CONFIG_XEN_PVHVM
void xen_hvm_post_suspend(int suspend_cancelled);
#else
static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
#endif

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
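
/*
 * Rough illustration of the ratio (the figures are approximate and
 * assume 4 KiB pages and a struct page of about 64 bytes): a domain
 * booted with 1 GiB of base memory may be told about up to 10 GiB of
 * extra memory; the ~2.6 million struct pages describing that extra
 * range consume on the order of 160 MiB of the base allocation,
 * leaving the rest of the base memory usable.
 */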

void xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns);

struct dentry * __init xen_init_debugfs(void);

enum pt_level {
	PT_PGD,
	PT_P4D,
	PT_PUD,
	PT_PMD,
	PT_PTE
};

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
unsigned long xen_read_cr2_direct(void);
void xen_init_mmu_ops(void);
void xen_hvm_init_mmu_ops(void);

/* Multicalls */
struct multicall_space
{
	struct multicall_entry *mc;
	void *args;
};

/* Allocate room for a multicall and its args */
struct multicall_space __xen_mc_entry(size_t args);

DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* Call to start a batch of multiple __xen_mc_entry()s.  Must be
   paired with xen_mc_issue() */
static inline void xen_mc_batch(void)
{
	unsigned long flags;

	/* need to disable interrupts until this entry is complete */
	local_irq_save(flags);
	trace_xen_mc_batch(xen_get_lazy_mode());
	__this_cpu_write(xen_mc_irq_flags, flags);
}

static inline struct multicall_space xen_mc_entry(size_t args)
{
	xen_mc_batch();
	return __xen_mc_entry(args);
}

/* Flush all pending multicalls */
void xen_mc_flush(void);

/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
	trace_xen_mc_issue(mode);

	if ((xen_get_lazy_mode() & mode) == 0)
		xen_mc_flush();

	/* restore flags saved in xen_mc_batch */
	local_irq_restore(this_cpu_read(xen_mc_irq_flags));
}

/* Set up a callback to be called when the current batch is flushed */
void xen_mc_callback(void (*fn)(void *), void *data);

/*
 * Try to extend the arguments of the previous multicall command.  The
 * previous command's op must match.  If it does, then it attempts to
 * extend the argument space allocated to the multicall entry by
 * arg_size bytes.
 *
 * On success the returned multicall_space has mc pointing to the
 * command and args pointing to the newly allocated space; on failure
 * mc is NULL.
 */
struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
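
/*
 * A minimal usage sketch of the batching API above (illustrative only;
 * the mmu_update hypercall and the XEN_LAZY_MMU mode are merely
 * plausible examples of an op and a lazy mode):
 *
 *	struct multicall_space mcs;
 *	struct mmu_update *u;
 *
 *	xen_mc_batch();
 *	mcs = __xen_mc_entry(sizeof(*u));
 *	u = mcs.args;
 *	u->ptr = ...;
 *	u->val = ...;
 *	mcs.mc->op = __HYPERVISOR_mmu_update;
 *	mcs.mc->args[0] = (unsigned long)u;
 *	mcs.mc->args[1] = 1;		(one request)
 *	mcs.mc->args[2] = 0;		(no success count wanted)
 *	mcs.mc->args[3] = DOMID_SELF;
 *	xen_mc_issue(XEN_LAZY_MMU);
 *
 * xen_mc_batch() disables interrupts and saves the flags; xen_mc_issue()
 * restores them and flushes the batch unless the matching lazy mode is
 * active, so the two calls must always be paired.
 */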

extern bool is_xen_pmu;

irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
#ifdef CONFIG_XEN_HAVE_VPMU
void xen_pmu_init(int cpu);
void xen_pmu_finish(int cpu);
#else
static inline void xen_pmu_init(int cpu) {}
static inline void xen_pmu_finish(int cpu) {}
#endif
bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read);
int pmu_apic_update(uint32_t reg);
u64 xen_read_pmc(int counter);

#ifdef CONFIG_SMP

void asm_cpu_bringup_and_idle(void);
asmlinkage void cpu_bringup_and_idle(void);

extern void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector);
extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
					 int vector);
extern void xen_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);

extern int xen_smp_intr_init(unsigned int cpu);
extern void xen_smp_intr_free(unsigned int cpu);
int xen_smp_intr_init_pv(unsigned int cpu);
void xen_smp_intr_free_pv(unsigned int cpu);

void xen_smp_count_cpus(void);
void xen_smp_cpus_done(unsigned int max_cpus);

void xen_smp_send_reschedule(int cpu);
void xen_smp_send_call_function_ipi(const struct cpumask *mask);
void xen_smp_send_call_function_single_ipi(int cpu);

void __noreturn xen_cpu_bringup_again(unsigned long stack);

struct xen_common_irq {
	int irq;
	char *name;
};
#else /* CONFIG_SMP */

static inline int xen_smp_intr_init(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free(unsigned int cpu) {}

static inline int xen_smp_intr_init_pv(unsigned int cpu)
{
	return 0;
}
static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
static inline void xen_smp_count_cpus(void) { }
#endif /* CONFIG_SMP */

#ifdef CONFIG_XEN_PV
void xen_hypercall_pv(void);
#endif
void xen_hypercall_hvm(void);
void xen_hypercall_amd(void);
void xen_hypercall_intel(void);
void xen_hypercall_setfunc(void);
void *__xen_hypercall_setfunc(void);

#endif /* XEN_OPS_H */