xref: /linux/arch/x86/include/asm/mshyperv.h (revision eb914cfe72f4c948b2318b1381f6d2e08d43b63c)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

#define VP_INVAL	U32_MAX

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;

/*
 * Generate the guest ID.
 */

static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				       __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}

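/*
 * Illustrative usage sketch (not part of this header's ABI): init code
 * typically builds the guest ID from the running kernel version and writes
 * it to the guest OS ID MSR. LINUX_VERSION_CODE comes from <linux/version.h>;
 * the zero d_info arguments are assumptions for the example, not requirements.
 *
 *	u64 guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 *
 *	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */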

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (i.e. set to
	 * HVMSG_NONE) happens before we read MessagePending
	 * and EOM. Otherwise, the EOM will not deliver any
	 * more messages since there is no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause a message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}

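/*
 * Minimal usage sketch, assuming a per-CPU SynIC message page as used by the
 * VMBus message handling in drivers/hv: once a message has been copied out
 * and processed, the slot is released so the hypervisor can deliver the next
 * one. The variable names below are hypothetical.
 *
 *	struct hv_message *msg = this_cpu_msg_page + VMBUS_MESSAGE_SINT;
 *	u32 msg_type = READ_ONCE(msg->header.message_type);
 *
 *	if (msg_type != HVMSG_NONE) {
 *		... handle the message ...
 *		vmbus_signal_eom(msg, msg_type);
 *	}
 */
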
#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)

void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}


#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;
extern void  __percpu  **hyperv_pcpu_input_arg;

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     :  "r" (output_address),
				THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}

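/*
 * Illustrative sketch, not a definitive calling contract: callers pass
 * hypervisor-visible buffers and check the low bits of the return value
 * against HV_STATUS_SUCCESS. The HVCALL_POST_MESSAGE code and the
 * 'aligned_msg' buffer below are assumptions for the example.
 *
 *	u64 status;
 *
 *	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		return -EIO;
 */
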
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      :	"A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}

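/*
 * Minimal usage sketch: the fast variant passes its single 8-byte argument in
 * registers and needs no hypercall input page. 'code' and 'arg' below are
 * placeholders, not constants defined by this header.
 *
 *	u64 status = hv_do_fast_hypercall8(code, arg);
 *
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		pr_warn("fast hypercall 0x%x failed: 0x%llx\n", code, status);
 */
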
/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}

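/*
 * Illustrative sketch of a rep hypercall, loosely modelled on the TLB flush
 * path: 'gva_n' (number of GVA list entries) and 'flush' (a prebuilt input
 * page) are assumptions for the example, not definitions from this header.
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		... fall back to native flushing ...
 */
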
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * when making hypercalls that refer to specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}

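/*
 * Minimal usage sketch: hypercall arguments that name processors must use
 * VP numbers, not Linux CPU numbers. 'ipi_arg' below stands in for a
 * hypothetical HVCALL_SEND_IPI input structure and only illustrates the
 * translation step.
 *
 *	int vp = hv_cpu_number_to_vp_number(smp_processor_id());
 *
 *	__set_bit(vp, (unsigned long *)&ipi_arg->cpu_mask);
 */
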
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
	 * structs are not cleared between calls; we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

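/*
 * Illustrative sketch of building a sparse VP set for an "ex" hypercall.
 * 'flush' stands in for a hypercall input page containing a struct hv_vpset,
 * and the 'flush_all' label is hypothetical; both are assumptions drawn from
 * the TLB flush path, not definitions made in this header.
 *
 *	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
 *	if (nr_bank < 0)
 *		goto flush_all;
 */
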
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
#else
static inline void hv_apic_init(void) {}
#endif

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	u64 scale, offset;
	u32 sequence;

	/*
	 * The protocol for reading the Hyper-V TSC page is specified in the
	 * Hypervisor Top-Level Functional Specification ver. 3.0 and above.
	 * To get the reference time we must do the following:
	 * - READ ReferenceTscSequence
	 *   A special '0' value indicates the time source is unreliable and we
	 *   need to use something else. The currently published specification
	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
	 *   instead of '0' as the special value, see commit c35b82ef0294.
	 * - ReferenceTime =
	 *        ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
	 * - READ ReferenceTscSequence again. In case its value has changed
	 *   since our first reading, we need to discard ReferenceTime and
	 *   repeat the whole sequence as the hypervisor was updating the page
	 *   in between.
	 */
	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;
		/*
		 * Make sure we read sequence before we read other values from
		 * the TSC page.
		 */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = rdtsc_ordered();

		/*
		 * Make sure we read sequence after we read all other values
		 * from the TSC page.
		 */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}

static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 cur_tsc;

	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}

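/*
 * Minimal usage sketch of a clocksource-style read, assuming the TSC page
 * has already been mapped and published via hv_get_tsc_page(); the fallback
 * MSR read is an assumption modelled on the reference-counter path, not a
 * requirement of this header.
 *
 *	u64 now = hv_read_tsc_page(hv_get_tsc_page());
 *
 *	if (now == U64_MAX)
 *		rdmsrl(HV_X64_MSR_TIME_REF_COUNT, now);
 */
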
#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return NULL;
}

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	BUG();
	return U64_MAX;
}
#endif
#endif