xref: /linux/arch/x86/hyperv/hv_crash.c (revision feb06d2690bb826fd33798a99ce5cff8d07b38f9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * X86 specific Hyper-V root partition kdump/crash support module
4  *
5  * Copyright (C) 2025, Microsoft, Inc.
6  *
7  * This module implements hypervisor RAM collection into vmcore for both the
8  * hypervisor crash and the Linux root crash cases. Hyper-V implements
9  * a disable hypercall with a 32bit protected mode ABI callback. This
10  * mechanism must be used to unlock hypervisor RAM. Since the hypervisor RAM
11  * is already mapped in Linux, it is automatically collected into Linux vmcore,
12  * and can be examined by the crash command (raw RAM dump) or windbg.
13  *
14  * At a high level:
15  *
16  *  Hypervisor Crash:
17  *    Upon crash, hypervisor goes into an emergency minimal dispatch loop, a
18  *    restrictive mode with very limited hypercall and MSR support. On each
19  *    cpu it then injects an NMI into the root vcpu. In its NMI handler, Linux
20  *    checks a shared page to tell whether the hypervisor has crashed. This
21  *    shared page is set up in hv_root_crash_init during boot.
22  *
23  *  Linux Crash:
24  *    In case of Linux crash, the callback hv_crash_stop_other_cpus will send
25  *    NMIs to all cpus, then proceed to the crash_nmi_callback where it waits
26  *    for all cpus to be in NMI.
27  *
28  *  NMI Handler (upon quorum):
29  *    Eventually, in both cases, all cpus will end up in the NMI handler.
30  *    Hyper-V requires that disabling the hypervisor be done from the BSP. So
31  *    the BSP NMI handler saves current context, does some fixups and makes
32  *    the hypercall to disable the hypervisor, ie, devirtualize. Hypervisor
33  *    at that point will suspend all vcpus (except the BSP), unlock all its
34  *    RAM, and return to Linux at the 32bit mode entry RIP.
35  *
36  *  The Linux 32bit entry trampoline then restores long mode and calls the C
37  *  function here to restore context and continue execution to crash kexec.
38  */
39 
40 #include <linux/delay.h>
41 #include <linux/kexec.h>
42 #include <linux/crash_dump.h>
43 #include <linux/panic.h>
44 #include <asm/apic.h>
45 #include <asm/desc.h>
46 #include <asm/page.h>
47 #include <asm/pgalloc.h>
48 #include <asm/mshyperv.h>
49 #include <asm/nmi.h>
50 #include <asm/idtentry.h>
51 #include <asm/reboot.h>
52 #include <asm/intel_pt.h>
53 
54 bool hv_crash_enabled;
55 EXPORT_SYMBOL_GPL(hv_crash_enabled);
56 
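/*
 * CPU context saved before the disable hypercall and restored in
 * hv_crash_c_entry() after the hypervisor returns control to Linux.
 */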
57 struct hv_crash_ctxt {
58 	ulong rsp;
59 	ulong cr0;
60 	ulong cr2;
61 	ulong cr4;
62 	ulong cr8;
63 
64 	u16 cs;
65 	u16 ss;
66 	u16 ds;
67 	u16 es;
68 	u16 fs;
69 	u16 gs;
70 
71 	u16 gdt_fill;
72 	struct desc_ptr gdtr;
73 	char idt_fill[6];
74 	struct desc_ptr idtr;
75 
76 	u64 gsbase;
77 	u64 efer;
78 	u64 pat;
79 };
80 static struct hv_crash_ctxt hv_crash_ctxt;
81 
82 /* Shared hypervisor page that contains crash dump area we peek into.
83  * NB: windbg looks for "hv_cda" symbol so don't change it.
84  */
85 static struct hv_crashdump_area *hv_cda;
86 
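/*
 * trampoline_pa: PA of the below 4G page holding the 32bit asm entry stub.
 * devirt_arg: PA of struct hv_crash_tramp_data; both are handed to the
 * disable hypercall. hv_has_crashed/lx_has_crashed record which side crashed.
 */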
87 static u32 trampoline_pa, devirt_arg;
88 static atomic_t crash_cpus_wait;
89 static void *hv_crash_ptpgs[4];
90 static bool hv_has_crashed, lx_has_crashed;
91 
92 static void __noreturn hv_panic_timeout_reboot(void)
93 {
94 	#define PANIC_TIMER_STEP 100
95 
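	/* panic_timeout < 0: reboot right away; 0: never reboot, just spin */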
96 	if (panic_timeout > 0) {
97 		int i;
98 
99 		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP)
100 			mdelay(PANIC_TIMER_STEP);
101 	}
102 
103 	if (panic_timeout)
104 		native_wrmsrq(HV_X64_MSR_RESET, 1);    /* get hyp to reboot */
105 
106 	for (;;)
107 		cpu_relax();
108 }
109 
110 /* This cannot be inlined as it needs stack */
111 static noinline __noclone void hv_crash_restore_tss(void)
112 {
113 	load_TR_desc();
114 }
115 
116 /* This cannot be inlined as it needs stack */
117 static noinline void hv_crash_clear_kernpt(void)
118 {
119 	pgd_t *pgd;
120 	p4d_t *p4d;
121 
122 	/* Clear entry so it's not confusing to someone looking at the core */
123 	pgd = pgd_offset_k(trampoline_pa);
124 	p4d = p4d_offset(pgd, trampoline_pa);
125 	native_p4d_clear(p4d);
126 }
127 
128 /*
129  * This is the C entry point from the asm glue code after the disable hypercall.
130  * We enter here in IA32-e long mode, ie, full 64bit mode running on kernel
131  * page tables with our below 4G page identity mapped, but using a temporary
132  * GDT. ds/fs/gs/es are null. ss is not usable. bp is null. stack is not
133  * available. We restore the kernel GDT and the rest of the context, and continue
134  * to kexec.
135  */
136 static asmlinkage void __noreturn hv_crash_c_entry(void)
137 {
138 	struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
139 
140 	/* first thing, restore kernel gdt */
141 	native_load_gdt(&ctxt->gdtr);
142 
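	/* with the kernel GDT in place, reload stack and data segment selectors */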
143 	asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
144 	asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
145 
146 	asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
147 	asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
148 	asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
149 	asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
150 
151 	native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
152 	asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
153 
154 	asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8));
155 	asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4));
156 	asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr2));
157 
158 	native_load_idt(&ctxt->idtr);
159 	native_wrmsrq(MSR_GS_BASE, ctxt->gsbase);
160 	native_wrmsrq(MSR_EFER, ctxt->efer);
161 
162 	/* restore the original kernel CS now via far return */
163 	asm volatile("movzwq %0, %%rax\n\t"
164 		     "pushq %%rax\n\t"
165 		     "pushq $1f\n\t"
166 		     "lretq\n\t"
167 		     "1:nop\n\t" : : "m"(ctxt->cs) : "rax");
168 
169 	/* We are in asmlinkage without a stack frame, hence make C function
170 	 * calls, which will set up stack frames.
171 	 */
172 	hv_crash_restore_tss();
173 	hv_crash_clear_kernpt();
174 
175 	/* we are now fully in devirtualized normal kernel mode */
176 	__crash_kexec(NULL);
177 
178 	hv_panic_timeout_reboot();
179 }
180 /* Tell objtool we are using the lretq long jump in the above function intentionally */
181 STACK_FRAME_NON_STANDARD(hv_crash_c_entry);
182 
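/*
 * ltr faults on a busy TSS descriptor, so mark it available again before
 * hv_crash_restore_tss() reloads it after devirtualization.
 */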
183 static void hv_mark_tss_not_busy(void)
184 {
185 	struct desc_struct *desc = get_current_gdt_rw();
186 	tss_desc tss;
187 
188 	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
189 	tss.type = 0x9;        /* available 64-bit TSS. 0xB is busy TSS */
190 	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
191 }
192 
193 /* Save essential context */
194 static void hv_hvcrash_ctxt_save(void)
195 {
196 	struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
197 
198 	asm volatile("movq %%rsp,%0" : "=m"(ctxt->rsp));
199 
200 	ctxt->cr0 = native_read_cr0();
201 	ctxt->cr4 = native_read_cr4();
202 
203 	asm volatile("movq %%cr2, %0" : "=a"(ctxt->cr2));
204 	asm volatile("movq %%cr8, %0" : "=a"(ctxt->cr8));
205 
206 	asm volatile("movl %%cs, %%eax" : "=a"(ctxt->cs));
207 	asm volatile("movl %%ss, %%eax" : "=a"(ctxt->ss));
208 	asm volatile("movl %%ds, %%eax" : "=a"(ctxt->ds));
209 	asm volatile("movl %%es, %%eax" : "=a"(ctxt->es));
210 	asm volatile("movl %%fs, %%eax" : "=a"(ctxt->fs));
211 	asm volatile("movl %%gs, %%eax" : "=a"(ctxt->gs));
212 
213 	native_store_gdt(&ctxt->gdtr);
214 	store_idt(&ctxt->idtr);
215 
216 	ctxt->gsbase = __rdmsr(MSR_GS_BASE);
217 	ctxt->efer = __rdmsr(MSR_EFER);
218 	ctxt->pat = __rdmsr(MSR_IA32_CR_PAT);
219 }
220 
221 /* Add trampoline page to the kernel pagetable for transition to kernel PT */
222 static void hv_crash_fixup_kernpt(void)
223 {
224 	pgd_t *pgd;
225 	p4d_t *p4d;
226 
227 	pgd = pgd_offset_k(trampoline_pa);
228 	p4d = p4d_offset(pgd, trampoline_pa);
229 
230 	/* trampoline_pa is below 4G, so no pre-existing entry to clobber */
231 	p4d_populate(&init_mm, p4d, (pud_t *)hv_crash_ptpgs[1]);
232 	p4d->p4d = p4d->p4d & ~(_PAGE_NX);    /* enable execute */
233 }
234 
235 /*
236  * Notify the hyp that Linux has crashed. This will cause the hyp to quiesce
237  * and suspend all guest VPs.
238  */
239 static void hv_notify_prepare_hyp(void)
240 {
241 	u64 status;
242 	struct hv_input_notify_partition_event *input;
243 	struct hv_partition_event_root_crashdump_input *cda;
244 
245 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
246 	cda = &input->input.crashdump_input;
247 	memset(input, 0, sizeof(*input));
248 	input->event = HV_PARTITION_EVENT_ROOT_CRASHDUMP;
249 
250 	cda->crashdump_action = HV_CRASHDUMP_ENTRY;
251 	status = hv_do_hypercall(HVCALL_NOTIFY_PARTITION_EVENT, input, NULL);
252 	if (!hv_result_success(status))
253 		return;
254 
255 	cda->crashdump_action = HV_CRASHDUMP_SUSPEND_ALL_VPS;
256 	hv_do_hypercall(HVCALL_NOTIFY_PARTITION_EVENT, input, NULL);
257 }
258 
259 /*
260  * Common function for all cpus before devirtualization.
261  *
262  * Hypervisor crash: all cpus get here in NMI context.
263  * Linux crash: the panicking cpu gets here at base level, all others in NMI
264  *		context. Note, the panicking cpu may not be the BSP.
265  *
266  * The function is not inlined so it will show on the stack. It is named so
267  * because the crash cmd looks for certain well known function names on the
268  * stack before looking into the cpu saved note in the elf section, and
269  * that work is currently incomplete.
270  *
271  * Notes:
272  *  Hypervisor crash:
273  *    - the hypervisor is in a very restrictive mode at this point and any
274  *	vmexit it cannot handle would result in reboot. So, no mumbo jumbo,
275  *	just get to kexec as quickly as possible.
276  *
277  *  Devirtualization is supported from the BSP only at present.
278  */
279 static noinline __noclone void crash_nmi_callback(struct pt_regs *regs)
280 {
281 	struct hv_input_disable_hyp_ex *input;
282 	u64 status;
283 	int msecs = 1000, ccpu = smp_processor_id();
284 
285 	if (ccpu == 0) {
286 		/* crash_save_cpu() will be done in the kexec path */
287 		cpu_emergency_stop_pt();	/* disable performance trace */
288 		atomic_inc(&crash_cpus_wait);
289 	} else {
290 		crash_save_cpu(regs, ccpu);
291 		cpu_emergency_stop_pt();	/* disable performance trace */
292 		atomic_inc(&crash_cpus_wait);
293 		for (;;)
294 			cpu_relax();
295 	}
296 
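	/* wait up to ~1 second for the other cpus to check in */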
297 	while (atomic_read(&crash_cpus_wait) < num_online_cpus() && msecs--)
298 		mdelay(1);
299 
300 	stop_nmi();
301 	if (!hv_has_crashed)
302 		hv_notify_prepare_hyp();
303 
304 	if (crashing_cpu == -1)
305 		crashing_cpu = ccpu;		/* crash cmd uses this */
306 
307 	hv_hvcrash_ctxt_save();
308 	hv_mark_tss_not_busy();
309 	hv_crash_fixup_kernpt();
310 
311 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
312 	memset(input, 0, sizeof(*input));
313 	input->rip = trampoline_pa;
314 	input->arg = devirt_arg;
315 
316 	status = hv_do_hypercall(HVCALL_DISABLE_HYP_EX, input, NULL);
317 
318 	hv_panic_timeout_reboot();
319 }
320 
321 
322 static DEFINE_SPINLOCK(hv_crash_reboot_lk);
323 
324 /*
325  * Generic NMI callback handler: could be called without any crash also.
326  *   hv crash: hypervisor injects NMIs into all cpus
327  *   lx crash: panicking cpu sends NMI to all but self via crash_stop_other_cpus
328  */
329 static int hv_crash_nmi_local(unsigned int cmd, struct pt_regs *regs)
330 {
331 	if (!hv_has_crashed && hv_cda && hv_cda->cda_valid)
332 		hv_has_crashed = true;
333 
334 	if (!hv_has_crashed && !lx_has_crashed)
335 		return NMI_DONE;	/* ignore the NMI */
336 
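	/*
	 * No kdump kernel means there is nothing to collect; reboot instead.
	 * The trylock ensures only one cpu initiates the reboot.
	 */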
337 	if (hv_has_crashed && !kexec_crash_loaded()) {
338 		if (spin_trylock(&hv_crash_reboot_lk))
339 			hv_panic_timeout_reboot();
340 		else
341 			for (;;)
342 				cpu_relax();
343 	}
344 
345 	crash_nmi_callback(regs);
346 
347 	return NMI_DONE;
348 }
349 
350 /*
351  * hv_crash_stop_other_cpus() == smp_ops.crash_stop_other_cpus
352  *
353  * On normal Linux panic, this is called twice: first from panic and then again
354  * from native_machine_crash_shutdown.
355  *
356  * In case of hyperv, 3 ways to get here:
357  *  1. hv crash (only BSP will get here):
358  *	BSP : NMI callback -> DisableHv -> hv_crash_asm32 -> hv_crash_c_entry
359  *		  -> __crash_kexec -> native_machine_crash_shutdown
360  *		  -> crash_smp_send_stop -> smp_ops.crash_stop_other_cpus
361  *  Linux panic:
362  *	2. panic cpu x: panic() -> crash_smp_send_stop
363  *				     -> smp_ops.crash_stop_other_cpus
364  *	3. BSP: native_machine_crash_shutdown -> crash_smp_send_stop
365  *
366  * NB: noclone and non standard stack because of call to crash_setup_regs().
367  */
368 static void __noclone hv_crash_stop_other_cpus(void)
369 {
370 	static bool crash_stop_done;
371 	struct pt_regs lregs;
372 	int ccpu = smp_processor_id();
373 
374 	if (hv_has_crashed)
375 		return;		/* all cpus already in NMI handler path */
376 
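	/* without a kdump kernel loaded, just notify the hyp and reboot */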
377 	if (!kexec_crash_loaded()) {
378 		hv_notify_prepare_hyp();
379 		hv_panic_timeout_reboot();	/* no return */
380 	}
381 
382 	/* If the hv crashes also, we could come here again before cpus_stopped
383 	 * is set in crash_smp_send_stop(). So use our own check.
384 	 */
385 	if (crash_stop_done)
386 		return;
387 	crash_stop_done = true;
388 
389 	/* Linux has crashed: hv is healthy, we can IPI safely */
390 	lx_has_crashed = true;
391 	wmb();			/* NMI handlers look at lx_has_crashed */
392 
393 	apic->send_IPI_allbutself(NMI_VECTOR);
394 
395 	if (crashing_cpu == -1)
396 		crashing_cpu = ccpu;		/* crash cmd uses this */
397 
398 	/* crash_setup_regs() happens in kexec also, but for the kexec cpu which
399 	 * is the BSP. We could be here on a non-BSP cpu; collect regs if so.
400 	 */
401 	if (ccpu)
402 		crash_setup_regs(&lregs, NULL);
403 
404 	crash_nmi_callback(&lregs);
405 }
406 STACK_FRAME_NON_STANDARD(hv_crash_stop_other_cpus);
407 
408 /* This GDT is accessed in IA32-e compat mode, which uses 32bit addresses */
409 struct hv_gdtreg_32 {
410 	u16 fill;
411 	u16 limit;
412 	u32 address;
413 } __packed;
414 
415 /* We need a CS with the L bit set to enter IA32-e long mode from 32bit compat mode */
416 struct hv_crash_tramp_gdt {
417 	u64 null;	/* index 0, selector 0, null selector */
418 	u64 cs64;	/* index 1, selector 8, cs64 selector */
419 } __packed;
420 
421 /* No stack, so jump via far ptr in memory to load the 64bit CS */
422 struct hv_cs_jmptgt {
423 	u32 address;
424 	u16 csval;
425 	u16 fill;
426 } __packed;
427 
428 /* Linux use only, hypervisor doesn't look at this struct */
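/* Field offsets must match the ones used by the asm stub; see the BUILD_BUG_ONs below */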
429 struct hv_crash_tramp_data {
430 	u64 tramp32_cr3;
431 	u64 kernel_cr3;
432 	struct hv_gdtreg_32 gdtr32;
433 	struct hv_crash_tramp_gdt tramp_gdt;
434 	struct hv_cs_jmptgt cs_jmptgt;
435 	u64 c_entry_addr;
436 } __packed;
437 
438 /*
439  * Set up a temporary gdt to allow the asm code to switch to long mode.
440  * Since the asm code is relocated/copied to a below 4G page, it cannot use rip
441  * relative addressing, hence we must use trampoline_pa here. Also, save other
442  * info like the jmp and C entry targets for the same reason.
443  *
444  * Returns: 0 on success, -1 on error
445  */
446 static int hv_crash_setup_trampdata(u64 trampoline_va)
447 {
448 	int size, offs;
449 	void *dest;
450 	struct hv_crash_tramp_data *tramp;
451 
452 	/* These must match exactly the ones in the corresponding asm file */
453 	BUILD_BUG_ON(offsetof(struct hv_crash_tramp_data, tramp32_cr3) != 0);
454 	BUILD_BUG_ON(offsetof(struct hv_crash_tramp_data, kernel_cr3) != 8);
455 	BUILD_BUG_ON(offsetof(struct hv_crash_tramp_data, gdtr32.limit) != 18);
456 	BUILD_BUG_ON(offsetof(struct hv_crash_tramp_data,
457 						     cs_jmptgt.address) != 40);
458 	BUILD_BUG_ON(offsetof(struct hv_crash_tramp_data, c_entry_addr) != 48);
459 
460 	/* hv_crash_asm_end is beyond last byte by 1 */
461 	size = &hv_crash_asm_end - &hv_crash_asm32;
462 	if (size + sizeof(struct hv_crash_tramp_data) > PAGE_SIZE) {
463 		pr_err("%s: trampoline page overflow\n", __func__);
464 		return -1;
465 	}
466 
467 	dest = (void *)trampoline_va;
468 	memcpy(dest, &hv_crash_asm32, size);
469 
470 	dest += size;
471 	dest = (void *)round_up((ulong)dest, 16);
472 	tramp = (struct hv_crash_tramp_data *)dest;
473 
474 	/* see MAX_ASID_AVAILABLE in tlb.c: "PCID 0 is reserved for use by
475 	 * non-PCID-aware users". Build cr3 with pcid 0
476 	 */
477 	tramp->tramp32_cr3 = __sme_pa(hv_crash_ptpgs[0]);
478 
479 	/* Note, when restoring X86_CR4_PCIDE, cr3[11:0] must be zero */
480 	tramp->kernel_cr3 = __sme_pa(init_mm.pgd);
481 
482 	tramp->gdtr32.limit = sizeof(struct hv_crash_tramp_gdt);
483 	tramp->gdtr32.address = trampoline_pa +
484 				   (ulong)&tramp->tramp_gdt - trampoline_va;
485 
486 	 /* base:0 limit:0xfffff type:b dpl:0 P:1 L:1 D:0 avl:0 G:1 */
487 	tramp->tramp_gdt.cs64 = 0x00af9a000000ffff;
488 
489 	tramp->cs_jmptgt.csval = 0x8;
490 	offs = (ulong)&hv_crash_asm64 - (ulong)&hv_crash_asm32;
491 	tramp->cs_jmptgt.address = trampoline_pa + offs;
492 
493 	tramp->c_entry_addr = (u64)&hv_crash_c_entry;
494 
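	/* PA of the tramp data; passed as the arg to the disable hypercall */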
495 	devirt_arg = trampoline_pa + (ulong)dest - trampoline_va;
496 
497 	return 0;
498 }
499 
500 /*
501  * Build 32bit trampoline page table for transition from protected mode
502  * non-paging to long-mode paging. This transition needs pagetables below 4G.
503  */
504 static void hv_crash_build_tramp_pt(void)
505 {
506 	p4d_t *p4d;
507 	pud_t *pud;
508 	pmd_t *pmd;
509 	pte_t *pte;
510 	u64 pa, addr = trampoline_pa;
511 
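	/*
	 * ptpgs[0..3] are the four levels (top to PTE) of a minimal table
	 * that identity maps only the trampoline page.
	 */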
512 	p4d = hv_crash_ptpgs[0] + pgd_index(addr) * sizeof(p4d);
513 	pa = virt_to_phys(hv_crash_ptpgs[1]);
514 	set_p4d(p4d, __p4d(_PAGE_TABLE | pa));
515 	p4d->p4d &= ~(_PAGE_NX);	/* enable execute */
516 
517 	pud = hv_crash_ptpgs[1] + pud_index(addr) * sizeof(pud);
518 	pa = virt_to_phys(hv_crash_ptpgs[2]);
519 	set_pud(pud, __pud(_PAGE_TABLE | pa));
520 
521 	pmd = hv_crash_ptpgs[2] + pmd_index(addr) * sizeof(pmd);
522 	pa = virt_to_phys(hv_crash_ptpgs[3]);
523 	set_pmd(pmd, __pmd(_PAGE_TABLE | pa));
524 
525 	pte = hv_crash_ptpgs[3] + pte_index(addr) * sizeof(pte);
526 	set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
527 }
528 
529 /*
530  * Setup trampoline for devirtualization:
531  *  - a page below 4G, ie 32bit addr containing asm glue code that hyp jmps to
532  *    in protected mode.
533  *  - 4 pages for a temporary page table that asm code uses to turn paging on
534  *  - a temporary gdt to use in the compat mode.
535  *
536  *  Returns: 0 on success
537  */
538 static int hv_crash_trampoline_setup(void)
539 {
540 	int i, rc, order;
541 	struct page *page;
542 	u64 trampoline_va;
543 	gfp_t flags32 = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
544 
545 	/* page for 32bit trampoline assembly code + hv_crash_tramp_data */
546 	page = alloc_page(flags32);
547 	if (page == NULL) {
548 		pr_err("%s: failed to alloc asm stub page\n", __func__);
549 		return -1;
550 	}
551 
552 	trampoline_va = (u64)page_to_virt(page);
553 	trampoline_pa = (u32)page_to_phys(page);
554 
555 	order = 2;	   /* alloc 2^2 pages */
556 	page = alloc_pages(flags32, order);
557 	if (page == NULL) {
558 		pr_err("%s: failed to alloc pt pages\n", __func__);
559 		free_page(trampoline_va);
560 		return -1;
561 	}
562 
563 	for (i = 0; i < 4; i++, page++)
564 		hv_crash_ptpgs[i] = page_to_virt(page);
565 
566 	hv_crash_build_tramp_pt();
567 
568 	rc = hv_crash_setup_trampdata(trampoline_va);
569 	if (rc)
570 		goto errout;
571 
572 	return 0;
573 
574 errout:
575 	free_page(trampoline_va);
576 	free_pages((ulong)hv_crash_ptpgs[0], order);
577 
578 	return rc;
579 }
580 
581 /* Setup for kdump kexec to collect hypervisor RAM when running as root */
582 void hv_root_crash_init(void)
583 {
584 	int rc;
585 	struct hv_input_get_system_property *input;
586 	struct hv_output_get_system_property *output;
587 	unsigned long flags;
588 	u64 status;
589 	union hv_pfn_range cda_info;
590 
591 	if (pgtable_l5_enabled()) {
592 		pr_err("Hyper-V: crash dump not yet supported on 5level PTs\n");
593 		return;
594 	}
595 
596 	rc = register_nmi_handler(NMI_LOCAL, hv_crash_nmi_local, NMI_FLAG_FIRST,
597 				  "hv_crash_nmi");
598 	if (rc) {
599 		pr_err("Hyper-V: failed to register crash nmi handler\n");
600 		return;
601 	}
602 
603 	local_irq_save(flags);
604 	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
605 	output = *this_cpu_ptr(hyperv_pcpu_output_arg);
606 
607 	memset(input, 0, sizeof(*input));
608 	input->property_id = HV_SYSTEM_PROPERTY_CRASHDUMPAREA;
609 
610 	status = hv_do_hypercall(HVCALL_GET_SYSTEM_PROPERTY, input, output);
611 	cda_info.as_uint64 = output->hv_cda_info.as_uint64;
612 	local_irq_restore(flags);
613 
614 	if (!hv_result_success(status)) {
615 		pr_err("Hyper-V: %s: property:%d %s\n", __func__,
616 		       input->property_id, hv_result_to_string(status));
617 		goto err_out;
618 	}
619 
620 	if (cda_info.base_pfn == 0) {
621 		pr_err("Hyper-V: hypervisor crash dump area pfn is 0\n");
622 		goto err_out;
623 	}
624 
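	/* shared crashdump area; cda_valid is set there when the hyp crashes */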
625 	hv_cda = phys_to_virt(cda_info.base_pfn << HV_HYP_PAGE_SHIFT);
626 
627 	rc = hv_crash_trampoline_setup();
628 	if (rc)
629 		goto err_out;
630 
631 	smp_ops.crash_stop_other_cpus = hv_crash_stop_other_cpus;
632 
633 	crash_kexec_post_notifiers = true;
634 	hv_crash_enabled = true;
635 	pr_info("Hyper-V: both linux and hypervisor kdump support enabled\n");
636 
637 	return;
638 
639 err_out:
640 	unregister_nmi_handler(NMI_LOCAL, "hv_crash_nmi");
641 	pr_err("Hyper-V: only linux root kdump support enabled\n");
642 }
643