// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
 */
#define pr_fmt(fmt)  "Hyper-V: " fmt


#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>

#include <asm/trace/hyperv.h>

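/**
 * hyperv_flush_guest_mapping() - Flush a guest physical address space
 * @as: Address space to flush, as passed to the hypervisor (for a nested
 *      guest this is typically the root of its second-level page tables,
 *      e.g. the EPT pointer).
 *
 * Issue the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall to
 * invalidate all cached translations for @as.
 *
 * Return: 0 on success, -ENOTSUPP if the hypercall page or the per-cpu
 * input page is unavailable or the hypercall fails.
 */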
int hyperv_flush_guest_mapping(u64 as)
{
	struct hv_guest_mapping_flush *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;

	if (!hv_hypercall_pg)
		goto fault;

	local_irq_save(flags);

	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;

fault:
	trace_hyperv_nested_flush_guest_mapping(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);

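/**
 * hyperv_fill_flush_guest_mapping_list() - Fill the GPA list of a ranged flush
 * @flush: Hypercall input page holding the GPA list to fill.
 * @start_gfn: First guest frame number of the range to flush.
 * @pages: Number of pages to flush.
 *
 * Split the range into entries of at most HV_MAX_FLUSH_PAGES pages each,
 * suitable for the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST rep hypercall.
 *
 * Return: the number of list entries written, or -ENOSPC if the range
 * would need more than HV_MAX_FLUSH_REP_COUNT entries, in which case the
 * caller should fall back to a full, non-ranged flush.
 */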
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 pages)
{
	u64 cur = start_gfn;
	u64 additional_pages;
	int gpa_n = 0;

	do {
		/*
		 * If the number of flush entries exceeds the maximum rep
		 * count, bail out so that the caller falls back to a full,
		 * non-ranged TLB flush.
		 */
		if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
			return -ENOSPC;

		additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;

		flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
		flush->gpa_list[gpa_n].page.largepage = false;
		flush->gpa_list[gpa_n].page.basepfn = cur;

		pages -= additional_pages + 1;
		cur += additional_pages + 1;
		gpa_n++;
	} while (pages > 0);

	return gpa_n;
}
EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);

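/**
 * hyperv_flush_guest_mapping_range() - Flush a range of a guest address space
 * @as: Address space to flush, as for hyperv_flush_guest_mapping().
 * @fill_flush_list_func: Callback that fills the GPA list in the hypercall
 *                        input page, typically by calling
 *                        hyperv_fill_flush_guest_mapping_list().
 * @data: Opaque pointer passed through to @fill_flush_list_func.
 *
 * Issue the HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST rep hypercall for the
 * ranges described by the filled GPA list.
 *
 * Return: 0 on success, -ENOTSUPP if prerequisites are missing or the
 * callback fails, or the Hyper-V status code if the hypercall itself fails.
 */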
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_flush_list_func, void *data)
{
	struct hv_guest_mapping_flush_list *flush;
	u64 status;
	unsigned long flags;
	int ret = -ENOTSUPP;
	int gpa_n = 0;

	if (!hv_hypercall_pg || !fill_flush_list_func)
		goto fault;

	local_irq_save(flags);

	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto fault;
	}

	flush->address_space = as;
	flush->flags = 0;

	gpa_n = fill_flush_list_func(flush, data);
	if (gpa_n < 0) {
		local_irq_restore(flags);
		goto fault;
	}

	status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
				     gpa_n, 0, flush, NULL);

	local_irq_restore(flags);

	if (hv_result_success(status))
		ret = 0;
	else
		ret = hv_result(status);
fault:
	trace_hyperv_nested_flush_guest_mapping_range(as, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
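
/*
 * A minimal usage sketch (not taken from any in-tree caller): it shows how
 * the helpers above are meant to be combined, with a ranged flush that
 * falls back to flushing the whole address space on failure. The example_*
 * names and the range structure below are hypothetical.
 */
struct example_flush_range {
	u64 start_gfn;
	u64 pages;
};

/* Callback matching hyperv_fill_flush_list_func: fill the GPA list. */
static int example_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
				   void *data)
{
	struct example_flush_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}

/* Flush @pages pages starting at @start_gfn in address space @as. */
static void __maybe_unused example_flush_range(u64 as, u64 start_gfn,
					       u64 pages)
{
	struct example_flush_range range = {
		.start_gfn = start_gfn,
		.pages = pages,
	};

	/*
	 * If the ranged flush is rejected (e.g. the range would need too
	 * many list entries), fall back to flushing the entire address
	 * space.
	 */
	if (hyperv_flush_guest_mapping_range(as, example_fill_flush_list,
					     &range))
		hyperv_flush_guest_mapping(as);
}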