// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/mshyperv.h>

/*
 * See struct hv_deposit_memory. The first u64 is partition ID, the rest
 * are GPAs.
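 * With a 4 KiB hypervisor page, that works out to 511 GPA entries per call.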
 */
#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)

/* Deposits exact number of pages. Must be called with interrupts enabled. */
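/* Returns 0 on success or a negative errno value on failure. */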
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
	struct page **pages, *page;
	int *counts;
	int num_allocations;
	int i, j, page_count;
	int order;
	u64 status;
	int ret;
	u64 base_pfn;
	struct hv_deposit_memory *input_page;
	unsigned long flags;

	if (num_pages > HV_DEPOSIT_MAX)
		return -E2BIG;
	if (!num_pages)
		return 0;

	/* One buffer for page pointers and counts */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	pages = page_address(page);

	counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
	if (!counts) {
		free_page((unsigned long)pages);
		return -ENOMEM;
	}

	/* Allocate all the pages before disabling interrupts */
	i = 0;

	while (num_pages) {
		/* Find highest order we can actually allocate */
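		/* i.e. the largest order with (1 << order) <= num_pages */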
		order = 31 - __builtin_clz(num_pages);

		while (1) {
			pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
			if (pages[i])
				break;
			if (!order) {
				ret = -ENOMEM;
				num_allocations = i;
				goto err_free_allocations;
			}
			--order;
		}

		split_page(pages[i], order);
		counts[i] = 1 << order;
		num_pages -= counts[i];
		i++;
	}
	num_allocations = i;

	local_irq_save(flags);

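	/*
	 * Use this CPU's pre-allocated hypercall input page; with interrupts
	 * disabled it cannot be reused underneath us on this CPU.
	 */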
	input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

	input_page->partition_id = partition_id;

	/* Populate gpa_page_list - these will fit on the input page */
	for (i = 0, page_count = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j, ++page_count)
			input_page->gpa_page_list[page_count] = base_pfn + j;
	}
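	/* Deposit all pages in one rep hypercall; page_count is the rep count */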
	status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
				     page_count, 0, input_page, NULL);
	local_irq_restore(flags);
	if (!hv_result_success(status)) {
		hv_status_err(status, "\n");
		ret = hv_result_to_errno(status);
		goto err_free_allocations;
	}

	ret = 0;
	goto free_buf;

err_free_allocations:
	for (i = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j)
			__free_page(pfn_to_page(base_pfn + j));
	}

free_buf:
	free_page((unsigned long)pages);
	kfree(counts);
	return ret;
}
EXPORT_SYMBOL_GPL(hv_call_deposit_pages);

int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
	struct hv_input_add_logical_processor *input;
	struct hv_output_add_logical_processor *output;
	u64 status;
	unsigned long flags;
	int ret = 0;

	/*
	 * When adding a logical processor, the hypervisor may return
	 * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
	 * pages and retry.
	 */
	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		/* We don't do anything with the output right now */
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input->lp_index = lp_index;
		input->apic_id = apic_id;
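		/* Tell the hypervisor which NUMA node backs this processor */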
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
					 input, output);
		local_irq_restore(flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				hv_status_err(status, "cpu %u apic ID: %u\n",
					      lp_index, apic_id);
				ret = hv_result_to_errno(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
{
	struct hv_create_vp *input;
	u64 status;
	unsigned long irq_flags;
	int ret = 0;

	/* Root VPs don't seem to need pages deposited */
	if (partition_id != hv_current_partition_id) {
		/* The value 90 is empirically determined. It may change. */
		ret = hv_call_deposit_pages(node, partition_id, 90);
		if (ret)
			return ret;
	}

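	/*
	 * As with HVCALL_ADD_LOGICAL_PROCESSOR, deposit one more page and
	 * retry whenever the hypervisor reports HV_STATUS_INSUFFICIENT_MEMORY.
	 */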
	do {
		local_irq_save(irq_flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->flags = flags;
		input->subnode_type = HV_SUBNODE_ANY;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
		local_irq_restore(irq_flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				hv_status_err(status, "vcpu: %u, lp: %u\n",
					      vp_index, flags);
				ret = hv_result_to_errno(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, partition_id, 1);

	} while (!ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hv_call_create_vp);