xref: /linux/drivers/hv/hv_proc.c (revision 4f9786035f9e519db41375818e1d0b5f20da2f10)
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
#include <asm/mshyperv.h>

/*
 * See struct hv_deposit_memory. The first u64 is partition ID, the rest
 * are GPAs.
 */
#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
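/* With the hypervisor's fixed 4 KiB page size: 4096 / 8 - 1 = 511 pages per call. */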

/* Deposits exact number of pages. Must be called with interrupts enabled. */
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
	struct page **pages, *page;
	int *counts;
	int num_allocations;
	int i, j, page_count;
	int order;
	u64 status;
	int ret;
	u64 base_pfn;
	struct hv_deposit_memory *input_page;
	unsigned long flags;

	if (num_pages > HV_DEPOSIT_MAX)
		return -E2BIG;
	if (!num_pages)
		return 0;

	/* One buffer for page pointers and counts */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	pages = page_address(page);

	counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
	if (!counts) {
		free_page((unsigned long)pages);
		return -ENOMEM;
	}

	/* Allocate all the pages before disabling interrupts */
	i = 0;

	while (num_pages) {
		/* Find highest order we can actually allocate */
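		/* 31 - __builtin_clz(n) is ilog2(n), so (1 << order) <= num_pages. */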
		order = 31 - __builtin_clz(num_pages);

		while (1) {
			pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
			if (pages[i])
				break;
			if (!order) {
				ret = -ENOMEM;
				num_allocations = i;
				goto err_free_allocations;
			}
			--order;
		}

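		/*
		 * split_page() turns the order-N allocation into 1 << N
		 * independent order-0 pages, so each PFN can be listed in the
		 * deposit and freed individually on the error path.
		 */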
		split_page(pages[i], order);
		counts[i] = 1 << order;
		num_pages -= counts[i];
		i++;
	}
	num_allocations = i;

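	/*
	 * The per-CPU hyperv_pcpu_input_arg page is shared with any hypercall
	 * issued from this CPU, so interrupts stay off from here until the
	 * deposit hypercall below has consumed it.
	 */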
	local_irq_save(flags);

	input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

	input_page->partition_id = partition_id;

	/* Populate gpa_page_list - these will fit on the input page */
	for (i = 0, page_count = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j, ++page_count)
			input_page->gpa_page_list[page_count] = base_pfn + j;
	}
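	/*
	 * HVCALL_DEPOSIT_MEMORY is a rep hypercall: page_count is the rep
	 * count, one gpa_page_list entry per repetition.
	 */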
	status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
				     page_count, 0, input_page, NULL);
	local_irq_restore(flags);
	if (!hv_result_success(status)) {
		hv_status_err(status, "\n");
		ret = hv_result_to_errno(status);
		goto err_free_allocations;
	}

	ret = 0;
	goto free_buf;

err_free_allocations:
	for (i = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j)
			__free_page(pfn_to_page(base_pfn + j));
	}

free_buf:
	free_page((unsigned long)pages);
	kfree(counts);
	return ret;
}
EXPORT_SYMBOL_GPL(hv_call_deposit_pages);
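
/*
 * Illustrative example (hypothetical caller, not code in this file):
 * pre-depositing 64 pages into the current partition's pool from NUMA
 * node 0 would look something like
 *
 *	ret = hv_call_deposit_pages(0, hv_current_partition_id, 64);
 *	if (ret)
 *		return ret;
 *
 * A zero return means all 64 pages were handed over to the hypervisor;
 * on failure every page allocated by the function has already been freed.
 */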

int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
	struct hv_input_add_logical_processor *input;
	struct hv_output_add_logical_processor *output;
	u64 status;
	unsigned long flags;
	int ret = 0;

	/*
	 * When adding a logical processor, the hypervisor may return
	 * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
	 * pages and retry.
	 */
	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		/* We don't do anything with the output right now */
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input->lp_index = lp_index;
		input->apic_id = apic_id;
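		/*
		 * Pass the node as an ACPI proximity domain so the hypervisor
		 * knows which NUMA node the new processor belongs to.
		 */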
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
					 input, output);
		local_irq_restore(flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				hv_status_err(status, "cpu %u apic ID: %u\n",
					      lp_index, apic_id);
				ret = hv_result_to_errno(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
{
	struct hv_create_vp *input;
	u64 status;
	unsigned long irq_flags;
	int ret = 0;

	/* Root VPs don't seem to need pages deposited */
	if (partition_id != hv_current_partition_id) {
		/* The value 90 is empirically determined. It may change. */
		ret = hv_call_deposit_pages(node, partition_id, 90);
		if (ret)
			return ret;
	}

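	/*
	 * As in hv_call_add_logical_proc(): on HV_STATUS_INSUFFICIENT_MEMORY,
	 * deposit one more page into the target partition's pool and retry.
	 */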
	do {
		local_irq_save(irq_flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->flags = flags;
		input->subnode_type = HV_SUBNODE_ANY;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
		local_irq_restore(irq_flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				hv_status_err(status, "vcpu: %u, lp: %u\n",
					      vp_index, flags);
				ret = hv_result_to_errno(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, partition_id, 1);

	} while (!ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hv_call_create_vp);
196