// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>

#include <asm/trace/hyperv.h>

/*
 * See struct hv_deposit_memory. The first u64 is partition ID, the rest
 * are GPAs.
 */
#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)

/* Deposits exact number of pages. Must be called with interrupts enabled. */
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
	struct page **pages, *page;
	int *counts;
	int num_allocations;
	int i, j, page_count;
	int order;
	u64 status;
	int ret;
	u64 base_pfn;
	struct hv_deposit_memory *input_page;
	unsigned long flags;

	if (num_pages > HV_DEPOSIT_MAX)
		return -E2BIG;
	if (!num_pages)
		return 0;

	/* One buffer for page pointers and counts */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	pages = page_address(page);

	counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
	if (!counts) {
		free_page((unsigned long)pages);
		return -ENOMEM;
	}

	/* Allocate all the pages before disabling interrupts */
	i = 0;

	while (num_pages) {
		/* Find highest order we can actually allocate */
		order = 31 - __builtin_clz(num_pages);

		while (1) {
			pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
			if (pages[i])
				break;
			if (!order) {
				ret = -ENOMEM;
				num_allocations = i;
				goto err_free_allocations;
			}
			--order;
		}

		split_page(pages[i], order);
		counts[i] = 1 << order;
		num_pages -= counts[i];
		i++;
	}
	num_allocations = i;

	local_irq_save(flags);

	input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);

	input_page->partition_id = partition_id;

	/* Populate gpa_page_list - these will fit on the input page */
	for (i = 0, page_count = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j, ++page_count)
			input_page->gpa_page_list[page_count] = base_pfn + j;
	}
	status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
				     page_count, 0, input_page, NULL);
	local_irq_restore(flags);
	if (!hv_result_success(status)) {
		pr_err("Failed to deposit pages: %lld\n", status);
		ret = hv_result(status);
		goto err_free_allocations;
	}

	ret = 0;
	goto free_buf;

err_free_allocations:
	for (i = 0; i < num_allocations; ++i) {
		base_pfn = page_to_pfn(pages[i]);
		for (j = 0; j < counts[i]; ++j)
			__free_page(pfn_to_page(base_pfn + j));
	}

free_buf:
	free_page((unsigned long)pages);
	kfree(counts);
	return ret;
}

int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
	struct hv_input_add_logical_processor *input;
	struct hv_output_add_logical_processor *output;
	u64 status;
	unsigned long flags;
	int ret = HV_STATUS_SUCCESS;

	/*
	 * When adding a logical processor, the hypervisor may return
	 * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
	 * pages and retry.
	 */
	do {
		local_irq_save(flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);
		/* We don't do anything with the output right now */
		output = *this_cpu_ptr(hyperv_pcpu_output_arg);

		input->lp_index = lp_index;
		input->apic_id = apic_id;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
					 input, output);
		local_irq_restore(flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
				       lp_index, apic_id, status);
				ret = hv_result(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
	} while (!ret);

	return ret;
}

int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
{
	struct hv_create_vp *input;
	u64 status;
	unsigned long irq_flags;
	int ret = HV_STATUS_SUCCESS;

	/* Root VPs don't seem to need pages deposited */
	if (partition_id != hv_current_partition_id) {
		/* The value 90 is empirically determined. It may change. */
		ret = hv_call_deposit_pages(node, partition_id, 90);
		if (ret)
			return ret;
	}

	do {
		local_irq_save(irq_flags);

		input = *this_cpu_ptr(hyperv_pcpu_input_arg);

		input->partition_id = partition_id;
		input->vp_index = vp_index;
		input->flags = flags;
		input->subnode_type = HvSubnodeAny;
		input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
		status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
		local_irq_restore(irq_flags);

		if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
			if (!hv_result_success(status)) {
				pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
				       vp_index, flags, status);
				ret = hv_result(status);
			}
			break;
		}
		ret = hv_call_deposit_pages(node, partition_id, 1);

	} while (!ret);

	return ret;
}