/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 * This can be overridden on the kernel command line, e.g.
 * kvm_cma_resv_ratio=10 reserves 10%.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	/* The hash page table must span at least one CMA chunk (256k) */
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
				       KVM_CMA_CHUNK_ORDER - PAGE_SHIFT,
				       false, &kvm_cma);
	}
}
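/*
 * A worked example of the sizing above (illustrative, not part of the
 * original file): with the default kvm_cma_resv_ratio of 5 and 32 GiB
 * of memory visible to memblock, the reservation comes to
 * 32 GiB * 5 / 100 = ~1.6 GiB of CMA for guest hash page tables.
 *
 * The compiled-out sketch below shows how a caller might pair
 * kvm_alloc_hpt() with kvm_release_hpt().  The helper names, the
 * zeroing and the order-based sizing are assumptions for illustration,
 * not part of this file's API.
 */
#if 0
static struct page *example_hpt_alloc(unsigned long order)
{
	/*
	 * order is log2 of the HPT size in bytes; it must be at least
	 * KVM_CMA_CHUNK_ORDER to satisfy the VM_BUG_ON in kvm_alloc_hpt().
	 */
	unsigned long nr_pages = 1ul << (order - PAGE_SHIFT);
	struct page *page = kvm_alloc_hpt(nr_pages);

	if (!page)
		return NULL;
	/* CMA does not zero the pages for us */
	memset(page_address(page), 0, nr_pages << PAGE_SHIFT);
	return page;
}

static void example_hpt_release(struct page *page, unsigned long order)
{
	kvm_release_hpt(page, 1ul << (order - PAGE_SHIFT));
}
#endif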
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
		threads_running = VCORE_ENTRY_COUNT(vc);
		threads_ceded = hweight32(vc->napping_threads);
		threads_conferring = hweight32(vc->conferring_threads);
		if (threads_ceded + threads_conferring >= threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV-mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is the onlining of secondaries, so
 * we protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* Hypercall numbers are multiples of 4, so divide to get an index */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
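/*
 * Illustrative, compiled-out sketch (not part of the original file):
 * a non-zero hcall_real_table entry means the hypercall has a
 * real-mode handler.  Assuming the usual asm/hvcall.h numbering,
 * where H_CEDE is 0xE0:
 */
#if 0
static void example_hcall_probe(void)
{
	/* 0xE0 / 4 == 0x38: looks up H_CEDE's slot in hcall_real_table */
	if (kvmppc_hcall_impl_hv_realmode(H_CEDE))
		pr_info("H_CEDE is handled in real mode\n");
}
#endif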