// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>

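/*
 * Global VMID allocator state: vmid_next is the next VMID to hand out,
 * vmid_version counts how many times the VMID space has wrapped, and
 * vmid_lock serializes allocation. vmid_version starts at 1 so that the
 * per-VM version of 0 set by kvm_riscv_gstage_vmid_init() always reads
 * as stale, forcing a VMID allocation on first use.
 */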
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
static unsigned long vmid_bits __ro_after_init;
static DEFINE_SPINLOCK(vmid_lock);

void __init kvm_riscv_gstage_vmid_detect(void)
{
	unsigned long old;

	/* Figure out the number of VMID bits implemented in hardware */
	old = csr_read(CSR_HGATP);
	csr_write(CSR_HGATP, old | HGATP_VMID);
	vmid_bits = csr_read(CSR_HGATP);
	vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
	vmid_bits = fls_long(vmid_bits);
	csr_write(CSR_HGATP, old);

	/* We polluted the local TLB so flush all guest TLB entries */
	kvm_riscv_local_hfence_gvma_all();

	/* Don't use VMIDs if there are fewer of them than possible host CPUs */
	if ((1UL << vmid_bits) < num_possible_cpus())
		vmid_bits = 0;
}
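
/*
 * Illustrative helper (a sketch added for exposition; this name is not
 * part of the original file and nothing here calls it): the VMID-space
 * size implied by the probe above. For example, if the read-back VMID
 * field was 0x7f then fls_long() gives 7 and 1UL << 7 == 128 VMIDs
 * exist; a host with more than 128 possible CPUs would have zeroed
 * vmid_bits in the check above.
 */
static unsigned long __maybe_unused kvm_riscv_gstage_vmid_count(void)
{
	/* With vmid_bits == 0, VMIDs are unused, so report none */
	return vmid_bits ? 1UL << vmid_bits : 0;
}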

unsigned long kvm_riscv_gstage_vmid_bits(void)
{
	return vmid_bits;
}

int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
	/* Mark the initial VMID and VMID version invalid */
	kvm->arch.vmid.vmid_version = 0;
	kvm->arch.vmid.vmid = 0;

	return 0;
}

bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
{
	if (!vmid_bits)
		return false;

	return unlikely(READ_ONCE(vmid->vmid_version) !=
			READ_ONCE(vmid_version));
}
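
/*
 * Note on the generation scheme above: kvm_riscv_gstage_vmid_init()
 * starts every VM at vmid_version == 0 while the global vmid_version
 * starts at 1, so (when VMIDs are in use) the first call to this helper
 * for a freshly created VM always reports a change.
 */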

static void __local_hfence_gvma_all(void *info)
{
	kvm_riscv_local_hfence_gvma_all();
}

void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_vcpu *v;
	struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;

	if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
		return;

	spin_lock(&vmid_lock);

	/*
	 * Re-check the vmid_version while holding the lock, in case
	 * another VCPU already allocated a valid VMID for this VM
	 * while we were waiting for the lock.
	 */
	if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
		spin_unlock(&vmid_lock);
		return;
	}

	/* First user of a new VMID version? */
	if (unlikely(vmid_next == 0)) {
		WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1);
		vmid_next = 1;

		/*
		 * We ran out of VMIDs, so we increment vmid_version and
		 * start assigning VMIDs from 1.
		 *
		 * This also means the existing VMID assignments of all
		 * Guest instances are invalid and we have to force VMID
		 * re-assignment for all Guest instances. Guest instances
		 * that are not running will automatically pick up new
		 * VMIDs because they call kvm_riscv_gstage_vmid_update()
		 * whenever they enter the in-kernel run loop. For Guest
		 * instances that are already running, we force VM exits
		 * on all host CPUs using an IPI and flush all Guest TLBs.
		 */
		on_each_cpu_mask(cpu_online_mask, __local_hfence_gvma_all,
				 NULL, 1);
	}

	vmid->vmid = vmid_next;
	vmid_next++;
	vmid_next &= (1UL << vmid_bits) - 1;

	WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));

	spin_unlock(&vmid_lock);

	/* Request G-stage page table update for all VCPUs */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}
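
/*
 * A minimal usage sketch (the real caller lives outside this file and
 * this function name is hypothetical): the VCPU run loop is expected to
 * refresh the VMID before each guest entry, after which the
 * KVM_REQ_UPDATE_HGATP request made above reprograms hgatp.
 */
static void __maybe_unused vmid_run_loop_sketch(struct kvm_vcpu *vcpu)
{
	/* Allocate a fresh VMID if the global version rolled over */
	kvm_riscv_gstage_vmid_update(vcpu);

	/* Flush stale TLB entries if this VCPU moved between host CPUs */
	kvm_riscv_gstage_vmid_sanitize(vcpu);
}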

void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the
	 * same VMID across all VCPUs of a particular Guest/VM. This means
	 * we might have stale G-stage TLB entries on the current Host CPU
	 * due to some other VCPU of the same Guest which ran previously on
	 * the current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a
	 * VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
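
/*
 * For context, a hedged sketch of how an allocated VMID is assumed to
 * reach hardware (the actual hgatp update is done elsewhere in response
 * to KVM_REQ_UPDATE_HGATP; this function name and its parameters are
 * hypothetical): the VMID is shifted into the hgatp.VMID field next to
 * the G-stage root page-table PPN and the translation mode.
 */
static void __maybe_unused hgatp_program_sketch(unsigned long mode,
						unsigned long root_ppn,
						unsigned long vmid)
{
	unsigned long hgatp = mode;	/* HGATP mode bits, e.g. Sv39x4 */

	hgatp |= (vmid << HGATP_VMID_SHIFT) & HGATP_VMID;
	hgatp |= root_ppn & HGATP_PPN;	/* root table physical page number */

	csr_write(CSR_HGATP, hgatp);
}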