// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>

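/*
 * Global VMID allocator state: vmid_next is the next VMID to hand out
 * and vmid_version is a generation counter. Bumping vmid_version
 * implicitly invalidates every VMID handed out under the previous
 * version. Both are protected by vmid_lock.
 */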
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
static unsigned long vmid_bits __ro_after_init;
static DEFINE_SPINLOCK(vmid_lock);

void __init kvm_riscv_gstage_vmid_detect(void)
{
	/*
	 * Figure out the number of VMID bits in HW: write all-ones to
	 * the VMID field of HGATP and count how many bits read back.
	 */
	csr_write(CSR_HGATP, (kvm_riscv_gstage_mode(kvm_riscv_gstage_max_pgd_levels) <<
			      HGATP_MODE_SHIFT) | HGATP_VMID);
	vmid_bits = csr_read(CSR_HGATP);
	vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
	vmid_bits = fls_long(vmid_bits);
	csr_write(CSR_HGATP, 0);

	/* We polluted the local TLB so flush all guest TLB entries */
	kvm_riscv_local_hfence_gvma_all();

	/*
	 * Don't use VMIDs if HW provides fewer of them than there are
	 * possible host CPUs.
	 */
	if ((1UL << vmid_bits) < num_possible_cpus())
		vmid_bits = 0;
}

unsigned long kvm_riscv_gstage_vmid_bits(void)
{
	return vmid_bits;
}

int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
	/*
	 * Mark the initial VMID and VMID version invalid so that the
	 * first kvm_riscv_gstage_vmid_update() allocates a fresh VMID.
	 */
	kvm->arch.vmid.vmid_version = 0;
	kvm->arch.vmid.vmid = 0;

	return 0;
}

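/*
 * Returns true if this VM's VMID was allocated under an older
 * vmid_version and must be re-allocated before entering the Guest.
 * Always false when HW VMIDs are not used (vmid_bits == 0).
 */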
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
{
	if (!vmid_bits)
		return false;

	return unlikely(READ_ONCE(vmid->vmid_version) !=
			READ_ONCE(vmid_version));
}

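/* IPI callback: flush all Guest TLB entries on the local CPU */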
static void __local_hfence_gvma_all(void *info)
{
	kvm_riscv_local_hfence_gvma_all();
}

void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_vcpu *v;
	struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;

	if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
		return;

	spin_lock(&vmid_lock);

	/*
	 * Re-check vmid_version under the lock because another VCPU
	 * may have already allocated a valid VMID for this VM while
	 * we were waiting on vmid_lock.
	 */
	if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
		spin_unlock(&vmid_lock);
		return;
	}

	/* First user of a new VMID version? */
	if (unlikely(vmid_next == 0)) {
		WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1);
		vmid_next = 1;

		/*
		 * We ran out of VMIDs so we increment vmid_version and
		 * start assigning VMIDs from 1.
		 *
		 * This also means the existing VMID assignments of all
		 * Guest instances are invalid and we have to force VMID
		 * re-assignment for all Guest instances. Guest instances
		 * that are not running will automatically pick up new
		 * VMIDs because they call kvm_riscv_gstage_vmid_update()
		 * whenever they enter the in-kernel run loop. For Guest
		 * instances that are already running, we force VM exits
		 * on all host CPUs using IPI and flush all Guest TLBs.
		 */
		on_each_cpu_mask(cpu_online_mask, __local_hfence_gvma_all,
				 NULL, 1);
	}

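	/* Hand out the next VMID, wrapping around within vmid_bits */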
	vmid->vmid = vmid_next;
	vmid_next++;
	vmid_next &= (1 << vmid_bits) - 1;

	WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));

	spin_unlock(&vmid_lock);

	/* Request G-stage page table update for all VCPUs */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}