xref: /linux/arch/riscv/kvm/vm.c (revision 7263b4fdb0b240e67e3ebd802e0df761d35a7fdf)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Anup Patel <anup.patel@wdc.com>
7  */
8 
9 #include <linux/errno.h>
10 #include <linux/err.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/kvm_host.h>
14 #include <asm/kvm_mmu.h>
15 
/* Per-VM statistics exported through the KVM binary stats interface. */
const struct kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
/* The descriptor table must have one entry per u64 in struct kvm_vm_stat. */
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));
21 
/*
 * Header of the VM stats file descriptor: the id string immediately
 * follows the header, then the descriptor table, then the stat values.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset =  sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
30 
/*
 * Arch-specific VM initialization: set up the g-stage page table and
 * VMID, then the in-kernel AIA and the guest timer.  On failure the
 * already-acquired resources are released before returning.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_riscv_mmu_alloc_pgd(kvm);
	if (ret)
		goto out;

	ret = kvm_riscv_gstage_vmid_init(kvm);
	if (ret)
		goto out_free_pgd;

	kvm_riscv_aia_init_vm(kvm);
	kvm_riscv_guest_timer_init(kvm);
	return 0;

out_free_pgd:
	kvm_riscv_mmu_free_pgd(kvm);
out:
	return ret;
}
51 
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	/* Tear down vCPUs first; AIA state they may reference goes last. */
	kvm_destroy_vcpus(kvm);

	kvm_riscv_aia_destroy_vm(kvm);
}
58 
59 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
60 			  bool line_status)
61 {
62 	if (!irqchip_in_kernel(kvm))
63 		return -ENXIO;
64 
65 	return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
66 }
67 
68 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
69 		struct kvm *kvm, int irq_source_id,
70 		int level, bool line_status)
71 {
72 	struct kvm_msi msi;
73 
74 	if (!level)
75 		return -1;
76 
77 	msi.address_lo = e->msi.address_lo;
78 	msi.address_hi = e->msi.address_hi;
79 	msi.data = e->msi.data;
80 	msi.flags = e->msi.flags;
81 	msi.devid = e->msi.devid;
82 
83 	return kvm_riscv_aia_inject_msi(kvm, &msi);
84 }
85 
86 static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
87 			     struct kvm *kvm, int irq_source_id,
88 			     int level, bool line_status)
89 {
90 	return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
91 }
92 
93 int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
94 {
95 	struct kvm_irq_routing_entry *ents;
96 	int i, rc;
97 
98 	ents = kzalloc_objs(*ents, lines);
99 	if (!ents)
100 		return -ENOMEM;
101 
102 	for (i = 0; i < lines; i++) {
103 		ents[i].gsi = i;
104 		ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
105 		ents[i].u.irqchip.irqchip = 0;
106 		ents[i].u.irqchip.pin = i;
107 	}
108 	rc = kvm_set_irq_routing(kvm, ents, lines, 0);
109 	kfree(ents);
110 
111 	return rc;
112 }
113 
/* User-defined GSI routing is only meaningful with an in-kernel irqchip. */
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
118 
119 int kvm_set_routing_entry(struct kvm *kvm,
120 			  struct kvm_kernel_irq_routing_entry *e,
121 			  const struct kvm_irq_routing_entry *ue)
122 {
123 	int r = -EINVAL;
124 
125 	switch (ue->type) {
126 	case KVM_IRQ_ROUTING_IRQCHIP:
127 		e->set = kvm_riscv_set_irq;
128 		e->irqchip.irqchip = ue->u.irqchip.irqchip;
129 		e->irqchip.pin = ue->u.irqchip.pin;
130 		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
131 		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
132 			goto out;
133 		break;
134 	case KVM_IRQ_ROUTING_MSI:
135 		e->set = kvm_set_msi;
136 		e->msi.address_lo = ue->u.msi.address_lo;
137 		e->msi.address_hi = ue->u.msi.address_hi;
138 		e->msi.data = ue->u.msi.data;
139 		e->msi.flags = ue->flags;
140 		e->msi.devid = ue->u.msi.devid;
141 		break;
142 	default:
143 		goto out;
144 	}
145 	r = 0;
146 out:
147 	return r;
148 }
149 
150 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
151 			      struct kvm *kvm, int irq_source_id, int level,
152 			      bool line_status)
153 {
154 	if (!level)
155 		return -EWOULDBLOCK;
156 
157 	switch (e->type) {
158 	case KVM_IRQ_ROUTING_MSI:
159 		return kvm_set_msi(e, kvm, irq_source_id, level, line_status);
160 
161 	case KVM_IRQ_ROUTING_IRQCHIP:
162 		return kvm_riscv_set_irq(e, kvm, irq_source_id,
163 					 level, line_status);
164 	}
165 
166 	return -EWOULDBLOCK;
167 }
168 
/* Generic-KVM query: does this VM have an in-kernel irqchip (AIA)? */
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
173 
174 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
175 {
176 	int r;
177 
178 	switch (ext) {
179 	case KVM_CAP_IRQCHIP:
180 		r = kvm_riscv_aia_available();
181 		break;
182 	case KVM_CAP_IOEVENTFD:
183 	case KVM_CAP_USER_MEMORY:
184 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
185 	case KVM_CAP_ONE_REG:
186 	case KVM_CAP_READONLY_MEM:
187 	case KVM_CAP_MP_STATE:
188 	case KVM_CAP_IMMEDIATE_EXIT:
189 	case KVM_CAP_SET_GUEST_DEBUG:
190 		r = 1;
191 		break;
192 	case KVM_CAP_NR_VCPUS:
193 		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
194 		break;
195 	case KVM_CAP_MAX_VCPUS:
196 		r = KVM_MAX_VCPUS;
197 		break;
198 	case KVM_CAP_NR_MEMSLOTS:
199 		r = KVM_USER_MEM_SLOTS;
200 		break;
201 	case KVM_CAP_VM_GPA_BITS:
202 		if (!kvm)
203 			r = kvm_riscv_gstage_gpa_bits(kvm_riscv_gstage_max_pgd_levels);
204 		else
205 			r = kvm_riscv_gstage_gpa_bits(kvm->arch.pgd_levels);
206 		break;
207 	default:
208 		r = 0;
209 		break;
210 	}
211 
212 	return r;
213 }
214 
/*
 * KVM_ENABLE_CAP on the VM fd.  Supported capabilities:
 *  - KVM_CAP_RISCV_MP_STATE_RESET: reset vCPU MP state on vCPU reset.
 *  - KVM_CAP_VM_GPA_BITS: select the g-stage page-table depth from the
 *    requested guest-physical address width (args[0]); only allowed
 *    while the VM has no vCPUs and no memslots.
 */
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	/* None of the capabilities handled here takes flags. */
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_RISCV_MP_STATE_RESET:
		kvm->arch.mp_state_reset = true;
		return 0;
	case KVM_CAP_VM_GPA_BITS: {
		unsigned long gpa_bits = cap->args[0];
		unsigned long new_levels;
		int r = 0;

		/* Decide target pgd levels from requested gpa_bits */
#ifdef CONFIG_64BIT
		if (gpa_bits <= 41)
			new_levels = 3;        /* Sv39x4 */
		else if (gpa_bits <= 50)
			new_levels = 4;        /* Sv48x4 */
		else if (gpa_bits <= 59)
			new_levels = 5;        /* Sv57x4 */
		else
			return -EINVAL;
#else
		/* 32-bit: only Sv32x4 */
		if (gpa_bits <= 34)
			new_levels = 2;
		else
			return -EINVAL;
#endif
		/* Never exceed what the host g-stage mode supports. */
		if (new_levels > kvm_riscv_gstage_max_pgd_levels)
			return -EINVAL;

		/* Follow KVM's lock ordering: kvm->lock -> kvm->slots_lock. */
		mutex_lock(&kvm->lock);
		mutex_lock(&kvm->slots_lock);

		/*
		 * Changing the page-table depth is only safe before any
		 * vCPU is created or any memslot is installed; both checks
		 * are stable while the two locks above are held.
		 */
		if (kvm->created_vcpus || !kvm_are_all_memslots_empty(kvm))
			r = -EBUSY;
		else
			kvm->arch.pgd_levels = new_levels;

		mutex_unlock(&kvm->slots_lock);
		mutex_unlock(&kvm->lock);

		return r;
	}
	default:
		return -EINVAL;
	}
}
267 
/* No RISC-V specific VM ioctls are implemented; reject everything. */
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
272