// arch/riscv/kvm/vm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>

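/*
 * VM-wide statistics exposed through the KVM binary stats interface
 * (KVM_GET_STATS_FD). RISC-V only publishes the generic VM stats; the
 * static_assert below keeps the descriptor table in sync with the
 * layout of struct kvm_vm_stat, and the header describes the binary
 * layout of the data that userspace reads.
 */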
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

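/*
 * Called on KVM_CREATE_VM: allocate the g-stage page-table root, set up
 * the per-VM guest VMID (unwinding the page table on failure), then
 * initialize the in-kernel AIA state and the guest timer.
 */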
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int r;

	r = kvm_riscv_mmu_alloc_pgd(kvm);
	if (r)
		return r;

	r = kvm_riscv_gstage_vmid_init(kvm);
	if (r) {
		kvm_riscv_mmu_free_pgd(kvm);
		return r;
	}

	kvm_riscv_aia_init_vm(kvm);

	kvm_riscv_guest_timer_init(kvm);

	return 0;
}

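/*
 * Called on VM teardown: destroy all vCPUs first, then release the
 * per-VM AIA (in-kernel interrupt controller) state.
 */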
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);

	kvm_riscv_aia_destroy_vm(kvm);
}

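/*
 * KVM_IRQ_LINE handler: assert or de-assert a wired interrupt. Only
 * valid once the in-kernel AIA irqchip has been created.
 */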
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}

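/*
 * Routing callback for KVM_IRQ_ROUTING_MSI entries (e.g. from irqfd):
 * translate the routing entry into a struct kvm_msi and inject it via
 * the AIA. MSIs are edge events, so a zero "level" injects nothing.
 */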
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	struct kvm_msi msi;

	if (!level)
		return -1;

	msi.address_lo = e->msi.address_lo;
	msi.address_hi = e->msi.address_hi;
	msi.data = e->msi.data;
	msi.flags = e->msi.flags;
	msi.devid = e->msi.devid;

	return kvm_riscv_aia_inject_msi(kvm, &msi);
}

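/*
 * Routing callback for KVM_IRQ_ROUTING_IRQCHIP entries: forward the
 * pin and level straight to the AIA wired-interrupt injection path.
 */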
static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
			     struct kvm *kvm, int irq_source_id,
			     int level, bool line_status)
{
	return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}

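/*
 * Install an identity GSI-to-pin routing table for "lines" wired
 * interrupts on irqchip 0, so each GSI maps to the pin of the same
 * number until userspace overrides the routing.
 */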
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
	struct kvm_irq_routing_entry *ents;
	int i, rc;

	ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
	if (!ents)
		return -ENOMEM;

	for (i = 0; i < lines; i++) {
		ents[i].gsi = i;
		ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
		ents[i].u.irqchip.irqchip = 0;
		ents[i].u.irqchip.pin = i;
	}
	rc = kvm_set_irq_routing(kvm, ents, lines, 0);
	kfree(ents);

	return rc;
}

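/* Userspace may only set up IRQ routing once an in-kernel irqchip exists. */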
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

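/*
 * Translate a userspace kvm_irq_routing_entry into its kernel
 * counterpart: validate irqchip/pin bounds for wired entries and copy
 * the address/data/devid fields for MSI entries.
 */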
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = kvm_riscv_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		e->msi.flags = ue->flags;
		e->msi.devid = ue->u.msi.devid;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}

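/*
 * Fast-path injection from atomic context (e.g. irqfd wakeup). Both
 * routing types can be injected without sleeping here; returning
 * -EWOULDBLOCK defers the injection to the non-atomic path instead.
 */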
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	if (!level)
		return -EWOULDBLOCK;

	switch (e->type) {
	case KVM_IRQ_ROUTING_MSI:
		return kvm_set_msi(e, kvm, irq_source_id, level, line_status);

	case KVM_IRQ_ROUTING_IRQCHIP:
		return kvm_riscv_set_irq(e, kvm, irq_source_id,
					 level, line_status);
	}

	return -EWOULDBLOCK;
}

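/* Report to generic KVM code whether the in-kernel AIA irqchip is set up. */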
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

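/*
 * KVM_CHECK_EXTENSION on a VM fd: report which capabilities this
 * RISC-V host supports and the associated limits (vCPU counts, memslot
 * count, guest physical address width, etc.).
 */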
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = kvm_riscv_aia_available();
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_VM_GPA_BITS:
		r = kvm_riscv_gstage_gpa_bits;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

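/*
 * KVM_ENABLE_CAP on a VM fd. KVM_CAP_RISCV_MP_STATE_RESET takes no
 * flags and simply latches kvm->arch.mp_state_reset, which the vCPU
 * reset path consults; every other capability is rejected.
 */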
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	switch (cap->cap) {
	case KVM_CAP_RISCV_MP_STATE_RESET:
		if (cap->flags)
			return -EINVAL;
		kvm->arch.mp_state_reset = true;
		return 0;
	default:
		return -EINVAL;
	}
}

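/*
 * No RISC-V specific VM ioctls exist yet, so anything not handled by
 * the generic KVM ioctl code is rejected.
 */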
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}