xref: /linux/arch/loongarch/kvm/vm.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_mmu.h>
8 #include <asm/kvm_vcpu.h>
9 #include <asm/kvm_csr.h>
10 #include <asm/kvm_eiointc.h>
11 #include <asm/kvm_pch_pic.h>
12 
/*
 * Per-VM statistics exported through the KVM binary stats interface:
 * the generic VM stats plus the LoongArch page-mapping counters.
 */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, pages),
	STATS_DESC_ICOUNTER(VM, hugepages),
};
18 
/*
 * Header describing the layout of the binary stats blob:
 * [header][name][descriptors][data], with each offset measured
 * from the start of the header.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset =  sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
					sizeof(kvm_vm_stats_desc),
};
27 
28 static void kvm_vm_init_features(struct kvm *kvm)
29 {
30 	unsigned long val;
31 
32 	val = read_csr_gcfg();
33 	if (val & CSR_GCFG_GPMP)
34 		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
35 
36 	/* Enable all PV features by default */
37 	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
38 	kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
39 	if (kvm_pvtime_supported()) {
40 		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
41 		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
42 	}
43 }
44 
45 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
46 {
47 	int i;
48 
49 	/* Allocate page table to map GPA -> RPA */
50 	kvm->arch.pgd = kvm_pgd_alloc();
51 	if (!kvm->arch.pgd)
52 		return -ENOMEM;
53 
54 	kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
55 	if (!kvm->arch.phyid_map) {
56 		free_page((unsigned long)kvm->arch.pgd);
57 		kvm->arch.pgd = NULL;
58 		return -ENOMEM;
59 	}
60 	spin_lock_init(&kvm->arch.phyid_map_lock);
61 
62 	kvm_init_vmcs(kvm);
63 	kvm_vm_init_features(kvm);
64 
65 	/*
66 	 * cpu_vabits means user address space only (a half of total).
67 	 * GPA size of VM is the same with the size of user address space.
68 	 */
69 	kvm->arch.gpa_size = BIT(cpu_vabits);
70 	kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
71 	kvm->arch.invalid_ptes[0] = 0;
72 	kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
73 #if CONFIG_PGTABLE_LEVELS > 2
74 	kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table;
75 #endif
76 #if CONFIG_PGTABLE_LEVELS > 3
77 	kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table;
78 #endif
79 	for (i = 0; i <= kvm->arch.root_level; i++)
80 		kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);
81 
82 	return 0;
83 }
84 
85 void kvm_arch_destroy_vm(struct kvm *kvm)
86 {
87 	kvm_destroy_vcpus(kvm);
88 	free_page((unsigned long)kvm->arch.pgd);
89 	kvm->arch.pgd = NULL;
90 	kvfree(kvm->arch.phyid_map);
91 	kvm->arch.phyid_map = NULL;
92 }
93 
94 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
95 {
96 	int r;
97 
98 	switch (ext) {
99 	case KVM_CAP_IRQCHIP:
100 	case KVM_CAP_ONE_REG:
101 	case KVM_CAP_ENABLE_CAP:
102 	case KVM_CAP_READONLY_MEM:
103 	case KVM_CAP_SYNC_MMU:
104 	case KVM_CAP_IMMEDIATE_EXIT:
105 	case KVM_CAP_IOEVENTFD:
106 	case KVM_CAP_MP_STATE:
107 	case KVM_CAP_SET_GUEST_DEBUG:
108 		r = 1;
109 		break;
110 	case KVM_CAP_NR_VCPUS:
111 		r = num_online_cpus();
112 		break;
113 	case KVM_CAP_MAX_VCPUS:
114 		r = KVM_MAX_VCPUS;
115 		break;
116 	case KVM_CAP_MAX_VCPU_ID:
117 		r = KVM_MAX_VCPU_IDS;
118 		break;
119 	case KVM_CAP_NR_MEMSLOTS:
120 		r = KVM_USER_MEM_SLOTS;
121 		break;
122 	default:
123 		r = 0;
124 		break;
125 	}
126 
127 	return r;
128 }
129 
130 static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
131 {
132 	switch (attr->attr) {
133 	case KVM_LOONGARCH_VM_FEAT_LSX:
134 		if (cpu_has_lsx)
135 			return 0;
136 		return -ENXIO;
137 	case KVM_LOONGARCH_VM_FEAT_LASX:
138 		if (cpu_has_lasx)
139 			return 0;
140 		return -ENXIO;
141 	case KVM_LOONGARCH_VM_FEAT_X86BT:
142 		if (cpu_has_lbt_x86)
143 			return 0;
144 		return -ENXIO;
145 	case KVM_LOONGARCH_VM_FEAT_ARMBT:
146 		if (cpu_has_lbt_arm)
147 			return 0;
148 		return -ENXIO;
149 	case KVM_LOONGARCH_VM_FEAT_MIPSBT:
150 		if (cpu_has_lbt_mips)
151 			return 0;
152 		return -ENXIO;
153 	case KVM_LOONGARCH_VM_FEAT_PTW:
154 		if (cpu_has_ptw)
155 			return 0;
156 		return -ENXIO;
157 	case KVM_LOONGARCH_VM_FEAT_MSGINT:
158 		if (cpu_has_msgint)
159 			return 0;
160 		return -ENXIO;
161 	case KVM_LOONGARCH_VM_FEAT_PMU:
162 	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
163 	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
164 		if (kvm_vm_support(&kvm->arch, attr->attr))
165 			return 0;
166 		return -ENXIO;
167 	default:
168 		return -ENXIO;
169 	}
170 }
171 
172 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
173 {
174 	switch (attr->group) {
175 	case KVM_LOONGARCH_VM_FEAT_CTRL:
176 		return kvm_vm_feature_has_attr(kvm, attr);
177 	default:
178 		return -ENXIO;
179 	}
180 }
181 
182 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
183 {
184 	void __user *argp = (void __user *)arg;
185 	struct kvm *kvm = filp->private_data;
186 	struct kvm_device_attr attr;
187 
188 	switch (ioctl) {
189 	case KVM_CREATE_IRQCHIP:
190 		return 0;
191 	case KVM_HAS_DEVICE_ATTR:
192 		if (copy_from_user(&attr, argp, sizeof(attr)))
193 			return -EFAULT;
194 
195 		return kvm_vm_has_attr(kvm, &attr);
196 	default:
197 		return -ENOIOCTLCMD;
198 	}
199 }
200 
201 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
202 {
203 	if (!kvm_arch_irqchip_in_kernel(kvm))
204 		return -ENXIO;
205 
206 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
207 					irq_event->irq, irq_event->level, line_status);
208 
209 	return 0;
210 }
211 
212 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
213 {
214 	return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
215 }
216