xref: /linux/arch/loongarch/kvm/vm.c (revision 1928254c5ccb7bdffd7f0334e1ce250e9ce4de94)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_vcpu.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, pages),
	STATS_DESC_ICOUNTER(VM, hugepages),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
					sizeof(kvm_vm_stats_desc),
};

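/*
 * Set up the arch-specific part of a new VM: allocate the root page
 * directory for the GPA translation table and the physical-CPUID (phyid)
 * map, then record the guest physical address space size, the page-table
 * depth and the per-level invalid-table pointers and shifts used later by
 * the MMU code.
 */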
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i;

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.pgd = kvm_pgd_alloc();
	if (!kvm->arch.pgd)
		return -ENOMEM;

	kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
	if (!kvm->arch.phyid_map) {
		free_page((unsigned long)kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		return -ENOMEM;
	}
	spin_lock_init(&kvm->arch.phyid_map_lock);

	kvm_init_vmcs(kvm);

	/* Enable all PV features by default */
	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
	if (kvm_pvtime_supported())
		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);

	kvm->arch.gpa_size = BIT(cpu_vabits - 1);
	kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
	kvm->arch.invalid_ptes[0] = 0;
	kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
#if CONFIG_PGTABLE_LEVELS > 2
	kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
	kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table;
#endif
	for (i = 0; i <= kvm->arch.root_level; i++)
		kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);

	return 0;
}

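/*
 * Tear down the arch-specific VM state: destroy all vCPUs, then release
 * the root page directory and the phyid map allocated at init time.
 */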
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	free_page((unsigned long)kvm->arch.pgd);
	kvm->arch.pgd = NULL;
	kvfree(kvm->arch.phyid_map);
	kvm->arch.phyid_map = NULL;
}

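/*
 * Report which KVM capabilities this architecture supports. Boolean
 * extensions return 1 or 0; the vCPU and memslot limits return counts.
 */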
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

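/*
 * KVM_HAS_DEVICE_ATTR handler for the KVM_LOONGARCH_VM_FEAT_CTRL group:
 * returns 0 if the named feature (LSX, LASX, binary translation, PMU,
 * PV IPI, PV steal time) is usable on this host, -ENXIO otherwise.
 */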
static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_LOONGARCH_VM_FEAT_LSX:
		if (cpu_has_lsx)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_LASX:
		if (cpu_has_lasx)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_X86BT:
		if (cpu_has_lbt_x86)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_ARMBT:
		if (cpu_has_lbt_arm)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_MIPSBT:
		if (cpu_has_lbt_mips)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_PMU:
		if (cpu_has_pmp)
			return 0;
		return -ENXIO;
	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
		return 0;
	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
		if (kvm_pvtime_supported())
			return 0;
		return -ENXIO;
	default:
		return -ENXIO;
	}
}

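/* Dispatch KVM_HAS_DEVICE_ATTR by attribute group. */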
static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_LOONGARCH_VM_FEAT_CTRL:
		return kvm_vm_feature_has_attr(kvm, attr);
	default:
		return -ENXIO;
	}
}

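/*
 * Arch-specific VM ioctls. KVM_CREATE_IRQCHIP is accepted as a no-op here,
 * since the in-kernel interrupt controllers are created separately as KVM
 * devices; unknown ioctls fall back to generic code via -ENOIOCTLCMD.
 */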
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm *kvm = filp->private_data;
	struct kvm_device_attr attr;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP:
		return 0;
	case KVM_HAS_DEVICE_ATTR:
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;

		return kvm_vm_has_attr(kvm, &attr);
	default:
		return -ENOIOCTLCMD;
	}
}

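/*
 * KVM_IRQ_LINE handler: route an interrupt line assertion from userspace
 * to the in-kernel irqchip; fails with -ENXIO until all controllers exist.
 */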
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
{
	if (!kvm_arch_irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level, line_status);

	return 0;
}

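/*
 * The in-kernel irqchip is only usable once the IPI, EIOINTC and PCH-PIC
 * devices have all been created.
 */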
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
}