// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */
5
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_mmu.h>
8 #include <asm/kvm_vcpu.h>
9 #include <asm/kvm_csr.h>
10 #include <asm/kvm_eiointc.h>
11 #include <asm/kvm_pch_pic.h>
12
/*
 * Per-VM statistics exposed through the binary stats fd
 * (KVM_GET_STATS_FD): the generic VM stats plus the LoongArch
 * stage-2 mapping counters for small and huge pages.
 */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, pages),
	STATS_DESC_ICOUNTER(VM, hugepages),
};
18
/*
 * Header describing the layout of the stats file: the id string
 * follows the header, the descriptor array follows the id, and the
 * data area follows the descriptors.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
27
kvm_vm_init_features(struct kvm * kvm)28 static void kvm_vm_init_features(struct kvm *kvm)
29 {
30 unsigned long val;
31
32 if (cpu_has_lsx)
33 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LSX);
34 if (cpu_has_lasx)
35 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LASX);
36 if (cpu_has_lbt_x86)
37 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_X86BT);
38 if (cpu_has_lbt_arm)
39 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_ARMBT);
40 if (cpu_has_lbt_mips)
41 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MIPSBT);
42 if (cpu_has_ptw)
43 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PTW);
44 if (cpu_has_msgint)
45 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MSGINT);
46
47 val = read_csr_gcfg();
48 if (val & CSR_GCFG_GPMP)
49 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
50
51 /* Enable all PV features by default */
52 kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
53 kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
54 if (kvm_pvtime_supported()) {
55 kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
56 kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
57 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
58 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
59 }
60 }
61
kvm_arch_init_vm(struct kvm * kvm,unsigned long type)62 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
63 {
64 int i;
65
66 /* Allocate page table to map GPA -> RPA */
67 kvm->arch.pgd = kvm_pgd_alloc();
68 if (!kvm->arch.pgd)
69 return -ENOMEM;
70
71 kvm->arch.phyid_map = kvzalloc_obj(struct kvm_phyid_map,
72 GFP_KERNEL_ACCOUNT);
73 if (!kvm->arch.phyid_map) {
74 free_page((unsigned long)kvm->arch.pgd);
75 kvm->arch.pgd = NULL;
76 return -ENOMEM;
77 }
78 spin_lock_init(&kvm->arch.phyid_map_lock);
79
80 kvm_init_vmcs(kvm);
81 kvm_vm_init_features(kvm);
82
83 /*
84 * cpu_vabits means user address space only (a half of total).
85 * GPA size of VM is the same with the size of user address space.
86 */
87 kvm->arch.gpa_size = BIT(cpu_vabits);
88 kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
89 kvm->arch.invalid_ptes[0] = 0;
90 kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
91 #if CONFIG_PGTABLE_LEVELS > 2
92 kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table;
93 #endif
94 #if CONFIG_PGTABLE_LEVELS > 3
95 kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table;
96 #endif
97 for (i = 0; i <= kvm->arch.root_level; i++)
98 kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);
99
100 return 0;
101 }
102
kvm_arch_destroy_vm(struct kvm * kvm)103 void kvm_arch_destroy_vm(struct kvm *kvm)
104 {
105 kvm_destroy_vcpus(kvm);
106 free_page((unsigned long)kvm->arch.pgd);
107 kvm->arch.pgd = NULL;
108 kvfree(kvm->arch.phyid_map);
109 kvm->arch.phyid_map = NULL;
110 }
111
kvm_vm_ioctl_check_extension(struct kvm * kvm,long ext)112 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
113 {
114 int r;
115
116 switch (ext) {
117 case KVM_CAP_IRQCHIP:
118 case KVM_CAP_ONE_REG:
119 case KVM_CAP_ENABLE_CAP:
120 case KVM_CAP_READONLY_MEM:
121 case KVM_CAP_SYNC_MMU:
122 case KVM_CAP_IMMEDIATE_EXIT:
123 case KVM_CAP_IOEVENTFD:
124 case KVM_CAP_MP_STATE:
125 case KVM_CAP_SET_GUEST_DEBUG:
126 r = 1;
127 break;
128 case KVM_CAP_NR_VCPUS:
129 r = num_online_cpus();
130 break;
131 case KVM_CAP_MAX_VCPUS:
132 r = KVM_MAX_VCPUS;
133 break;
134 case KVM_CAP_MAX_VCPU_ID:
135 r = KVM_MAX_VCPU_IDS;
136 break;
137 case KVM_CAP_NR_MEMSLOTS:
138 r = KVM_USER_MEM_SLOTS;
139 break;
140 default:
141 r = 0;
142 break;
143 }
144
145 return r;
146 }
147
kvm_vm_feature_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)148 static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
149 {
150 switch (attr->attr) {
151 case KVM_LOONGARCH_VM_FEAT_LSX:
152 case KVM_LOONGARCH_VM_FEAT_LASX:
153 case KVM_LOONGARCH_VM_FEAT_X86BT:
154 case KVM_LOONGARCH_VM_FEAT_ARMBT:
155 case KVM_LOONGARCH_VM_FEAT_MIPSBT:
156 case KVM_LOONGARCH_VM_FEAT_PTW:
157 case KVM_LOONGARCH_VM_FEAT_MSGINT:
158 case KVM_LOONGARCH_VM_FEAT_PMU:
159 case KVM_LOONGARCH_VM_FEAT_PV_IPI:
160 case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
161 case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
162 if (kvm_vm_support(&kvm->arch, attr->attr))
163 return 0;
164 return -ENXIO;
165 default:
166 return -ENXIO;
167 }
168 }
169
kvm_vm_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)170 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
171 {
172 switch (attr->group) {
173 case KVM_LOONGARCH_VM_FEAT_CTRL:
174 return kvm_vm_feature_has_attr(kvm, attr);
175 default:
176 return -ENXIO;
177 }
178 }
179
kvm_arch_vm_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)180 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
181 {
182 void __user *argp = (void __user *)arg;
183 struct kvm *kvm = filp->private_data;
184 struct kvm_device_attr attr;
185
186 switch (ioctl) {
187 case KVM_CREATE_IRQCHIP:
188 return 0;
189 case KVM_HAS_DEVICE_ATTR:
190 if (copy_from_user(&attr, argp, sizeof(attr)))
191 return -EFAULT;
192
193 return kvm_vm_has_attr(kvm, &attr);
194 default:
195 return -ENOIOCTLCMD;
196 }
197 }
198
/*
 * KVM_IRQ_LINE handler: route a userspace-asserted interrupt line
 * into the in-kernel irqchip.  Fails with -ENXIO when no in-kernel
 * irqchip is present.
 */
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status)
{
	int status;

	if (!kvm_arch_irqchip_in_kernel(kvm))
		return -ENXIO;

	status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
			     irq_event->irq, irq_event->level, line_status);
	irq_event->status = status;

	return 0;
}
209
kvm_arch_irqchip_in_kernel(struct kvm * kvm)210 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
211 {
212 return (kvm->arch.ipi && kvm->arch.eiointc && kvm->arch.pch_pic);
213 }
214