xref: /linux/arch/mips/kvm/mmu.c (revision 10accd2e6890b57db8e717e9aee91b791f90fe14)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

/* Hardware ASID currently assigned to the guest's kernel mode on this CPU. */
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

/* Hardware ASID currently assigned to the guest's user mode on this CPU. */
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
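
/*
 * Illustrative sketch, not upstream code: the cached per-CPU ASID values
 * carry an allocation "version" in the bits above the hardware ASID field,
 * which is why the helpers above mask it off before the value can be used
 * in EntryHi. Assuming an 8-bit hardware ASID (cpu_asid_mask() == 0xff):
 *
 *	vcpu->arch.guest_kernel_asid[cpu] = 0x101a2;	// version 0x101
 *	kvm_mips_get_kernel_asid(vcpu);			// == 0xa2
 */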

/*
 * Fault in the page at guest physical frame @gfn and cache its host page
 * frame number in guest_pmap[]. Returns 0 on success, -EFAULT if no host
 * page could be found.
 */
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
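
/*
 * Illustrative sketch, not upstream code: a guest_pmap[] entry is only
 * valid once kvm_mips_map_page() has succeeded, so every lookup in this
 * file follows the same pattern:
 *
 *	if (kvm_mips_map_page(kvm, gfn) < 0)
 *		return -1;			// mapping failed
 *	pfn = kvm->arch.guest_pmap[gfn];	// now safe to read
 */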

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_ADDR;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_ADDR;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/*
	 * The host TLB entry covers an even/odd page pair, so both halves
	 * of the pair must lie within guest physical memory.
	 */
	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
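
/*
 * Worked example (illustrative only, assuming 4 KiB pages and the usual
 * guest KSEG0 base of 0x40000000): for badvaddr == 0x40001234,
 *
 *	gfn      = KVM_GUEST_CPHYSADDR(0x40001234) >> 12 = 1	(odd page)
 *	gfn & ~1 = 0	-> pfn0, EntryLo0
 *	gfn | 1  = 1	-> pfn1, EntryLo1
 *	vaddr    = 0x40001234 & (PAGE_MASK << 1) = 0x40000000
 *
 * One host TLB entry thus covers the even/odd pair of guest pages either
 * side of the faulting address.
 */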

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		gfn0 = mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT;
		gfn1 = mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT;

		/*
		 * A malformed guest TLB entry may point outside of guest
		 * physical memory; don't let it index off the end of
		 * guest_pmap[].
		 */
		if (gfn0 >= kvm->arch.guest_pmap_npages ||
		    gfn1 >= kvm->arch.guest_pmap_npages) {
			kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
				__func__, gfn0, gfn1, tlb->tlb_hi);
			kvm_mips_dump_guest_tlbs(vcpu);
			return -1;
		}

		if (kvm_mips_map_page(kvm, gfn0) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, gfn1) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[gfn0];
		pfn1 = kvm->arch.guest_pmap[gfn1];
	}

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[0] & ENTRYLO_D) |
		(tlb->tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[1] & ENTRYLO_D) |
		(tlb->tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
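
/*
 * Illustrative EntryLo encoding (assumptions, not upstream code): on a
 * MIPS32-style TLB the PFN field starts at bit 6 (mips3_paddr_to_tlbpfn()
 * shifts the physical address right by 6), the cache coherency attribute
 * sits at ENTRYLO_C_SHIFT, and D (dirty/writable) and V (valid) are single
 * bits below it. For host pfn 0x1234, cacheable (C == 3), dirty and valid:
 *
 *	entrylo = mips3_paddr_to_tlbpfn(0x1234UL << PAGE_SHIFT) |
 *		  (3 << ENTRYLO_C_SHIFT) | ENTRYLO_D | ENTRYLO_V;
 *
 * Note that both handlers above take the cache attribute from the host's
 * _page_cachable_default, while the mapped-segment handler inherits D and
 * V from the guest's TLB entry.
 */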

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
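
/*
 * Illustrative sketch, not upstream code: with an 8-bit hardware ASID and
 * cpu_asid_inc() == 1, allocation rolls over like this:
 *
 *	asid_cache(cpu) == 0x1ff	// version 1, last hardware ASID
 *	asid = 0x1ff + 1 == 0x200	// (0x200 & 0xff) == 0: wrapped
 *					// -> flush TLB, version becomes 2
 *
 * The flush is needed because every hardware ASID was handed out once in
 * the previous cycle, so stale translations could otherwise alias the new
 * allocations.
 */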

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the pre-empted ASID is no
		 * longer valid; set it to the one appropriate for the guest's
		 * current mode (kernel or user).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(
					vcpu->arch.guest_kernel_asid[cpu] &
					asid_mask);
			else
				write_c0_entryhi(
					vcpu->arch.guest_user_asid[cpu] &
					asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
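
/*
 * Illustrative summary (not upstream documentation): kvm_get_inst() below
 * fetches the instruction word at a guest PC, distinguishing three address
 * classes:
 *  - TLB-mapped guest segments (below KSEG0, or KSEG2/3): resolve via the
 *    host TLB, faulting the guest mapping in first if necessary;
 *  - guest KSEG0: translate directly to a host physical address and read
 *    it through a temporary kernel mapping;
 *  - anything else is rejected with KVM_INVALID_INST.
 */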

u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *opc;
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			/*
			 * Propagate any failure to refill the host TLB, so we
			 * don't read through a mapping that was never
			 * established.
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %d, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *opc;
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		/* Don't map an invalid sentinel address below */
		if (paddr == KVM_INVALID_ADDR) {
			kvm_err("%s: bad paddr for %p\n", __func__, opc);
			return KVM_INVALID_INST;
		}
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}