xref: /linux/arch/powerpc/kvm/book3s_hv_rm_mmu.c (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we
	 * don't need to worry about THP collapse/split.  This is
	 * called only in real mode, hence we won't need
	 * irq_save/restore.
	 */
	p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

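/*
 * For illustration: the revmap array is vmalloc'd, so real-mode code
 * must convert an entry pointer to its linear-map alias before
 * dereferencing it, as the callers below do:
 *
 *	struct revmap_entry *rev = &kvm->arch.revmap[pte_index];
 *	if (realmode)
 *		rev = real_vmalloc_addr(rev);
 */
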
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

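/*
 * Typical caller pattern (a sketch, mirroring kvmppc_do_h_remove()
 * below): compute the RB value for the HPTE being invalidated and
 * let global_invalidates() choose between tlbie and tlbiel:
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
 */
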
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

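/*
 * The rmap chain is a circular doubly-linked list threaded through
 * the revmap array, with *rmap holding the index of the head entry.
 * For example, after HPTEs a, b and c are added in that order for
 * the same real page (new entries go in at the tail):
 *
 *	*rmap = KVMPPC_RMAP_PRESENT | a
 *	revmap[a]: forw = b, back = c
 *	revmap[b]: forw = c, back = a
 *	revmap[c]: forw = a, back = b
 */
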
/* Update the changed page order field of an rmap entry */
void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
{
	unsigned long order;

	if (!psize)
		return;
	order = ilog2(psize);
	order <<= KVMPPC_RMAP_CHG_SHIFT;
	if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
		*rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
}
EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);

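/*
 * Worked example: for a 64k page, psize = 0x10000 and
 * order = ilog2(0x10000) = 16.  The shifted order only replaces the
 * stored CHG_ORDER value when it is larger, so the field ends up
 * tracking the largest page size dirtied under this rmap entry.
 */
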
/* Returns a pointer to the revmap entry for the page mapped by an HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If the page tables change after this lookup, we will
	 * retry via mmu_notifier_retry.
	 */
	if (realmode)
		ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
	else {
		local_irq_save(irq_flags);
		ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
	}
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host
		 * page size when the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

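/*
 * Hypercall usage, for illustration only (a sketch; avpn_bits and
 * pp_bits are placeholders, the exact AVPN encoding is defined by the
 * architecture): to map a normal cacheable page the guest builds
 * something like
 *
 *	pteh = avpn_bits | HPTE_V_VALID;
 *	ptel = (gpa & HPTE_R_RPN) | HPTE_R_M | pp_bits;
 *
 * and issues H_ENTER; on H_SUCCESS the index of the HPTE actually
 * used comes back in GPR4 (written through pte_idx_ret above).
 */
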
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "  stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "  isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

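/*
 * The asm above amounts to a non-blocking atomic compare-and-swap:
 * lwarx reads the lock word, a non-zero value means someone else
 * holds the lock and we give up immediately, and otherwise stwcx.
 * tries to store our token, retrying only on reservation loss.
 * Roughly:
 *
 *	old = *lock;
 *	if (old == 0)
 *		*lock = token;	// atomically; isync orders the
 *				// critical section after acquisition
 *	return old == 0;
 */
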
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * The reference (R) and change (C) bits in an HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing an HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0;

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
				be64_to_cpu(hp[1]), pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}

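/*
 * Argument layout, for illustration: H_BULK_REMOVE packs up to four
 * requests into GPR4..GPR11, two doublewords per request.  The top
 * byte of the first doubleword holds the request type (bits 7:6,
 * 1 = valid request, 3 = end of list) and the match flags (bits 1:0),
 * with the PTE index in the low 56 bits; the second doubleword
 * carries the AVPN/andcond value.  A sketch of a single "absolute"
 * remove of pte_index 0x123:
 *
 *	args[0] = (1ul << 62) | 0x123;	// type 1, flags 0 (absolute)
 *	args[1] = 0;
 *	args[2] = 3ul << 62;		// end-of-list marker
 *
 * The return code (0x80 = success, 0x90 = not found, 0xa0 = parameter
 * error, plus the match flags and R/C bits) replaces the top byte of
 * each request's first doubleword, as the code above shows.
 */
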
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte);
		return H_NOT_FOUND;
	}

	v = pte;
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		pte = be64_to_cpu(hpte[1]);
		r = (pte & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

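/*
 * Flag-to-HPTE mapping above, for reference: the shifts move the
 * PAPR-defined flag bits into their HPTE positions, so flag bit 8
 * becomes HPTE_R_PP0 (bit 63, via << 55), flag bits 12-13 become
 * HPTE_R_KEY_HI (bits 60-61, via << 48), and the pp, N and key-lo
 * bits are already in place and are masked in directly.
 */
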
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

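/*
 * Example: with H_READ_4 set, pte_index is rounded down to a multiple
 * of 4 and four consecutive HPTEs are returned, PTE n in GPR4/GPR5,
 * PTE n+1 in GPR6/GPR7, and so on, with the guest's view of the
 * second doubleword substituted from the revmap entries.
 */
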
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			unsigned long psize = hpte_page_size(v, r);
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			rmap = revmap_for_hpte(kvm, v, gr);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_CHANGED;
				kvmppc_update_rmap_change(rmap, psize);
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

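/*
 * Why the single-byte store above is safe: the HPTE is big-endian,
 * so byte 14 of the 16-byte entry holds bits 15:8 of the second
 * doubleword, which include HPTE_R_R (bit 8) but not HPTE_R_C
 * (bit 7, which lives in byte 15).  R can therefore be cleared
 * without a read-modify-write race against hardware updates of C.
 */
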
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/* When called from virtual mode, this function must be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK while we are
 * preempted can lead to deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

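/*
 * Hash computation, for illustration (a 256M segment with 4k base
 * pages):
 *
 *	vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
 *	hash = (vsid ^ ((eaddr & ((1UL << 28) - 1)) >> 12))
 *			& kvm->arch.hpt_mask;
 *
 * Each HPT group holds 8 HPTEs (128 bytes), hence the (hash << 7)
 * when indexing hpt_virt, and the secondary hash probed on the
 * second pass of the loop above is the complement within the mask,
 * hash ^ hpt_mask.
 */
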
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * cannot be resolved here (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
	r = be64_to_cpu(hpte[1]);
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}
993