xref: /linux/arch/s390/mm/gmap.c (revision fcc79e1714e8c2b8e216dc3149812edd37884eef)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  KVM guest address space mapping code
4  *
5  *    Copyright IBM Corp. 2007, 2020
6  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  *		 David Hildenbrand <david@redhat.com>
8  *		 Janosch Frank <frankja@linux.vnet.ibm.com>
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/pagewalk.h>
13 #include <linux/swap.h>
14 #include <linux/smp.h>
15 #include <linux/spinlock.h>
16 #include <linux/slab.h>
17 #include <linux/swapops.h>
18 #include <linux/ksm.h>
19 #include <linux/mman.h>
20 #include <linux/pgtable.h>
21 #include <asm/page-states.h>
22 #include <asm/pgalloc.h>
23 #include <asm/gmap.h>
24 #include <asm/page.h>
25 #include <asm/tlb.h>
26 
27 #define GMAP_SHADOW_FAKE_TABLE 1ULL
28 
29 static struct page *gmap_alloc_crst(void)
30 {
31 	struct page *page;
32 
33 	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
34 	if (!page)
35 		return NULL;
36 	__arch_set_page_dat(page_to_virt(page), 1UL << CRST_ALLOC_ORDER);
37 	return page;
38 }
39 
40 /**
41  * gmap_alloc - allocate and initialize a guest address space
42  * @limit: maximum address of the gmap address space
43  *
44  * Returns a guest address space structure.
45  */
46 static struct gmap *gmap_alloc(unsigned long limit)
47 {
48 	struct gmap *gmap;
49 	struct page *page;
50 	unsigned long *table;
51 	unsigned long etype, atype;
52 
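	/*
	 * Pick the smallest table type that covers @limit and round the
	 * limit up to the highest address that table type can map:
	 * a segment table covers 2 GB, region-3 covers 4 TB, region-2
	 * covers 8 PB, and region-1 the complete 64-bit address space.
	 */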
53 	if (limit < _REGION3_SIZE) {
54 		limit = _REGION3_SIZE - 1;
55 		atype = _ASCE_TYPE_SEGMENT;
56 		etype = _SEGMENT_ENTRY_EMPTY;
57 	} else if (limit < _REGION2_SIZE) {
58 		limit = _REGION2_SIZE - 1;
59 		atype = _ASCE_TYPE_REGION3;
60 		etype = _REGION3_ENTRY_EMPTY;
61 	} else if (limit < _REGION1_SIZE) {
62 		limit = _REGION1_SIZE - 1;
63 		atype = _ASCE_TYPE_REGION2;
64 		etype = _REGION2_ENTRY_EMPTY;
65 	} else {
66 		limit = -1UL;
67 		atype = _ASCE_TYPE_REGION1;
68 		etype = _REGION1_ENTRY_EMPTY;
69 	}
70 	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
71 	if (!gmap)
72 		goto out;
73 	INIT_LIST_HEAD(&gmap->crst_list);
74 	INIT_LIST_HEAD(&gmap->children);
75 	INIT_LIST_HEAD(&gmap->pt_list);
76 	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
77 	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
78 	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
79 	spin_lock_init(&gmap->guest_table_lock);
80 	spin_lock_init(&gmap->shadow_lock);
81 	refcount_set(&gmap->ref_count, 1);
82 	page = gmap_alloc_crst();
83 	if (!page)
84 		goto out_free;
85 	page->index = 0;
86 	list_add(&page->lru, &gmap->crst_list);
87 	table = page_to_virt(page);
88 	crst_table_init(table, etype);
89 	gmap->table = table;
90 	gmap->asce = atype | _ASCE_TABLE_LENGTH |
91 		_ASCE_USER_BITS | __pa(table);
92 	gmap->asce_end = limit;
93 	return gmap;
94 
95 out_free:
96 	kfree(gmap);
97 out:
98 	return NULL;
99 }
100 
101 /**
102  * gmap_create - create a guest address space
103  * @mm: pointer to the parent mm_struct
 104  * @limit: maximum address of the gmap address space
105  *
106  * Returns a guest address space structure.
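 *
 * Example (illustrative sketch only; the limit value is arbitrary):
 *
 *	gmap = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!gmap)
 *		return -ENOMEM;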
107  */
108 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
109 {
110 	struct gmap *gmap;
111 	unsigned long gmap_asce;
112 
113 	gmap = gmap_alloc(limit);
114 	if (!gmap)
115 		return NULL;
116 	gmap->mm = mm;
117 	spin_lock(&mm->context.lock);
118 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
119 	if (list_is_singular(&mm->context.gmap_list))
120 		gmap_asce = gmap->asce;
121 	else
122 		gmap_asce = -1UL;
123 	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
124 	spin_unlock(&mm->context.lock);
125 	return gmap;
126 }
127 EXPORT_SYMBOL_GPL(gmap_create);
128 
129 static void gmap_flush_tlb(struct gmap *gmap)
130 {
131 	if (MACHINE_HAS_IDTE)
132 		__tlb_flush_idte(gmap->asce);
133 	else
134 		__tlb_flush_global();
135 }
136 
137 static void gmap_radix_tree_free(struct radix_tree_root *root)
138 {
139 	struct radix_tree_iter iter;
140 	unsigned long indices[16];
141 	unsigned long index;
142 	void __rcu **slot;
143 	int i, nr;
144 
145 	/* A radix tree is freed by deleting all of its entries */
146 	index = 0;
147 	do {
148 		nr = 0;
149 		radix_tree_for_each_slot(slot, root, &iter, index) {
150 			indices[nr] = iter.index;
151 			if (++nr == 16)
152 				break;
153 		}
154 		for (i = 0; i < nr; i++) {
155 			index = indices[i];
156 			radix_tree_delete(root, index);
157 		}
158 	} while (nr > 0);
159 }
160 
161 static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
162 {
163 	struct gmap_rmap *rmap, *rnext, *head;
164 	struct radix_tree_iter iter;
165 	unsigned long indices[16];
166 	unsigned long index;
167 	void __rcu **slot;
168 	int i, nr;
169 
170 	/* A radix tree is freed by deleting all of its entries */
171 	index = 0;
172 	do {
173 		nr = 0;
174 		radix_tree_for_each_slot(slot, root, &iter, index) {
175 			indices[nr] = iter.index;
176 			if (++nr == 16)
177 				break;
178 		}
179 		for (i = 0; i < nr; i++) {
180 			index = indices[i];
181 			head = radix_tree_delete(root, index);
182 			gmap_for_each_rmap_safe(rmap, rnext, head)
183 				kfree(rmap);
184 		}
185 	} while (nr > 0);
186 }
187 
188 /**
189  * gmap_free - free a guest address space
190  * @gmap: pointer to the guest address space structure
191  *
192  * No locks required. There are no references to this gmap anymore.
193  */
194 static void gmap_free(struct gmap *gmap)
195 {
196 	struct page *page, *next;
197 
198 	/* Flush tlb of all gmaps (if not already done for shadows) */
199 	if (!(gmap_is_shadow(gmap) && gmap->removed))
200 		gmap_flush_tlb(gmap);
201 	/* Free all segment & region tables. */
202 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
203 		__free_pages(page, CRST_ALLOC_ORDER);
204 	gmap_radix_tree_free(&gmap->guest_to_host);
205 	gmap_radix_tree_free(&gmap->host_to_guest);
206 
207 	/* Free additional data for a shadow gmap */
208 	if (gmap_is_shadow(gmap)) {
209 		struct ptdesc *ptdesc, *n;
210 
211 		/* Free all page tables. */
212 		list_for_each_entry_safe(ptdesc, n, &gmap->pt_list, pt_list)
213 			page_table_free_pgste(ptdesc);
214 		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
215 		/* Release reference to the parent */
216 		gmap_put(gmap->parent);
217 	}
218 
219 	kfree(gmap);
220 }
221 
222 /**
223  * gmap_get - increase reference counter for guest address space
224  * @gmap: pointer to the guest address space structure
225  *
226  * Returns the gmap pointer
227  */
228 struct gmap *gmap_get(struct gmap *gmap)
229 {
230 	refcount_inc(&gmap->ref_count);
231 	return gmap;
232 }
233 EXPORT_SYMBOL_GPL(gmap_get);
234 
235 /**
236  * gmap_put - decrease reference counter for guest address space
237  * @gmap: pointer to the guest address space structure
238  *
239  * If the reference counter reaches zero the guest address space is freed.
240  */
241 void gmap_put(struct gmap *gmap)
242 {
243 	if (refcount_dec_and_test(&gmap->ref_count))
244 		gmap_free(gmap);
245 }
246 EXPORT_SYMBOL_GPL(gmap_put);
247 
248 /**
249  * gmap_remove - remove a guest address space but do not free it yet
250  * @gmap: pointer to the guest address space structure
251  */
252 void gmap_remove(struct gmap *gmap)
253 {
254 	struct gmap *sg, *next;
255 	unsigned long gmap_asce;
256 
257 	/* Remove all shadow gmaps linked to this gmap */
258 	if (!list_empty(&gmap->children)) {
259 		spin_lock(&gmap->shadow_lock);
260 		list_for_each_entry_safe(sg, next, &gmap->children, list) {
261 			list_del(&sg->list);
262 			gmap_put(sg);
263 		}
264 		spin_unlock(&gmap->shadow_lock);
265 	}
266 	/* Remove gmap from the per-mm list */
267 	spin_lock(&gmap->mm->context.lock);
268 	list_del_rcu(&gmap->list);
269 	if (list_empty(&gmap->mm->context.gmap_list))
270 		gmap_asce = 0;
271 	else if (list_is_singular(&gmap->mm->context.gmap_list))
272 		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
273 					     struct gmap, list)->asce;
274 	else
275 		gmap_asce = -1UL;
276 	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
277 	spin_unlock(&gmap->mm->context.lock);
278 	synchronize_rcu();
279 	/* Put reference */
280 	gmap_put(gmap);
281 }
282 EXPORT_SYMBOL_GPL(gmap_remove);
283 
284 /*
285  * gmap_alloc_table is assumed to be called with mmap_lock held
286  */
287 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
288 			    unsigned long init, unsigned long gaddr)
289 {
290 	struct page *page;
291 	unsigned long *new;
292 
293 	/* Since we don't free the gmap table until gmap_free(), we can unlock */
294 	page = gmap_alloc_crst();
295 	if (!page)
296 		return -ENOMEM;
297 	new = page_to_virt(page);
298 	crst_table_init(new, init);
299 	spin_lock(&gmap->guest_table_lock);
300 	if (*table & _REGION_ENTRY_INVALID) {
301 		list_add(&page->lru, &gmap->crst_list);
302 		*table = __pa(new) | _REGION_ENTRY_LENGTH |
303 			(*table & _REGION_ENTRY_TYPE_MASK);
304 		page->index = gaddr;
305 		page = NULL;
306 	}
307 	spin_unlock(&gmap->guest_table_lock);
308 	if (page)
309 		__free_pages(page, CRST_ALLOC_ORDER);
310 	return 0;
311 }
312 
313 /**
314  * __gmap_segment_gaddr - find virtual address from segment pointer
315  * @entry: pointer to a segment table entry in the guest address space
316  *
317  * Returns the virtual address in the guest address space for the segment
318  */
319 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
320 {
321 	struct page *page;
322 	unsigned long offset;
323 
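	/*
	 * page->index of a gmap crst page holds the guest address mapped
	 * by the first entry of that table (see gmap_alloc_table()).
	 */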
324 	offset = (unsigned long) entry / sizeof(unsigned long);
325 	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
326 	page = pmd_pgtable_page((pmd_t *) entry);
327 	return page->index + offset;
328 }
329 
330 /**
331  * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
332  * @gmap: pointer to the guest address space structure
333  * @vmaddr: address in the host process address space
334  *
335  * Returns 1 if a TLB flush is required
336  */
337 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
338 {
339 	unsigned long *entry;
340 	int flush = 0;
341 
342 	BUG_ON(gmap_is_shadow(gmap));
343 	spin_lock(&gmap->guest_table_lock);
344 	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
345 	if (entry) {
346 		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
347 		*entry = _SEGMENT_ENTRY_EMPTY;
348 	}
349 	spin_unlock(&gmap->guest_table_lock);
350 	return flush;
351 }
352 
353 /**
354  * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
355  * @gmap: pointer to the guest address space structure
356  * @gaddr: address in the guest address space
357  *
358  * Returns 1 if a TLB flush is required
359  */
360 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
361 {
362 	unsigned long vmaddr;
363 
364 	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
365 						   gaddr >> PMD_SHIFT);
366 	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
367 }
368 
369 /**
370  * gmap_unmap_segment - unmap segment from the guest address space
371  * @gmap: pointer to the guest address space structure
372  * @to: address in the guest address space
373  * @len: length of the memory area to unmap
374  *
375  * Returns 0 if the unmap succeeded, -EINVAL if not.
376  */
377 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
378 {
379 	unsigned long off;
380 	int flush;
381 
382 	BUG_ON(gmap_is_shadow(gmap));
383 	if ((to | len) & (PMD_SIZE - 1))
384 		return -EINVAL;
385 	if (len == 0 || to + len < to)
386 		return -EINVAL;
387 
388 	flush = 0;
389 	mmap_write_lock(gmap->mm);
390 	for (off = 0; off < len; off += PMD_SIZE)
391 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
392 	mmap_write_unlock(gmap->mm);
393 	if (flush)
394 		gmap_flush_tlb(gmap);
395 	return 0;
396 }
397 EXPORT_SYMBOL_GPL(gmap_unmap_segment);
398 
399 /**
400  * gmap_map_segment - map a segment to the guest address space
401  * @gmap: pointer to the guest address space structure
402  * @from: source address in the parent address space
403  * @to: target address in the guest address space
404  * @len: length of the memory area to map
405  *
 406  * Returns 0 if the mapping succeeded, -EINVAL or -ENOMEM if not.
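 *
 * Example (illustrative sketch only; addresses are arbitrary but must be
 * segment (1 MB) aligned): map one segment of the host mapping at
 * 0x10000000 to guest address 0 -
 *
 *	rc = gmap_map_segment(gmap, 0x10000000, 0x0, PMD_SIZE);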
407  */
408 int gmap_map_segment(struct gmap *gmap, unsigned long from,
409 		     unsigned long to, unsigned long len)
410 {
411 	unsigned long off;
412 	int flush;
413 
414 	BUG_ON(gmap_is_shadow(gmap));
415 	if ((from | to | len) & (PMD_SIZE - 1))
416 		return -EINVAL;
417 	if (len == 0 || from + len < from || to + len < to ||
418 	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
419 		return -EINVAL;
420 
421 	flush = 0;
422 	mmap_write_lock(gmap->mm);
423 	for (off = 0; off < len; off += PMD_SIZE) {
424 		/* Remove old translation */
425 		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
426 		/* Store new translation */
427 		if (radix_tree_insert(&gmap->guest_to_host,
428 				      (to + off) >> PMD_SHIFT,
429 				      (void *) from + off))
430 			break;
431 	}
432 	mmap_write_unlock(gmap->mm);
433 	if (flush)
434 		gmap_flush_tlb(gmap);
435 	if (off >= len)
436 		return 0;
437 	gmap_unmap_segment(gmap, to, len);
438 	return -ENOMEM;
439 }
440 EXPORT_SYMBOL_GPL(gmap_map_segment);
441 
442 /**
443  * __gmap_translate - translate a guest address to a user space address
444  * @gmap: pointer to guest mapping meta data structure
445  * @gaddr: guest address
446  *
447  * Returns user space address which corresponds to the guest address or
448  * -EFAULT if no such mapping exists.
449  * This function does not establish potentially missing page table entries.
450  * The mmap_lock of the mm that belongs to the address space must be held
451  * when this function gets called.
452  *
453  * Note: Can also be called for shadow gmaps.
454  */
455 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
456 {
457 	unsigned long vmaddr;
458 
459 	vmaddr = (unsigned long)
460 		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
461 	/* Note: guest_to_host is empty for a shadow gmap */
462 	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
463 }
464 EXPORT_SYMBOL_GPL(__gmap_translate);
465 
466 /**
467  * gmap_translate - translate a guest address to a user space address
468  * @gmap: pointer to guest mapping meta data structure
469  * @gaddr: guest address
470  *
471  * Returns user space address which corresponds to the guest address or
472  * -EFAULT if no such mapping exists.
473  * This function does not establish potentially missing page table entries.
474  */
475 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
476 {
477 	unsigned long rc;
478 
479 	mmap_read_lock(gmap->mm);
480 	rc = __gmap_translate(gmap, gaddr);
481 	mmap_read_unlock(gmap->mm);
482 	return rc;
483 }
484 EXPORT_SYMBOL_GPL(gmap_translate);
485 
486 /**
487  * gmap_unlink - disconnect a page table from the gmap shadow tables
488  * @mm: pointer to the parent mm_struct
489  * @table: pointer to the host page table
490  * @vmaddr: vm address associated with the host page table
491  */
492 void gmap_unlink(struct mm_struct *mm, unsigned long *table,
493 		 unsigned long vmaddr)
494 {
495 	struct gmap *gmap;
496 	int flush;
497 
498 	rcu_read_lock();
499 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
500 		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
501 		if (flush)
502 			gmap_flush_tlb(gmap);
503 	}
504 	rcu_read_unlock();
505 }
506 
507 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
508 			   unsigned long gaddr);
509 
510 /**
511  * __gmap_link - set up shadow page tables to connect a host to a guest address
512  * @gmap: pointer to guest mapping meta data structure
513  * @gaddr: guest address
514  * @vmaddr: vm address
515  *
516  * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
517  * if the vm address is already mapped to a different guest segment.
518  * The mmap_lock of the mm that belongs to the address space must be held
519  * when this function gets called.
520  */
521 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
522 {
523 	struct mm_struct *mm;
524 	unsigned long *table;
525 	spinlock_t *ptl;
526 	pgd_t *pgd;
527 	p4d_t *p4d;
528 	pud_t *pud;
529 	pmd_t *pmd;
530 	u64 unprot;
531 	int rc;
532 
533 	BUG_ON(gmap_is_shadow(gmap));
534 	/* Create higher level tables in the gmap page table */
535 	table = gmap->table;
536 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
537 		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
538 		if ((*table & _REGION_ENTRY_INVALID) &&
539 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
540 				     gaddr & _REGION1_MASK))
541 			return -ENOMEM;
542 		table = __va(*table & _REGION_ENTRY_ORIGIN);
543 	}
544 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
545 		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
546 		if ((*table & _REGION_ENTRY_INVALID) &&
547 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
548 				     gaddr & _REGION2_MASK))
549 			return -ENOMEM;
550 		table = __va(*table & _REGION_ENTRY_ORIGIN);
551 	}
552 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
553 		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
554 		if ((*table & _REGION_ENTRY_INVALID) &&
555 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
556 				     gaddr & _REGION3_MASK))
557 			return -ENOMEM;
558 		table = __va(*table & _REGION_ENTRY_ORIGIN);
559 	}
560 	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
561 	/* Walk the parent mm page table */
562 	mm = gmap->mm;
563 	pgd = pgd_offset(mm, vmaddr);
564 	VM_BUG_ON(pgd_none(*pgd));
565 	p4d = p4d_offset(pgd, vmaddr);
566 	VM_BUG_ON(p4d_none(*p4d));
567 	pud = pud_offset(p4d, vmaddr);
568 	VM_BUG_ON(pud_none(*pud));
569 	/* large puds cannot yet be handled */
570 	if (pud_leaf(*pud))
571 		return -EFAULT;
572 	pmd = pmd_offset(pud, vmaddr);
573 	VM_BUG_ON(pmd_none(*pmd));
574 	/* Are we allowed to use huge pages? */
575 	if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
576 		return -EFAULT;
577 	/* Link gmap segment table entry location to page table. */
578 	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
579 	if (rc)
580 		return rc;
581 	ptl = pmd_lock(mm, pmd);
582 	spin_lock(&gmap->guest_table_lock);
583 	if (*table == _SEGMENT_ENTRY_EMPTY) {
584 		rc = radix_tree_insert(&gmap->host_to_guest,
585 				       vmaddr >> PMD_SHIFT, table);
586 		if (!rc) {
587 			if (pmd_leaf(*pmd)) {
588 				*table = (pmd_val(*pmd) &
589 					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
590 					| _SEGMENT_ENTRY_GMAP_UC;
591 			} else
592 				*table = pmd_val(*pmd) &
593 					_SEGMENT_ENTRY_HARDWARE_BITS;
594 		}
595 	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
596 		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
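		/*
		 * The gmap entry is still write-protected although the host
		 * pmd is not: drop the stale protection and remember the
		 * potential write in the GMAP_UC (dirty) bit.
		 */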
597 		unprot = (u64)*table;
598 		unprot &= ~_SEGMENT_ENTRY_PROTECT;
599 		unprot |= _SEGMENT_ENTRY_GMAP_UC;
600 		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
601 	}
602 	spin_unlock(&gmap->guest_table_lock);
603 	spin_unlock(ptl);
604 	radix_tree_preload_end();
605 	return rc;
606 }
607 
608 /**
609  * fixup_user_fault_nowait - manually resolve a user page fault without waiting
610  * @mm:		mm_struct of target mm
611  * @address:	user address
 612  * @fault_flags: flags to pass down to handle_mm_fault()
613  * @unlocked:	did we unlock the mmap_lock while retrying
614  *
615  * This function behaves similarly to fixup_user_fault(), but it guarantees
616  * that the fault will be resolved without waiting. The function might drop
617  * and re-acquire the mm lock, in which case @unlocked will be set to true.
618  *
 619  * The guarantee is that the fault itself is handled without waiting, but the
 620  * function might still sleep while taking or re-taking the mmap_lock.
621  *
622  * Context: Needs to be called with mm->mmap_lock held in read mode, and will
623  * return with the lock held in read mode; @unlocked will indicate whether
624  * the lock has been dropped and re-acquired. This is the same behaviour as
625  * fixup_user_fault().
626  *
627  * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
628  * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
629  * memory.
630  */
631 static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
632 				   unsigned int fault_flags, bool *unlocked)
633 {
634 	struct vm_area_struct *vma;
635 	unsigned int test_flags;
636 	vm_fault_t fault;
637 	int rc;
638 
639 	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
640 	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
641 
642 	vma = find_vma(mm, address);
643 	if (unlikely(!vma || address < vma->vm_start))
644 		return -EFAULT;
645 	if (unlikely(!(vma->vm_flags & test_flags)))
646 		return -EFAULT;
647 
648 	fault = handle_mm_fault(vma, address, fault_flags, NULL);
649 	/* the mm lock has been dropped, take it again */
650 	if (fault & VM_FAULT_COMPLETED) {
651 		*unlocked = true;
652 		mmap_read_lock(mm);
653 		return 0;
654 	}
655 	/* the mm lock has not been dropped */
656 	if (fault & VM_FAULT_ERROR) {
657 		rc = vm_fault_to_errno(fault, 0);
658 		BUG_ON(!rc);
659 		return rc;
660 	}
661 	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
662 	if (fault & VM_FAULT_RETRY)
663 		return -EAGAIN;
664 	/* nothing needed to be done and the mm lock has not been dropped */
665 	return 0;
666 }
667 
668 /**
669  * __gmap_fault - resolve a fault on a guest address
670  * @gmap: pointer to guest mapping meta data structure
671  * @gaddr: guest address
672  * @fault_flags: flags to pass down to handle_mm_fault()
673  *
674  * Context: Needs to be called with mm->mmap_lock held in read mode. Might
675  * drop and re-acquire the lock. Will always return with the lock held.
676  */
677 static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
678 {
679 	unsigned long vmaddr;
680 	bool unlocked;
681 	int rc = 0;
682 
683 retry:
684 	unlocked = false;
685 
686 	vmaddr = __gmap_translate(gmap, gaddr);
687 	if (IS_ERR_VALUE(vmaddr))
688 		return vmaddr;
689 
690 	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT)
691 		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
692 	else
693 		rc = fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked);
694 	if (rc)
695 		return rc;
696 	/*
697 	 * In the case that fixup_user_fault unlocked the mmap_lock during
698 	 * fault-in, redo __gmap_translate() to avoid racing with a
699 	 * map/unmap_segment.
700 	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
701 	 * and __gmap_link() must all be called atomically in one go; if the
702 	 * lock had been dropped in between, a retry is needed.
703 	 */
704 	if (unlocked)
705 		goto retry;
706 
707 	return __gmap_link(gmap, gaddr, vmaddr);
708 }
709 
710 /**
711  * gmap_fault - resolve a fault on a guest address
712  * @gmap: pointer to guest mapping meta data structure
713  * @gaddr: guest address
714  * @fault_flags: flags to pass down to handle_mm_fault()
715  *
716  * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
717  * vm address is already mapped to a different guest segment, and -EAGAIN if
718  * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
719  * immediately.
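 *
 * Example (illustrative sketch only): resolve a write fault on guest
 * address gaddr -
 *
 *	rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);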
720  */
721 int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
722 {
723 	int rc;
724 
725 	mmap_read_lock(gmap->mm);
726 	rc = __gmap_fault(gmap, gaddr, fault_flags);
727 	mmap_read_unlock(gmap->mm);
728 	return rc;
729 }
730 EXPORT_SYMBOL_GPL(gmap_fault);
731 
732 /*
 733  * This function is assumed to be called with mmap_lock held.
734  */
735 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
736 {
737 	struct vm_area_struct *vma;
738 	unsigned long vmaddr;
739 	spinlock_t *ptl;
740 	pte_t *ptep;
741 
742 	/* Find the vm address for the guest address */
743 	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
744 						   gaddr >> PMD_SHIFT);
745 	if (vmaddr) {
746 		vmaddr |= gaddr & ~PMD_MASK;
747 
748 		vma = vma_lookup(gmap->mm, vmaddr);
749 		if (!vma || is_vm_hugetlb_page(vma))
750 			return;
751 
752 		/* Get pointer to the page table entry */
753 		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
754 		if (likely(ptep)) {
755 			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
756 			pte_unmap_unlock(ptep, ptl);
757 		}
758 	}
759 }
760 EXPORT_SYMBOL_GPL(__gmap_zap);
761 
762 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
763 {
764 	unsigned long gaddr, vmaddr, size;
765 	struct vm_area_struct *vma;
766 
767 	mmap_read_lock(gmap->mm);
768 	for (gaddr = from; gaddr < to;
769 	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
770 		/* Find the vm address for the guest address */
771 		vmaddr = (unsigned long)
772 			radix_tree_lookup(&gmap->guest_to_host,
773 					  gaddr >> PMD_SHIFT);
774 		if (!vmaddr)
775 			continue;
776 		vmaddr |= gaddr & ~PMD_MASK;
777 		/* Find vma in the parent mm */
778 		vma = find_vma(gmap->mm, vmaddr);
779 		if (!vma)
780 			continue;
781 		/*
782 		 * We do not discard pages that are backed by
783 		 * hugetlbfs, so we don't have to refault them.
784 		 */
785 		if (is_vm_hugetlb_page(vma))
786 			continue;
787 		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
788 		zap_page_range_single(vma, vmaddr, size, NULL);
789 	}
790 	mmap_read_unlock(gmap->mm);
791 }
792 EXPORT_SYMBOL_GPL(gmap_discard);
793 
794 static LIST_HEAD(gmap_notifier_list);
795 static DEFINE_SPINLOCK(gmap_notifier_lock);
796 
797 /**
798  * gmap_register_pte_notifier - register a pte invalidation callback
799  * @nb: pointer to the gmap notifier block
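 *
 * Example (illustrative sketch only; my_notifier_cb is a hypothetical
 * callback with the signature of gmap_notifier->notifier_call):
 *
 *	static struct gmap_notifier nb = { .notifier_call = my_notifier_cb };
 *
 *	gmap_register_pte_notifier(&nb);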
800  */
801 void gmap_register_pte_notifier(struct gmap_notifier *nb)
802 {
803 	spin_lock(&gmap_notifier_lock);
804 	list_add_rcu(&nb->list, &gmap_notifier_list);
805 	spin_unlock(&gmap_notifier_lock);
806 }
807 EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
808 
809 /**
810  * gmap_unregister_pte_notifier - remove a pte invalidation callback
811  * @nb: pointer to the gmap notifier block
812  */
813 void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
814 {
815 	spin_lock(&gmap_notifier_lock);
816 	list_del_rcu(&nb->list);
817 	spin_unlock(&gmap_notifier_lock);
818 	synchronize_rcu();
819 }
820 EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
821 
822 /**
823  * gmap_call_notifier - call all registered invalidation callbacks
824  * @gmap: pointer to guest mapping meta data structure
825  * @start: start virtual address in the guest address space
826  * @end: end virtual address in the guest address space
827  */
828 static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
829 			       unsigned long end)
830 {
831 	struct gmap_notifier *nb;
832 
833 	list_for_each_entry(nb, &gmap_notifier_list, list)
834 		nb->notifier_call(gmap, start, end);
835 }
836 
837 /**
838  * gmap_table_walk - walk the gmap page tables
839  * @gmap: pointer to guest mapping meta data structure
840  * @gaddr: virtual address in the guest address space
841  * @level: page table level to stop at
842  *
843  * Returns a table entry pointer for the given guest address and @level
 844  * @level=0 : returns a pointer to a page table entry (or NULL)
845  * @level=1 : returns a pointer to a segment table entry (or NULL)
846  * @level=2 : returns a pointer to a region-3 table entry (or NULL)
847  * @level=3 : returns a pointer to a region-2 table entry (or NULL)
848  * @level=4 : returns a pointer to a region-1 table entry (or NULL)
849  *
850  * Returns NULL if the gmap page tables could not be walked to the
851  * requested level.
852  *
853  * Note: Can also be called for shadow gmaps.
854  */
855 static inline unsigned long *gmap_table_walk(struct gmap *gmap,
856 					     unsigned long gaddr, int level)
857 {
858 	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
859 	unsigned long *table = gmap->table;
860 
861 	if (gmap_is_shadow(gmap) && gmap->removed)
862 		return NULL;
863 
864 	if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
865 		return NULL;
866 
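	/*
	 * Reject addresses above what this ASCE type can map: 2 GB for a
	 * segment table, 4 TB for region-3, 8 PB for region-2.
	 */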
867 	if (asce_type != _ASCE_TYPE_REGION1 &&
868 	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
869 		return NULL;
870 
871 	switch (asce_type) {
872 	case _ASCE_TYPE_REGION1:
873 		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
874 		if (level == 4)
875 			break;
876 		if (*table & _REGION_ENTRY_INVALID)
877 			return NULL;
878 		table = __va(*table & _REGION_ENTRY_ORIGIN);
879 		fallthrough;
880 	case _ASCE_TYPE_REGION2:
881 		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
882 		if (level == 3)
883 			break;
884 		if (*table & _REGION_ENTRY_INVALID)
885 			return NULL;
886 		table = __va(*table & _REGION_ENTRY_ORIGIN);
887 		fallthrough;
888 	case _ASCE_TYPE_REGION3:
889 		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
890 		if (level == 2)
891 			break;
892 		if (*table & _REGION_ENTRY_INVALID)
893 			return NULL;
894 		table = __va(*table & _REGION_ENTRY_ORIGIN);
895 		fallthrough;
896 	case _ASCE_TYPE_SEGMENT:
897 		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
898 		if (level == 1)
899 			break;
900 		if (*table & _REGION_ENTRY_INVALID)
901 			return NULL;
902 		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
903 		table += (gaddr & _PAGE_INDEX) >> PAGE_SHIFT;
904 	}
905 	return table;
906 }
907 
908 /**
909  * gmap_pte_op_walk - walk the gmap page table, get the page table lock
910  *		      and return the pte pointer
911  * @gmap: pointer to guest mapping meta data structure
912  * @gaddr: virtual address in the guest address space
913  * @ptl: pointer to the spinlock pointer
914  *
915  * Returns a pointer to the locked pte for a guest address, or NULL
916  */
917 static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
918 			       spinlock_t **ptl)
919 {
920 	unsigned long *table;
921 
922 	BUG_ON(gmap_is_shadow(gmap));
923 	/* Walk the gmap page table, lock and get pte pointer */
924 	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
925 	if (!table || *table & _SEGMENT_ENTRY_INVALID)
926 		return NULL;
927 	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
928 }
929 
930 /**
931  * gmap_pte_op_fixup - force a page in and connect the gmap page table
932  * @gmap: pointer to guest mapping meta data structure
933  * @gaddr: virtual address in the guest address space
934  * @vmaddr: address in the host process address space
935  * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
936  *
937  * Returns 0 if the caller can retry __gmap_translate (might fail again),
938  * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
939  * up or connecting the gmap page table.
940  */
941 static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
942 			     unsigned long vmaddr, int prot)
943 {
944 	struct mm_struct *mm = gmap->mm;
945 	unsigned int fault_flags;
946 	bool unlocked = false;
947 
948 	BUG_ON(gmap_is_shadow(gmap));
949 	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
950 	if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
951 		return -EFAULT;
952 	if (unlocked)
953 		/* lost mmap_lock, caller has to retry __gmap_translate */
954 		return 0;
955 	/* Connect the page tables */
956 	return __gmap_link(gmap, gaddr, vmaddr);
957 }
958 
959 /**
960  * gmap_pte_op_end - release the page table lock
961  * @ptep: pointer to the locked pte
962  * @ptl: pointer to the page table spinlock
963  */
964 static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
965 {
966 	pte_unmap_unlock(ptep, ptl);
967 }
968 
969 /**
970  * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
971  *		      and return the pmd pointer
972  * @gmap: pointer to guest mapping meta data structure
973  * @gaddr: virtual address in the guest address space
974  *
975  * Returns a pointer to the pmd for a guest address, or NULL
976  */
977 static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
978 {
979 	pmd_t *pmdp;
980 
981 	BUG_ON(gmap_is_shadow(gmap));
982 	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
983 	if (!pmdp)
984 		return NULL;
985 
986 	/* without huge pages, there is no need to take the table lock */
987 	if (!gmap->mm->context.allow_gmap_hpage_1m)
988 		return pmd_none(*pmdp) ? NULL : pmdp;
989 
990 	spin_lock(&gmap->guest_table_lock);
991 	if (pmd_none(*pmdp)) {
992 		spin_unlock(&gmap->guest_table_lock);
993 		return NULL;
994 	}
995 
996 	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
997 	if (!pmd_leaf(*pmdp))
998 		spin_unlock(&gmap->guest_table_lock);
999 	return pmdp;
1000 }
1001 
1002 /**
1003  * gmap_pmd_op_end - release the guest_table_lock if needed
1004  * @gmap: pointer to the guest mapping meta data structure
1005  * @pmdp: pointer to the pmd
1006  */
1007 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
1008 {
1009 	if (pmd_leaf(*pmdp))
1010 		spin_unlock(&gmap->guest_table_lock);
1011 }
1012 
1013 /*
1014  * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 1015  * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
1016  * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1017  * @bits: notification bits to set
1018  *
1019  * Returns:
1020  * 0 if successfully protected
1021  * -EAGAIN if a fixup is needed
1022  * -EINVAL if unsupported notifier bits have been specified
1023  *
 1024  * Expected to be called with gmap->mm->mmap_lock in read and
1025  * guest_table_lock held.
1026  */
1027 static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
1028 			    pmd_t *pmdp, int prot, unsigned long bits)
1029 {
1030 	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
1031 	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
1032 	pmd_t new = *pmdp;
1033 
1034 	/* Fixup needed */
1035 	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
1036 		return -EAGAIN;
1037 
1038 	if (prot == PROT_NONE && !pmd_i) {
1039 		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
1040 		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
1041 	}
1042 
1043 	if (prot == PROT_READ && !pmd_p) {
1044 		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
1045 		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
1046 		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
1047 	}
1048 
1049 	if (bits & GMAP_NOTIFY_MPROT)
1050 		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
1051 
1052 	/* Shadow GMAP protection needs split PMDs */
1053 	if (bits & GMAP_NOTIFY_SHADOW)
1054 		return -EINVAL;
1055 
1056 	return 0;
1057 }
1058 
1059 /*
1060  * gmap_protect_pte - remove access rights to memory and set pgste bits
1061  * @gmap: pointer to guest mapping meta data structure
1062  * @gaddr: virtual address in the guest address space
1063  * @pmdp: pointer to the pmd associated with the pte
1064  * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1065  * @bits: notification bits to set
1066  *
1067  * Returns 0 if successfully protected, -ENOMEM if out of memory and
1068  * -EAGAIN if a fixup is needed.
1069  *
 1070  * Expected to be called with gmap->mm->mmap_lock in read
1071  */
1072 static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
1073 			    pmd_t *pmdp, int prot, unsigned long bits)
1074 {
1075 	int rc;
1076 	pte_t *ptep;
1077 	spinlock_t *ptl;
1078 	unsigned long pbits = 0;
1079 
1080 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1081 		return -EAGAIN;
1082 
1083 	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
1084 	if (!ptep)
1085 		return -ENOMEM;
1086 
1087 	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
1088 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
1089 	/* Protect and unlock. */
1090 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
1091 	gmap_pte_op_end(ptep, ptl);
1092 	return rc;
1093 }
1094 
1095 /*
1096  * gmap_protect_range - remove access rights to memory and set pgste bits
1097  * @gmap: pointer to guest mapping meta data structure
1098  * @gaddr: virtual address in the guest address space
1099  * @len: size of area
1100  * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1101  * @bits: pgste notification bits to set
1102  *
1103  * Returns 0 if successfully protected, -ENOMEM if out of memory and
1104  * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
1105  *
 1106  * Called with gmap->mm->mmap_lock in read.
1107  */
1108 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
1109 			      unsigned long len, int prot, unsigned long bits)
1110 {
1111 	unsigned long vmaddr, dist;
1112 	pmd_t *pmdp;
1113 	int rc;
1114 
1115 	BUG_ON(gmap_is_shadow(gmap));
1116 	while (len) {
1117 		rc = -EAGAIN;
1118 		pmdp = gmap_pmd_op_walk(gmap, gaddr);
1119 		if (pmdp) {
1120 			if (!pmd_leaf(*pmdp)) {
1121 				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
1122 						      bits);
1123 				if (!rc) {
1124 					len -= PAGE_SIZE;
1125 					gaddr += PAGE_SIZE;
1126 				}
1127 			} else {
1128 				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
1129 						      bits);
1130 				if (!rc) {
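					/*
					 * A whole huge page was protected:
					 * advance to the next segment.
					 */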
1131 					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
1132 					len = len < dist ? 0 : len - dist;
1133 					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
1134 				}
1135 			}
1136 			gmap_pmd_op_end(gmap, pmdp);
1137 		}
1138 		if (rc) {
1139 			if (rc == -EINVAL)
1140 				return rc;
1141 
1142 			/* -EAGAIN, fixup of userspace mm and gmap */
1143 			vmaddr = __gmap_translate(gmap, gaddr);
1144 			if (IS_ERR_VALUE(vmaddr))
1145 				return vmaddr;
1146 			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
1147 			if (rc)
1148 				return rc;
1149 		}
1150 	}
1151 	return 0;
1152 }
1153 
1154 /**
1155  * gmap_mprotect_notify - change access rights for a range of ptes and
1156  *                        call the notifier if any pte changes again
1157  * @gmap: pointer to guest mapping meta data structure
1158  * @gaddr: virtual address in the guest address space
1159  * @len: size of area
1160  * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1161  *
1162  * Returns 0 if for each page in the given range a gmap mapping exists,
1163  * the new access rights could be set and the notifier could be armed.
1164  * If the gmap mapping is missing for one or more pages -EFAULT is
1165  * returned. If no memory could be allocated -ENOMEM is returned.
1166  * This function establishes missing page table entries.
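 *
 * Example (illustrative sketch only): request notification for the two
 * prefix pages at guest address prefix -
 *
 *	rc = gmap_mprotect_notify(gmap, prefix, 2 * PAGE_SIZE, PROT_WRITE);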
1167  */
1168 int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
1169 			 unsigned long len, int prot)
1170 {
1171 	int rc;
1172 
1173 	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
1174 		return -EINVAL;
1175 	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
1176 		return -EINVAL;
1177 	mmap_read_lock(gmap->mm);
1178 	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
1179 	mmap_read_unlock(gmap->mm);
1180 	return rc;
1181 }
1182 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
1183 
1184 /**
1185  * gmap_read_table - get an unsigned long value from a guest page table using
1186  *                   absolute addressing, without marking the page referenced.
1187  * @gmap: pointer to guest mapping meta data structure
1188  * @gaddr: virtual address in the guest address space
1189  * @val: pointer to the unsigned long value to return
1190  *
 1191  * Returns 0 if the value was read, -ENOMEM if out of memory, -EFAULT if
 1192  * reading using the virtual address failed, and -EINVAL if called on a
 1193  * gmap shadow.
1194  *
1195  * Called with gmap->mm->mmap_lock in read.
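 *
 * Example (illustrative sketch only): read the 8-byte value at guest
 * address gaddr -
 *
 *	rc = gmap_read_table(gmap, gaddr, &val);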
1196  */
1197 int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
1198 {
1199 	unsigned long address, vmaddr;
1200 	spinlock_t *ptl;
1201 	pte_t *ptep, pte;
1202 	int rc;
1203 
1204 	if (gmap_is_shadow(gmap))
1205 		return -EINVAL;
1206 
1207 	while (1) {
1208 		rc = -EAGAIN;
1209 		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
1210 		if (ptep) {
1211 			pte = *ptep;
1212 			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
1213 				address = pte_val(pte) & PAGE_MASK;
1214 				address += gaddr & ~PAGE_MASK;
1215 				*val = *(unsigned long *)__va(address);
1216 				set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
1217 				/* Do *NOT* clear the _PAGE_INVALID bit! */
1218 				rc = 0;
1219 			}
1220 			gmap_pte_op_end(ptep, ptl);
1221 		}
1222 		if (!rc)
1223 			break;
1224 		vmaddr = __gmap_translate(gmap, gaddr);
1225 		if (IS_ERR_VALUE(vmaddr)) {
1226 			rc = vmaddr;
1227 			break;
1228 		}
1229 		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
1230 		if (rc)
1231 			break;
1232 	}
1233 	return rc;
1234 }
1235 EXPORT_SYMBOL_GPL(gmap_read_table);
1236 
1237 /**
1238  * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
1239  * @sg: pointer to the shadow guest address space structure
1240  * @vmaddr: vm address associated with the rmap
1241  * @rmap: pointer to the rmap structure
1242  *
1243  * Called with the sg->guest_table_lock
1244  */
1245 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
1246 				    struct gmap_rmap *rmap)
1247 {
1248 	struct gmap_rmap *temp;
1249 	void __rcu **slot;
1250 
1251 	BUG_ON(!gmap_is_shadow(sg));
1252 	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
1253 	if (slot) {
1254 		rmap->next = radix_tree_deref_slot_protected(slot,
1255 							&sg->guest_table_lock);
1256 		for (temp = rmap->next; temp; temp = temp->next) {
1257 			if (temp->raddr == rmap->raddr) {
1258 				kfree(rmap);
1259 				return;
1260 			}
1261 		}
1262 		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
1263 	} else {
1264 		rmap->next = NULL;
1265 		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
1266 				  rmap);
1267 	}
1268 }
1269 
1270 /**
1271  * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
1272  * @sg: pointer to the shadow guest address space structure
1273  * @raddr: rmap address in the shadow gmap
1274  * @paddr: address in the parent guest address space
1275  * @len: length of the memory area to protect
1276  *
1277  * Returns 0 if successfully protected and the rmap was created, -ENOMEM
1278  * if out of memory and -EFAULT if paddr is invalid.
1279  */
1280 static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
1281 			     unsigned long paddr, unsigned long len)
1282 {
1283 	struct gmap *parent;
1284 	struct gmap_rmap *rmap;
1285 	unsigned long vmaddr;
1286 	spinlock_t *ptl;
1287 	pte_t *ptep;
1288 	int rc;
1289 
1290 	BUG_ON(!gmap_is_shadow(sg));
1291 	parent = sg->parent;
1292 	while (len) {
1293 		vmaddr = __gmap_translate(parent, paddr);
1294 		if (IS_ERR_VALUE(vmaddr))
1295 			return vmaddr;
1296 		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
1297 		if (!rmap)
1298 			return -ENOMEM;
1299 		rmap->raddr = raddr;
1300 		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
1301 		if (rc) {
1302 			kfree(rmap);
1303 			return rc;
1304 		}
1305 		rc = -EAGAIN;
1306 		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
1307 		if (ptep) {
1308 			spin_lock(&sg->guest_table_lock);
1309 			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
1310 					     PGSTE_VSIE_BIT);
1311 			if (!rc)
1312 				gmap_insert_rmap(sg, vmaddr, rmap);
1313 			spin_unlock(&sg->guest_table_lock);
1314 			gmap_pte_op_end(ptep, ptl);
1315 		}
1316 		radix_tree_preload_end();
1317 		if (rc) {
1318 			kfree(rmap);
1319 			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
1320 			if (rc)
1321 				return rc;
1322 			continue;
1323 		}
1324 		paddr += PAGE_SIZE;
1325 		len -= PAGE_SIZE;
1326 	}
1327 	return 0;
1328 }
1329 
1330 #define _SHADOW_RMAP_MASK	0x7
1331 #define _SHADOW_RMAP_REGION1	0x5
1332 #define _SHADOW_RMAP_REGION2	0x4
1333 #define _SHADOW_RMAP_REGION3	0x3
1334 #define _SHADOW_RMAP_SEGMENT	0x2
1335 #define _SHADOW_RMAP_PGTABLE	0x1
1336 
1337 /**
1338  * gmap_idte_one - invalidate a single region or segment table entry
1339  * @asce: region or segment table *origin* + table-type bits
1340  * @vaddr: virtual address to identify the table entry to flush
1341  *
1342  * The invalid bit of a single region or segment table entry is set
1343  * and the associated TLB entries depending on the entry are flushed.
1344  * The table-type of the @asce identifies the portion of the @vaddr
1345  * that is used as the invalidation index.
1346  */
1347 static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
1348 {
1349 	asm volatile(
1350 		"	idte	%0,0,%1"
1351 		: : "a" (asce), "a" (vaddr) : "cc", "memory");
1352 }
1353 
1354 /**
1355  * gmap_unshadow_page - remove a page from a shadow page table
1356  * @sg: pointer to the shadow guest address space structure
1357  * @raddr: rmap address in the shadow guest address space
1358  *
1359  * Called with the sg->guest_table_lock
1360  */
1361 static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
1362 {
1363 	unsigned long *table;
1364 
1365 	BUG_ON(!gmap_is_shadow(sg));
1366 	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
1367 	if (!table || *table & _PAGE_INVALID)
1368 		return;
1369 	gmap_call_notifier(sg, raddr, raddr + PAGE_SIZE - 1);
1370 	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
1371 }
1372 
1373 /**
1374  * __gmap_unshadow_pgt - remove all entries from a shadow page table
1375  * @sg: pointer to the shadow guest address space structure
1376  * @raddr: rmap address in the shadow guest address space
1377  * @pgt: pointer to the start of a shadow page table
1378  *
1379  * Called with the sg->guest_table_lock
1380  */
1381 static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
1382 				unsigned long *pgt)
1383 {
1384 	int i;
1385 
1386 	BUG_ON(!gmap_is_shadow(sg));
1387 	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += PAGE_SIZE)
1388 		pgt[i] = _PAGE_INVALID;
1389 }
1390 
1391 /**
1392  * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1393  * @sg: pointer to the shadow guest address space structure
1394  * @raddr: address in the shadow guest address space
1395  *
1396  * Called with the sg->guest_table_lock
1397  */
1398 static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
1399 {
1400 	unsigned long *ste;
1401 	phys_addr_t sto, pgt;
1402 	struct ptdesc *ptdesc;
1403 
1404 	BUG_ON(!gmap_is_shadow(sg));
1405 	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
1406 	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
1407 		return;
1408 	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
1409 	sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
1410 	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
1411 	pgt = *ste & _SEGMENT_ENTRY_ORIGIN;
1412 	*ste = _SEGMENT_ENTRY_EMPTY;
1413 	__gmap_unshadow_pgt(sg, raddr, __va(pgt));
1414 	/* Free page table */
1415 	ptdesc = page_ptdesc(phys_to_page(pgt));
1416 	list_del(&ptdesc->pt_list);
1417 	page_table_free_pgste(ptdesc);
1418 }
1419 
1420 /**
1421  * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1422  * @sg: pointer to the shadow guest address space structure
1423  * @raddr: rmap address in the shadow guest address space
1424  * @sgt: pointer to the start of a shadow segment table
1425  *
1426  * Called with the sg->guest_table_lock
1427  */
1428 static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1429 				unsigned long *sgt)
1430 {
1431 	struct ptdesc *ptdesc;
1432 	phys_addr_t pgt;
1433 	int i;
1434 
1435 	BUG_ON(!gmap_is_shadow(sg));
1436 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
1437 		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
1438 			continue;
1439 		pgt = sgt[i] & _REGION_ENTRY_ORIGIN;
1440 		sgt[i] = _SEGMENT_ENTRY_EMPTY;
1441 		__gmap_unshadow_pgt(sg, raddr, __va(pgt));
1442 		/* Free page table */
1443 		ptdesc = page_ptdesc(phys_to_page(pgt));
1444 		list_del(&ptdesc->pt_list);
1445 		page_table_free_pgste(ptdesc);
1446 	}
1447 }
1448 
1449 /**
1450  * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1451  * @sg: pointer to the shadow guest address space structure
1452  * @raddr: rmap address in the shadow guest address space
1453  *
1454  * Called with the shadow->guest_table_lock
1455  */
1456 static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1457 {
1458 	unsigned long r3o, *r3e;
1459 	phys_addr_t sgt;
1460 	struct page *page;
1461 
1462 	BUG_ON(!gmap_is_shadow(sg));
1463 	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
1464 	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
1465 		return;
1466 	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
1467 	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
1468 	gmap_idte_one(__pa(r3o) | _ASCE_TYPE_REGION3, raddr);
1469 	sgt = *r3e & _REGION_ENTRY_ORIGIN;
1470 	*r3e = _REGION3_ENTRY_EMPTY;
1471 	__gmap_unshadow_sgt(sg, raddr, __va(sgt));
1472 	/* Free segment table */
1473 	page = phys_to_page(sgt);
1474 	list_del(&page->lru);
1475 	__free_pages(page, CRST_ALLOC_ORDER);
1476 }
1477 
1478 /**
1479  * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1480  * @sg: pointer to the shadow guest address space structure
1481  * @raddr: address in the shadow guest address space
1482  * @r3t: pointer to the start of a shadow region-3 table
1483  *
1484  * Called with the sg->guest_table_lock
1485  */
1486 static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1487 				unsigned long *r3t)
1488 {
1489 	struct page *page;
1490 	phys_addr_t sgt;
1491 	int i;
1492 
1493 	BUG_ON(!gmap_is_shadow(sg));
1494 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
1495 		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
1496 			continue;
1497 		sgt = r3t[i] & _REGION_ENTRY_ORIGIN;
1498 		r3t[i] = _REGION3_ENTRY_EMPTY;
1499 		__gmap_unshadow_sgt(sg, raddr, __va(sgt));
1500 		/* Free segment table */
1501 		page = phys_to_page(sgt);
1502 		list_del(&page->lru);
1503 		__free_pages(page, CRST_ALLOC_ORDER);
1504 	}
1505 }
1506 
1507 /**
1508  * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1509  * @sg: pointer to the shadow guest address space structure
1510  * @raddr: rmap address in the shadow guest address space
1511  *
1512  * Called with the sg->guest_table_lock
1513  */
1514 static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1515 {
1516 	unsigned long r2o, *r2e;
1517 	phys_addr_t r3t;
1518 	struct page *page;
1519 
1520 	BUG_ON(!gmap_is_shadow(sg));
1521 	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
1522 	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
1523 		return;
1524 	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
1525 	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
1526 	gmap_idte_one(__pa(r2o) | _ASCE_TYPE_REGION2, raddr);
1527 	r3t = *r2e & _REGION_ENTRY_ORIGIN;
1528 	*r2e = _REGION2_ENTRY_EMPTY;
1529 	__gmap_unshadow_r3t(sg, raddr, __va(r3t));
1530 	/* Free region 3 table */
1531 	page = phys_to_page(r3t);
1532 	list_del(&page->lru);
1533 	__free_pages(page, CRST_ALLOC_ORDER);
1534 }
1535 
1536 /**
1537  * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1538  * @sg: pointer to the shadow guest address space structure
1539  * @raddr: rmap address in the shadow guest address space
1540  * @r2t: pointer to the start of a shadow region-2 table
1541  *
1542  * Called with the sg->guest_table_lock
1543  */
1544 static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1545 				unsigned long *r2t)
1546 {
1547 	phys_addr_t r3t;
1548 	struct page *page;
1549 	int i;
1550 
1551 	BUG_ON(!gmap_is_shadow(sg));
1552 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
1553 		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
1554 			continue;
1555 		r3t = r2t[i] & _REGION_ENTRY_ORIGIN;
1556 		r2t[i] = _REGION2_ENTRY_EMPTY;
1557 		__gmap_unshadow_r3t(sg, raddr, __va(r3t));
1558 		/* Free region 3 table */
1559 		page = phys_to_page(r3t);
1560 		list_del(&page->lru);
1561 		__free_pages(page, CRST_ALLOC_ORDER);
1562 	}
1563 }
1564 
1565 /**
1566  * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1567  * @sg: pointer to the shadow guest address space structure
1568  * @raddr: rmap address in the shadow guest address space
1569  *
1570  * Called with the sg->guest_table_lock
1571  */
1572 static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1573 {
1574 	unsigned long r1o, *r1e;
1575 	struct page *page;
1576 	phys_addr_t r2t;
1577 
1578 	BUG_ON(!gmap_is_shadow(sg));
1579 	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
1580 	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
1581 		return;
1582 	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
1583 	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
1584 	gmap_idte_one(__pa(r1o) | _ASCE_TYPE_REGION1, raddr);
1585 	r2t = *r1e & _REGION_ENTRY_ORIGIN;
1586 	*r1e = _REGION1_ENTRY_EMPTY;
1587 	__gmap_unshadow_r2t(sg, raddr, __va(r2t));
1588 	/* Free region 2 table */
1589 	page = phys_to_page(r2t);
1590 	list_del(&page->lru);
1591 	__free_pages(page, CRST_ALLOC_ORDER);
1592 }
1593 
1594 /**
1595  * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1596  * @sg: pointer to the shadow guest address space structure
1597  * @raddr: rmap address in the shadow guest address space
1598  * @r1t: pointer to the start of a shadow region-1 table
1599  *
1600  * Called with the shadow->guest_table_lock
1601  */
1602 static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1603 				unsigned long *r1t)
1604 {
1605 	unsigned long asce;
1606 	struct page *page;
1607 	phys_addr_t r2t;
1608 	int i;
1609 
1610 	BUG_ON(!gmap_is_shadow(sg));
1611 	asce = __pa(r1t) | _ASCE_TYPE_REGION1;
1612 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
1613 		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
1614 			continue;
1615 		r2t = r1t[i] & _REGION_ENTRY_ORIGIN;
1616 		__gmap_unshadow_r2t(sg, raddr, __va(r2t));
1617 		/* Clear entry and flush translation r1t -> r2t */
1618 		gmap_idte_one(asce, raddr);
1619 		r1t[i] = _REGION1_ENTRY_EMPTY;
1620 		/* Free region 2 table */
1621 		page = phys_to_page(r2t);
1622 		list_del(&page->lru);
1623 		__free_pages(page, CRST_ALLOC_ORDER);
1624 	}
1625 }
1626 
1627 /**
1628  * gmap_unshadow - remove a shadow page table completely
1629  * @sg: pointer to the shadow guest address space structure
1630  *
1631  * Called with sg->guest_table_lock
1632  */
1633 static void gmap_unshadow(struct gmap *sg)
1634 {
1635 	unsigned long *table;
1636 
1637 	BUG_ON(!gmap_is_shadow(sg));
1638 	if (sg->removed)
1639 		return;
1640 	sg->removed = 1;
1641 	gmap_call_notifier(sg, 0, -1UL);
1642 	gmap_flush_tlb(sg);
1643 	table = __va(sg->asce & _ASCE_ORIGIN);
1644 	switch (sg->asce & _ASCE_TYPE_MASK) {
1645 	case _ASCE_TYPE_REGION1:
1646 		__gmap_unshadow_r1t(sg, 0, table);
1647 		break;
1648 	case _ASCE_TYPE_REGION2:
1649 		__gmap_unshadow_r2t(sg, 0, table);
1650 		break;
1651 	case _ASCE_TYPE_REGION3:
1652 		__gmap_unshadow_r3t(sg, 0, table);
1653 		break;
1654 	case _ASCE_TYPE_SEGMENT:
1655 		__gmap_unshadow_sgt(sg, 0, table);
1656 		break;
1657 	}
1658 }
1659 
1660 /**
1661  * gmap_find_shadow - find a specific asce in the list of shadow tables
1662  * @parent: pointer to the parent gmap
1663  * @asce: ASCE for which the shadow table is created
1664  * @edat_level: edat level to be used for the shadow translation
1665  *
1666  * Returns the pointer to a gmap if a shadow table with the given asce is
1667  * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1668  * otherwise NULL
1669  */
1670 static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1671 				     int edat_level)
1672 {
1673 	struct gmap *sg;
1674 
1675 	list_for_each_entry(sg, &parent->children, list) {
1676 		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1677 		    sg->removed)
1678 			continue;
1679 		if (!sg->initialized)
1680 			return ERR_PTR(-EAGAIN);
1681 		refcount_inc(&sg->ref_count);
1682 		return sg;
1683 	}
1684 	return NULL;
1685 }
1686 
1687 /**
1688  * gmap_shadow_valid - check if a shadow guest address space matches the
1689  *                     given properties and is still valid
1690  * @sg: pointer to the shadow guest address space structure
1691  * @asce: ASCE for which the shadow table is requested
1692  * @edat_level: edat level to be used for the shadow translation
1693  *
1694  * Returns 1 if the gmap shadow is still valid and matches the given
1695  * properties, the caller can continue using it. Returns 0 otherwise, the
1696  * caller has to request a new shadow gmap in this case.
1697  *
1698  */
1699 int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1700 {
1701 	if (sg->removed)
1702 		return 0;
1703 	return sg->orig_asce == asce && sg->edat_level == edat_level;
1704 }
1705 EXPORT_SYMBOL_GPL(gmap_shadow_valid);
1706 
1707 /**
1708  * gmap_shadow - create/find a shadow guest address space
1709  * @parent: pointer to the parent gmap
1710  * @asce: ASCE for which the shadow table is created
1711  * @edat_level: edat level to be used for the shadow translation
1712  *
1713  * The pages of the top level page table referred by the asce parameter
1714  * will be set to read-only and marked in the PGSTEs of the kvm process.
1715  * The shadow table will be removed automatically on any change to the
1716  * PTE mapping for the source table.
1717  *
1718  * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1719  * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1720  * parent gmap table could not be protected.
1721  */
1722 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1723 			 int edat_level)
1724 {
1725 	struct gmap *sg, *new;
1726 	unsigned long limit;
1727 	int rc;
1728 
1729 	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
1730 	BUG_ON(gmap_is_shadow(parent));
1731 	spin_lock(&parent->shadow_lock);
1732 	sg = gmap_find_shadow(parent, asce, edat_level);
1733 	spin_unlock(&parent->shadow_lock);
1734 	if (sg)
1735 		return sg;
1736 	/* Create a new shadow gmap */
1737 	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1738 	if (asce & _ASCE_REAL_SPACE)
1739 		limit = -1UL;
1740 	new = gmap_alloc(limit);
1741 	if (!new)
1742 		return ERR_PTR(-ENOMEM);
1743 	new->mm = parent->mm;
1744 	new->parent = gmap_get(parent);
1745 	new->private = parent->private;
1746 	new->orig_asce = asce;
1747 	new->edat_level = edat_level;
1748 	new->initialized = false;
1749 	spin_lock(&parent->shadow_lock);
1750 	/* Recheck if another CPU created the same shadow */
1751 	sg = gmap_find_shadow(parent, asce, edat_level);
1752 	if (sg) {
1753 		spin_unlock(&parent->shadow_lock);
1754 		gmap_free(new);
1755 		return sg;
1756 	}
1757 	if (asce & _ASCE_REAL_SPACE) {
1758 		/* only allow one real-space gmap shadow */
1759 		list_for_each_entry(sg, &parent->children, list) {
1760 			if (sg->orig_asce & _ASCE_REAL_SPACE) {
1761 				spin_lock(&sg->guest_table_lock);
1762 				gmap_unshadow(sg);
1763 				spin_unlock(&sg->guest_table_lock);
1764 				list_del(&sg->list);
1765 				gmap_put(sg);
1766 				break;
1767 			}
1768 		}
1769 	}
1770 	refcount_set(&new->ref_count, 2);
1771 	list_add(&new->list, &parent->children);
1772 	if (asce & _ASCE_REAL_SPACE) {
1773 		/* nothing to protect, return right away */
1774 		new->initialized = true;
1775 		spin_unlock(&parent->shadow_lock);
1776 		return new;
1777 	}
1778 	spin_unlock(&parent->shadow_lock);
1779 	/* protect after insertion, so it will get properly invalidated */
1780 	mmap_read_lock(parent->mm);
1781 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1782 				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
1783 				PROT_READ, GMAP_NOTIFY_SHADOW);
1784 	mmap_read_unlock(parent->mm);
1785 	spin_lock(&parent->shadow_lock);
1786 	new->initialized = true;
1787 	if (rc) {
1788 		list_del(&new->list);
1789 		gmap_free(new);
1790 		new = ERR_PTR(rc);
1791 	}
1792 	spin_unlock(&parent->shadow_lock);
1793 	return new;
1794 }
1795 EXPORT_SYMBOL_GPL(gmap_shadow);
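
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * VSIE-style caller is assumed to cache the returned shadow gmap and to
 * revalidate it with gmap_shadow_valid() before reuse; "cached_sg",
 * "parent_gmap" and the surrounding logic are hypothetical.
 *
 *	if (cached_sg && gmap_shadow_valid(cached_sg, asce, edat_level))
 *		return cached_sg;
 *	if (cached_sg)
 *		gmap_put(cached_sg);		drop the stale reference
 *	cached_sg = gmap_shadow(parent_gmap, asce, edat_level);
 *	if (IS_ERR(cached_sg))
 *		return cached_sg;		-ENOMEM, -EAGAIN or -EFAULT
 *	use cached_sg and drop it later with gmap_put()
 */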
1796 
1797 /**
1798  * gmap_shadow_r2t - create an empty shadow region 2 table
1799  * @sg: pointer to the shadow guest address space structure
1800  * @saddr: faulting address in the shadow gmap
1801  * @r2t: parent gmap address of the region 2 table to get shadowed
1802  * @fake: r2t references contiguous guest memory block, not a r2t
1803  *
1804  * The r2t parameter specifies the address of the source table. The
1805  * four pages of the source table are made read-only in the parent gmap
1806  * address space. A write to the source table area @r2t will automatically
1807  * remove the shadow r2 table and all of its descendants.
1808  *
1809  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1810  * shadow table structure is incomplete, -ENOMEM if out of memory and
1811  * -EFAULT if an address in the parent gmap could not be resolved.
1812  *
1813  * Called with sg->mm->mmap_lock in read.
1814  */
1815 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1816 		    int fake)
1817 {
1818 	unsigned long raddr, origin, offset, len;
1819 	unsigned long *table;
1820 	phys_addr_t s_r2t;
1821 	struct page *page;
1822 	int rc;
1823 
1824 	BUG_ON(!gmap_is_shadow(sg));
1825 	/* Allocate a shadow region second table */
1826 	page = gmap_alloc_crst();
1827 	if (!page)
1828 		return -ENOMEM;
1829 	page->index = r2t & _REGION_ENTRY_ORIGIN;
1830 	if (fake)
1831 		page->index |= GMAP_SHADOW_FAKE_TABLE;
1832 	s_r2t = page_to_phys(page);
1833 	/* Install shadow region second table */
1834 	spin_lock(&sg->guest_table_lock);
1835 	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1836 	if (!table) {
1837 		rc = -EAGAIN;		/* Race with unshadow */
1838 		goto out_free;
1839 	}
1840 	if (!(*table & _REGION_ENTRY_INVALID)) {
1841 		rc = 0;			/* Already established */
1842 		goto out_free;
1843 	} else if (*table & _REGION_ENTRY_ORIGIN) {
1844 		rc = -EAGAIN;		/* Race with shadow */
1845 		goto out_free;
1846 	}
1847 	crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY);
1848 	/* mark as invalid as long as the parent table is not protected */
1849 	*table = s_r2t | _REGION_ENTRY_LENGTH |
1850 		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
1851 	if (sg->edat_level >= 1)
1852 		*table |= (r2t & _REGION_ENTRY_PROTECT);
1853 	list_add(&page->lru, &sg->crst_list);
1854 	if (fake) {
1855 		/* nothing to protect for fake tables */
1856 		*table &= ~_REGION_ENTRY_INVALID;
1857 		spin_unlock(&sg->guest_table_lock);
1858 		return 0;
1859 	}
1860 	spin_unlock(&sg->guest_table_lock);
1861 	/* Make r2t read-only in parent gmap page table */
1862 	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
1863 	origin = r2t & _REGION_ENTRY_ORIGIN;
1864 	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1865 	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1866 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1867 	spin_lock(&sg->guest_table_lock);
1868 	if (!rc) {
1869 		table = gmap_table_walk(sg, saddr, 4);
1870 		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r2t)
1871 			rc = -EAGAIN;		/* Race with unshadow */
1872 		else
1873 			*table &= ~_REGION_ENTRY_INVALID;
1874 	} else {
1875 		gmap_unshadow_r2t(sg, raddr);
1876 	}
1877 	spin_unlock(&sg->guest_table_lock);
1878 	return rc;
1879 out_free:
1880 	spin_unlock(&sg->guest_table_lock);
1881 	__free_pages(page, CRST_ALLOC_ORDER);
1882 	return rc;
1883 }
1884 EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
1885 
1886 /**
1887  * gmap_shadow_r3t - create a shadow region 3 table
1888  * @sg: pointer to the shadow guest address space structure
1889  * @saddr: faulting address in the shadow gmap
1890  * @r3t: parent gmap address of the region 3 table to get shadowed
1891  * @fake: r3t references contiguous guest memory block, not a r3t
1892  *
1893  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1894  * shadow table structure is incomplete, -ENOMEM if out of memory and
1895  * -EFAULT if an address in the parent gmap could not be resolved.
1896  *
1897  * Called with sg->mm->mmap_lock in read.
1898  */
1899 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1900 		    int fake)
1901 {
1902 	unsigned long raddr, origin, offset, len;
1903 	unsigned long *table;
1904 	phys_addr_t s_r3t;
1905 	struct page *page;
1906 	int rc;
1907 
1908 	BUG_ON(!gmap_is_shadow(sg));
1909 	/* Allocate a shadow region third table */
1910 	page = gmap_alloc_crst();
1911 	if (!page)
1912 		return -ENOMEM;
1913 	page->index = r3t & _REGION_ENTRY_ORIGIN;
1914 	if (fake)
1915 		page->index |= GMAP_SHADOW_FAKE_TABLE;
1916 	s_r3t = page_to_phys(page);
1917 	/* Install shadow region third table */
1918 	spin_lock(&sg->guest_table_lock);
1919 	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1920 	if (!table) {
1921 		rc = -EAGAIN;		/* Race with unshadow */
1922 		goto out_free;
1923 	}
1924 	if (!(*table & _REGION_ENTRY_INVALID)) {
1925 		rc = 0;			/* Already established */
1926 		goto out_free;
1927 	} else if (*table & _REGION_ENTRY_ORIGIN) {
1928 		rc = -EAGAIN;		/* Race with shadow */
1929 		goto out_free;
1930 	}
1931 	crst_table_init(__va(s_r3t), _REGION3_ENTRY_EMPTY);
1932 	/* mark as invalid as long as the parent table is not protected */
1933 	*table = s_r3t | _REGION_ENTRY_LENGTH |
1934 		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
1935 	if (sg->edat_level >= 1)
1936 		*table |= (r3t & _REGION_ENTRY_PROTECT);
1937 	list_add(&page->lru, &sg->crst_list);
1938 	if (fake) {
1939 		/* nothing to protect for fake tables */
1940 		*table &= ~_REGION_ENTRY_INVALID;
1941 		spin_unlock(&sg->guest_table_lock);
1942 		return 0;
1943 	}
1944 	spin_unlock(&sg->guest_table_lock);
1945 	/* Make r3t read-only in parent gmap page table */
1946 	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
1947 	origin = r3t & _REGION_ENTRY_ORIGIN;
1948 	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1949 	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1950 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1951 	spin_lock(&sg->guest_table_lock);
1952 	if (!rc) {
1953 		table = gmap_table_walk(sg, saddr, 3);
1954 		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r3t)
1955 			rc = -EAGAIN;		/* Race with unshadow */
1956 		else
1957 			*table &= ~_REGION_ENTRY_INVALID;
1958 	} else {
1959 		gmap_unshadow_r3t(sg, raddr);
1960 	}
1961 	spin_unlock(&sg->guest_table_lock);
1962 	return rc;
1963 out_free:
1964 	spin_unlock(&sg->guest_table_lock);
1965 	__free_pages(page, CRST_ALLOC_ORDER);
1966 	return rc;
1967 }
1968 EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1969 
1970 /**
1971  * gmap_shadow_sgt - create a shadow segment table
1972  * @sg: pointer to the shadow guest address space structure
1973  * @saddr: faulting address in the shadow gmap
1974  * @sgt: parent gmap address of the segment table to get shadowed
1975  * @fake: sgt references contiguous guest memory block, not a sgt
1976  *
1977  * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1978  * shadow table structure is incomplete, -ENOMEM if out of memory and
1979  * -EFAULT if an address in the parent gmap could not be resolved.
1980  *
1981  * Called with sg->mm->mmap_lock in read.
1982  */
1983 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1984 		    int fake)
1985 {
1986 	unsigned long raddr, origin, offset, len;
1987 	unsigned long *table;
1988 	phys_addr_t s_sgt;
1989 	struct page *page;
1990 	int rc;
1991 
1992 	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
1993 	/* Allocate a shadow segment table */
1994 	page = gmap_alloc_crst();
1995 	if (!page)
1996 		return -ENOMEM;
1997 	page->index = sgt & _REGION_ENTRY_ORIGIN;
1998 	if (fake)
1999 		page->index |= GMAP_SHADOW_FAKE_TABLE;
2000 	s_sgt = page_to_phys(page);
2001 	/* Install shadow segment table */
2002 	spin_lock(&sg->guest_table_lock);
2003 	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
2004 	if (!table) {
2005 		rc = -EAGAIN;		/* Race with unshadow */
2006 		goto out_free;
2007 	}
2008 	if (!(*table & _REGION_ENTRY_INVALID)) {
2009 		rc = 0;			/* Already established */
2010 		goto out_free;
2011 	} else if (*table & _REGION_ENTRY_ORIGIN) {
2012 		rc = -EAGAIN;		/* Race with shadow */
2013 		goto out_free;
2014 	}
2015 	crst_table_init(__va(s_sgt), _SEGMENT_ENTRY_EMPTY);
2016 	/* mark as invalid as long as the parent table is not protected */
2017 	*table = s_sgt | _REGION_ENTRY_LENGTH |
2018 		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
2019 	if (sg->edat_level >= 1)
2020 		*table |= sgt & _REGION_ENTRY_PROTECT;
2021 	list_add(&page->lru, &sg->crst_list);
2022 	if (fake) {
2023 		/* nothing to protect for fake tables */
2024 		*table &= ~_REGION_ENTRY_INVALID;
2025 		spin_unlock(&sg->guest_table_lock);
2026 		return 0;
2027 	}
2028 	spin_unlock(&sg->guest_table_lock);
2029 	/* Make sgt read-only in parent gmap page table */
2030 	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
2031 	origin = sgt & _REGION_ENTRY_ORIGIN;
2032 	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
2033 	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
2034 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
2035 	spin_lock(&sg->guest_table_lock);
2036 	if (!rc) {
2037 		table = gmap_table_walk(sg, saddr, 2);
2038 		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_sgt)
2039 			rc = -EAGAIN;		/* Race with unshadow */
2040 		else
2041 			*table &= ~_REGION_ENTRY_INVALID;
2042 	} else {
2043 		gmap_unshadow_sgt(sg, raddr);
2044 	}
2045 	spin_unlock(&sg->guest_table_lock);
2046 	return rc;
2047 out_free:
2048 	spin_unlock(&sg->guest_table_lock);
2049 	__free_pages(page, CRST_ALLOC_ORDER);
2050 	return rc;
2051 }
2052 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
2053 
2054 /**
2055  * gmap_shadow_pgt_lookup - find a shadow page table
2056  * @sg: pointer to the shadow guest address space structure
2057  * @saddr: the address in the shadow guest address space
2058  * @pgt: parent gmap address of the page table to get shadowed
2059  * @dat_protection: if the pgtable is marked as protected by dat
2060  * @fake: pgt references contiguous guest memory block, not a pgtable
2061  *
2062  * Returns 0 if the shadow page table was found and -EAGAIN if the page
2063  * table was not found.
2064  *
2065  * Called with sg->mm->mmap_lock in read.
2066  */
2067 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
2068 			   unsigned long *pgt, int *dat_protection,
2069 			   int *fake)
2070 {
2071 	unsigned long *table;
2072 	struct page *page;
2073 	int rc;
2074 
2075 	BUG_ON(!gmap_is_shadow(sg));
2076 	spin_lock(&sg->guest_table_lock);
2077 	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2078 	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
2079 		/* Shadow page tables are full pages (pte+pgste) */
2080 		page = pfn_to_page(*table >> PAGE_SHIFT);
2081 		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
2082 		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
2083 		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
2084 		rc = 0;
2085 	} else  {
2086 		rc = -EAGAIN;
2087 	}
2088 	spin_unlock(&sg->guest_table_lock);
2089 	return rc;
2090 
2091 }
2092 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
2093 
2094 /**
2095  * gmap_shadow_pgt - instantiate a shadow page table
2096  * @sg: pointer to the shadow guest address space structure
2097  * @saddr: faulting address in the shadow gmap
2098  * @pgt: parent gmap address of the page table to get shadowed
2099  * @fake: pgt references contiguous guest memory block, not a pgtable
2100  *
2101  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2102  * shadow table structure is incomplete, -ENOMEM if out of memory and
2103  * -EFAULT if an address in the parent gmap could not be resolved.
2104  *
2105  * Called with sg->mm->mmap_lock in read.
2106  */
2107 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2108 		    int fake)
2109 {
2110 	unsigned long raddr, origin;
2111 	unsigned long *table;
2112 	struct ptdesc *ptdesc;
2113 	phys_addr_t s_pgt;
2114 	int rc;
2115 
2116 	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
2117 	/* Allocate a shadow page table */
2118 	ptdesc = page_table_alloc_pgste(sg->mm);
2119 	if (!ptdesc)
2120 		return -ENOMEM;
2121 	ptdesc->pt_index = pgt & _SEGMENT_ENTRY_ORIGIN;
2122 	if (fake)
2123 		ptdesc->pt_index |= GMAP_SHADOW_FAKE_TABLE;
2124 	s_pgt = page_to_phys(ptdesc_page(ptdesc));
2125 	/* Install shadow page table */
2126 	spin_lock(&sg->guest_table_lock);
2127 	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2128 	if (!table) {
2129 		rc = -EAGAIN;		/* Race with unshadow */
2130 		goto out_free;
2131 	}
2132 	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
2133 		rc = 0;			/* Already established */
2134 		goto out_free;
2135 	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
2136 		rc = -EAGAIN;		/* Race with shadow */
2137 		goto out_free;
2138 	}
2139 	/* mark as invalid as long as the parent table is not protected */
2140 	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
2141 		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
2142 	list_add(&ptdesc->pt_list, &sg->pt_list);
2143 	if (fake) {
2144 		/* nothing to protect for fake tables */
2145 		*table &= ~_SEGMENT_ENTRY_INVALID;
2146 		spin_unlock(&sg->guest_table_lock);
2147 		return 0;
2148 	}
2149 	spin_unlock(&sg->guest_table_lock);
2150 	/* Make pgt read-only in parent gmap page table (not the pgste) */
2151 	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
2152 	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
2153 	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
2154 	spin_lock(&sg->guest_table_lock);
2155 	if (!rc) {
2156 		table = gmap_table_walk(sg, saddr, 1);
2157 		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != s_pgt)
2158 			rc = -EAGAIN;		/* Race with unshadow */
2159 		else
2160 			*table &= ~_SEGMENT_ENTRY_INVALID;
2161 	} else {
2162 		gmap_unshadow_pgt(sg, raddr);
2163 	}
2164 	spin_unlock(&sg->guest_table_lock);
2165 	return rc;
2166 out_free:
2167 	spin_unlock(&sg->guest_table_lock);
2168 	page_table_free_pgste(ptdesc);
2169 	return rc;
2170 
2171 }
2172 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
2173 
2174 /**
2175  * gmap_shadow_page - create a shadow page mapping
2176  * @sg: pointer to the shadow guest address space structure
2177  * @saddr: faulting address in the shadow gmap
2178  * @pte: pte in parent gmap address space to get shadowed
2179  *
2180  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2181  * shadow table structure is incomplete, -ENOMEM if out of memory and
2182  * -EFAULT if an address in the parent gmap could not be resolved.
2183  *
2184  * Called with sg->mm->mmap_lock in read.
2185  */
2186 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
2187 {
2188 	struct gmap *parent;
2189 	struct gmap_rmap *rmap;
2190 	unsigned long vmaddr, paddr;
2191 	spinlock_t *ptl;
2192 	pte_t *sptep, *tptep;
2193 	int prot;
2194 	int rc;
2195 
2196 	BUG_ON(!gmap_is_shadow(sg));
2197 	parent = sg->parent;
2198 	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
2199 
2200 	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
2201 	if (!rmap)
2202 		return -ENOMEM;
2203 	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
2204 
2205 	while (1) {
2206 		paddr = pte_val(pte) & PAGE_MASK;
2207 		vmaddr = __gmap_translate(parent, paddr);
2208 		if (IS_ERR_VALUE(vmaddr)) {
2209 			rc = vmaddr;
2210 			break;
2211 		}
2212 		rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
2213 		if (rc)
2214 			break;
2215 		rc = -EAGAIN;
2216 		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
2217 		if (sptep) {
2218 			spin_lock(&sg->guest_table_lock);
2219 			/* Get page table pointer */
2220 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
2221 			if (!tptep) {
2222 				spin_unlock(&sg->guest_table_lock);
2223 				gmap_pte_op_end(sptep, ptl);
2224 				radix_tree_preload_end();
2225 				break;
2226 			}
2227 			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
2228 			if (rc > 0) {
2229 				/* Success and a new mapping */
2230 				gmap_insert_rmap(sg, vmaddr, rmap);
2231 				rmap = NULL;
2232 				rc = 0;
2233 			}
2234 			gmap_pte_op_end(sptep, ptl);
2235 			spin_unlock(&sg->guest_table_lock);
2236 		}
2237 		radix_tree_preload_end();
2238 		if (!rc)
2239 			break;
2240 		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
2241 		if (rc)
2242 			break;
2243 	}
2244 	kfree(rmap);
2245 	return rc;
2246 }
2247 EXPORT_SYMBOL_GPL(gmap_shadow_page);
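
/*
 * Fault handling sketch (illustrative only, not part of the original file):
 * a shadow fault resolver in the spirit of KVM's VSIE support would first
 * look up the shadow page table for the faulting address and, if the table
 * hierarchy is incomplete (-EAGAIN), build the missing levels from the
 * guest's own tables with gmap_shadow_r2t()/r3t()/sgt()/pgt() before
 * shadowing the pte itself. "build_shadow_tables" and the pte derivation
 * are hypothetical; error handling is abridged.
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		rc = build_shadow_tables(sg, saddr);
 *	if (!rc) {
 *		pte = pte derived from the guest page table entry at pgt
 *		rc = gmap_shadow_page(sg, saddr, pte);
 *	}
 */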
2248 
2249 /*
2250  * gmap_shadow_notify - handle notifications for shadow gmap
2251  *
2252  * Called with sg->parent->shadow_lock.
2253  */
2254 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2255 			       unsigned long gaddr)
2256 {
2257 	struct gmap_rmap *rmap, *rnext, *head;
2258 	unsigned long start, end, bits, raddr;
2259 
2260 	BUG_ON(!gmap_is_shadow(sg));
2261 
2262 	spin_lock(&sg->guest_table_lock);
2263 	if (sg->removed) {
2264 		spin_unlock(&sg->guest_table_lock);
2265 		return;
2266 	}
2267 	/* Check for top level table */
2268 	start = sg->orig_asce & _ASCE_ORIGIN;
2269 	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
2270 	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2271 	    gaddr < end) {
2272 		/* The complete shadow table has to go */
2273 		gmap_unshadow(sg);
2274 		spin_unlock(&sg->guest_table_lock);
2275 		list_del(&sg->list);
2276 		gmap_put(sg);
2277 		return;
2278 	}
2279 	/* Remove the page table tree for one specific entry */
2280 	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
2281 	gmap_for_each_rmap_safe(rmap, rnext, head) {
2282 		bits = rmap->raddr & _SHADOW_RMAP_MASK;
2283 		raddr = rmap->raddr ^ bits;
2284 		switch (bits) {
2285 		case _SHADOW_RMAP_REGION1:
2286 			gmap_unshadow_r2t(sg, raddr);
2287 			break;
2288 		case _SHADOW_RMAP_REGION2:
2289 			gmap_unshadow_r3t(sg, raddr);
2290 			break;
2291 		case _SHADOW_RMAP_REGION3:
2292 			gmap_unshadow_sgt(sg, raddr);
2293 			break;
2294 		case _SHADOW_RMAP_SEGMENT:
2295 			gmap_unshadow_pgt(sg, raddr);
2296 			break;
2297 		case _SHADOW_RMAP_PGTABLE:
2298 			gmap_unshadow_page(sg, raddr);
2299 			break;
2300 		}
2301 		kfree(rmap);
2302 	}
2303 	spin_unlock(&sg->guest_table_lock);
2304 }
2305 
2306 /**
2307  * ptep_notify - call all invalidation callbacks for a specific pte.
2308  * @mm: pointer to the process mm_struct
2309  * @vmaddr: virtual address in the process address space
2310  * @pte: pointer to the page table entry
2311  * @bits: bits from the pgste that caused the notify call
2312  *
2313  * This function is assumed to be called with the page table lock held
2314  * for the pte to notify.
2315  */
2316 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2317 		 pte_t *pte, unsigned long bits)
2318 {
2319 	unsigned long offset, gaddr = 0;
2320 	unsigned long *table;
2321 	struct gmap *gmap, *sg, *next;
2322 
2323 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
2324 	offset = offset * (PAGE_SIZE / sizeof(pte_t));
2325 	rcu_read_lock();
2326 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2327 		spin_lock(&gmap->guest_table_lock);
2328 		table = radix_tree_lookup(&gmap->host_to_guest,
2329 					  vmaddr >> PMD_SHIFT);
2330 		if (table)
2331 			gaddr = __gmap_segment_gaddr(table) + offset;
2332 		spin_unlock(&gmap->guest_table_lock);
2333 		if (!table)
2334 			continue;
2335 
2336 		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2337 			spin_lock(&gmap->shadow_lock);
2338 			list_for_each_entry_safe(sg, next,
2339 						 &gmap->children, list)
2340 				gmap_shadow_notify(sg, vmaddr, gaddr);
2341 			spin_unlock(&gmap->shadow_lock);
2342 		}
2343 		if (bits & PGSTE_IN_BIT)
2344 			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2345 	}
2346 	rcu_read_unlock();
2347 }
2348 EXPORT_SYMBOL_GPL(ptep_notify);
2349 
2350 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2351 			     unsigned long gaddr)
2352 {
2353 	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
2354 	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2355 }
2356 
2357 /**
2358  * gmap_pmdp_xchg - exchange a gmap pmd with another
2359  * @gmap: pointer to the guest address space structure
2360  * @pmdp: pointer to the pmd entry
2361  * @new: replacement entry
2362  * @gaddr: the affected guest address
2363  *
2364  * This function is assumed to be called with the guest_table_lock
2365  * held.
2366  */
2367 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2368 			   unsigned long gaddr)
2369 {
2370 	gaddr &= HPAGE_MASK;
2371 	pmdp_notify_gmap(gmap, pmdp, gaddr);
2372 	new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
2373 	if (MACHINE_HAS_TLB_GUEST)
2374 		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2375 			    IDTE_GLOBAL);
2376 	else if (MACHINE_HAS_IDTE)
2377 		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
2378 	else
2379 		__pmdp_csp(pmdp);
2380 	set_pmd(pmdp, new);
2381 }
2382 
2383 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
2384 			    int purge)
2385 {
2386 	pmd_t *pmdp;
2387 	struct gmap *gmap;
2388 	unsigned long gaddr;
2389 
2390 	rcu_read_lock();
2391 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2392 		spin_lock(&gmap->guest_table_lock);
2393 		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2394 						  vmaddr >> PMD_SHIFT);
2395 		if (pmdp) {
2396 			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
2397 			pmdp_notify_gmap(gmap, pmdp, gaddr);
2398 			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2399 						   _SEGMENT_ENTRY_GMAP_UC));
2400 			if (purge)
2401 				__pmdp_csp(pmdp);
2402 			set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
2403 		}
2404 		spin_unlock(&gmap->guest_table_lock);
2405 	}
2406 	rcu_read_unlock();
2407 }
2408 
2409 /**
2410  * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2411  *                        flushing
2412  * @mm: pointer to the process mm_struct
2413  * @vmaddr: virtual address in the process address space
2414  */
2415 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
2416 {
2417 	gmap_pmdp_clear(mm, vmaddr, 0);
2418 }
2419 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
2420 
2421 /**
2422  * gmap_pmdp_csp - csp all affected guest pmd entries
2423  * @mm: pointer to the process mm_struct
2424  * @vmaddr: virtual address in the process address space
2425  */
2426 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
2427 {
2428 	gmap_pmdp_clear(mm, vmaddr, 1);
2429 }
2430 EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
2431 
2432 /**
2433  * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
2434  * @mm: pointer to the process mm_struct
2435  * @vmaddr: virtual address in the process address space
2436  */
2437 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
2438 {
2439 	unsigned long *entry, gaddr;
2440 	struct gmap *gmap;
2441 	pmd_t *pmdp;
2442 
2443 	rcu_read_lock();
2444 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2445 		spin_lock(&gmap->guest_table_lock);
2446 		entry = radix_tree_delete(&gmap->host_to_guest,
2447 					  vmaddr >> PMD_SHIFT);
2448 		if (entry) {
2449 			pmdp = (pmd_t *)entry;
2450 			gaddr = __gmap_segment_gaddr(entry);
2451 			pmdp_notify_gmap(gmap, pmdp, gaddr);
2452 			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2453 					   _SEGMENT_ENTRY_GMAP_UC));
2454 			if (MACHINE_HAS_TLB_GUEST)
2455 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2456 					    gmap->asce, IDTE_LOCAL);
2457 			else if (MACHINE_HAS_IDTE)
2458 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
2459 			*entry = _SEGMENT_ENTRY_EMPTY;
2460 		}
2461 		spin_unlock(&gmap->guest_table_lock);
2462 	}
2463 	rcu_read_unlock();
2464 }
2465 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
2466 
2467 /**
2468  * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
2469  * @mm: pointer to the process mm_struct
2470  * @vmaddr: virtual address in the process address space
2471  */
2472 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
2473 {
2474 	unsigned long *entry, gaddr;
2475 	struct gmap *gmap;
2476 	pmd_t *pmdp;
2477 
2478 	rcu_read_lock();
2479 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2480 		spin_lock(&gmap->guest_table_lock);
2481 		entry = radix_tree_delete(&gmap->host_to_guest,
2482 					  vmaddr >> PMD_SHIFT);
2483 		if (entry) {
2484 			pmdp = (pmd_t *)entry;
2485 			gaddr = __gmap_segment_gaddr(entry);
2486 			pmdp_notify_gmap(gmap, pmdp, gaddr);
2487 			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2488 					   _SEGMENT_ENTRY_GMAP_UC));
2489 			if (MACHINE_HAS_TLB_GUEST)
2490 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2491 					    gmap->asce, IDTE_GLOBAL);
2492 			else if (MACHINE_HAS_IDTE)
2493 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
2494 			else
2495 				__pmdp_csp(pmdp);
2496 			*entry = _SEGMENT_ENTRY_EMPTY;
2497 		}
2498 		spin_unlock(&gmap->guest_table_lock);
2499 	}
2500 	rcu_read_unlock();
2501 }
2502 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
2503 
2504 /**
2505  * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2506  * @gmap: pointer to guest address space
2507  * @pmdp: pointer to the pmd to be tested
2508  * @gaddr: virtual address in the guest address space
2509  *
2510  * This function is assumed to be called with the guest_table_lock
2511  * held.
2512  */
2513 static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2514 					  unsigned long gaddr)
2515 {
2516 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2517 		return false;
2518 
2519 	/* Already protected memory, which did not change, is clean */
2520 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
2521 	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
2522 		return false;
2523 
2524 	/* Clear UC indication and reset protection */
2525 	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
2526 	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2527 	return true;
2528 }
2529 
2530 /**
2531  * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2532  * @gmap: pointer to guest address space
2533  * @bitmap: dirty bitmap for this pmd
2534  * @gaddr: virtual address in the guest address space
2535  * @vmaddr: virtual address in the host address space
2536  *
2537  * This function is assumed to be called with the guest_table_lock
2538  * held.
2539  */
2540 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2541 			     unsigned long gaddr, unsigned long vmaddr)
2542 {
2543 	int i;
2544 	pmd_t *pmdp;
2545 	pte_t *ptep;
2546 	spinlock_t *ptl;
2547 
2548 	pmdp = gmap_pmd_op_walk(gmap, gaddr);
2549 	if (!pmdp)
2550 		return;
2551 
2552 	if (pmd_leaf(*pmdp)) {
2553 		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2554 			bitmap_fill(bitmap, _PAGE_ENTRIES);
2555 	} else {
2556 		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
2557 			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2558 			if (!ptep)
2559 				continue;
2560 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2561 				set_bit(i, bitmap);
2562 			pte_unmap_unlock(ptep, ptl);
2563 		}
2564 	}
2565 	gmap_pmd_op_end(gmap, pmdp);
2566 }
2567 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
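
/*
 * Dirty-log sketch (illustrative only, not part of the original file): a
 * KVM-style caller is assumed to scan guest memory one segment at a time,
 * let gmap_sync_dirty_log_pmd() fill a 256-bit bitmap (4 unsigned longs)
 * and then transfer the set bits into its own per-memslot dirty bitmap.
 * The address translation and the final transfer step are hypothetical.
 *
 *	unsigned long bitmap[4];
 *
 *	for (gaddr = start; gaddr < end; gaddr += HPAGE_SIZE) {
 *		vmaddr = host address backing gaddr
 *		bitmap_zero(bitmap, _PAGE_ENTRIES);
 *		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 *		mark every bit set in bitmap[] as dirty for this segment
 *	}
 */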
2568 
2569 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2570 static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
2571 				    unsigned long end, struct mm_walk *walk)
2572 {
2573 	struct vm_area_struct *vma = walk->vma;
2574 
2575 	split_huge_pmd(vma, pmd, addr);
2576 	return 0;
2577 }
2578 
2579 static const struct mm_walk_ops thp_split_walk_ops = {
2580 	.pmd_entry	= thp_split_walk_pmd_entry,
2581 	.walk_lock	= PGWALK_WRLOCK_VERIFY,
2582 };
2583 
2584 static inline void thp_split_mm(struct mm_struct *mm)
2585 {
2586 	struct vm_area_struct *vma;
2587 	VMA_ITERATOR(vmi, mm, 0);
2588 
2589 	for_each_vma(vmi, vma) {
2590 		vm_flags_mod(vma, VM_NOHUGEPAGE, VM_HUGEPAGE);
2591 		walk_page_vma(vma, &thp_split_walk_ops, NULL);
2592 	}
2593 	mm->def_flags |= VM_NOHUGEPAGE;
2594 }
2595 #else
2596 static inline void thp_split_mm(struct mm_struct *mm)
2597 {
2598 }
2599 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2600 
2601 /*
2602  * switch on pgstes for its userspace process (for kvm)
2603  */
2604 int s390_enable_sie(void)
2605 {
2606 	struct mm_struct *mm = current->mm;
2607 
2608 	/* Do we have pgstes? if yes, we are done */
2609 	if (mm_has_pgste(mm))
2610 		return 0;
2611 	/* Fail if the page tables are 2K */
2612 	if (!mm_alloc_pgste(mm))
2613 		return -EINVAL;
2614 	mmap_write_lock(mm);
2615 	mm->context.has_pgste = 1;
2616 	/* split thp mappings and disable thp for future mappings */
2617 	thp_split_mm(mm);
2618 	mmap_write_unlock(mm);
2619 	return 0;
2620 }
2621 EXPORT_SYMBOL_GPL(s390_enable_sie);
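
/*
 * Usage sketch (illustrative only, not part of the original file):
 * s390_enable_sie() is assumed to be called once per process before the
 * first guest address space is created, e.g. during VM setup:
 *
 *	if (s390_enable_sie())
 *		return -EINVAL;		mm was set up with 2K page tables
 *	gmap = gmap_create(current->mm, limit);
 *	if (!gmap)
 *		return -ENOMEM;
 */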
2622 
2623 static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
2624 				   unsigned long end, struct mm_walk *walk)
2625 {
2626 	unsigned long *found_addr = walk->private;
2627 
2628 	/* Return 1 if the page is a zeropage. */
2629 	if (is_zero_pfn(pte_pfn(*pte))) {
2630 		/*
2631 		 * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
2632 		 * right thing and likely don't care: FAULT_FLAG_UNSHARE
2633 		 * currently only works in COW mappings, which is also where
2634 		 * mm_forbids_zeropage() is checked.
2635 		 */
2636 		if (!is_cow_mapping(walk->vma->vm_flags))
2637 			return -EFAULT;
2638 
2639 		*found_addr = addr;
2640 		return 1;
2641 	}
2642 	return 0;
2643 }
2644 
2645 static const struct mm_walk_ops find_zeropage_ops = {
2646 	.pte_entry	= find_zeropage_pte_entry,
2647 	.walk_lock	= PGWALK_WRLOCK,
2648 };
2649 
2650 /*
2651  * Unshare all shared zeropages, replacing them by anonymous pages. Note that
2652  * we cannot simply zap all shared zeropages, because this could later
2653  * trigger unexpected userfaultfd missing events.
2654  *
2655  * This must be called after mm->context.allow_cow_sharing was
2656  * set to 0, to avoid future mappings of shared zeropages.
2657  *
2658  * mm contracts with s390 that, even if mm were to remove a page table
2659  * while racing with walk_page_range_vma() calling pte_offset_map_lock()
2660  * (which would then fail), it will never insert a page table containing
2661  * empty zero pages once mm_forbids_zeropage(mm) is true, i.e. once
2662  * mm->context.allow_cow_sharing has been set to 0.
2663  */
2664 static int __s390_unshare_zeropages(struct mm_struct *mm)
2665 {
2666 	struct vm_area_struct *vma;
2667 	VMA_ITERATOR(vmi, mm, 0);
2668 	unsigned long addr;
2669 	vm_fault_t fault;
2670 	int rc;
2671 
2672 	for_each_vma(vmi, vma) {
2673 		/*
2674 		 * We could only look at COW mappings, but it's more future
2675 		 * proof to catch unexpected zeropages in other mappings and
2676 		 * fail.
2677 		 */
2678 		if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
2679 			continue;
2680 		addr = vma->vm_start;
2681 
2682 retry:
2683 		rc = walk_page_range_vma(vma, addr, vma->vm_end,
2684 					 &find_zeropage_ops, &addr);
2685 		if (rc < 0)
2686 			return rc;
2687 		else if (!rc)
2688 			continue;
2689 
2690 		/* addr was updated by find_zeropage_pte_entry() */
2691 		fault = handle_mm_fault(vma, addr,
2692 					FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
2693 					NULL);
2694 		if (fault & VM_FAULT_OOM)
2695 			return -ENOMEM;
2696 		/*
2697 		 * See break_ksm(): even after handle_mm_fault() returned 0, we
2698 		 * must start the lookup from the current address, because
2699 		 * handle_mm_fault() may back out if there's any difficulty.
2700 		 *
2701 		 * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
2702 		 * maybe they could trigger in the future on concurrent
2703 		 * truncation. In that case, the shared zeropage would be gone
2704 		 * and we can simply retry and make progress.
2705 		 */
2706 		cond_resched();
2707 		goto retry;
2708 	}
2709 
2710 	return 0;
2711 }
2712 
2713 static int __s390_disable_cow_sharing(struct mm_struct *mm)
2714 {
2715 	int rc;
2716 
2717 	if (!mm->context.allow_cow_sharing)
2718 		return 0;
2719 
2720 	mm->context.allow_cow_sharing = 0;
2721 
2722 	/* Replace all shared zeropages by anonymous pages. */
2723 	rc = __s390_unshare_zeropages(mm);
2724 	/*
2725 	 * Make sure to disable KSM (if enabled for the whole process or
2726 	 * individual VMAs). Note that nothing currently hinders user space
2727 	 * from re-enabling it.
2728 	 */
2729 	if (!rc)
2730 		rc = ksm_disable(mm);
2731 	if (rc)
2732 		mm->context.allow_cow_sharing = 1;
2733 	return rc;
2734 }
2735 
2736 /*
2737  * Disable most COW-sharing of memory pages for the whole process:
2738  * (1) Disable KSM and unmerge/unshare any KSM pages.
2739  * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
2740  *
2741  * Note that we currently don't bother with COW-shared pages that are shared
2742  * with parent/child processes due to fork().
2743  */
2744 int s390_disable_cow_sharing(void)
2745 {
2746 	int rc;
2747 
2748 	mmap_write_lock(current->mm);
2749 	rc = __s390_disable_cow_sharing(current->mm);
2750 	mmap_write_unlock(current->mm);
2751 	return rc;
2752 }
2753 EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
2754 
2755 /*
2756  * Enable storage key handling from now on and initialize the storage
2757  * keys with the default key.
2758  */
2759 static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
2760 				  unsigned long next, struct mm_walk *walk)
2761 {
2762 	/* Clear storage key */
2763 	ptep_zap_key(walk->mm, addr, pte);
2764 	return 0;
2765 }
2766 
2767 /*
2768  * Give a chance to schedule after setting the storage key on 256 pages.
2769  * We only hold the mmap lock (an rwsem) and the KVM SRCU.
2770  * Both can sleep.
2771  */
2772 static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
2773 				  unsigned long next, struct mm_walk *walk)
2774 {
2775 	cond_resched();
2776 	return 0;
2777 }
2778 
2779 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
2780 				      unsigned long hmask, unsigned long next,
2781 				      struct mm_walk *walk)
2782 {
2783 	pmd_t *pmd = (pmd_t *)pte;
2784 	unsigned long start, end;
2785 	struct folio *folio = page_folio(pmd_page(*pmd));
2786 
2787 	/*
2788 	 * The write check makes sure we do not set a key on shared
2789 	 * memory. This is needed as the walker does not differentiate
2790 	 * between actual guest memory and the process executable or
2791 	 * shared libraries.
2792 	 */
2793 	if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
2794 	    !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
2795 		return 0;
2796 
2797 	start = pmd_val(*pmd) & HPAGE_MASK;
2798 	end = start + HPAGE_SIZE;
2799 	__storage_key_init_range(start, end);
2800 	set_bit(PG_arch_1, &folio->flags);
2801 	cond_resched();
2802 	return 0;
2803 }
2804 
2805 static const struct mm_walk_ops enable_skey_walk_ops = {
2806 	.hugetlb_entry		= __s390_enable_skey_hugetlb,
2807 	.pte_entry		= __s390_enable_skey_pte,
2808 	.pmd_entry		= __s390_enable_skey_pmd,
2809 	.walk_lock		= PGWALK_WRLOCK,
2810 };
2811 
2812 int s390_enable_skey(void)
2813 {
2814 	struct mm_struct *mm = current->mm;
2815 	int rc = 0;
2816 
2817 	mmap_write_lock(mm);
2818 	if (mm_uses_skeys(mm))
2819 		goto out_up;
2820 
2821 	mm->context.uses_skeys = 1;
2822 	rc = __s390_disable_cow_sharing(mm);
2823 	if (rc) {
2824 		mm->context.uses_skeys = 0;
2825 		goto out_up;
2826 	}
2827 	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
2828 
2829 out_up:
2830 	mmap_write_unlock(mm);
2831 	return rc;
2832 }
2833 EXPORT_SYMBOL_GPL(s390_enable_skey);
2834 
2835 /*
2836  * Reset CMMA state, make all pages stable again.
2837  */
2838 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2839 			     unsigned long next, struct mm_walk *walk)
2840 {
2841 	ptep_zap_unused(walk->mm, addr, pte, 1);
2842 	return 0;
2843 }
2844 
2845 static const struct mm_walk_ops reset_cmma_walk_ops = {
2846 	.pte_entry		= __s390_reset_cmma,
2847 	.walk_lock		= PGWALK_WRLOCK,
2848 };
2849 
2850 void s390_reset_cmma(struct mm_struct *mm)
2851 {
2852 	mmap_write_lock(mm);
2853 	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
2854 	mmap_write_unlock(mm);
2855 }
2856 EXPORT_SYMBOL_GPL(s390_reset_cmma);
2857 
2858 #define GATHER_GET_PAGES 32
2859 
2860 struct reset_walk_state {
2861 	unsigned long next;
2862 	unsigned long count;
2863 	unsigned long pfns[GATHER_GET_PAGES];
2864 };
2865 
2866 static int s390_gather_pages(pte_t *ptep, unsigned long addr,
2867 			     unsigned long next, struct mm_walk *walk)
2868 {
2869 	struct reset_walk_state *p = walk->private;
2870 	pte_t pte = READ_ONCE(*ptep);
2871 
2872 	if (pte_present(pte)) {
2873 		/* we have a reference from the mapping, take an extra one */
2874 		get_page(phys_to_page(pte_val(pte)));
2875 		p->pfns[p->count] = phys_to_pfn(pte_val(pte));
2876 		p->next = next;
2877 		p->count++;
2878 	}
2879 	return p->count >= GATHER_GET_PAGES;
2880 }
2881 
2882 static const struct mm_walk_ops gather_pages_ops = {
2883 	.pte_entry = s390_gather_pages,
2884 	.walk_lock = PGWALK_RDLOCK,
2885 };
2886 
2887 /*
2888  * Call the Destroy secure page UVC on each page in the given array of PFNs.
2889  * Each page needs to have an extra reference, which will be released here.
2890  */
2891 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns)
2892 {
2893 	struct folio *folio;
2894 	unsigned long i;
2895 
2896 	for (i = 0; i < count; i++) {
2897 		folio = pfn_folio(pfns[i]);
2898 		/* we always have an extra reference */
2899 		uv_destroy_folio(folio);
2900 		/* get rid of the extra reference */
2901 		folio_put(folio);
2902 		cond_resched();
2903 	}
2904 }
2905 EXPORT_SYMBOL_GPL(s390_uv_destroy_pfns);
2906 
2907 /**
2908  * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
2909  * in the given range of the given address space.
2910  * @mm: the mm to operate on
2911  * @start: the start of the range
2912  * @end: the end of the range
2913  * @interruptible: if not 0, stop when a fatal signal is received
2914  *
2915  * Walk the given range of the given address space and call the destroy
2916  * secure page UVC on each page. Optionally exit early if a fatal signal is
2917  * pending.
2918  *
2919  * Return: 0 on success, -EINTR if the function stopped before completing
2920  */
2921 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
2922 			    unsigned long end, bool interruptible)
2923 {
2924 	struct reset_walk_state state = { .next = start };
2925 	int r = 1;
2926 
2927 	while (r > 0) {
2928 		state.count = 0;
2929 		mmap_read_lock(mm);
2930 		r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state);
2931 		mmap_read_unlock(mm);
2932 		cond_resched();
2933 		s390_uv_destroy_pfns(state.count, state.pfns);
2934 		if (interruptible && fatal_signal_pending(current))
2935 			return -EINTR;
2936 	}
2937 	return 0;
2938 }
2939 EXPORT_SYMBOL_GPL(__s390_uv_destroy_range);
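
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * tearing down a protected guest is assumed to destroy all secure pages in
 * the host address range backing the guest before freeing it, optionally
 * honouring fatal signals; the range 0..TASK_SIZE is an assumption here.
 *
 *	if (__s390_uv_destroy_range(mm, 0, TASK_SIZE, true))
 *		return -EINTR;		interrupted by a fatal signal
 */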
2940 
2941 /**
2942  * s390_unlist_old_asce - Remove the topmost level of page tables from the
2943  * list of page tables of the gmap.
2944  * @gmap: the gmap whose table is to be removed
2945  *
2946  * On s390x, KVM keeps a list of all pages containing the page tables of the
2947  * gmap (the CRST list). This list is used at tear down time to free all
2948  * pages that are now not needed anymore.
2949  *
2950  * This function removes the topmost page of the tree (the one pointed to by
2951  * the ASCE) from the CRST list.
2952  *
2953  * This means that it will not be freed when the VM is torn down, and needs
2954  * to be handled separately by the caller, unless a leak is actually
2955  * intended. Notice that this function will only remove the page from the
2956  * list, the page will still be used as a top level page table (and ASCE).
2957  */
2958 void s390_unlist_old_asce(struct gmap *gmap)
2959 {
2960 	struct page *old;
2961 
2962 	old = virt_to_page(gmap->table);
2963 	spin_lock(&gmap->guest_table_lock);
2964 	list_del(&old->lru);
2965 	/*
2966 	 * Sometimes the topmost page might need to be "removed" multiple
2967 	 * times, for example if the VM is rebooted into secure mode several
2968 	 * times concurrently, or if s390_replace_asce fails after calling
2969 	 * s390_remove_old_asce and is attempted again later. In that case
2970 	 * the old asce has been removed from the list, and therefore it
2971 	 * will not be freed when the VM terminates, but the ASCE is still
2972 	 * in use and still pointed to.
2973 	 * A subsequent call to replace_asce will follow the pointer and try
2974 	 * to remove the same page from the list again.
2975 	 * Therefore it's necessary that the page of the ASCE has valid
2976 	 * pointers, so list_del can work (and do nothing) without
2977 	 * dereferencing stale or invalid pointers.
2978 	 */
2979 	INIT_LIST_HEAD(&old->lru);
2980 	spin_unlock(&gmap->guest_table_lock);
2981 }
2982 EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
2983 
2984 /**
2985  * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
2986  * @gmap: the gmap whose ASCE needs to be replaced
2987  *
2988  * If the ASCE is a SEGMENT type then this function will return -EINVAL;
2989  * replacing such an ASCE would leave the host_to_guest radix tree pointing
2990  * to the wrong pages, causing use-after-free and memory corruption.
2991  * If the allocation of the new top level page table fails, the ASCE is not
2992  * replaced.
2993  * In any case, the old ASCE is always removed from the gmap CRST list.
2994  * Therefore the caller has to make sure to save a pointer to it
2995  * beforehand, unless a leak is actually intended.
2996  */
2997 int s390_replace_asce(struct gmap *gmap)
2998 {
2999 	unsigned long asce;
3000 	struct page *page;
3001 	void *table;
3002 
3003 	s390_unlist_old_asce(gmap);
3004 
3005 	/* Replacing segment type ASCEs would cause serious issues */
3006 	if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
3007 		return -EINVAL;
3008 
3009 	page = gmap_alloc_crst();
3010 	if (!page)
3011 		return -ENOMEM;
3012 	page->index = 0;
3013 	table = page_to_virt(page);
3014 	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
3015 
3016 	/*
3017 	 * The caller has to deal with the old ASCE, but here we make sure
3018 	 * the new one is properly added to the CRST list, so that
3019 	 * it will be freed when the VM is torn down.
3020 	 */
3021 	spin_lock(&gmap->guest_table_lock);
3022 	list_add(&page->lru, &gmap->crst_list);
3023 	spin_unlock(&gmap->guest_table_lock);
3024 
3025 	/* Set new table origin while preserving existing ASCE control bits */
3026 	asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
3027 	WRITE_ONCE(gmap->asce, asce);
3028 	WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
3029 	WRITE_ONCE(gmap->table, table);
3030 
3031 	return 0;
3032 }
3033 EXPORT_SYMBOL_GPL(s390_replace_asce);
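
/*
 * Usage sketch (illustrative only, not part of the original file): because
 * the old top-level table is always removed from the CRST list, a caller
 * that does not intend to leak it is assumed to save a pointer beforehand
 * and free the table once the old ASCE is no longer in use anywhere:
 *
 *	old_table = gmap->table;	save before the replacement
 *	rc = s390_replace_asce(gmap);
 *	if (rc)
 *		return rc;		old table is unlisted but still in use
 *	once the old ASCE is guaranteed unused:
 *	__free_pages(virt_to_page(old_table), CRST_ALLOC_ORDER);
 */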
3034