/* xref: /linux/arch/s390/mm/pgtable.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8) */
/*
 *    Copyright IBM Corp. 2007,2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

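/*
 * ALLOC_ORDER is the page allocation order used for the top-level
 * (region/segment) tables: four pages (16KB) on 64-bit, two pages (8KB)
 * on 31-bit.  FRAG_MASK has one bit per page table fragment that fits
 * into a single 4KB page: two 2KB fragments on 64-bit, four 1KB
 * fragments on 31-bit.
 */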
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

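/*
 * "vmalloc=<size>" early parameter: move VMALLOC_START down so that <size>
 * bytes below VMALLOC_END are reserved for the vmalloc area.
 */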
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
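/*
 * Add page table levels on top of the current top-level table until the
 * address space covers the requested limit: 2GB (segment table only) is
 * extended to 4TB with a region-third table, and 4TB to 8PB with a
 * region-second table.  The new table is allocated outside of the
 * page_table_lock, and the whole operation is retried if another thread
 * upgraded the address space in the meantime.
 */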
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

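/*
 * Remove page table levels from the top of the hierarchy until the address
 * space limit is at or below the requested limit, presumably when a task
 * switches to a smaller address space (e.g. on a 31-bit compat exec).
 */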
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

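/*
 * A rough usage sketch (illustrative only, not taken from an in-tree
 * caller; "from", "to" and "len" stand for suitably aligned values): a
 * hypervisor such as KVM allocates a gmap for its mm, backs parts of it
 * with host memory and activates it around guest execution:
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *
 *	if (!gmap)
 *		return -ENOMEM;
 *	if (gmap_map_segment(gmap, from, to, len)) {
 *		gmap_free(gmap);
 *		return -ENOMEM;
 *	}
 *	gmap_enable(gmap);
 *	... run the guest ...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */

/*
 * Remove the reverse-map entry that connects a gmap segment table entry to
 * the host page table it points to and restore the invalid entry holding
 * the host address.  Returns 1 if a valid entry was removed and the TLB
 * needs to be flushed, 0 otherwise.
 */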
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			       unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

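/*
 * The gmap always uses a full four-level (region-first) table, so a guest
 * address is resolved by indexing the region-first table with bits 63-53,
 * the region-second table with bits 52-42, the region-third table with
 * bits 41-31 and the segment table with bits 30-20; hence the
 * >> 53/42/31/20 shifts and the 0x7ff masks in the table walks below.
 */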
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * Translate a guest address into the corresponding address in the parent
 * address space, linking the host page table into the gmap on first use.
 * This function is assumed to be called with mmap_sem held.
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

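/*
 * Discard the host memory backing the guest address range [from, to) by
 * zapping the corresponding page ranges in the parent address space.
 */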
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

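/*
 * Called when a host page table that is linked into one or more gmaps goes
 * away: break every gmap link to it, restore the invalid segment table
 * entries and flush the TLB if anything was unlinked.
 */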
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

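/*
 * Page tables with PGSTEs occupy a full 4KB page: the lower half holds the
 * 256 page table entries, the upper half the corresponding page status
 * table entries (PGSTEs) used by the SIE/KVM code.  page->index carries
 * the gmap_pgtable descriptor and page->_mapcount is set to FRAG_MASK to
 * mark the page as fully used.
 */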
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
					  unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

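/* Atomically toggle the given bits in *v and return the new value. */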
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
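/*
 * A page table needs only a fragment of a 4KB page (2KB with 256 entries
 * on 64-bit, 1KB with 256 entries on 31-bit), so several page tables share
 * one page.  The bits of page->_mapcount record which fragments are in
 * use, and mm->context.pgtable_list holds the pages that still have free
 * fragments.  Page tables with PGSTEs always get a page of their own.
 */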
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

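/*
 * For the RCU-delayed freeing the fragment that is to be released is
 * encoded in the low bits of the table pointer handed to tlb_remove_table():
 * the fragment bit shifted by four for ordinary page tables, or FRAG_MASK
 * for a page table with PGSTEs.  __tlb_remove_table() decodes this again
 * once the table may actually be freed.
 */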
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the current userspace process (needed for KVM).
 */
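/*
 * Existing page tables were allocated without pgstes, so the mm is replaced
 * wholesale: with alloc_pgste temporarily set, dup_mm() builds a copy whose
 * page tables do have pgstes, and the task is switched over to that copy.
 * This only works while the task is still the sole user of its mm.
 */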
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* Ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

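/*
 * Report whether a kernel page is currently mapped: LRA (load real address)
 * sets condition code 0 only if the address can be translated.
 */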
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
824