pgalloc.c: eadcbfa58ae8693f0d6a0f591d8f51d55cf068e1 -> 1aea9b3f921003f0880f0676ae85d87c9f1cb4a2
 /*
  * Page table allocation functions
  *
  * Copyright IBM Corp. 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/mm.h>

--- 62 unchanged lines hidden ---

 	if (current->active_mm == mm) {
 		clear_user_asce();
 		set_user_asce(mm);
 	}
 	__tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm)
+int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 {
 	unsigned long *table, *pgd;
+	int rc, notify;
 
-	/* upgrade should only happen from 3 to 4 levels */
-	BUG_ON(mm->context.asce_limit != (1UL << 42));
-
-	table = crst_table_alloc(mm);
-	if (!table)
+	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
+	BUG_ON(mm->context.asce_limit < (1UL << 42));
+	if (end >= TASK_SIZE_MAX)
 		return -ENOMEM;
-
-	spin_lock_bh(&mm->page_table_lock);
-	pgd = (unsigned long *) mm->pgd;
-	crst_table_init(table, _REGION2_ENTRY_EMPTY);
-	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-	mm->pgd = (pgd_t *) table;
-	mm->context.asce_limit = 1UL << 53;
-	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
-	spin_unlock_bh(&mm->page_table_lock);
-
-	on_each_cpu(__crst_table_upgrade, mm, 0);
-	return 0;
+	rc = 0;
+	notify = 0;
+	while (mm->context.asce_limit < end) {
+		table = crst_table_alloc(mm);
+		if (!table) {
+			rc = -ENOMEM;
+			break;
+		}
+		spin_lock_bh(&mm->page_table_lock);
+		pgd = (unsigned long *) mm->pgd;
+		if (mm->context.asce_limit == (1UL << 42)) {
+			crst_table_init(table, _REGION2_ENTRY_EMPTY);
+			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
+			mm->pgd = (pgd_t *) table;
+			mm->context.asce_limit = 1UL << 53;
+			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+					   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+		} else {
+			crst_table_init(table, _REGION1_ENTRY_EMPTY);
+			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
+			mm->pgd = (pgd_t *) table;
+			mm->context.asce_limit = -PAGE_SIZE;
+			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+					   _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+		}
+		notify = 1;
+		spin_unlock_bh(&mm->page_table_lock);
+	}
+	if (notify)
+		on_each_cpu(__crst_table_upgrade, mm, 0);
+	return rc;
 }
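Note: a minimal sketch of how a caller might drive the reworked crst_table_upgrade(), requesting more region-table levels before placing a mapping that would end above the current ASCE limit. The helper name check_asce_limit and its arguments are illustrative and not part of this diff; the real call sites live in the s390 mmap path.

/*
 * Illustrative only: ask for an upgrade when a prospective mapping would
 * end above the current limit.  crst_table_upgrade() then loops, adding
 * one region-table level per iteration, until asce_limit covers 'end'.
 */
static inline int check_asce_limit(struct mm_struct *mm, unsigned long addr,
				   unsigned long len)
{
	int rc;

	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return rc;
	}
	return 0;
}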
 
 void crst_table_downgrade(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
 	/* downgrade should only happen from 3 to 2 levels (compat only) */
 	BUG_ON(mm->context.asce_limit != (1UL << 42));

--- 159 unchanged lines hidden ---

 
 static void __tlb_remove_table(void *_table)
 {
 	unsigned int mask = (unsigned long) _table & 3;
 	void *table = (void *)((unsigned long) _table ^ mask);
 	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 
 	switch (mask) {
-	case 0:		/* pmd or pud */
+	case 0:		/* pmd, pud, or p4d */
 		free_pages((unsigned long) table, 2);
 		break;
 	case 1:		/* lower 2K of a 4K page table */
 	case 2:		/* higher 2K of a 4K page table */
 		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
 			break;
 		/* fallthrough */
 	case 3:		/* 4K page table with pgstes */

--- 67 unchanged lines hidden ---
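Note: a short sketch of the low-bit tagging scheme that __tlb_remove_table() decodes above. Because these tables are at least 2K-aligned, the two lowest bits of the table address are free to carry a type tag; the tag values mirror the switch (mask) cases. The enum and helper names below are illustrative, not taken from the kernel source.

/* Tag values matching the switch (mask) cases in __tlb_remove_table(). */
enum table_tag {
	TAG_CRST	= 0,	/* pmd, pud, or p4d table (order-2 page) */
	TAG_PTE_LOWER	= 1,	/* lower 2K half of a 4K PTE page */
	TAG_PTE_UPPER	= 2,	/* upper 2K half of a 4K PTE page */
	TAG_PTE_PGSTE	= 3,	/* full 4K PTE page with pgstes */
};

static inline void *table_tag_set(void *table, enum table_tag tag)
{
	/* The table is at least 2K-aligned, so the tag fits in bits 0-1. */
	return (void *)((unsigned long) table | tag);
}

static inline unsigned int table_tag_get(void *tagged)
{
	return (unsigned long) tagged & 3;
}

static inline void *table_tag_clear(void *tagged)
{
	/* XOR with the tag restores the original, aligned table address. */
	return (void *)((unsigned long) tagged ^ table_tag_get(tagged));
}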