--- pgtable.c (bb5f33c069402035a3d6a2091ee68cac6999d774)
+++ pgtable.c (2fb4706057bcf8261b3b0521ec7a62b54b82ce48)
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * This file contains common routines for dealing with freeing of page tables
  * Along with common page table handling code
  *
  * Derived from arch/powerpc/mm/tlb_64.c:
  *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *

--- 86 unchanged lines hidden ---

 static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

 #endif /* CONFIG_PPC_BOOK3S */

 /* Embedded type MMU with HW exec support. This is a bit more complicated
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
  */
-static inline pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte)
 {
 	struct page *pg;

 	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return set_pte_filter_hash(pte);

 	/* No exec permission in the first place, move on */
 	if (!pte_exec(pte) || !pte_looks_normal(pte))

--- 132 unchanged lines hidden ---

 	psize = hstate_get_psize(h);
 #ifdef CONFIG_DEBUG_VM
 	assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
 #endif

 #else
 	/*
-	 * Not used on non book3s64 platforms.
-	 * 8xx compares it with mmu_virtual_psize to
-	 * know if it is a huge page or not.
-	 */
-	psize = MMU_PAGE_COUNT;
+	 * Not used on non book3s64 platforms. But 8xx
+	 * can possibly use tsize derived from hstate.
+	 */
+	psize = 0;
 #endif
 	__ptep_set_access_flags(vma, ptep, pte, addr, psize);
 	}
 	return changed;
 #endif
 }
-
-#if defined(CONFIG_PPC_8xx)
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-	pmd_t *pmd = pmd_ptr(mm, addr);
-	pte_basic_t val;
-	pte_basic_t *entry = &ptep->pte;
-	int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
-	int i;
-
-	/*
-	 * Make sure hardware valid bit is not set. We don't do
-	 * tlb flush for this update.
-	 */
-	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
-
-	pte = pte_mkpte(pte);
-
-	pte = set_pte_filter(pte);
-
-	val = pte_val(pte);
-	for (i = 0; i < num; i++, entry++, val += SZ_4K)
-		*entry = val;
-}
-#endif
 #endif /* CONFIG_HUGETLB_PAGE */

 #ifdef CONFIG_DEBUG_VM
 void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;

 	if (mm == &init_mm)
 		return;
 	pgd = mm->pgd + pgd_index(addr);
 	BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	BUG_ON(p4d_none(*p4d));
+	pud = pud_offset(p4d, addr);
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, addr);
 	/*
 	 * khugepaged to collapse normal pages to hugepage, first set
 	 * pmd to none to force page fault/gup to take mmap_sem. After
 	 * pmd is set to none, we do a pte_clear which does this assertion
 	 * so if we find pmd none, return.
 	 */

--- 23 unchanged lines hidden ---
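The hunk above and the __find_linux_pte() hunk below make the same structural move introduced by 2fb4706057bc: a p4d step is wedged between the pgd and pud lookups. As a minimal, hypothetical sketch (not code from either commit; walk_to_pte() is an invented name), the generic folded-p4d walk looks like this. On powerpc the p4d level is folded, so p4d_offset() effectively hands the pgd entry back and the extra step has no runtime cost:

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	/* Illustration only: generic five-level walk, no locking shown. */
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* new level; a no-op when p4d is folded */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);	/* previously took the pgd pointer */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}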

  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the page and take a ref on it.
  * This function needs to be called with interrupts disabled. We use this variant
  * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
  */
 pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 			bool *is_thp, unsigned *hpage_shift)
 {
-	pgd_t pgd, *pgdp;
+	pgd_t *pgdp;
+	p4d_t p4d, *p4dp;
 	pud_t pud, *pudp;
 	pmd_t pmd, *pmdp;
 	pte_t *ret_pte;
 	hugepd_t *hpdp = NULL;
-	unsigned pdshift = PGDIR_SHIFT;
+	unsigned pdshift;

 	if (hpage_shift)
 		*hpage_shift = 0;

 	if (is_thp)
 		*is_thp = false;

-	pgdp = pgdir + pgd_index(ea);
-	pgd = READ_ONCE(*pgdp);
 	/*
 	 * Always operate on the local stack value. This makes sure the
 	 * value doesn't get updated by a parallel THP split/collapse,
 	 * page fault or a page unmap. The return pte_t * is still not
 	 * stable. So should be checked there for above conditions.
+	 * Top level is an exception because it is folded into p4d.
 	 */
-	if (pgd_none(pgd))
+	pgdp = pgdir + pgd_index(ea);
+	p4dp = p4d_offset(pgdp, ea);
+	p4d = READ_ONCE(*p4dp);
+	pdshift = P4D_SHIFT;
+
+	if (p4d_none(p4d))
 		return NULL;

-	if (pgd_is_leaf(pgd)) {
-		ret_pte = (pte_t *)pgdp;
+	if (p4d_is_leaf(p4d)) {
+		ret_pte = (pte_t *)p4dp;
 		goto out;
 	}

-	if (is_hugepd(__hugepd(pgd_val(pgd)))) {
-		hpdp = (hugepd_t *)&pgd;
+	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
+		hpdp = (hugepd_t *)&p4d;
 		goto out_huge;
 	}

 	/*
 	 * Even if we end up with an unmap, the pgtable will not
 	 * be freed, because we do an rcu free and here we are
 	 * irq disabled
 	 */
 	pdshift = PUD_SHIFT;
-	pudp = pud_offset(&pgd, ea);
+	pudp = pud_offset(&p4d, ea);
 	pud = READ_ONCE(*pudp);

 	if (pud_none(pud))
 		return NULL;

 	if (pud_is_leaf(pud)) {
 		ret_pte = (pte_t *)pudp;
 		goto out;

--- 60 unchanged lines hidden ---
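The comment above __find_linux_pte() insists on interrupts being disabled around the walk. A hedged usage sketch follows; lookup_pte_example() and its locals are invented for illustration and appear in neither commit, and only the __find_linux_pte() signature shown in the hunk above is taken as given:

static pte_t lookup_pte_example(struct mm_struct *mm, unsigned long ea)
{
	/* Illustration only: an assumed caller, not code from either commit. */
	unsigned long flags;
	unsigned int hshift;
	bool is_thp;
	pte_t *ptep, pte = __pte(0);

	/*
	 * Interrupts off: the RCU-deferred page table free mentioned in the
	 * comments cannot run while we dereference the returned pointer.
	 */
	local_irq_save(flags);
	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &hshift);
	if (ptep)
		pte = READ_ONCE(*ptep);	/* snapshot; may go stale once irqs are on */
	local_irq_restore(flags);

	return pte;
}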