Lines Matching defs:pmd
48 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
49 * above. pmd folding is special and typically pmd_* macros refer to upper
50 * level even when folded
51 */
52 void pmd_clear_bad(pmd_t *pmd)
53 {
54 pmd_ERROR(*pmd);
55 pmd_clear(pmd);
56 }
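
pmd_clear_bad() is the report-and-recover path for a corrupt pmd entry: pmd_ERROR() logs it, pmd_clear() wipes it so the walk can treat the slot as empty. A minimal sketch of the usual caller pattern, modeled on pmd_none_or_clear_bad() from include/linux/pgtable.h (kernel-context code, not part of this file; shape may differ slightly across versions):

	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
	{
		if (pmd_none(*pmd))
			return 1;		/* nothing mapped here: skip */
		if (unlikely(pmd_bad(*pmd))) {
			pmd_clear_bad(pmd);	/* report and wipe the corrupt entry */
			return 1;		/* now treated as empty */
		}
		return 0;			/* sane entry: safe to walk below */
	}
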
140 pmd_t pmd;
143 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
145 return pmd;
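
Only three lines of pmdp_huge_clear_flush() match here; the elided ones assert huge-page alignment and, crucially, flush the TLB for the huge range between the clear and the return. A hedged reconstruction of the whole definition as found in recent mm/pgtable-generic.c (the exact VM_BUG_ON conditions vary by kernel version):

	pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
				    pmd_t *pmdp)
	{
		pmd_t pmd;

		VM_BUG_ON(address & ~HPAGE_PMD_MASK);
		VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
		pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
		/* the huge mapping is gone: shoot down its TLB entries */
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		return pmd;
	}
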
221 * pmd and hugepage pte format are same. So we could
222 * use the same function.
223 */
224 pmd_t pmd;
228 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
230 /* collapse entails shooting down ptes not pmd */
232 return pmd;
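
The preserved comments carry the whole point of the generic pmdp_collapse_flush(): because the pmd and huge-pte formats are the same, pmdp_huge_get_and_clear() can do the clearing, and because khugepaged is collapsing a pte table rather than splitting a huge pmd, the stale TLB entries are pte-level ones, hence flush_tlb_range() instead of flush_pmd_tlb_range(). A sketch of the full function under those assumptions:

	pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				  pmd_t *pmdp)
	{
		pmd_t pmd;

		VM_BUG_ON(address & ~HPAGE_PMD_MASK);
		pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
		/* collapse entails shooting down ptes not pmd */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		return pmd;
	}
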
281 pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
288 pmdval = pmdp_get_lockless(pmd);
298 pmd_clear_bad(pmd);
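
These matches are the skeleton of the RCU-protected lockless map: take one atomic snapshot of *pmd with pmdp_get_lockless(), report it through *pmdvalp, reject anything that is not a sane pte table, and call pmd_clear_bad() on a corrupt entry before giving up. A sketch of the surrounding logic; the exact rejection checks differ across versions (e.g. pmd_trans_huge()/pmd_devmap() vs. pmd_leaf()):

	pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
	{
		pmd_t pmdval;

		rcu_read_lock();			/* pins the pte table against RCU freeing */
		pmdval = pmdp_get_lockless(pmd);	/* one atomic snapshot of *pmd */
		if (pmdvalp)
			*pmdvalp = pmdval;		/* tell the caller what we acted on */
		if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
			goto nomap;
		if (unlikely(pmd_trans_huge(pmdval)))
			goto nomap;			/* huge pmd: no pte table below */
		if (unlikely(pmd_bad(pmdval))) {
			pmd_clear_bad(pmd);		/* the call at line 298 above */
			goto nomap;
		}
		return __pte_map(pmd, addr);		/* map the pte table, still under RCU */
	nomap:
		rcu_read_unlock();
		return NULL;
	}
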
307 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
313 pte = __pte_offset_map(pmd, addr, &pmdval);
319 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
326 pte = __pte_offset_map(pmd, addr, pmdvalp);
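
Both _nolock variants hand back the pte and the lock pointer without taking the lock, which shifts responsibility to the caller. For the rw variant in particular, the documented contract (see the comment block below) is to take ptl and then recheck with pmd_same() before writing, since the page table may have been removed or replaced in the meantime. A hypothetical caller, sketched to show that contract (walk_one_pte() is illustrative, not from the source):

	static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
	{
		spinlock_t *ptl;
		pmd_t pmdval;
		pte_t *pte;

		pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
		if (!pte)
			return -EAGAIN;		/* no page table here (any more) */
		spin_lock(ptl);
		if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
			pte_unmap_unlock(pte, ptl);
			return -EAGAIN;		/* table removed or replaced: retry */
		}
		/* ... safe to read and modify ptes under ptl here ... */
		pte_unmap_unlock(pte, ptl);
		return 0;
	}
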
333 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
334 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
338 * write). In a few cases, it may be used with pmd pointing to a pmd_t already
348 * page table at *pmd: if, for example, the page table has just been removed,
349 * or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked
353 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
355 * or NULL if there is no page table at *pmd. It does not attempt to lock the
362 * pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
365 * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
366 * act on a changed *pmd: pte_offset_map_ro_nolock() provides the correct spinlock
374 * pte_offset_map_rw_nolock(mm, pmd, addr, pmdvalp, ptlp), above, is like
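
What this long comment describes is easiest to see in the standard caller idiom: pte_offset_map_lock() either returns a mapped and locked pte with *ptlp set, or NULL when there is no page table at *pmd, and the caller must be prepared for the NULL. A minimal sketch of that idiom (scan_range() and its bounds are illustrative):

	static void scan_range(struct mm_struct *mm, pmd_t *pmd,
			       unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return;		/* no page table at *pmd: retry or give up */
		do {
			pte_t entry = ptep_get(pte);

			if (pte_none(entry))
				continue;
			/* ... examine or modify the entry under ptl ... */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(pte - 1, ptl);
	}
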
390 pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
397 pte = __pte_offset_map(pmd, addr, &pmdval);
402 if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
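
The matches give the outline of __pte_offset_map_lock(): map via __pte_offset_map(), take the pte lock, and only hand the pte back if pmd_same() confirms the pmd entry did not change between mapping and locking. A hedged reconstruction of the full retry loop (close to recent mm/pgtable-generic.c, but a sketch rather than verbatim):

	pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, spinlock_t **ptlp)
	{
		spinlock_t *ptl;
		pmd_t pmdval;
		pte_t *pte;
	again:
		pte = __pte_offset_map(pmd, addr, &pmdval);
		if (unlikely(!pte))
			return pte;			/* no page table at *pmd */
		ptl = pte_lockptr(mm, &pmdval);
		spin_lock(ptl);
		if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
			*ptlp = ptl;
			return pte;			/* mapped and locked */
		}
		pte_unmap_unlock(pte, ptl);
		goto again;				/* table changed under us: start over */
	}
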