pgtable-3level.h: c316cf670491def52a396d3bdc5a63ad01f7fefa -> e585513b76f7b05d08ca3fb250fed11f6ba46ee5
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
--- 198 unchanged lines hidden ---

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })

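/*
 * Worked example (illustrative, not part of the original header):
 * __swp_entry(2, 0x30) builds 2 | (0x30 << 5) == 0x602, which
 * __swp_entry_to_pte() stores entirely in pte_high.  Decoding with
 * __swp_type() masks the low 5 bits back out (0x602 & 0x1f == 2) and
 * __swp_offset() shifts them away (0x602 >> 5 == 0x30).  The
 * MAX_SWAPFILES_CHECK() above guarantees the swap type always fits
 * in those 5 bits.
 */
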
#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

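/*
 * Caller sketch (illustrative, not part of the original header): the
 * lockless fast-GUP walk is expected to use this roughly as
 *
 *	local_irq_save(flags);
 *	pte = gup_get_pte(ptep);
 *	if (pte_present(pte))
 *		... pin the page ...
 *	local_irq_restore(flags);
 *
 * with interrupts held off for the whole walk, so that the TLB flush
 * mentioned above cannot complete while the two 32-bit halves of the
 * PTE are being read.
 */
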
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */