/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

/*
 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
 * table containing PTEs, together with a set of 16 segment registers,
 * to define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hash_low_32.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
#define _PAGE_READ 0x004 /* software: read access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_WRITE 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif
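
/*
 * Note that pte_clear() and ptep_get_and_clear() below deliberately leave
 * _PAGE_HASHPTE (and, with 64-bit PTEs, the high word) in place, which is
 * why pte_none() masks these bits out when testing for an empty PTE.
 */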

#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)

/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE _PAGE_READ

/* And here we include common definitions */

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as _PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT (PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
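
/*
 * For example, with the usual 4K pages PTE_RPN_SHIFT is 12, so a 32-bit PTE
 * keeps its PFN in bits 12-31 (PTE_RPN_MASK == 0xfffff000) and can address a
 * 32-bit physical space, while a 64-bit PTE carries a PFN large enough for a
 * 36-bit physical address.
 */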

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
                        _PAGE_ACCESSED | _PAGE_SPECIAL)
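
/*
 * pte_modify() below relies on this mask: it keeps the PFN and the
 * HASHPTE/DIRTY/ACCESSED/SPECIAL software bits of an existing PTE and
 * replaces everything else with the new protection bits.
 */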

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)

#include <asm/pgtable-masks.h>

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLER__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLER__ */

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
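
/*
 * With 4K pages and 32-bit PTEs, for instance, PTE_INDEX_SIZE is 10, so
 * PGDIR_SHIFT is 22: each of the 1024 PGD entries maps 4MB through a
 * 1024-entry PTE page, and USER_PTRS_PER_PGD is simply TASK_SIZE divided
 * into those 4MB chunks.
 */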

#ifndef __ASSEMBLER__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLER__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */

#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
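
/*
 * As an illustration with a made-up value: if high_memory were 0xc8000000
 * (128MB of lowmem mapped at 0xc0000000), VMALLOC_START would round
 * 0xc8000000 + 16MB down to a 16MB boundary, giving 0xc9000000.
 */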

#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/page_table_check.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;

                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t old;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long tmp;

                asm volatile(
#ifndef CONFIG_PTE_64BIT
        "1:     lwarx   %0, 0, %3\n"
        "       andc    %1, %0, %4\n"
#else
        "1:     lwarx   %L0, 0, %3\n"
        "       lwz     %0, -4(%3)\n"
        "       andc    %1, %L0, %4\n"
#endif
        "       or      %1, %1, %5\n"
        "       stwcx.  %1, 0, %3\n"
        "       bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
        : "r" (p),
#else
        : "b" ((unsigned long)(p) + 4),
#endif
          "r" (clr), "r" (set), "m" (*p)
        : "cc" );
        } else {
                old = pte_val(*p);

                *p = __pte((old & ~(pte_basic_t)clr) | set);
        }

        return old;
}
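
/*
 * Typical use: ptep_set_wrprotect() below removes write permission with
 * pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); the atomic sequence above
 * ensures a concurrent invalidation clearing _PAGE_HASHPTE is not lost.
 */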

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline bool __ptep_test_and_clear_young(struct mm_struct *mm,
                                               unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        if (old & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);

        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));

        page_table_check_pte_clear(mm, addr, old_pte);

        return old_pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        pte_update(vma->vm_mm, address, ptep, 0, set, 0);

        flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <----------------- offset --------------------> < type -> E H P
 *
 * E is the exclusive marker that is not stored in swap entries.
 * _PAGE_PRESENT (P) and _PAGE_HASHPTE (H) must be 0.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
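
/*
 * Worked example (purely illustrative): __swp_entry(1, 0x100) gives
 * val = 1 | (0x100 << 5) = 0x2001, and __swp_entry_to_pte() shifts that
 * left by 3, putting the type in bits 3-7 and the offset in bits 8-31
 * while keeping P, H and E (bits 0-2) clear.
 */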

static inline bool pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* Generic accessors to PTE bits */
static inline bool pte_read(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_READ);
}

static inline bool pte_write(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_WRITE);
}

static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * A read-only access is controlled by the _PAGE_READ bit.
         * We also have _PAGE_READ set for write access (write implies read).
         */
        if (!pte_present(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}

static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
        return pte_present(pte) && !is_kernel_addr(addr);
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_exprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        /*
         * write implies read, hence set both
         */
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.
 *
 * First case is 32-bit in UP mode with 32-bit PTEs, we need to preserve
 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
 * and we need to keep track that this PTE needs invalidating.
 *
 * Second case is 32-bit with 64-bit PTE. In this case, we
 * can just store as long as we do the two halves in the right order
 * with a barrier in between. This is possible because we take care,
 * in the hash code, to pre-invalidate if the PTE was already hashed,
 * which synchronizes us with any concurrent invalidation.
 * In the percpu case, we fall back to the simple update preserving
 * the hash bits (ie, same as the non-SMP case).
 *
 * Third case is 32-bit in SMP mode with 32-bit PTEs. We use the
 * helper pte_update() which does an atomic update. We need to do that
 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
 * the hash bits instead.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
        if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
                              (pte_val(pte) & ~_PAGE_HASHPTE));
        } else if (IS_ENABLED(CONFIG_PTE_64BIT)) {
                if (pte_val(*ptep) & _PAGE_HASHPTE)
                        flush_hash_entry(mm, ptep, addr);

                asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" :
                             "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) :
                             "r" (pte) : "memory");
        } else {
                pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
        }
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}
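
/*
 * pgprot_noncached() is the strictest variant: cache-inhibited (I) and
 * guarded (G), so accesses are neither cached nor speculated, which is
 * typically what memory-mapped I/O wants. pgprot_noncached_wc() below
 * leaves _PAGE_GUARDED clear, the usual choice for write-combining style
 * mappings such as framebuffers.
 */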

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */