/* xref: /linux/arch/um/include/asm/page.h (revision c358f53871605a1a8d7ed6e544a05ea00e9c80cb) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
4  * Copyright 2003 PathScale, Inc.
5  */
6 
7 #ifndef __UM_PAGE_H
8 #define __UM_PAGE_H
9 
10 #include <linux/const.h>
11 
/* PAGE_SHIFT determines the page size: 1 << 12 = 4 KiB pages. */
#define PAGE_SHIFT	12
/* _AC() emits a plain "1" in assembly and "1UL" in C, so this works in both. */
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
/* Mask that clears the in-page offset bits of an address. */
#define PAGE_MASK	(~(PAGE_SIZE-1))
16 
17 #ifndef __ASSEMBLY__
18 
19 struct page;
20 
21 #include <linux/pfn.h>
22 #include <linux/types.h>
23 #include <asm/vm-flags.h>
24 
/*
 * These are used to make use of C type-checking..
 */

/* Zero or copy one whole page; arguments are kernel virtual addresses. */
#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)

/*
 * User-page variants: UML has no virtually-indexed caches to flush, so the
 * vaddr and struct page arguments are unused and these reduce to the plain
 * kernel-address versions above.
 */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
34 
/*
 * Page-table entry types and accessors.  Two configurations:
 *
 *  - 32-bit host with 3-level page tables: physical addresses may exceed
 *    32 bits, so phys_t is unsigned long long even though the table
 *    entries themselves still fit in an unsigned long.
 *
 *  - everything else (2-level, or 64-bit 3-level): unsigned long is wide
 *    enough for both entries and physical addresses.
 *
 * The struct wrappers exist purely for type-checking (see comment above);
 * the pte_*_bits()/pte_copy()/pte_is_zero()/pte_set_val() helpers are the
 * only sanctioned ways to poke at a pte_t.
 */
#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(p) ((p).pte)

#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ({ (to).pte = (from).pte; })
/* _PAGE_NEWPAGE is UML bookkeeping, not a mapping bit — ignore it here. */
#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
#define pte_set_val(p, phys, prot) \
	({ (p).pte = (phys) | pgprot_val(prot); })

#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )

/* Physical addresses can be wider than the 32-bit word size here. */
typedef unsigned long long phys_t;

#else

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#ifdef CONFIG_3_LEVEL_PGTABLES
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x) ((pmd_t) { (x) } )
#endif

#define pte_val(x)	((x).pte)


#define pte_get_bits(p, bits) ((p).pte & (bits))
#define pte_set_bits(p, bits) ((p).pte |= (bits))
#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
#define pte_copy(to, from) ((to).pte = (from).pte)
/* _PAGE_NEWPAGE is UML bookkeeping, not a mapping bit — ignore it here. */
#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))

typedef unsigned long phys_t;

#endif
79 
/* Protection bits, wrapped for type-checking like the table entries above. */
typedef struct { unsigned long pgprot; } pgprot_t;

/* A page-table page is tracked by its struct page. */
typedef struct page *pgtable_t;

/* Unwrap the raw values ... */
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

/* ... and construct the wrapped types from raw values. */
#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
90 
/* Host virtual address where UML's "physical" memory is mapped. */
extern unsigned long uml_physmem;

/*
 * Unlike most architectures, the start of the kernel's linear mapping is
 * only known at runtime — it is wherever the host gave us our memory.
 */
#define PAGE_OFFSET (uml_physmem)
#define KERNELBASE PAGE_OFFSET

/* Virtual address space reserved above physical memory: 8 MiB. */
#define __va_space (8*1024*1024)
97 
98 #include <mem.h>
99 
/* Cast to unsigned long before casting to void * to avoid a warning from
 * mmap_kmem about cutting a long long down to a void *.  Not sure that
 * casting is the right thing, but 32-bit UML can't have 64-bit virtual
 * addresses
 */
/* Translate between kernel virtual and UML "physical" addresses. */
#define __pa(virt) uml_to_phys((void *) (unsigned long) (virt))
#define __va(phys) uml_to_virt((unsigned long) (phys))

/* Physical address <-> page frame number. */
#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
#define pfn_to_phys(pfn) PFN_PHYS(pfn)

/* A pfn is valid iff it falls below the highest mapped page. */
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
113 
114 #include <asm-generic/memory_model.h>
115 #include <asm-generic/getorder.h>
116 
117 #endif	/* __ASSEMBLY__ */
118 
/* 32-bit x86 hosts provide a gate (vsyscall/vDSO) area; see mem_32.c. */
#ifdef CONFIG_X86_32
#define __HAVE_ARCH_GATE_AREA 1
#endif
122 
123 #endif	/* __UM_PAGE_H */
124