/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/*  This is probably not the most graceful way to handle this.  */

#ifdef CONFIG_PAGE_SIZE_4KB
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif
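
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * exactly one CONFIG_PAGE_SIZE_* option is expected to be selected by
 * Kconfig, so HEXAGON_L1_PTE_SIZE should always end up defined.  A broken
 * configuration could be caught explicitly with something like:
 *
 *	#ifndef HEXAGON_L1_PTE_SIZE
 *	#error "No supported CONFIG_PAGE_SIZE_* selected for Hexagon"
 *	#endif
 */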

/*
 *  These should be defined in hugetlb.h, but apparently not.
 *  "Huge" for us should be 4MB or 16MB, which are both represented
 *  in L1 PTEs.  Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
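
/*
 * Worked example (editor's addition, not upstream): HPAGE_SHIFT of 22 gives
 * HPAGE_SIZE = 1UL << 22 = 4MB.  With 16KB base pages (PAGE_SHIFT = 14),
 * HUGETLB_PAGE_ORDER = 22 - 14 = 8, i.e. one huge page spans 256 base
 * pages.  HVM_HUGEPAGE_SIZE 0x5 appears to be the hypervisor PDE size
 * encoding used for 4MB mappings, matching the comment above.
 */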

#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))
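
/*
 * Worked example (editor's addition, not upstream): with a 4KB page
 * configuration, CONFIG_PAGE_SHIFT is 12, so PAGE_SIZE is 0x1000 and
 * PAGE_MASK is 0xfffff000; with 256KB pages, CONFIG_PAGE_SHIFT is 18,
 * PAGE_SIZE is 0x40000 and PAGE_MASK is 0xfffc0000 (on 32-bit Hexagon).
 */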

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * This is for PFN_DOWN, which mm.h needs.  Seems the right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)     ((x).pte)
#define pgd_val(x)     ((x).pgd)
#define pgprot_val(x)  ((x).pgprot)
#define __pte(x)       ((pte_t) { (x) })
#define __pgd(x)       ((pgd_t) { (x) })
#define __pgprot(x)    ((pgprot_t) { (x) })
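
/*
 * Usage sketch (editor's addition, not upstream code): the single-member
 * structs exist purely so the compiler can type-check page table values;
 * wrapping and unwrapping is an identity operation, e.g.
 *
 *	pte_t pte = __pte(0x1000);
 *	unsigned long raw = pte_val(pte);	-> raw == 0x1000
 */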

/* Needed for PAGE_OFFSET used in the macro right below */
#include <asm/mem-layout.h>

/*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
 * Also, check whether we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
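
/*
 * Worked example (editor's addition; the concrete values are illustrative
 * only, the real PAGE_OFFSET/PHYS_OFFSET come from <asm/mem-layout.h> and
 * the platform configuration): if PAGE_OFFSET were 0xc0000000 and
 * PHYS_OFFSET were 0, then __pa(0xc0010000) == 0x00010000 and
 * __va(0x00010000) == (void *)0xc0010000.
 */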

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable.  */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
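
/*
 * Usage sketch (editor's addition, hypothetical snippet, not upstream code):
 * typical use of the conversions above when code holds a kernel virtual
 * address and wants its page frame descriptor:
 *
 *	struct page *pg = NULL;
 *
 *	if (virt_addr_valid(kaddr))
 *		pg = virt_to_page(kaddr);
 */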

/*  The 32-byte cache line size should not be hard-coded here; this may
    move to another file.  */
static inline void clear_page(void *page)
{
	/*  This can only be done on pages with L1 WB cache */
	/*
	 * Hardware loop: loop0 runs the body PAGE_SIZE/32 times; each
	 * iteration zero-allocates one 32-byte cache line with dczeroa
	 * and advances the pointer by 32 bytes.  lc0/sa0 are the loop
	 * count and start-address registers clobbered by loop0.
	 */
	asm volatile(
		"	loop0(1f,%1);\n"
		"1:	{ dczeroa(%0);\n"
		"	  %0 = add(%0,#32); }:endloop0\n"
		: "+r" (page)
		: "r" (PAGE_SIZE/32)
		: "lc0", "sa0", "memory"
	);
}

#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

/*
 * Under the assumption that the kernel always "sees" the user map...
 */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page: pointer to page entry in mem_map
 */
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

#define page_to_virt(page)	__va(page_to_phys(page))
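
/*
 * Round-trip sketch (editor's addition, not upstream code): for a valid,
 * page-aligned kernel virtual address these conversions are inverses:
 *
 *	page_to_virt(virt_to_page(kaddr)) == kaddr
 *	virt_to_pfn(page_to_virt(page))  == page_to_pfn(page)
 */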

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of get_order(). */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif