/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/*
 * Selecting PAGE_SHIFT with one #ifdef block per Kconfig page-size option
 * is probably not the most graceful way to handle this.
 */

#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif

/*
 *  These should arguably be defined in hugetlb.h, but they are not.
 *  "Huge" for us is 4MB or 16MB, both of which are representable in
 *  L1 PTEs.  Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
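
/*
 * Worked example (illustrative only): with the 4MB setting above,
 * HPAGE_SHIFT = 22 gives HPAGE_SIZE = 0x400000 (4MB) and
 * HPAGE_MASK = 0xffc00000; with 4KB base pages (PAGE_SHIFT = 12),
 * HUGETLB_PAGE_ORDER = 22 - 12 = 10.
 */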

#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
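
/*
 * Worked example (illustrative only): with CONFIG_PAGE_SIZE_4KB,
 * PAGE_SHIFT = 12, so PAGE_SIZE = 0x1000 and PAGE_MASK = 0xfffff000;
 * (addr & PAGE_MASK) rounds an address down to its page boundary.
 */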

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * linux/pfn.h provides PFN_DOWN, which mm.h needs; this seems like the
 * right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)     ((x).pte)
#define pgd_val(x)     ((x).pgd)
#define pgprot_val(x)  ((x).pgprot)
#define __pte(x)       ((pte_t) { (x) })
#define __pgd(x)       ((pgd_t) { (x) })
#define __pgprot(x)    ((pgprot_t) { (x) })
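
/*
 * Illustrative usage of the wrappers above (not taken from this file):
 *
 *	pte_t pte = __pte(raw_bits);
 *	unsigned long raw = pte_val(pte);	(raw == raw_bits)
 *
 * The structs keep pte_t, pgd_t and pgprot_t as distinct types so the
 * compiler can catch accidental mixing of raw values.
 */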

/*
 * We need __pa and __va routines for kernel space.
 * The MIPS port says they're only used during mem_init.
 * Also, check if we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
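
/*
 * Worked example with illustrative values only (the real PAGE_OFFSET and
 * PHYS_OFFSET come from <asm/mem-layout.h>): if PAGE_OFFSET = 0xc0000000
 * and PHYS_OFFSET = 0, then __pa(0xc0012000) = 0x00012000 and
 * __va(0x00012000) = (void *)0xc0012000.
 */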

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable.  */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
				VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/*  The cache line size should not be hard-coded here; this may move to another file.  */
static inline void clear_page(void *page)
{
	/*  This can only be done on pages with L1 WB cache */
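	/*
	 * Sketch of the loop below (assuming a 32-byte L1 cache line, which
	 * is what the PAGE_SIZE/32 iteration count implies): loop0 sets up
	 * hardware loop 0 ending at label 1 with %1 iterations; each
	 * iteration dczeroa() allocates and zeroes the cache line at %0 and
	 * advances %0 by 32 bytes.  lc0/sa0 are the loop-count and
	 * start-address registers clobbered by the hardware loop.
	 */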
	asm volatile(
		"	loop0(1f,%1);\n"
		"1:	{ dczeroa(%0);\n"
		"	  %0 = add(%0,#32); }:endloop0\n"
		: "+r" (page)
		: "r" (PAGE_SIZE/32)
		: "lc0", "sa0", "memory"
	);
}

#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

/*
 * Under the assumption that the kernel always "sees" the user mapping,
 * the user virtual address and page arguments can be ignored here.
 */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page: pointer to page entry in mem_map
 */
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)

#define page_to_virt(page)	__va(page_to_phys(page))
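
/*
 * Illustrative relationships (assuming PAGE_OFFSET and PHYS_OFFSET are
 * page-aligned): virt_to_pfn(kaddr) is the page frame number of the
 * physical page backing kaddr, and pfn_to_virt(virt_to_pfn(kaddr))
 * yields kaddr rounded down to a page boundary, i.e. (kaddr & PAGE_MASK).
 */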

/*
 * For the port to the Hexagon Virtual Machine, we could conceivably check
 * for attempts to reference reserved HVM space, but in any case the VM
 * itself is protected.
 */
#define kern_addr_valid(addr)   (1)

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif /* _ASM_PAGE_H */