xref: /linux/arch/nios2/mm/init.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/*
 * Copyright (C) 2013 Altera Corporation
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd
 *
 * based on arch/m68k/mm/init.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/execmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/cpuinfo.h>
#include <asm/processor.h>

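/*
 * Cached pointer to the page directory of the currently running mm,
 * updated whenever the active mm changes, so the low-level TLB miss
 * handler can reach the current page tables without having to
 * dereference current->mm.
 */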
pgd_t *pgd_current;

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/nios2/kernel/head.S. It takes no
 * parameters: the single zone is sized from max_mapnr and handed to
 * the core allocator via free_area_init().
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	pagetable_init();
	pgd_current = swapper_pg_dir;

	max_zone_pfn[ZONE_NORMAL] = max_mapnr;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(max_zone_pfn);

	flush_dcache_range((unsigned long)empty_zero_page,
			(unsigned long)empty_zero_page + PAGE_SIZE);
}

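/*
 * Finish core memory bookkeeping: record the top of the linear mapping
 * in high_memory and release all memblock-managed pages to the buddy
 * allocator.
 */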
void __init mem_init(void)
{
	unsigned long end_mem   = memory_end; /* this must not include
						kernel stack at top */

	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);

	end_mem &= PAGE_MASK;
	high_memory = __va(end_mem);

	/* this will put all memory onto the freelists */
	memblock_free_all();
}

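/* Drop any TLB entries left behind by the boot loader or early boot code. */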
void __init mmu_init(void)
{
	flush_tlb_all();
}

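/*
 * swapper_pg_dir is the kernel's reference page directory and
 * invalid_pte_table a PTE table with every entry invalid, giving empty
 * page-directory slots something safe to point at; both must be page
 * aligned. kuser_page[] holds the single page of kuser helpers that is
 * mapped into every process at KUSER_BASE.
 */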
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
static struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.pages = kuser_page,
};

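/*
 * Copy the kuser helpers (kernel-provided user-space entry points such
 * as the signal return trampoline) out of the kernel image into a
 * fresh page at boot; arch_setup_additional_pages() later maps that
 * page into each process at the fixed address KUSER_BASE.
 */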
static int alloc_kuser_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* Copy kuser helpers */
	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);

	flush_icache_range(vpage, vpage + KUSER_SIZE);
	kuser_page[0] = virt_to_page(vpage);

	return 0;
}
arch_initcall(alloc_kuser_page);

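/*
 * Called from the ELF loader at exec time: map the kuser page at
 * KUSER_BASE, readable and executable, with VM_MAYWRITE deliberately
 * left out so user space cannot mprotect() the page writable.
 */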
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	mmap_write_lock(mm);

	/* Map kuser helpers to user space address */
	vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, &vdso_mapping);

	mmap_write_unlock(mm);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}

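/* Name the VMA at KUSER_BASE "[kuser]" for users of arch_vma_name(). */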
const char *arch_vma_name(struct vm_area_struct *vma)
{
	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}

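/*
 * Translation of the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits into
 * hardware page protections; MKP(x, w, r) (asm/pgtable.h) builds a
 * pgprot_t from execute/write/read bits. Every non-VM_SHARED entry
 * leaves the write bit clear: private writable mappings start out
 * read-only so the first store faults and is resolved by
 * copy-on-write.
 */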
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= MKP(0, 0, 0),
	[VM_READ]					= MKP(0, 0, 1),
	[VM_WRITE]					= MKP(0, 0, 0),
	[VM_WRITE | VM_READ]				= MKP(0, 0, 1),
	[VM_EXEC]					= MKP(1, 0, 0),
	[VM_EXEC | VM_READ]				= MKP(1, 0, 1),
	[VM_EXEC | VM_WRITE]				= MKP(1, 0, 0),
	[VM_EXEC | VM_WRITE | VM_READ]			= MKP(1, 0, 1),
	[VM_SHARED]					= MKP(0, 0, 0),
	[VM_SHARED | VM_READ]				= MKP(0, 0, 1),
	[VM_SHARED | VM_WRITE]				= MKP(0, 1, 0),
	[VM_SHARED | VM_WRITE | VM_READ]		= MKP(0, 1, 1),
	[VM_SHARED | VM_EXEC]				= MKP(1, 0, 0),
	[VM_SHARED | VM_EXEC | VM_READ]			= MKP(1, 0, 1),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= MKP(1, 1, 0),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= MKP(1, 1, 1)
};
DECLARE_VM_GET_PAGE_PROT

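/*
 * Tell the core execmem allocator where dynamically loaded executable
 * code (e.g. kernel modules) may live: inside the
 * MODULES_VADDR..MODULES_END window, mapped PAGE_KERNEL_EXEC.
 */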
#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= MODULES_VADDR,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL_EXEC,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */
171