// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/bug.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
						__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	/* Hand all free lowmem pages over to the buddy allocator. */
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		/* FIXME not sure about */
		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
			free_highmem_page(page);
	}
#endif
	mem_init_print_info(NULL);
}

extern char __init_begin[], __init_end[];

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long) &__init_begin;

	/* Return every page of the .init section to the page allocator. */
	while (addr < (unsigned long) &__init_end) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages_inc();
		addr += PAGE_SIZE;
	}

	pr_info("Freeing unused kernel memory: %dk freed\n",
		((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10);
}

/* Point every pgd entry at the shared invalid_pte_table. */
void pgd_init(unsigned long *p)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = __pa(invalid_pte_table);
}

void __init pre_mmu_init(void)
{
	/*
	 * Setup page-table and enable TLB-hardrefill
	 */
	flush_tlb_all();
	pgd_init((unsigned long *)swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

	/* Setup page mask to 4k */
	write_mmu_pagemask(0);
}

/*
 * Populate kernel page tables for [start, end): allocate a zeroed pte page
 * for every empty pmd in the range so later users (e.g. the fixmap) need no
 * further allocations.
 */
void __init fixrange_init(unsigned long start, unsigned long end,
			pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd(__pa(pte)));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}

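/*
 * Pre-allocate the page-table pages backing the fixmap region: take the
 * PMD-aligned address of the last fixmap slot and populate one PMD worth
 * of kernel mappings in swapper_pg_dir.
 */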
void __init fixaddr_init(void)
{
	unsigned long vaddr;

	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}