// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * 32-bit ARM glue for EFI: creates kernel mappings for EFI runtime
 * memory regions, applies the RO/XP permission attributes requested
 * by the firmware, and reports the CPU entry state recorded by the
 * EFI stub.
 */

#include <linux/efi.h>
#include <linux/memblock.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

/*
 * apply_to_page_range() callback: tighten the permissions of a single
 * PTE according to the EFI memory descriptor passed via @data.
 *
 *   EFI_MEMORY_RO -> L_PTE_RDONLY (read-only)
 *   EFI_MEMORY_XP -> L_PTE_XN     (execute-never)
 *
 * The PTE is written back with PTE_EXT_NG, i.e. as a non-global
 * mapping (presumably because EFI runtime mappings live in their own
 * mm rather than in every address space — NOTE(review): confirm
 * against the caller of efi_set_mapping_permissions()).
 *
 * Always returns 0 so the page-range walk continues.
 */
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
	set_pte_ext(ptep, pte, PTE_EXT_NG);
	return 0;
}

/*
 * Apply the RO/XP attributes from @md to its (already created)
 * virtual mapping in @mm, one page at a time.
 *
 * Returns 0 on success, or the error from apply_to_page_range().
 * Note that if the region may contain naturally aligned section
 * mappings, the walk is skipped entirely and 0 is returned — the
 * stricter permissions are silently not applied in that case.
 */
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	unsigned long base, size;

	base = md->virt_addr;
	size = md->num_pages << EFI_PAGE_SHIFT;

	/*
	 * We can only use apply_to_page_range() if we can guarantee that the
	 * entire region was mapped using pages. This should be the case if the
	 * region does not cover any naturally aligned SECTION_SIZE sized
	 * blocks.
	 */
	if (round_down(base + size, SECTION_SIZE) <
	    round_up(base, SECTION_SIZE) + SECTION_SIZE)
		return apply_to_page_range(mm, base, size, set_permissions, md);

	return 0;
}

/*
 * Map the EFI memory region described by @md into @mm, selecting the
 * memory type from the region's cacheability attributes (write-back
 * preferred, then write-through, then write-combining, else strongly
 * ordered device memory), and then apply any RO/XP restrictions on
 * top of the base mapping.
 *
 * Returns 0 on success or the error from
 * efi_set_mapping_permissions().
 */
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc desc = {
		.virtual = md->virt_addr,
		.pfn = __phys_to_pfn(md->phys_addr),
		.length = md->num_pages * EFI_PAGE_SIZE,
	};

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
	if (md->attribute & EFI_MEMORY_WB)
		desc.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		desc.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		desc.type = MT_DEVICE_WC;
	else
		desc.type = MT_DEVICE;

	create_mapping_late(mm, &desc, true);

	/*
	 * If stricter permissions were specified, apply them now.
	 */
	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
		return efi_set_mapping_permissions(mm, md);
	return 0;
}

/*
 * Physical address of the firmware-provided CPU entry state table;
 * remains EFI_INVALID_TABLE_ADDR if the firmware did not publish one.
 */
static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;

/*
 * Architecture-specific EFI configuration tables to resolve during
 * EFI init; a matching LINUX_EFI_ARM_CPU_STATE_TABLE_GUID entry has
 * its address stored into cpu_state_table.
 */
const efi_config_table_type_t efi_arch_tables[] __initconst = {
	{LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
	{}
};

/*
 * Sanity-check the CPU state the EFI stub recorded at stub entry and
 * after ExitBootServices(), if a state table was published.
 *
 * Bit 0 of SCTLR (the MMU enable bit) must have been set at both
 * points; if not, warn about the firmware bug.  The raw CPSR/SCTLR
 * values are dumped when a problem was found or when EFI debugging
 * (EFI_DBG) is enabled.  The table is mapped read-only via
 * early_memremap_ro() and unmapped again before returning.
 */
static void __init load_cpu_state_table(void)
{
	if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
		struct efi_arm_entry_state *state;
		bool dump_state = true;

		state = early_memremap_ro(cpu_state_table,
					  sizeof(struct efi_arm_entry_state));
		if (state == NULL) {
			pr_warn("Unable to map CPU entry state table.\n");
			return;
		}

		if ((state->sctlr_before_ebs & 1) == 0)
			pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
		else if ((state->sctlr_after_ebs & 1) == 0)
			pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
		else
			dump_state = false;

		if (dump_state || efi_enabled(EFI_DBG)) {
			pr_info("CPSR at EFI stub entry : 0x%08x\n",
				state->cpsr_before_ebs);
			pr_info("SCTLR at EFI stub entry : 0x%08x\n",
				state->sctlr_before_ebs);
			pr_info("CPSR after ExitBootServices() : 0x%08x\n",
				state->cpsr_after_ebs);
			pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
				state->sctlr_after_ebs);
		}
		early_memunmap(state, sizeof(struct efi_arm_entry_state));
	}
}

/*
 * Early EFI setup entry point for 32-bit ARM: run the generic
 * efi_init(), fix up screen_info for EFI framebuffers, tear down the
 * early memory-map mappings, and report the recorded CPU entry state.
 */
void __init arm_efi_init(void)
{
	efi_init();

	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
		/* dummycon on ARM needs non-zero values for columns/lines */
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_lines = 25;
	}

	/* ARM does not permit early mappings to persist across paging_init() */
	efi_memmap_unmap();

	load_cpu_state_table();
}