/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define __ELF_WORD_SIZE 64
#include <sys/param.h>
#include <sys/linker.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/elf.h>
#include <machine/pmap_pae.h>
#include <machine/segments.h>

#include <efi.h>
#include <efilib.h>

#include "bootstrap.h"

#include "loader_efi.h"

static int	elf64_exec(struct preloaded_file *amp);
static int	elf64_obj_exec(struct preloaded_file *amp);

static struct file_format amd64_elf = {
	.l_load = elf64_loadfile,
	.l_exec = elf64_exec
};

static struct file_format amd64_elf_obj = {
	.l_load = elf64_obj_loadfile,
	.l_exec = elf64_obj_exec
};

struct file_format *file_formats[] = {
	&amd64_elf,
	&amd64_elf_obj,
	NULL
};

/*
 * i386's pmap_pae.h doesn't provide this, so
 * just typedef our own.
 */
typedef pdpt_entry_t pml4_entry_t;

static void (*trampoline)(uint32_t stack, void *copy_finish, uint32_t kernend,
    uint32_t modulep, uint64_t *pagetable, void *gdtr, uint64_t entry);

extern void *amd64_tramp;
extern uint32_t amd64_tramp_size;

/*
 * There is an ELF kernel and one or more ELF modules loaded.
 * We wish to start executing the kernel image, so make such
 * preparations as are required, and do so.
 */
static int
elf64_exec(struct preloaded_file *fp)
{
	/*
	 * segments.h gives us a 32-bit gdtr, but
	 * we want a 64-bit one, so define our own.
	 */
	struct {
		uint16_t rd_limit;
		uint64_t rd_base;
	} __packed *gdtr;
	EFI_PHYSICAL_ADDRESS ptr;
	EFI_ALLOCATE_TYPE type;
	EFI_STATUS err;
	struct file_metadata *md;
	Elf_Ehdr *ehdr;
	pml4_entry_t *PT4;
	pdpt_entry_t *PT3;
	pd_entry_t *PT2;
	struct user_segment_descriptor *gdt;
	vm_offset_t modulep, kernend, trampstack;
	int i;

	switch (copy_staging) {
	case COPY_STAGING_ENABLE:
		type = AllocateMaxAddress;
		break;
	case COPY_STAGING_DISABLE:
		type = AllocateAnyPages;
		break;
	case COPY_STAGING_AUTO:
		type = fp->f_kernphys_relocatable ?
		    AllocateAnyPages : AllocateMaxAddress;
		break;
	}

	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
		return (EFTYPE);
	ehdr = (Elf_Ehdr *)&(md->md_data);
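
	/*
	 * When copy staging is in effect (AllocateMaxAddress), ptr is
	 * preset to G(1) before each allocation below, so the trampoline,
	 * GDT, stack and page tables all land in the first 1GB of physical
	 * memory, which is all the staging page table maps.
	 */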
	ptr = G(1);
	err = BS->AllocatePages(type, EfiLoaderCode,
	    EFI_SIZE_TO_PAGES(amd64_tramp_size), &ptr);
	if (EFI_ERROR(err)) {
		printf("Unable to allocate trampoline\n");
		return (ENOMEM);
	}

	trampoline = (void *)(uintptr_t)ptr;
	bcopy(&amd64_tramp, trampoline, amd64_tramp_size);

	/*
	 * Allocate enough space for the GDTR + two GDT segments +
	 * our temporary stack (28 bytes).
	 */
#define DATASZ (sizeof(*gdtr) + \
    sizeof(struct user_segment_descriptor) * 2 + 28)

	ptr = G(1);
	err = BS->AllocatePages(type, EfiLoaderData,
	    EFI_SIZE_TO_PAGES(DATASZ), &ptr);
	if (EFI_ERROR(err)) {
		printf("Unable to allocate GDT and stack\n");
		BS->FreePages((uintptr_t)trampoline, 1);
		return (ENOMEM);
	}

	trampstack = ptr + DATASZ;

#undef DATASZ

	gdt = (void *)(uintptr_t)ptr;
	gdt[0] = (struct user_segment_descriptor) { 0 };
	gdt[1] = (struct user_segment_descriptor) {
		.sd_p = 1, .sd_long = 1, .sd_type = SDT_MEMERC
	};

	gdtr = (void *)(uintptr_t)(ptr +
	    sizeof(struct user_segment_descriptor) * 2);
	gdtr->rd_limit = sizeof(struct user_segment_descriptor) * 2 - 1;
	gdtr->rd_base = (uintptr_t)gdt;

	if (type == AllocateMaxAddress) {
		/* Copy staging enabled */

		ptr = G(1);
		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData,
		    EFI_SIZE_TO_PAGES(512 * 3 * sizeof(uint64_t)), &ptr);
		if (EFI_ERROR(err)) {
			printf("Unable to allocate trampoline page table\n");
			BS->FreePages((uintptr_t)trampoline, 1);
			BS->FreePages((uintptr_t)gdt, 1);
			return (ENOMEM);
		}
		PT4 = (pml4_entry_t *)(uintptr_t)ptr;

		PT3 = &PT4[512];
		PT2 = &PT3[512];

		/*
		 * This is kinda brutal, but every single 1GB VM
		 * memory segment points to the same first 1GB of
		 * physical memory. But it is more than adequate.
		 */
		for (i = 0; i < 512; i++) {
			/*
			 * Each slot of the L4 pages points to the
			 * same L3 page.
			 */
			PT4[i] = (uintptr_t)PT3 | PG_V | PG_RW;

			/*
			 * Each slot of the L3 pages points to the
			 * same L2 page.
			 */
			PT3[i] = (uintptr_t)PT2 | PG_V | PG_RW;

			/*
			 * The L2 page slots are mapped with 2MB pages for 1GB.
			 */
			PT2[i] = (i * M(2)) | PG_V | PG_RW | PG_PS;
		}
	} else {
		pdpt_entry_t *PT3_l, *PT3_u;
		pd_entry_t *PT2_l0, *PT2_l1, *PT2_l2, *PT2_l3, *PT2_u0, *PT2_u1;
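
		/*
		 * Non-copying case: the kernel runs from wherever it was
		 * staged, so build a page table with a 1:1 mapping of the
		 * low 4GB plus a mapping of the kernel's upper-2GB virtual
		 * addresses onto the staging area. The single allocation
		 * below holds one L4 page, two L3 pages and six L2 pages
		 * of 512 entries each, hence 512 * 9 * sizeof(uint64_t).
		 */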
		err = BS->AllocatePages(AllocateAnyPages, EfiLoaderData,
		    EFI_SIZE_TO_PAGES(512 * 9 * sizeof(uint64_t)), &ptr);
		if (EFI_ERROR(err)) {
			printf("Unable to allocate trampoline page table\n");
			BS->FreePages((uintptr_t)trampoline, 1);
			BS->FreePages((uintptr_t)gdt, 1);
			return (ENOMEM);
		}
		PT4 = (pml4_entry_t *)(uintptr_t)ptr;

		PT3_l = &PT4[512];
		PT3_u = &PT3_l[512];
		PT2_l0 = &PT3_u[512];
		PT2_l1 = &PT2_l0[512];
		PT2_l2 = &PT2_l1[512];
		PT2_l3 = &PT2_l2[512];
		PT2_u0 = &PT2_l3[512];
		PT2_u1 = &PT2_u0[512];

		/* 1:1 mapping of lower 4G */
		PT4[0] = (uintptr_t)PT3_l | PG_V | PG_RW;
		PT3_l[0] = (uintptr_t)PT2_l0 | PG_V | PG_RW;
		PT3_l[1] = (uintptr_t)PT2_l1 | PG_V | PG_RW;
		PT3_l[2] = (uintptr_t)PT2_l2 | PG_V | PG_RW;
		PT3_l[3] = (uintptr_t)PT2_l3 | PG_V | PG_RW;
		for (i = 0; i < 2048; i++) {
			PT2_l0[i] = ((pd_entry_t)i * M(2)) | PG_V | PG_RW | PG_PS;
		}

		/* mapping of kernel 2G below top */
		PT4[511] = (uintptr_t)PT3_u | PG_V | PG_RW;
		PT3_u[511] = (uintptr_t)PT2_u1 | PG_V | PG_RW;
		PT3_u[510] = (uintptr_t)PT2_u0 | PG_V | PG_RW;
		/* compat mapping of phys @0 */
		PT2_u0[0] = PG_PS | PG_V | PG_RW;
		/* this maps past staging area */
		for (i = 1; i < 1024; i++) {
			PT2_u0[i] = (staging + (i - 1) * M(2))
			    | PG_V | PG_RW | PG_PS;
		}
	}

	printf(
	    "staging %#llx (%scopying) tramp %p PT4 %p GDT %p\n"
	    "Start @ %#llx ...\n", staging,
	    type == AllocateMaxAddress ? "" : "not ", trampoline, PT4, gdt,
	    ehdr->e_entry
	);

	efi_time_fini();
	err = bi_load(fp->f_args, &modulep, &kernend, true);
	if (err != 0) {
		efi_time_init();
		return (err);
	}

	dev_cleanup();

	trampoline(trampstack, type == AllocateMaxAddress ? efi_copy_finish :
	    efi_copy_finish_nop, kernend, modulep, PT4, gdtr, ehdr->e_entry);

	panic("exec returned");
}

static int
elf64_obj_exec(struct preloaded_file *fp)
{
	return (EFTYPE);
}