/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define __ELF_WORD_SIZE 64
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/linker.h>
#include <string.h>
#include <machine/elf.h>
#include <stand.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <efi.h>
#include <efilib.h>

#include "bootstrap.h"

#include "loader_efi.h"

static int	elf64_exec(struct preloaded_file *amp);
static int	elf64_obj_exec(struct preloaded_file *amp);

static struct file_format amd64_elf = {
	.l_load = elf64_loadfile,
	.l_exec = elf64_exec,
};
static struct file_format amd64_elf_obj = {
	.l_load = elf64_obj_loadfile,
	.l_exec = elf64_obj_exec,
};

extern struct file_format multiboot2;
extern struct file_format multiboot2_obj;

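/*
 * Formats are probed in array order when a kernel is loaded, so the
 * multiboot2 handlers get first crack before the native amd64 ELF
 * handlers; the NULL entry terminates the list.
 */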
struct file_format *file_formats[] = {
	&multiboot2,
	&multiboot2_obj,
	&amd64_elf,
	&amd64_elf_obj,
	NULL
};

static pml4_entry_t *PT4;
static pdp_entry_t *PT3;
static pdp_entry_t *PT3_l, *PT3_u;
static pd_entry_t *PT2;
static pd_entry_t *PT2_l0, *PT2_l1, *PT2_l2, *PT2_l3, *PT2_u0, *PT2_u1;

static void (*trampoline)(uint64_t stack, void *copy_finish, uint64_t kernend,
    uint64_t modulep, pml4_entry_t *pagetable, uint64_t entry);

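/*
 * amd64_tramp is the assembly stub (amd64_tramp.S) that elf64_exec()
 * copies into a freshly allocated page below; running from that copy, it
 * can finish the staging copy, install the new page tables, and jump to
 * the kernel entry point without pulling the rug out from under itself.
 */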
extern uintptr_t amd64_tramp;
extern uint32_t amd64_tramp_size;

/*
 * There is an ELF kernel and one or more ELF modules loaded.
 * We wish to start executing the kernel image, so make such
 * preparations as are required, and do so.
 */
static int
elf64_exec(struct preloaded_file *fp)
{
	struct file_metadata	*md;
	Elf_Ehdr		*ehdr;
	vm_offset_t		modulep, kernend, trampcode, trampstack;
	int			err, i;
	bool			copy_auto;

	copy_auto = copy_staging == COPY_STAGING_AUTO;
	if (copy_auto)
		copy_staging = fp->f_kernphys_relocatable ?
		    COPY_STAGING_DISABLE : COPY_STAGING_ENABLE;

	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
		return (EFTYPE);
	ehdr = (Elf_Ehdr *)&(md->md_data);

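	/*
	 * Stage the trampoline in a page below 1G (copying) or below 4G
	 * (not copying) so that it is covered by the mappings provided
	 * by the page tables built below.
	 */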
	trampcode = copy_staging == COPY_STAGING_ENABLE ?
	    (vm_offset_t)G(1) : (vm_offset_t)G(4);
	err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 1,
	    (EFI_PHYSICAL_ADDRESS *)&trampcode);
	if (EFI_ERROR(err)) {
		printf("Unable to allocate trampoline\n");
		if (copy_auto)
			copy_staging = COPY_STAGING_AUTO;
		return (ENOMEM);
	}
	bzero((void *)trampcode, EFI_PAGE_SIZE);
	trampstack = trampcode + EFI_PAGE_SIZE - 8;
	bcopy((void *)&amd64_tramp, (void *)trampcode, amd64_tramp_size);
	trampoline = (void *)trampcode;

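	/*
	 * Build the page tables the trampoline will hand to the kernel:
	 * with copying enabled, a minimal 3-page table that aliases the
	 * first 1G of physical memory everywhere; otherwise, a 9-page
	 * table with the lower 4G identity-mapped and the staging area
	 * mapped in the top 2G where the kernel expects to run.
	 */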
	if (copy_staging == COPY_STAGING_ENABLE) {
		PT4 = (pml4_entry_t *)G(1);
		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 3,
		    (EFI_PHYSICAL_ADDRESS *)&PT4);
		if (EFI_ERROR(err)) {
			printf("Unable to allocate trampoline page table\n");
			BS->FreePages(trampcode, 1);
			if (copy_auto)
				copy_staging = COPY_STAGING_AUTO;
			return (ENOMEM);
		}
		bzero(PT4, 3 * EFI_PAGE_SIZE);
		PT3 = &PT4[512];
		PT2 = &PT3[512];

		/*
		 * This is kinda brutal, but every single 1GB VM
		 * memory segment points to the same first 1GB of
		 * physical memory.  But it is more than adequate.
		 */
		for (i = 0; i < NPTEPG; i++) {
			/*
			 * Each slot of the L4 pages points to the
			 * same L3 page.
			 */
			PT4[i] = (pml4_entry_t)PT3;
			PT4[i] |= PG_V | PG_RW;

			/*
			 * Each slot of the L3 pages points to the
			 * same L2 page.
			 */
			PT3[i] = (pdp_entry_t)PT2;
			PT3[i] |= PG_V | PG_RW;

			/*
			 * The L2 page slots are mapped with 2MB pages for 1GB.
			 */
			PT2[i] = (pd_entry_t)i * M(2);
			PT2[i] |= PG_V | PG_RW | PG_PS;
		}
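
		/*
		 * Net effect: the PML4 and PDP indexes are don't-cares, so
		 * any virtual address resolves to (va & (1G - 1)); e.g.
		 * KERNBASE + 2M = 0xffffffff80200000 translates to physical
		 * 0x200000, where the staged kernel image ends up after the
		 * copy.
		 */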
	} else {
		PT4 = (pml4_entry_t *)G(4);
		err = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, 9,
		    (EFI_PHYSICAL_ADDRESS *)&PT4);
		if (EFI_ERROR(err)) {
			printf("Unable to allocate trampoline page table\n");
			BS->FreePages(trampcode, 1);
			if (copy_auto)
				copy_staging = COPY_STAGING_AUTO;
			return (ENOMEM);
		}

		bzero(PT4, 9 * EFI_PAGE_SIZE);

		PT3_l = &PT4[NPML4EPG * 1];
		PT3_u = &PT4[NPML4EPG * 2];
		PT2_l0 = &PT4[NPML4EPG * 3];
		PT2_l1 = &PT4[NPML4EPG * 4];
		PT2_l2 = &PT4[NPML4EPG * 5];
		PT2_l3 = &PT4[NPML4EPG * 6];
		PT2_u0 = &PT4[NPML4EPG * 7];
		PT2_u1 = &PT4[NPML4EPG * 8];

		/* 1:1 mapping of lower 4G */
		PT4[0] = (pml4_entry_t)PT3_l | PG_V | PG_RW;
		PT3_l[0] = (pdp_entry_t)PT2_l0 | PG_V | PG_RW;
		PT3_l[1] = (pdp_entry_t)PT2_l1 | PG_V | PG_RW;
		PT3_l[2] = (pdp_entry_t)PT2_l2 | PG_V | PG_RW;
		PT3_l[3] = (pdp_entry_t)PT2_l3 | PG_V | PG_RW;
		/* The four PT2_l pages are contiguous, so index via PT2_l0. */
		for (i = 0; i < 4 * NPDEPG; i++) {
			PT2_l0[i] = ((pd_entry_t)i << PDRSHIFT) | PG_V |
			    PG_RW | PG_PS;
		}

		/* mapping of kernel 2G below top */
		PT4[NPML4EPG - 1] = (pml4_entry_t)PT3_u | PG_V | PG_RW;
		PT3_u[NPDPEPG - 2] = (pdp_entry_t)PT2_u0 | PG_V | PG_RW;
		PT3_u[NPDPEPG - 1] = (pdp_entry_t)PT2_u1 | PG_V | PG_RW;
		/* compat mapping of phys @0 */
		PT2_u0[0] = PG_PS | PG_V | PG_RW;
		/* map the staging area, starting 2M above KERNBASE */
		for (i = 1; i < 2 * NPDEPG; i++) {
			PT2_u0[i] = ((pd_entry_t)staging +
			    ((pd_entry_t)i - 1) * NBPDR) |
			    PG_V | PG_RW | PG_PS;
		}
	}
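	/*
	 * Net effect here: the kernel runs in place out of the staging
	 * area, with KERNBASE + 2M + off resolving to physical
	 * staging + off, while the lower-4G identity map keeps the
	 * trampoline page valid across the switch.
	 */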

	printf("staging %#lx (%scopying) tramp %p PT4 %p\n",
	    staging, copy_staging == COPY_STAGING_ENABLE ? "" : "not ",
	    trampoline, PT4);
	printf("Start @ 0x%lx ...\n", ehdr->e_entry);

	efi_time_fini();
	err = bi_load(fp->f_args, &modulep, &kernend, true);
	if (err != 0) {
		efi_time_init();
		if (copy_auto)
			copy_staging = COPY_STAGING_AUTO;
		return (err);
	}

	dev_cleanup();

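	/*
	 * Point of no return: bi_load() exited EFI boot services above,
	 * so the firmware can no longer help us.  The trampoline finishes
	 * (or skips) the staging copy, installs PT4, and jumps to the
	 * kernel's entry point.
	 */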
	trampoline(trampstack, copy_staging == COPY_STAGING_ENABLE ?
	    efi_copy_finish : efi_copy_finish_nop, kernend, modulep,
	    PT4, ehdr->e_entry);

	panic("exec returned");
}

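/*
 * Relocatable-object (.o) kernels can be loaded as modules but not
 * started directly, so exec of one is rejected outright.
 */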
static int
elf64_obj_exec(struct preloaded_file *fp)
{

	return (EFTYPE);
}