xref: /linux/arch/powerpc/boot/main.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <stdarg.h>
#include <stddef.h>
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
#include "prom.h"
#include "zlib.h"

extern void flush_cache(void *, unsigned long);


/* Value picked to match that used by yaboot */
#define PROG_START	0x01400000	/* only used on 64-bit systems */
#define RAM_END		(512<<20)	/* Fixme: use OF */
#define	ONE_MB		0x100000

extern char _start[];
extern char __bss_start[];
extern char _end[];
extern char _vmlinux_start[];
extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];

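/*
 * One image in memory: load address, bytes present in the file (size)
 * and total bytes to reserve once loaded (memsize, which also covers
 * bss that is not stored in the file).
 */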
struct addr_range {
	unsigned long addr;
	unsigned long size;
	unsigned long memsize;
};
static struct addr_range vmlinux;
static struct addr_range vmlinuz;
static struct addr_range initrd;

static unsigned long elfoffset;

static char scratch[46912];	/* scratch space for gunzip, from zlib_inflate_workspacesize() */
static char elfheader[256];


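/*
 * Prototype of the kernel entry point: called as
 * kernel_entry(a1, a2, prom, NULL) at the end of start() below.
 */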
typedef void (*kernel_entry_t)(unsigned long,
                               unsigned long,
                               void *,
                               void *);


#undef DEBUG

static unsigned long claim_base;

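/* gzip header flag bits (see RFC 1952) */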
#define HEAD_CRC	2
#define EXTRA_FIELD	4
#define ORIG_NAME	8
#define COMMENT		0x10
#define RESERVED	0xe0

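/*
 * Decompress gzip-compressed data from src (of length *lenp) into dst.
 * The gzip header is skipped by hand; on return *lenp holds the number
 * of bytes written to dst.  Any error is fatal (exit()).
 */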
static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
{
	z_stream s;
	int r, i, flags;

	/* skip header */
	i = 10;
	flags = src[3];
	if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
		printf("bad gzipped data\n\r");
		exit();
	}
	if ((flags & EXTRA_FIELD) != 0)
		i = 12 + src[10] + (src[11] << 8);
	if ((flags & ORIG_NAME) != 0)
		while (src[i++] != 0)
			;
	if ((flags & COMMENT) != 0)
		while (src[i++] != 0)
			;
	if ((flags & HEAD_CRC) != 0)
		i += 2;
	if (i >= *lenp) {
		printf("gunzip: ran out of data in header\n\r");
		exit();
	}

	if (zlib_inflate_workspacesize() > sizeof(scratch)) {
		printf("gunzip needs more mem\n");
		exit();
	}
	memset(&s, 0, sizeof(s));
	s.workspace = scratch;
	r = zlib_inflateInit2(&s, -MAX_WBITS);
	if (r != Z_OK) {
		printf("inflateInit2 returned %d\n\r", r);
		exit();
	}
	s.next_in = src + i;
	s.avail_in = *lenp - i;
	s.next_out = dst;
	s.avail_out = dstlen;
	r = zlib_inflate(&s, Z_FULL_FLUSH);
	if (r != Z_OK && r != Z_STREAM_END) {
		printf("inflate returned %d msg: %s\n\r", r, s.msg);
		exit();
	}
	*lenp = s.next_out - (unsigned char *) dst;
	zlib_inflateEnd(&s);
}

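/*
 * Ask Open Firmware to claim `size' bytes, scanning upward from
 * claim_base in 1 MB steps until a claim succeeds.  Returns the claimed
 * address, or 0 if nothing could be claimed below RAM_END.  On success
 * claim_base is advanced past the new allocation.
 */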
static unsigned long try_claim(unsigned long size)
{
	unsigned long addr = 0;

	for(; claim_base < RAM_END; claim_base += ONE_MB) {
#ifdef DEBUG
		printf("    trying: 0x%08lx\n\r", claim_base);
#endif
		addr = (unsigned long)claim(claim_base, size, 0);
		if ((void *)addr != (void *)-1)
			break;
	}
	/*
	 * claim() returns (void *)-1 on failure; addr stays 0 only if the
	 * loop never ran.  Treat both cases as allocation failure.
	 */
	if (addr == 0 || (void *)addr == (void *)-1)
		return 0;
	claim_base = PAGE_ALIGN(claim_base + size);
	return addr;
}

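/*
 * Check whether hdr looks like a big-endian 64-bit PowerPC ELF
 * executable.  If so, record the offset and sizes of its first PT_LOAD
 * segment in elfoffset and vmlinux, enforce the PROG_START minimum for
 * claim_base, and return 1.
 */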
static int is_elf64(void *hdr)
{
	Elf64_Ehdr *elf64 = hdr;
	Elf64_Phdr *elf64ph;
	unsigned int i;

	if (!(elf64->e_ident[EI_MAG0]  == ELFMAG0	&&
	      elf64->e_ident[EI_MAG1]  == ELFMAG1	&&
	      elf64->e_ident[EI_MAG2]  == ELFMAG2	&&
	      elf64->e_ident[EI_MAG3]  == ELFMAG3	&&
	      elf64->e_ident[EI_CLASS] == ELFCLASS64	&&
	      elf64->e_ident[EI_DATA]  == ELFDATA2MSB	&&
	      elf64->e_type            == ET_EXEC	&&
	      elf64->e_machine         == EM_PPC64))
		return 0;

	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
				 (unsigned long)elf64->e_phoff);
	for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
		if (elf64ph->p_type == PT_LOAD)
			break;
	if (i >= (unsigned int)elf64->e_phnum)
		return 0;

	elfoffset = (unsigned long)elf64ph->p_offset;
	vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
	vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;

#if defined(PROG_START)
	/*
	 * Maintain a "magic" minimum address. This keeps some older
	 * firmware platforms running.
	 */

	if (claim_base < PROG_START)
		claim_base = PROG_START;
#endif

	return 1;
}

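/*
 * Same check for a 32-bit big-endian PowerPC ELF executable, again
 * recording its first PT_LOAD segment in elfoffset and vmlinux.
 */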
static int is_elf32(void *hdr)
{
	Elf32_Ehdr *elf32 = hdr;
	Elf32_Phdr *elf32ph;
	unsigned int i;

	if (!(elf32->e_ident[EI_MAG0]  == ELFMAG0	&&
	      elf32->e_ident[EI_MAG1]  == ELFMAG1	&&
	      elf32->e_ident[EI_MAG2]  == ELFMAG2	&&
	      elf32->e_ident[EI_MAG3]  == ELFMAG3	&&
	      elf32->e_ident[EI_CLASS] == ELFCLASS32	&&
	      elf32->e_ident[EI_DATA]  == ELFDATA2MSB	&&
	      elf32->e_type            == ET_EXEC	&&
	      elf32->e_machine         == EM_PPC))
		return 0;

	elf32 = (Elf32_Ehdr *)elfheader;
	elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff);
	for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
		if (elf32ph->p_type == PT_LOAD)
			break;
	if (i >= elf32->e_phnum)
		return 0;

	elfoffset = elf32ph->p_offset;
	vmlinux.size = elf32ph->p_filesz + elf32ph->p_offset;
	vmlinux.memsize = elf32ph->p_memsz + elf32ph->p_offset;
	return 1;
}

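/*
 * Main C entry point of the zImage wrapper.  promptr is the Open
 * Firmware client interface entry; a1 and a2 are passed through to the
 * kernel unchanged unless a built-in initrd replaces them below.
 */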
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
	int len;
	kernel_entry_t kernel_entry;

	memset(__bss_start, 0, _end - __bss_start);

	prom = (int (*)(void *)) promptr;
	chosen_handle = finddevice("/chosen");
	if (chosen_handle == (void *) -1)
		exit();
	if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
		exit();

	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);

	/*
	 * The first available claim_base must be above the end of the
	 * loaded kernel wrapper file (_start to _end includes the
	 * initrd image if it is present) and rounded up to a nice
	 * 1 MB boundary for good measure.
	 */

	claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

	vmlinuz.addr = (unsigned long)_vmlinux_start;
	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);

	/* gunzip the ELF header of the kernel */
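	/* 0x1f 0x8b is the gzip magic, read here as a big-endian halfword */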
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		len = vmlinuz.size;
		gunzip(elfheader, sizeof(elfheader),
				(unsigned char *)vmlinuz.addr, &len);
	} else
		memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));

	if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
		printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
		exit();
	}

	/* We need to claim memsize plus the file offset, since gunzip
	 * expands the ELF header (the file offset), then the kernel
	 * itself, then possibly trailing data we don't care about.  The
	 * kernel's bss must be claimed too (the kernel zeroes it itself).
	 */
	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
	vmlinux.addr = try_claim(vmlinux.memsize);
	if (vmlinux.addr == 0) {
		printf("Can't allocate memory for kernel image !\n\r");
		exit();
	}

	/*
	 * Now we try to claim memory for the initrd (and copy it there)
	 */
	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
	initrd.memsize = initrd.size;
	if (initrd.size > 0) {
		printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
		initrd.addr = try_claim(initrd.size);
		if (initrd.addr == 0) {
			printf("Can't allocate memory for initial ramdisk !\n\r");
			exit();
		}
		a1 = initrd.addr;
		a2 = initrd.size;
		printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
		       initrd.addr, (unsigned long)_initrd_start, initrd.size);
		memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
		printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
	}

	/* Gunzip the kernel if it is compressed, otherwise just move it into place */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
		len = vmlinuz.size;
		gunzip((void *)vmlinux.addr, vmlinux.memsize,
			(unsigned char *)vmlinuz.addr, &len);
		printf("done 0x%lx bytes\n\r", len);
	} else {
		memmove((void *)vmlinux.addr, (void *)vmlinuz.addr, vmlinuz.size);
	}

	/* Skip over the ELF header */
#ifdef DEBUG
	printf("... skipping 0x%lx bytes of ELF header\n\r",
			elfoffset);
#endif
	vmlinux.addr += elfoffset;

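	/* Make the copied kernel visible to instruction fetch before we jump to it */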
	flush_cache((void *)vmlinux.addr, vmlinux.size);

	kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
	printf( "kernel:\n\r"
		"        entry addr = 0x%lx\n\r"
		"        a1         = 0x%lx,\n\r"
		"        a2         = 0x%lx,\n\r"
		"        prom       = 0x%lx,\n\r"
		"        bi_recs    = 0x%lx,\n\r",
		(unsigned long)kernel_entry, a1, a2,
		(unsigned long)prom, NULL);
#endif

	kernel_entry(a1, a2, prom, NULL);

	printf("Error: Linux kernel returned to zImage bootloader!\n\r");

	exit();
}