/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <stdarg.h>
#include <stddef.h>
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
#include "prom.h"
#include "zlib.h"

extern void flush_cache(void *, unsigned long);


/* Value picked to match that used by yaboot */
#define PROG_START	0x01400000	/* only used on 64-bit systems */
#define RAM_END		(512<<20)	/* Fixme: use OF */
#define ONE_MB		0x100000

extern char _start[];
extern char __bss_start[];
extern char _end[];
extern char _vmlinux_start[];
extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];

/* A buffer that may be edited by tools operating on a zImage binary so as to
 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
 * The buffer is put in its own section so that tools can locate it more
 * easily.
 */
static char builtin_cmdline[512]
	__attribute__((section("__builtin_cmdline")));


struct addr_range {
	unsigned long addr;
	unsigned long size;
	unsigned long memsize;
};
static struct addr_range vmlinux;
static struct addr_range vmlinuz;
static struct addr_range initrd;

static unsigned long elfoffset;

static char scratch[46912];	/* scratch space for gunzip, from zlib_inflate_workspacesize() */
static char elfheader[256];


typedef void (*kernel_entry_t)(unsigned long,
			       unsigned long,
			       void *,
			       void *);


#undef DEBUG

static unsigned long claim_base;

#define HEAD_CRC	2
#define EXTRA_FIELD	4
#define ORIG_NAME	8
#define COMMENT		0x10
#define RESERVED	0xe0

static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
{
	z_stream s;
	int r, i, flags;

	/* skip header */
	i = 10;
	flags = src[3];
	if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
		printf("bad gzipped data\n\r");
		exit();
	}
	if ((flags & EXTRA_FIELD) != 0)
		i = 12 + src[10] + (src[11] << 8);
	if ((flags & ORIG_NAME) != 0)
		while (src[i++] != 0)
			;
	if ((flags & COMMENT) != 0)
		while (src[i++] != 0)
			;
	if ((flags & HEAD_CRC) != 0)
		i += 2;
	if (i >= *lenp) {
		printf("gunzip: ran out of data in header\n\r");
		exit();
	}

	if (zlib_inflate_workspacesize() > sizeof(scratch)) {
		printf("gunzip needs more mem\n");
		exit();
	}
	memset(&s, 0, sizeof(s));
	s.workspace = scratch;
	r = zlib_inflateInit2(&s, -MAX_WBITS);
	if (r != Z_OK) {
		printf("inflateInit2 returned %d\n\r", r);
		exit();
	}
	s.next_in = src + i;
	s.avail_in = *lenp - i;
	s.next_out = dst;
	s.avail_out = dstlen;
	r = zlib_inflate(&s, Z_FULL_FLUSH);
	if (r != Z_OK && r != Z_STREAM_END) {
		printf("inflate returned %d msg: %s\n\r", r, s.msg);
		exit();
	}
	*lenp = s.next_out - (unsigned char *) dst;
	zlib_inflateEnd(&s);
}
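
/*
 * For reference: the header skipped in gunzip() above is the RFC 1952 gzip
 * member header.  The fixed part is 10 bytes (ID1 0x1f, ID2 0x8b, CM, FLG,
 * MTIME[4], XFL, OS), which is why parsing starts at offset 10 and the flag
 * byte is read from offset 3.  An optional extra field is prefixed by a
 * 2-byte little-endian length (hence 12 + src[10] + (src[11] << 8)), the
 * original-name and comment fields are NUL-terminated strings, and a header
 * CRC adds two more bytes before the deflate stream begins.
 */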

static unsigned long try_claim(unsigned long size)
{
	unsigned long addr = 0;

	for (; claim_base < RAM_END; claim_base += ONE_MB) {
#ifdef DEBUG
		printf(" trying: 0x%08lx\n\r", claim_base);
#endif
		addr = (unsigned long)claim(claim_base, size, 0);
		if ((void *)addr != (void *)-1)
			break;
	}
	if (addr == 0)
		return 0;
	claim_base = PAGE_ALIGN(claim_base + size);
	return addr;
}

static int is_elf64(void *hdr)
{
	Elf64_Ehdr *elf64 = hdr;
	Elf64_Phdr *elf64ph;
	unsigned int i;

	if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 &&
	      elf64->e_ident[EI_MAG1] == ELFMAG1 &&
	      elf64->e_ident[EI_MAG2] == ELFMAG2 &&
	      elf64->e_ident[EI_MAG3] == ELFMAG3 &&
	      elf64->e_ident[EI_CLASS] == ELFCLASS64 &&
	      elf64->e_ident[EI_DATA] == ELFDATA2MSB &&
	      elf64->e_type == ET_EXEC &&
	      elf64->e_machine == EM_PPC64))
		return 0;

	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
				 (unsigned long)elf64->e_phoff);
	for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
		if (elf64ph->p_type == PT_LOAD)
			break;
	if (i >= (unsigned int)elf64->e_phnum)
		return 0;

	elfoffset = (unsigned long)elf64ph->p_offset;
	vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
	vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;

#if defined(PROG_START)
	/*
	 * Maintain a "magic" minimum address. This keeps some older
	 * firmware platforms running.
	 */

	if (claim_base < PROG_START)
		claim_base = PROG_START;
#endif

	return 1;
}

static int is_elf32(void *hdr)
{
	Elf32_Ehdr *elf32 = hdr;
	Elf32_Phdr *elf32ph;
	unsigned int i;

	if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 &&
	      elf32->e_ident[EI_MAG1] == ELFMAG1 &&
	      elf32->e_ident[EI_MAG2] == ELFMAG2 &&
	      elf32->e_ident[EI_MAG3] == ELFMAG3 &&
	      elf32->e_ident[EI_CLASS] == ELFCLASS32 &&
	      elf32->e_ident[EI_DATA] == ELFDATA2MSB &&
	      elf32->e_type == ET_EXEC &&
	      elf32->e_machine == EM_PPC))
		return 0;

	elf32 = (Elf32_Ehdr *)elfheader;
	elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff);
	for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
		if (elf32ph->p_type == PT_LOAD)
			break;
	if (i >= elf32->e_phnum)
		return 0;

	elfoffset = elf32ph->p_offset;
	vmlinux.size = elf32ph->p_filesz + elf32ph->p_offset;
	vmlinux.memsize = elf32ph->p_memsz + elf32ph->p_offset;
	return 1;
}

void export_cmdline(void *chosen_handle)
{
	int len;
	char cmdline[2] = { 0, 0 };

	if (builtin_cmdline[0] == 0)
		return;

	len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline));
	if (len > 0 && cmdline[0] != 0)
		return;

	setprop(chosen_handle, "bootargs", builtin_cmdline,
		strlen(builtin_cmdline) + 1);
}
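
/*
 * Rough flow of the wrapper entry point below: start() receives a1/a2
 * (passed straight through to the kernel unless a built-in initrd
 * overrides them) and promptr, the Open Firmware client interface entry
 * point.  It clears the BSS, opens /chosen to find a console, claims
 * memory above the wrapper image, unpacks or copies the vmlinux (and any
 * built-in initrd) into place, exports a built-in command line to
 * /chosen/bootargs, flushes the caches over the kernel image, and jumps
 * to the kernel as kernel_entry(a1, a2, prom, NULL).
 */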
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
	int len;
	kernel_entry_t kernel_entry;

	memset(__bss_start, 0, _end - __bss_start);

	prom = (int (*)(void *)) promptr;
	chosen_handle = finddevice("/chosen");
	if (chosen_handle == (void *) -1)
		exit();
	if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
		exit();

	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);

	/*
	 * The first available claim_base must be above the end of the
	 * loaded kernel wrapper file (_start to _end includes the
	 * initrd image if it is present) and rounded up to a nice
	 * 1 MB boundary for good measure.
	 */

	claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

	vmlinuz.addr = (unsigned long)_vmlinux_start;
	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);

	/* gunzip the ELF header of the kernel */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		len = vmlinuz.size;
		gunzip(elfheader, sizeof(elfheader),
		       (unsigned char *)vmlinuz.addr, &len);
	} else
		memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));

	if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
		printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
		exit();
	}

	/* We need to claim the memsize plus the file offset since gzip
	 * will expand the header (file offset), then the kernel, then
	 * possible rubbish we don't care about. But the kernel bss must
	 * be claimed (it will be zeroed by the kernel itself).
	 */
	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
	vmlinux.addr = try_claim(vmlinux.memsize);
	if (vmlinux.addr == 0) {
		printf("Can't allocate memory for kernel image !\n\r");
		exit();
	}

	/*
	 * Now we try to claim memory for the initrd (and copy it there)
	 */
	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
	initrd.memsize = initrd.size;
	if (initrd.size > 0) {
		printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
		initrd.addr = try_claim(initrd.size);
		if (initrd.addr == 0) {
			printf("Can't allocate memory for initial ramdisk !\n\r");
			exit();
		}
		a1 = initrd.addr;
		a2 = initrd.size;
		printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
		       initrd.addr, (unsigned long)_initrd_start, initrd.size);
		memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
		printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
	}

	/* Gunzip the kernel if it is compressed, otherwise just copy it */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
		len = vmlinuz.size;
		gunzip((void *)vmlinux.addr, vmlinux.memsize,
		       (unsigned char *)vmlinuz.addr, &len);
		printf("done 0x%lx bytes\n\r", len);
	} else {
		memmove((void *)vmlinux.addr, (void *)vmlinuz.addr, vmlinuz.size);
	}

	export_cmdline(chosen_handle);

	/* Skip over the ELF header */
#ifdef DEBUG
	printf("... skipping 0x%lx bytes of ELF header\n\r",
	       elfoffset);
#endif
	vmlinux.addr += elfoffset;

	flush_cache((void *)vmlinux.addr, vmlinux.size);

	kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
	printf("kernel:\n\r"
	       "        entry addr = 0x%lx\n\r"
	       "        a1         = 0x%lx,\n\r"
	       "        a2         = 0x%lx,\n\r"
	       "        prom       = 0x%lx,\n\r"
	       "        bi_recs    = 0x%lx,\n\r",
	       (unsigned long)kernel_entry, a1, a2,
	       (unsigned long)prom, NULL);
#endif

	kernel_entry(a1, a2, prom, NULL);

	printf("Error: Linux kernel returned to zImage bootloader!\n\r");

	exit();
}