xref: /linux/arch/powerpc/kernel/prom_init.c (revision ff2632d7d08edc11e8bd0629e9fcfebab25c78b4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Procedures for interfacing to Open Firmware.
4  *
5  * Paul Mackerras	August 1996.
6  * Copyright (C) 1996-2005 Paul Mackerras.
7  *
8  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9  *    {engebret|bergner}@us.ibm.com
10  */
11 
12 #undef DEBUG_PROM
13 
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16 
17 #include <linux/stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <linux/printk.h>
31 #include <linux/of.h>
32 #include <linux/of_fdt.h>
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/interrupt.h>
38 #include <asm/irq.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/mmu.h>
42 #include <asm/iommu.h>
43 #include <asm/btext.h>
44 #include <asm/sections.h>
45 #include <asm/setup.h>
46 #include <asm/asm-prototypes.h>
47 #include <asm/ultravisor-api.h>
48 
49 #include <linux/linux_logo.h>
50 
51 /* All of prom_init bss lives here */
52 #define __prombss __section(".bss.prominit")
53 
54 /*
55  * Eventually bump that one up
56  */
57 #define DEVTREE_CHUNK_SIZE	0x100000
58 
59 /*
60  * This is the size of the local memory reserve map that gets copied
61  * into the boot params passed to the kernel. That size is totally
62  * flexible as the kernel just reads the list until it encounters an
63  * entry with size 0, so it can be changed without breaking binary
64  * compatibility
65  */
66 #define MEM_RESERVE_MAP_SIZE	8
67 
68 /*
69  * prom_init() is called very early on, before the kernel text
70  * and data have been mapped to KERNELBASE.  At this point the code
71  * is running at whatever address it has been loaded at.
72  * On ppc32 we compile with -mrelocatable, which means that references
73  * to extern and static variables get relocated automatically.
74  * ppc64 objects are always relocatable, we just need to relocate the
75  * TOC.
76  *
77  * Because OF may have mapped I/O devices into the area starting at
78  * KERNELBASE, particularly on CHRP machines, we can't safely call
79  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
80  * OF calls must be done within prom_init().
81  *
82  * ADDR is used in calls to call_prom.  The 4th and following
83  * arguments to call_prom should be 32-bit values.
84  * On ppc64, 64 bit values are truncated to 32 bits (and
85  * fortunately don't get interpreted as two arguments).
86  */
87 #define ADDR(x)		(u32)(unsigned long)(x)
88 
89 #ifdef CONFIG_PPC64
90 #define OF_WORKAROUNDS	0
91 #else
92 #define OF_WORKAROUNDS	of_workarounds
93 static int of_workarounds __prombss;
94 #endif
95 
96 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
97 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
98 
99 #ifdef DEBUG_PROM
100 #define prom_debug(x...)	prom_printf(x)
101 #else
102 #define prom_debug(x...)	do { } while (0)
103 #endif
104 
105 
106 typedef u32 prom_arg_t;
107 
108 struct prom_args {
109         __be32 service;
110         __be32 nargs;
111         __be32 nret;
112         __be32 args[10];
113 };
114 
115 struct prom_t {
116 	ihandle root;
117 	phandle chosen;
118 	int cpu;
119 	ihandle stdout;
120 	ihandle mmumap;
121 	ihandle memory;
122 };
123 
124 struct mem_map_entry {
125 	__be64	base;
126 	__be64	size;
127 };
128 
129 typedef __be32 cell_t;
130 
131 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
132 		    unsigned long r6, unsigned long r7, unsigned long r8,
133 		    unsigned long r9);
134 
135 #ifdef CONFIG_PPC64
136 extern int enter_prom(struct prom_args *args, unsigned long entry);
137 #else
enter_prom(struct prom_args * args,unsigned long entry)138 static inline int enter_prom(struct prom_args *args, unsigned long entry)
139 {
140 	return ((int (*)(struct prom_args *))entry)(args);
141 }
142 #endif
143 
144 extern void copy_and_flush(unsigned long dest, unsigned long src,
145 			   unsigned long size, unsigned long offset);
146 
147 /* prom structure */
148 static struct prom_t __prombss prom;
149 
150 static unsigned long __prombss prom_entry;
151 
152 static char __prombss of_stdout_device[256];
153 static char __prombss prom_scratch[256];
154 
155 static unsigned long __prombss dt_header_start;
156 static unsigned long __prombss dt_struct_start, dt_struct_end;
157 static unsigned long __prombss dt_string_start, dt_string_end;
158 
159 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
160 
161 #ifdef CONFIG_PPC64
162 static int __prombss prom_iommu_force_on;
163 static int __prombss prom_iommu_off;
164 static unsigned long __prombss prom_tce_alloc_start;
165 static unsigned long __prombss prom_tce_alloc_end;
166 #endif
167 
168 #ifdef CONFIG_PPC_PSERIES
169 static bool __prombss prom_radix_disable;
170 static bool __prombss prom_radix_gtse_disable;
171 static bool __prombss prom_xive_disable;
172 #endif
173 
174 #ifdef CONFIG_PPC_SVM
175 static bool __prombss prom_svm_enable;
176 #endif
177 
178 struct platform_support {
179 	bool hash_mmu;
180 	bool radix_mmu;
181 	bool radix_gtse;
182 	bool xive;
183 };
184 
185 /* Platforms codes are now obsolete in the kernel. Now only used within this
186  * file and ultimately gone too. Feel free to change them if you need, they
187  * are not shared with anything outside of this file anymore
188  */
189 #define PLATFORM_PSERIES	0x0100
190 #define PLATFORM_PSERIES_LPAR	0x0101
191 #define PLATFORM_LPAR		0x0001
192 #define PLATFORM_POWERMAC	0x0400
193 #define PLATFORM_GENERIC	0x0500
194 
195 static int __prombss of_platform;
196 
197 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
198 
199 static unsigned long __prombss prom_memory_limit;
200 
201 static unsigned long __prombss alloc_top;
202 static unsigned long __prombss alloc_top_high;
203 static unsigned long __prombss alloc_bottom;
204 static unsigned long __prombss rmo_top;
205 static unsigned long __prombss ram_top;
206 
207 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
208 static int __prombss mem_reserve_cnt;
209 
210 static cell_t __prombss regbuf[1024];
211 
212 static bool  __prombss rtas_has_query_cpu_stopped;
213 
214 
215 /*
216  * Error results ... some OF calls will return "-1" on error, some
217  * will return 0, some will return either. To simplify, here are
218  * macros to use with any ihandle or phandle return value to check if
219  * it is valid
220  */
221 
222 #define PROM_ERROR		(-1u)
223 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
224 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
225 
226 /* Copied from lib/string.c and lib/kstrtox.c */
227 
prom_strcmp(const char * cs,const char * ct)228 static int __init prom_strcmp(const char *cs, const char *ct)
229 {
230 	unsigned char c1, c2;
231 
232 	while (1) {
233 		c1 = *cs++;
234 		c2 = *ct++;
235 		if (c1 != c2)
236 			return c1 < c2 ? -1 : 1;
237 		if (!c1)
238 			break;
239 	}
240 	return 0;
241 }
242 
prom_strscpy_pad(char * dest,const char * src,size_t n)243 static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
244 {
245 	ssize_t rc;
246 	size_t i;
247 
248 	if (n == 0 || n > INT_MAX)
249 		return -E2BIG;
250 
251 	// Copy up to n bytes
252 	for (i = 0; i < n && src[i] != '\0'; i++)
253 		dest[i] = src[i];
254 
255 	rc = i;
256 
257 	// If we copied all n then we have run out of space for the nul
258 	if (rc == n) {
259 		// Rewind by one character to ensure nul termination
260 		i--;
261 		rc = -E2BIG;
262 	}
263 
264 	for (; i < n; i++)
265 		dest[i] = '\0';
266 
267 	return rc;
268 }
269 
prom_strncmp(const char * cs,const char * ct,size_t count)270 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
271 {
272 	unsigned char c1, c2;
273 
274 	while (count) {
275 		c1 = *cs++;
276 		c2 = *ct++;
277 		if (c1 != c2)
278 			return c1 < c2 ? -1 : 1;
279 		if (!c1)
280 			break;
281 		count--;
282 	}
283 	return 0;
284 }
285 
prom_strlen(const char * s)286 static size_t __init prom_strlen(const char *s)
287 {
288 	const char *sc;
289 
290 	for (sc = s; *sc != '\0'; ++sc)
291 		/* nothing */;
292 	return sc - s;
293 }
294 
prom_memcmp(const void * cs,const void * ct,size_t count)295 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
296 {
297 	const unsigned char *su1, *su2;
298 	int res = 0;
299 
300 	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
301 		if ((res = *su1 - *su2) != 0)
302 			break;
303 	return res;
304 }
305 
prom_strstr(const char * s1,const char * s2)306 static char __init *prom_strstr(const char *s1, const char *s2)
307 {
308 	size_t l1, l2;
309 
310 	l2 = prom_strlen(s2);
311 	if (!l2)
312 		return (char *)s1;
313 	l1 = prom_strlen(s1);
314 	while (l1 >= l2) {
315 		l1--;
316 		if (!prom_memcmp(s1, s2, l2))
317 			return (char *)s1;
318 		s1++;
319 	}
320 	return NULL;
321 }
322 
prom_strlcat(char * dest,const char * src,size_t count)323 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
324 {
325 	size_t dsize = prom_strlen(dest);
326 	size_t len = prom_strlen(src);
327 	size_t res = dsize + len;
328 
329 	/* This would be a bug */
330 	if (dsize >= count)
331 		return count;
332 
333 	dest += dsize;
334 	count -= dsize;
335 	if (len >= count)
336 		len = count-1;
337 	memcpy(dest, src, len);
338 	dest[len] = 0;
339 	return res;
340 
341 }
342 
343 #ifdef CONFIG_PPC_PSERIES
prom_strtobool(const char * s,bool * res)344 static int __init prom_strtobool(const char *s, bool *res)
345 {
346 	if (!s)
347 		return -EINVAL;
348 
349 	switch (s[0]) {
350 	case 'y':
351 	case 'Y':
352 	case '1':
353 		*res = true;
354 		return 0;
355 	case 'n':
356 	case 'N':
357 	case '0':
358 		*res = false;
359 		return 0;
360 	case 'o':
361 	case 'O':
362 		switch (s[1]) {
363 		case 'n':
364 		case 'N':
365 			*res = true;
366 			return 0;
367 		case 'f':
368 		case 'F':
369 			*res = false;
370 			return 0;
371 		default:
372 			break;
373 		}
374 		break;
375 	default:
376 		break;
377 	}
378 
379 	return -EINVAL;
380 }
381 #endif
382 
383 /* This is the one and *ONLY* place where we actually call open
384  * firmware.
385  */
386 
call_prom(const char * service,int nargs,int nret,...)387 static int __init call_prom(const char *service, int nargs, int nret, ...)
388 {
389 	int i;
390 	struct prom_args args;
391 	va_list list;
392 
393 	args.service = cpu_to_be32(ADDR(service));
394 	args.nargs = cpu_to_be32(nargs);
395 	args.nret = cpu_to_be32(nret);
396 
397 	va_start(list, nret);
398 	for (i = 0; i < nargs; i++)
399 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
400 	va_end(list);
401 
402 	for (i = 0; i < nret; i++)
403 		args.args[nargs+i] = 0;
404 
405 	if (enter_prom(&args, prom_entry) < 0)
406 		return PROM_ERROR;
407 
408 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
409 }
410 
call_prom_ret(const char * service,int nargs,int nret,prom_arg_t * rets,...)411 static int __init call_prom_ret(const char *service, int nargs, int nret,
412 				prom_arg_t *rets, ...)
413 {
414 	int i;
415 	struct prom_args args;
416 	va_list list;
417 
418 	args.service = cpu_to_be32(ADDR(service));
419 	args.nargs = cpu_to_be32(nargs);
420 	args.nret = cpu_to_be32(nret);
421 
422 	va_start(list, rets);
423 	for (i = 0; i < nargs; i++)
424 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
425 	va_end(list);
426 
427 	for (i = 0; i < nret; i++)
428 		args.args[nargs+i] = 0;
429 
430 	if (enter_prom(&args, prom_entry) < 0)
431 		return PROM_ERROR;
432 
433 	if (rets != NULL)
434 		for (i = 1; i < nret; ++i)
435 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
436 
437 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
438 }
439 
440 
prom_print(const char * msg)441 static void __init prom_print(const char *msg)
442 {
443 	const char *p, *q;
444 
445 	if (prom.stdout == 0)
446 		return;
447 
448 	for (p = msg; *p != 0; p = q) {
449 		for (q = p; *q != 0 && *q != '\n'; ++q)
450 			;
451 		if (q > p)
452 			call_prom("write", 3, 1, prom.stdout, p, q - p);
453 		if (*q == 0)
454 			break;
455 		++q;
456 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
457 	}
458 }
459 
460 
461 /*
462  * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that
463  * we do not need __udivdi3 or __umoddi3 on 32bits.
464  */
prom_print_hex(unsigned long val)465 static void __init prom_print_hex(unsigned long val)
466 {
467 	int i, nibbles = sizeof(val)*2;
468 	char buf[sizeof(val)*2+1];
469 
470 	for (i = nibbles-1;  i >= 0;  i--) {
471 		buf[i] = (val & 0xf) + '0';
472 		if (buf[i] > '9')
473 			buf[i] += ('a'-'0'-10);
474 		val >>= 4;
475 	}
476 	buf[nibbles] = '\0';
477 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
478 }
479 
480 /* max number of decimal digits in an unsigned long */
481 #define UL_DIGITS 21
prom_print_dec(unsigned long val)482 static void __init prom_print_dec(unsigned long val)
483 {
484 	int i, size;
485 	char buf[UL_DIGITS+1];
486 
487 	for (i = UL_DIGITS-1; i >= 0;  i--) {
488 		buf[i] = (val % 10) + '0';
489 		val = val/10;
490 		if (val == 0)
491 			break;
492 	}
493 	/* shift stuff down */
494 	size = UL_DIGITS - i;
495 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
496 }
497 
498 __printf(1, 2)
prom_printf(const char * format,...)499 static void __init prom_printf(const char *format, ...)
500 {
501 	const char *p, *q, *s;
502 	va_list args;
503 	unsigned long v;
504 	long vs;
505 	int n = 0;
506 
507 	va_start(args, format);
508 	for (p = format; *p != 0; p = q) {
509 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
510 			;
511 		if (q > p)
512 			call_prom("write", 3, 1, prom.stdout, p, q - p);
513 		if (*q == 0)
514 			break;
515 		if (*q == '\n') {
516 			++q;
517 			call_prom("write", 3, 1, prom.stdout,
518 				  ADDR("\r\n"), 2);
519 			continue;
520 		}
521 		++q;
522 		if (*q == 0)
523 			break;
524 		while (*q == 'l') {
525 			++q;
526 			++n;
527 		}
528 		switch (*q) {
529 		case 's':
530 			++q;
531 			s = va_arg(args, const char *);
532 			prom_print(s);
533 			break;
534 		case 'x':
535 			++q;
536 			switch (n) {
537 			case 0:
538 				v = va_arg(args, unsigned int);
539 				break;
540 			case 1:
541 				v = va_arg(args, unsigned long);
542 				break;
543 			case 2:
544 			default:
545 				v = va_arg(args, unsigned long long);
546 				break;
547 			}
548 			prom_print_hex(v);
549 			break;
550 		case 'u':
551 			++q;
552 			switch (n) {
553 			case 0:
554 				v = va_arg(args, unsigned int);
555 				break;
556 			case 1:
557 				v = va_arg(args, unsigned long);
558 				break;
559 			case 2:
560 			default:
561 				v = va_arg(args, unsigned long long);
562 				break;
563 			}
564 			prom_print_dec(v);
565 			break;
566 		case 'd':
567 			++q;
568 			switch (n) {
569 			case 0:
570 				vs = va_arg(args, int);
571 				break;
572 			case 1:
573 				vs = va_arg(args, long);
574 				break;
575 			case 2:
576 			default:
577 				vs = va_arg(args, long long);
578 				break;
579 			}
580 			if (vs < 0) {
581 				prom_print("-");
582 				vs = -vs;
583 			}
584 			prom_print_dec(vs);
585 			break;
586 		}
587 	}
588 	va_end(args);
589 }
590 
591 
prom_claim(unsigned long virt,unsigned long size,unsigned long align)592 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
593 				unsigned long align)
594 {
595 
596 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
597 		/*
598 		 * Old OF requires we claim physical and virtual separately
599 		 * and then map explicitly (assuming virtual mode)
600 		 */
601 		int ret;
602 		prom_arg_t result;
603 
604 		ret = call_prom_ret("call-method", 5, 2, &result,
605 				    ADDR("claim"), prom.memory,
606 				    align, size, virt);
607 		if (ret != 0 || result == -1)
608 			return -1;
609 		ret = call_prom_ret("call-method", 5, 2, &result,
610 				    ADDR("claim"), prom.mmumap,
611 				    align, size, virt);
612 		if (ret != 0) {
613 			call_prom("call-method", 4, 1, ADDR("release"),
614 				  prom.memory, size, virt);
615 			return -1;
616 		}
617 		/* the 0x12 is M (coherence) + PP == read/write */
618 		call_prom("call-method", 6, 1,
619 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
620 		return virt;
621 	}
622 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
623 			 (prom_arg_t)align);
624 }
625 
prom_panic(const char * reason)626 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
627 {
628 	prom_print(reason);
629 	/* Do not call exit because it clears the screen on pmac
630 	 * it also causes some sort of double-fault on early pmacs */
631 	if (of_platform == PLATFORM_POWERMAC)
632 		asm("trap\n");
633 
634 	/* ToDo: should put up an SRC here on pSeries */
635 	call_prom("exit", 0, 0);
636 
637 	for (;;)			/* should never get here */
638 		;
639 }
640 
641 
prom_next_node(phandle * nodep)642 static int __init prom_next_node(phandle *nodep)
643 {
644 	phandle node;
645 
646 	if ((node = *nodep) != 0
647 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
648 		return 1;
649 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
650 		return 1;
651 	for (;;) {
652 		if ((node = call_prom("parent", 1, 1, node)) == 0)
653 			return 0;
654 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
655 			return 1;
656 	}
657 }
658 
prom_getprop(phandle node,const char * pname,void * value,size_t valuelen)659 static inline int __init prom_getprop(phandle node, const char *pname,
660 				      void *value, size_t valuelen)
661 {
662 	return call_prom("getprop", 4, 1, node, ADDR(pname),
663 			 (u32)(unsigned long) value, (u32) valuelen);
664 }
665 
prom_getproplen(phandle node,const char * pname)666 static inline int __init prom_getproplen(phandle node, const char *pname)
667 {
668 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
669 }
670 
add_string(char ** str,const char * q)671 static void __init add_string(char **str, const char *q)
672 {
673 	char *p = *str;
674 
675 	while (*q)
676 		*p++ = *q++;
677 	*p++ = ' ';
678 	*str = p;
679 }
680 
tohex(unsigned int x)681 static char *__init tohex(unsigned int x)
682 {
683 	static const char digits[] __initconst = "0123456789abcdef";
684 	static char result[9] __prombss;
685 	int i;
686 
687 	result[8] = 0;
688 	i = 8;
689 	do {
690 		--i;
691 		result[i] = digits[x & 0xf];
692 		x >>= 4;
693 	} while (x != 0 && i > 0);
694 	return &result[i];
695 }
696 
prom_setprop(phandle node,const char * nodename,const char * pname,void * value,size_t valuelen)697 static int __init prom_setprop(phandle node, const char *nodename,
698 			       const char *pname, void *value, size_t valuelen)
699 {
700 	char cmd[256], *p;
701 
702 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
703 		return call_prom("setprop", 4, 1, node, ADDR(pname),
704 				 (u32)(unsigned long) value, (u32) valuelen);
705 
706 	/* gah... setprop doesn't work on longtrail, have to use interpret */
707 	p = cmd;
708 	add_string(&p, "dev");
709 	add_string(&p, nodename);
710 	add_string(&p, tohex((u32)(unsigned long) value));
711 	add_string(&p, tohex(valuelen));
712 	add_string(&p, tohex(ADDR(pname)));
713 	add_string(&p, tohex(prom_strlen(pname)));
714 	add_string(&p, "property");
715 	*p = 0;
716 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
717 }
718 
719 /* We can't use the standard versions because of relocation headaches. */
720 #define prom_isxdigit(c) \
721 	(('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
722 
723 #define prom_isdigit(c)	('0' <= (c) && (c) <= '9')
724 #define prom_islower(c)	('a' <= (c) && (c) <= 'z')
725 #define prom_toupper(c)	(prom_islower(c) ? ((c) - 'a' + 'A') : (c))
726 
prom_strtoul(const char * cp,const char ** endp)727 static unsigned long __init prom_strtoul(const char *cp, const char **endp)
728 {
729 	unsigned long result = 0, base = 10, value;
730 
731 	if (*cp == '0') {
732 		base = 8;
733 		cp++;
734 		if (prom_toupper(*cp) == 'X') {
735 			cp++;
736 			base = 16;
737 		}
738 	}
739 
740 	while (prom_isxdigit(*cp) &&
741 	       (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
742 		result = result * base + value;
743 		cp++;
744 	}
745 
746 	if (endp)
747 		*endp = cp;
748 
749 	return result;
750 }
751 
prom_memparse(const char * ptr,const char ** retptr)752 static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
753 {
754 	unsigned long ret = prom_strtoul(ptr, retptr);
755 	int shift = 0;
756 
757 	/*
758 	 * We can't use a switch here because GCC *may* generate a
759 	 * jump table which won't work, because we're not running at
760 	 * the address we're linked at.
761 	 */
762 	if ('G' == **retptr || 'g' == **retptr)
763 		shift = 30;
764 
765 	if ('M' == **retptr || 'm' == **retptr)
766 		shift = 20;
767 
768 	if ('K' == **retptr || 'k' == **retptr)
769 		shift = 10;
770 
771 	if (shift) {
772 		ret <<= shift;
773 		(*retptr)++;
774 	}
775 
776 	return ret;
777 }
778 
779 /*
780  * Early parsing of the command line passed to the kernel, used for
781  * "mem=x" and the options that affect the iommu
782  */
early_cmdline_parse(void)783 static void __init early_cmdline_parse(void)
784 {
785 	const char *opt;
786 
787 	char *p;
788 	int l = 0;
789 
790 	prom_cmd_line[0] = 0;
791 	p = prom_cmd_line;
792 
793 	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
794 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
795 
796 	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
797 		prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
798 			     sizeof(prom_cmd_line));
799 
800 	prom_printf("command line: %s\n", prom_cmd_line);
801 
802 #ifdef CONFIG_PPC64
803 	opt = prom_strstr(prom_cmd_line, "iommu=");
804 	if (opt) {
805 		prom_printf("iommu opt is: %s\n", opt);
806 		opt += 6;
807 		while (*opt && *opt == ' ')
808 			opt++;
809 		if (!prom_strncmp(opt, "off", 3))
810 			prom_iommu_off = 1;
811 		else if (!prom_strncmp(opt, "force", 5))
812 			prom_iommu_force_on = 1;
813 	}
814 #endif
815 	opt = prom_strstr(prom_cmd_line, "mem=");
816 	if (opt) {
817 		opt += 4;
818 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
819 #ifdef CONFIG_PPC64
820 		/* Align down to 16 MB which is large page size with hash page translation */
821 		prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M);
822 #endif
823 	}
824 
825 #ifdef CONFIG_PPC_PSERIES
826 	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
827 	opt = prom_strstr(prom_cmd_line, "disable_radix");
828 	if (opt) {
829 		opt += 13;
830 		if (*opt && *opt == '=') {
831 			bool val;
832 
833 			if (prom_strtobool(++opt, &val))
834 				prom_radix_disable = false;
835 			else
836 				prom_radix_disable = val;
837 		} else
838 			prom_radix_disable = true;
839 	}
840 	if (prom_radix_disable)
841 		prom_debug("Radix disabled from cmdline\n");
842 
843 	opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
844 	if (opt) {
845 		prom_radix_gtse_disable = true;
846 		prom_debug("Radix GTSE disabled from cmdline\n");
847 	}
848 
849 	opt = prom_strstr(prom_cmd_line, "xive=off");
850 	if (opt) {
851 		prom_xive_disable = true;
852 		prom_debug("XIVE disabled from cmdline\n");
853 	}
854 #endif /* CONFIG_PPC_PSERIES */
855 
856 #ifdef CONFIG_PPC_SVM
857 	opt = prom_strstr(prom_cmd_line, "svm=");
858 	if (opt) {
859 		bool val;
860 
861 		opt += sizeof("svm=") - 1;
862 		if (!prom_strtobool(opt, &val))
863 			prom_svm_enable = val;
864 	}
865 #endif /* CONFIG_PPC_SVM */
866 }
867 
868 #ifdef CONFIG_PPC_PSERIES
869 /*
870  * The architecture vector has an array of PVR mask/value pairs,
871  * followed by # option vectors - 1, followed by the option vectors.
872  *
873  * See prom.h for the definition of the bits specified in the
874  * architecture vector.
875  */
876 
877 /* Firmware expects the value to be n - 1, where n is the # of vectors */
878 #define NUM_VECTORS(n)		((n) - 1)
879 
880 /*
881  * Firmware expects 1 + n - 2, where n is the length of the option vector in
882  * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
883  */
884 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
885 
886 struct option_vector1 {
887 	u8 byte1;
888 	u8 arch_versions;
889 	u8 arch_versions3;
890 } __packed;
891 
892 struct option_vector2 {
893 	u8 byte1;
894 	__be16 reserved;
895 	__be32 real_base;
896 	__be32 real_size;
897 	__be32 virt_base;
898 	__be32 virt_size;
899 	__be32 load_base;
900 	__be32 min_rma;
901 	__be32 min_load;
902 	u8 min_rma_percent;
903 	u8 max_pft_size;
904 } __packed;
905 
906 struct option_vector3 {
907 	u8 byte1;
908 	u8 byte2;
909 } __packed;
910 
911 struct option_vector4 {
912 	u8 byte1;
913 	u8 min_vp_cap;
914 } __packed;
915 
916 struct option_vector5 {
917 	u8 byte1;
918 	u8 byte2;
919 	u8 byte3;
920 	u8 cmo;
921 	u8 associativity;
922 	u8 bin_opts;
923 	u8 micro_checkpoint;
924 	u8 reserved0;
925 	__be32 max_cpus;
926 	__be16 papr_level;
927 	__be16 reserved1;
928 	u8 platform_facilities;
929 	u8 reserved2;
930 	__be16 reserved3;
931 	u8 subprocessors;
932 	u8 byte22;
933 	u8 intarch;
934 	u8 mmu;
935 	u8 hash_ext;
936 	u8 radix_ext;
937 } __packed;
938 
939 struct option_vector6 {
940 	u8 reserved;
941 	u8 secondary_pteg;
942 	u8 os_name;
943 } __packed;
944 
945 struct option_vector7 {
946 	u8 os_id[256];
947 } __packed;
948 
949 struct ibm_arch_vec {
950 	struct { __be32 mask, val; } pvrs[16];
951 
952 	u8 num_vectors;
953 
954 	u8 vec1_len;
955 	struct option_vector1 vec1;
956 
957 	u8 vec2_len;
958 	struct option_vector2 vec2;
959 
960 	u8 vec3_len;
961 	struct option_vector3 vec3;
962 
963 	u8 vec4_len;
964 	struct option_vector4 vec4;
965 
966 	u8 vec5_len;
967 	struct option_vector5 vec5;
968 
969 	u8 vec6_len;
970 	struct option_vector6 vec6;
971 
972 	u8 vec7_len;
973 	struct option_vector7 vec7;
974 } __packed;
975 
976 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
977 	.pvrs = {
978 		{
979 			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
980 			.val  = cpu_to_be32(0x003a0000),
981 		},
982 		{
983 			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
984 			.val  = cpu_to_be32(0x003e0000),
985 		},
986 		{
987 			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
988 			.val  = cpu_to_be32(0x003f0000),
989 		},
990 		{
991 			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
992 			.val  = cpu_to_be32(0x004b0000),
993 		},
994 		{
995 			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
996 			.val  = cpu_to_be32(0x004c0000),
997 		},
998 		{
999 			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
1000 			.val  = cpu_to_be32(0x004d0000),
1001 		},
1002 		{
1003 			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
1004 			.val  = cpu_to_be32(0x004e0000),
1005 		},
1006 		{
1007 			.mask = cpu_to_be32(0xffff0000), /* POWER10 */
1008 			.val  = cpu_to_be32(0x00800000),
1009 		},
1010 		{
1011 			.mask = cpu_to_be32(0xffff0000), /* POWER11 */
1012 			.val  = cpu_to_be32(0x00820000),
1013 		},
1014 		{
1015 			.mask = cpu_to_be32(0xffffffff), /* P11 compliant */
1016 			.val  = cpu_to_be32(0x0f000007),
1017 		},
1018 		{
1019 			.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1020 			.val  = cpu_to_be32(0x0f000006),
1021 		},
1022 		{
1023 			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1024 			.val  = cpu_to_be32(0x0f000005),
1025 		},
1026 		{
1027 			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1028 			.val  = cpu_to_be32(0x0f000004),
1029 		},
1030 		{
1031 			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1032 			.val  = cpu_to_be32(0x0f000003),
1033 		},
1034 		{
1035 			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1036 			.val  = cpu_to_be32(0x0f000002),
1037 		},
1038 		{
1039 			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1040 			.val  = cpu_to_be32(0x0f000001),
1041 		},
1042 	},
1043 
1044 	.num_vectors = NUM_VECTORS(6),
1045 
1046 	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1047 	.vec1 = {
1048 		.byte1 = 0,
1049 		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1050 				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1051 		.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1052 	},
1053 
1054 	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1055 	/* option vector 2: Open Firmware options supported */
1056 	.vec2 = {
1057 		.byte1 = OV2_REAL_MODE,
1058 		.reserved = 0,
1059 		.real_base = cpu_to_be32(0xffffffff),
1060 		.real_size = cpu_to_be32(0xffffffff),
1061 		.virt_base = cpu_to_be32(0xffffffff),
1062 		.virt_size = cpu_to_be32(0xffffffff),
1063 		.load_base = cpu_to_be32(0xffffffff),
1064 		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
1065 		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
1066 		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
1067 		.max_pft_size = 48,	/* max log_2(hash table size) */
1068 	},
1069 
1070 	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1071 	/* option vector 3: processor options supported */
1072 	.vec3 = {
1073 		.byte1 = 0,			/* don't ignore, don't halt */
1074 		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1075 	},
1076 
1077 	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1078 	/* option vector 4: IBM PAPR implementation */
1079 	.vec4 = {
1080 		.byte1 = 0,			/* don't halt */
1081 		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
1082 	},
1083 
1084 	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1085 	/* option vector 5: PAPR/OF options */
1086 	.vec5 = {
1087 		.byte1 = 0,				/* don't ignore, don't halt */
1088 		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1089 		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1090 #ifdef CONFIG_PCI_MSI
1091 		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
1092 		OV5_FEAT(OV5_MSI),
1093 #else
1094 		0,
1095 #endif
1096 		.byte3 = 0,
1097 		.cmo =
1098 #ifdef CONFIG_PPC_SMLPAR
1099 		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1100 #else
1101 		0,
1102 #endif
1103 		.associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
1104 		OV5_FEAT(OV5_FORM2_AFFINITY),
1105 		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1106 		.micro_checkpoint = 0,
1107 		.reserved0 = 0,
1108 		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
1109 		.papr_level = 0,
1110 		.reserved1 = 0,
1111 		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1112 		.reserved2 = 0,
1113 		.reserved3 = 0,
1114 		.subprocessors = 1,
1115 		.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1116 		.intarch = 0,
1117 		.mmu = 0,
1118 		.hash_ext = 0,
1119 		.radix_ext = 0,
1120 	},
1121 
1122 	/* option vector 6: IBM PAPR hints */
1123 	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1124 	.vec6 = {
1125 		.reserved = 0,
1126 		.secondary_pteg = 0,
1127 		.os_name = OV6_LINUX,
1128 	},
1129 
1130 	/* option vector 7: OS Identification */
1131 	.vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1132 };
1133 
1134 static struct ibm_arch_vec __prombss ibm_architecture_vec  ____cacheline_aligned;
1135 
1136 /* Old method - ELF header with PT_NOTE sections only works on BE */
1137 #ifdef __BIG_ENDIAN__
1138 static const struct fake_elf {
1139 	Elf32_Ehdr	elfhdr;
1140 	Elf32_Phdr	phdr[2];
1141 	struct chrpnote {
1142 		u32	namesz;
1143 		u32	descsz;
1144 		u32	type;
1145 		char	name[8];	/* "PowerPC" */
1146 		struct chrpdesc {
1147 			u32	real_mode;
1148 			u32	real_base;
1149 			u32	real_size;
1150 			u32	virt_base;
1151 			u32	virt_size;
1152 			u32	load_base;
1153 		} chrpdesc;
1154 	} chrpnote;
1155 	struct rpanote {
1156 		u32	namesz;
1157 		u32	descsz;
1158 		u32	type;
1159 		char	name[24];	/* "IBM,RPA-Client-Config" */
1160 		struct rpadesc {
1161 			u32	lpar_affinity;
1162 			u32	min_rmo_size;
1163 			u32	min_rmo_percent;
1164 			u32	max_pft_size;
1165 			u32	splpar;
1166 			u32	min_load;
1167 			u32	new_mem_def;
1168 			u32	ignore_me;
1169 		} rpadesc;
1170 	} rpanote;
1171 } fake_elf __initconst = {
1172 	.elfhdr = {
1173 		.e_ident = { 0x7f, 'E', 'L', 'F',
1174 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1175 		.e_type = ET_EXEC,	/* yeah right */
1176 		.e_machine = EM_PPC,
1177 		.e_version = EV_CURRENT,
1178 		.e_phoff = offsetof(struct fake_elf, phdr),
1179 		.e_phentsize = sizeof(Elf32_Phdr),
1180 		.e_phnum = 2
1181 	},
1182 	.phdr = {
1183 		[0] = {
1184 			.p_type = PT_NOTE,
1185 			.p_offset = offsetof(struct fake_elf, chrpnote),
1186 			.p_filesz = sizeof(struct chrpnote)
1187 		}, [1] = {
1188 			.p_type = PT_NOTE,
1189 			.p_offset = offsetof(struct fake_elf, rpanote),
1190 			.p_filesz = sizeof(struct rpanote)
1191 		}
1192 	},
1193 	.chrpnote = {
1194 		.namesz = sizeof("PowerPC"),
1195 		.descsz = sizeof(struct chrpdesc),
1196 		.type = 0x1275,
1197 		.name = "PowerPC",
1198 		.chrpdesc = {
1199 			.real_mode = ~0U,	/* ~0 means "don't care" */
1200 			.real_base = ~0U,
1201 			.real_size = ~0U,
1202 			.virt_base = ~0U,
1203 			.virt_size = ~0U,
1204 			.load_base = ~0U
1205 		},
1206 	},
1207 	.rpanote = {
1208 		.namesz = sizeof("IBM,RPA-Client-Config"),
1209 		.descsz = sizeof(struct rpadesc),
1210 		.type = 0x12759999,
1211 		.name = "IBM,RPA-Client-Config",
1212 		.rpadesc = {
1213 			.lpar_affinity = 0,
1214 			.min_rmo_size = 64,	/* in megabytes */
1215 			.min_rmo_percent = 0,
1216 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
1217 			.splpar = 1,
1218 			.min_load = ~0U,
1219 			.new_mem_def = 0
1220 		}
1221 	}
1222 };
1223 #endif /* __BIG_ENDIAN__ */
1224 
prom_count_smt_threads(void)1225 static int __init prom_count_smt_threads(void)
1226 {
1227 	phandle node;
1228 	char type[64];
1229 	unsigned int plen;
1230 
1231 	/* Pick up th first CPU node we can find */
1232 	for (node = 0; prom_next_node(&node); ) {
1233 		type[0] = 0;
1234 		prom_getprop(node, "device_type", type, sizeof(type));
1235 
1236 		if (prom_strcmp(type, "cpu"))
1237 			continue;
1238 		/*
1239 		 * There is an entry for each smt thread, each entry being
1240 		 * 4 bytes long.  All cpus should have the same number of
1241 		 * smt threads, so return after finding the first.
1242 		 */
1243 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1244 		if (plen == PROM_ERROR)
1245 			break;
1246 		plen >>= 2;
1247 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1248 
1249 		/* Sanity check */
1250 		if (plen < 1 || plen > 64) {
1251 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1252 				    (unsigned long)plen);
1253 			return 1;
1254 		}
1255 		return plen;
1256 	}
1257 	prom_debug("No threads found, assuming 1 per core\n");
1258 
1259 	return 1;
1260 
1261 }
1262 
prom_parse_mmu_model(u8 val,struct platform_support * support)1263 static void __init prom_parse_mmu_model(u8 val,
1264 					struct platform_support *support)
1265 {
1266 	switch (val) {
1267 	case OV5_FEAT(OV5_MMU_DYNAMIC):
1268 	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1269 		prom_debug("MMU - either supported\n");
1270 		support->radix_mmu = !prom_radix_disable;
1271 		support->hash_mmu = true;
1272 		break;
1273 	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1274 		prom_debug("MMU - radix only\n");
1275 		if (prom_radix_disable) {
1276 			/*
1277 			 * If we __have__ to do radix, we're better off ignoring
1278 			 * the command line rather than not booting.
1279 			 */
1280 			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1281 		}
1282 		support->radix_mmu = true;
1283 		break;
1284 	case OV5_FEAT(OV5_MMU_HASH):
1285 		prom_debug("MMU - hash only\n");
1286 		support->hash_mmu = true;
1287 		break;
1288 	default:
1289 		prom_debug("Unknown mmu support option: 0x%x\n", val);
1290 		break;
1291 	}
1292 }
1293 
prom_parse_xive_model(u8 val,struct platform_support * support)1294 static void __init prom_parse_xive_model(u8 val,
1295 					 struct platform_support *support)
1296 {
1297 	switch (val) {
1298 	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1299 		prom_debug("XIVE - either mode supported\n");
1300 		support->xive = !prom_xive_disable;
1301 		break;
1302 	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1303 		prom_debug("XIVE - exploitation mode supported\n");
1304 		if (prom_xive_disable) {
1305 			/*
1306 			 * If we __have__ to do XIVE, we're better off ignoring
1307 			 * the command line rather than not booting.
1308 			 */
1309 			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1310 		}
1311 		support->xive = true;
1312 		break;
1313 	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1314 		prom_debug("XIVE - legacy mode supported\n");
1315 		break;
1316 	default:
1317 		prom_debug("Unknown xive support option: 0x%x\n", val);
1318 		break;
1319 	}
1320 }
1321 
prom_parse_platform_support(u8 index,u8 val,struct platform_support * support)1322 static void __init prom_parse_platform_support(u8 index, u8 val,
1323 					       struct platform_support *support)
1324 {
1325 	switch (index) {
1326 	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1327 		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1328 		break;
1329 	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1330 		if (val & OV5_FEAT(OV5_RADIX_GTSE))
1331 			support->radix_gtse = !prom_radix_gtse_disable;
1332 		break;
1333 	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1334 		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1335 				      support);
1336 		break;
1337 	}
1338 }
1339 
prom_check_platform_support(void)1340 static void __init prom_check_platform_support(void)
1341 {
1342 	struct platform_support supported = {
1343 		.hash_mmu = false,
1344 		.radix_mmu = false,
1345 		.radix_gtse = false,
1346 		.xive = false
1347 	};
1348 	int prop_len = prom_getproplen(prom.chosen,
1349 				       "ibm,arch-vec-5-platform-support");
1350 
1351 	/*
1352 	 * First copy the architecture vec template
1353 	 *
1354 	 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1355 	 * by __memcpy() when KASAN is active
1356 	 */
1357 	memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1358 	       sizeof(ibm_architecture_vec));
1359 
1360 	prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1361 
1362 	if (prop_len > 1) {
1363 		int i;
1364 		u8 vec[8];
1365 		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1366 			   prop_len);
1367 		if (prop_len > sizeof(vec))
1368 			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1369 				    prop_len);
1370 		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1371 		for (i = 0; i < prop_len; i += 2) {
1372 			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1373 			prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1374 		}
1375 	}
1376 
1377 	if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1378 		/* Radix preferred - Check if GTSE is also supported */
1379 		prom_debug("Asking for radix\n");
1380 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1381 		if (supported.radix_gtse)
1382 			ibm_architecture_vec.vec5.radix_ext =
1383 					OV5_FEAT(OV5_RADIX_GTSE);
1384 		else
1385 			prom_debug("Radix GTSE isn't supported\n");
1386 	} else if (supported.hash_mmu) {
1387 		/* Default to hash mmu (if we can) */
1388 		prom_debug("Asking for hash\n");
1389 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1390 	} else {
1391 		/* We're probably on a legacy hypervisor */
1392 		prom_debug("Assuming legacy hash support\n");
1393 	}
1394 
1395 	if (supported.xive) {
1396 		prom_debug("Asking for XIVE\n");
1397 		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1398 	}
1399 }
1400 
prom_send_capabilities(void)1401 static void __init prom_send_capabilities(void)
1402 {
1403 	ihandle root;
1404 	prom_arg_t ret;
1405 	u32 cores;
1406 
1407 	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1408 	prom_check_platform_support();
1409 
1410 	root = call_prom("open", 1, 1, ADDR("/"));
1411 	if (root != 0) {
1412 		/* We need to tell the FW about the number of cores we support.
1413 		 *
1414 		 * To do that, we count the number of threads on the first core
1415 		 * (we assume this is the same for all cores) and use it to
1416 		 * divide NR_CPUS.
1417 		 */
1418 
1419 		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1420 		prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1421 			    cores, NR_CPUS);
1422 
1423 		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1424 
1425 		/* try calling the ibm,client-architecture-support method */
1426 		prom_printf("Calling ibm,client-architecture-support...");
1427 		if (call_prom_ret("call-method", 3, 2, &ret,
1428 				  ADDR("ibm,client-architecture-support"),
1429 				  root,
1430 				  ADDR(&ibm_architecture_vec)) == 0) {
1431 			/* the call exists... */
1432 			if (ret)
1433 				prom_printf("\nWARNING: ibm,client-architecture"
1434 					    "-support call FAILED!\n");
1435 			call_prom("close", 1, 0, root);
1436 			prom_printf(" done\n");
1437 			return;
1438 		}
1439 		call_prom("close", 1, 0, root);
1440 		prom_printf(" not implemented\n");
1441 	}
1442 
1443 #ifdef __BIG_ENDIAN__
1444 	{
1445 		ihandle elfloader;
1446 
1447 		/* no ibm,client-architecture-support call, try the old way */
1448 		elfloader = call_prom("open", 1, 1,
1449 				      ADDR("/packages/elf-loader"));
1450 		if (elfloader == 0) {
1451 			prom_printf("couldn't open /packages/elf-loader\n");
1452 			return;
1453 		}
1454 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1455 			  elfloader, ADDR(&fake_elf));
1456 		call_prom("close", 1, 0, elfloader);
1457 	}
1458 #endif /* __BIG_ENDIAN__ */
1459 }
1460 #endif /* CONFIG_PPC_PSERIES */
1461 
1462 /*
1463  * Memory allocation strategy... our layout is normally:
1464  *
1465  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
1466  *  rare cases, initrd might end up being before the kernel though.
1467  *  We assume this won't override the final kernel at 0, we have no
1468  *  provision to handle that in this version, but it should hopefully
1469  *  never happen.
1470  *
1471  *  alloc_top is set to the top of RMO, eventually shrink down if the
1472  *  TCEs overlap
1473  *
1474  *  alloc_bottom is set to the top of kernel/initrd
1475  *
1476  *  from there, allocations are done this way : rtas is allocated
1477  *  topmost, and the device-tree is allocated from the bottom. We try
1478  *  to grow the device-tree allocation as we progress. If we can't,
1479  *  then we fail, we don't currently have a facility to restart
1480  *  elsewhere, but that shouldn't be necessary.
1481  *
1482  *  Note that calls to reserve_mem have to be done explicitly, memory
1483  *  allocated with either alloc_up or alloc_down isn't automatically
1484  *  reserved.
1485  */
1486 
1487 
1488 /*
1489  * Allocates memory in the RMO upward from the kernel/initrd
1490  *
1491  * When align is 0, this is a special case, it means to allocate in place
1492  * at the current location of alloc_bottom or fail (that is basically
1493  * extending the previous allocation). Used for the device-tree flattening
1494  */
alloc_up(unsigned long size,unsigned long align)1495 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1496 {
1497 	unsigned long base = alloc_bottom;
1498 	unsigned long addr = 0;
1499 
1500 	if (align)
1501 		base = ALIGN(base, align);
1502 	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1503 	if (ram_top == 0)
1504 		prom_panic("alloc_up() called with mem not initialized\n");
1505 
1506 	if (align)
1507 		base = ALIGN(alloc_bottom, align);
1508 	else
1509 		base = alloc_bottom;
1510 
1511 	for(; (base + size) <= alloc_top;
1512 	    base = ALIGN(base + 0x100000, align)) {
1513 		prom_debug("    trying: 0x%lx\n\r", base);
1514 		addr = (unsigned long)prom_claim(base, size, 0);
1515 		if (addr != PROM_ERROR && addr != 0)
1516 			break;
1517 		addr = 0;
1518 		if (align == 0)
1519 			break;
1520 	}
1521 	if (addr == 0)
1522 		return 0;
1523 	alloc_bottom = addr + size;
1524 
1525 	prom_debug(" -> %lx\n", addr);
1526 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1527 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1528 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1529 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1530 	prom_debug("  ram_top      : %lx\n", ram_top);
1531 
1532 	return addr;
1533 }
1534 
1535 /*
1536  * Allocates memory downward, either from top of RMO, or if highmem
1537  * is set, from the top of RAM.  Note that this one doesn't handle
1538  * failures.  It does claim memory if highmem is not set.
1539  */
alloc_down(unsigned long size,unsigned long align,int highmem)1540 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1541 				       int highmem)
1542 {
1543 	unsigned long base, addr = 0;
1544 
1545 	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1546 		   highmem ? "(high)" : "(low)");
1547 	if (ram_top == 0)
1548 		prom_panic("alloc_down() called with mem not initialized\n");
1549 
1550 	if (highmem) {
1551 		/* Carve out storage for the TCE table. */
1552 		addr = ALIGN_DOWN(alloc_top_high - size, align);
1553 		if (addr <= alloc_bottom)
1554 			return 0;
1555 		/* Will we bump into the RMO ? If yes, check out that we
1556 		 * didn't overlap existing allocations there, if we did,
1557 		 * we are dead, we must be the first in town !
1558 		 */
1559 		if (addr < rmo_top) {
1560 			/* Good, we are first */
1561 			if (alloc_top == rmo_top)
1562 				alloc_top = rmo_top = addr;
1563 			else
1564 				return 0;
1565 		}
1566 		alloc_top_high = addr;
1567 		goto bail;
1568 	}
1569 
1570 	base = ALIGN_DOWN(alloc_top - size, align);
1571 	for (; base > alloc_bottom;
1572 	     base = ALIGN_DOWN(base - 0x100000, align))  {
1573 		prom_debug("    trying: 0x%lx\n\r", base);
1574 		addr = (unsigned long)prom_claim(base, size, 0);
1575 		if (addr != PROM_ERROR && addr != 0)
1576 			break;
1577 		addr = 0;
1578 	}
1579 	if (addr == 0)
1580 		return 0;
1581 	alloc_top = addr;
1582 
1583  bail:
1584 	prom_debug(" -> %lx\n", addr);
1585 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1586 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1587 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1588 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1589 	prom_debug("  ram_top      : %lx\n", ram_top);
1590 
1591 	return addr;
1592 }
1593 
1594 /*
1595  * Parse a "reg" cell
1596  */
prom_next_cell(int s,cell_t ** cellp)1597 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1598 {
1599 	cell_t *p = *cellp;
1600 	unsigned long r = 0;
1601 
1602 	/* Ignore more than 2 cells */
1603 	while (s > sizeof(unsigned long) / 4) {
1604 		p++;
1605 		s--;
1606 	}
1607 	r = be32_to_cpu(*p++);
1608 #ifdef CONFIG_PPC64
1609 	if (s > 1) {
1610 		r <<= 32;
1611 		r |= be32_to_cpu(*(p++));
1612 	}
1613 #endif
1614 	*cellp = p;
1615 	return r;
1616 }
1617 
1618 /*
1619  * Very dumb function for adding to the memory reserve list, but
1620  * we don't need anything smarter at this point
1621  *
1622  * XXX Eventually check for collisions.  They should NEVER happen.
1623  * If problems seem to show up, it would be a good start to track
1624  * them down.
1625  */
reserve_mem(u64 base,u64 size)1626 static void __init reserve_mem(u64 base, u64 size)
1627 {
1628 	u64 top = base + size;
1629 	unsigned long cnt = mem_reserve_cnt;
1630 
1631 	if (size == 0)
1632 		return;
1633 
1634 	/* We need to always keep one empty entry so that we
1635 	 * have our terminator with "size" set to 0 since we are
1636 	 * dumb and just copy this entire array to the boot params
1637 	 */
1638 	base = ALIGN_DOWN(base, PAGE_SIZE);
1639 	top = ALIGN(top, PAGE_SIZE);
1640 	size = top - base;
1641 
1642 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1643 		prom_panic("Memory reserve map exhausted !\n");
1644 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1645 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1646 	mem_reserve_cnt = cnt + 1;
1647 }
1648 
1649 /*
1650  * Initialize memory allocation mechanism, parse "memory" nodes and
1651  * obtain that way the top of memory and RMO to setup out local allocator
1652  */
prom_init_mem(void)1653 static void __init prom_init_mem(void)
1654 {
1655 	phandle node;
1656 	char type[64];
1657 	unsigned int plen;
1658 	cell_t *p, *endp;
1659 	__be32 val;
1660 	u32 rac, rsc;
1661 
1662 	/*
1663 	 * We iterate the memory nodes to find
1664 	 * 1) top of RMO (first node)
1665 	 * 2) top of memory
1666 	 */
1667 	val = cpu_to_be32(2);
1668 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1669 	rac = be32_to_cpu(val);
1670 	val = cpu_to_be32(1);
1671 	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1672 	rsc = be32_to_cpu(val);
1673 	prom_debug("root_addr_cells: %x\n", rac);
1674 	prom_debug("root_size_cells: %x\n", rsc);
1675 
1676 	prom_debug("scanning memory:\n");
1677 
1678 	for (node = 0; prom_next_node(&node); ) {
1679 		type[0] = 0;
1680 		prom_getprop(node, "device_type", type, sizeof(type));
1681 
1682 		if (type[0] == 0) {
1683 			/*
1684 			 * CHRP Longtrail machines have no device_type
1685 			 * on the memory node, so check the name instead...
1686 			 */
1687 			prom_getprop(node, "name", type, sizeof(type));
1688 		}
1689 		if (prom_strcmp(type, "memory"))
1690 			continue;
1691 
1692 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1693 		if (plen > sizeof(regbuf)) {
1694 			prom_printf("memory node too large for buffer !\n");
1695 			plen = sizeof(regbuf);
1696 		}
1697 		p = regbuf;
1698 		endp = p + (plen / sizeof(cell_t));
1699 
1700 #ifdef DEBUG_PROM
1701 		memset(prom_scratch, 0, sizeof(prom_scratch));
1702 		call_prom("package-to-path", 3, 1, node, prom_scratch,
1703 			  sizeof(prom_scratch) - 1);
1704 		prom_debug("  node %s :\n", prom_scratch);
1705 #endif /* DEBUG_PROM */
1706 
1707 		while ((endp - p) >= (rac + rsc)) {
1708 			unsigned long base, size;
1709 
1710 			base = prom_next_cell(rac, &p);
1711 			size = prom_next_cell(rsc, &p);
1712 
1713 			if (size == 0)
1714 				continue;
1715 			prom_debug("    %lx %lx\n", base, size);
1716 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1717 				rmo_top = size;
1718 			if ((base + size) > ram_top)
1719 				ram_top = base + size;
1720 		}
1721 	}
1722 
1723 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1724 
1725 	/*
1726 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1727 	 * alloc_top_high. This must be the real top of RAM so we can put
1728 	 * TCE's up there.
1729 	 */
1730 
1731 	alloc_top_high = ram_top;
1732 
1733 	if (prom_memory_limit) {
1734 		if (prom_memory_limit <= alloc_bottom) {
1735 			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1736 				    prom_memory_limit);
1737 			prom_memory_limit = 0;
1738 		} else if (prom_memory_limit >= ram_top) {
1739 			prom_printf("Ignoring mem=%lx >= ram_top.\n",
1740 				    prom_memory_limit);
1741 			prom_memory_limit = 0;
1742 		} else {
1743 			ram_top = prom_memory_limit;
1744 			rmo_top = min(rmo_top, prom_memory_limit);
1745 		}
1746 	}
1747 
1748 	/*
1749 	 * Setup our top alloc point, that is top of RMO or top of
1750 	 * segment 0 when running non-LPAR.
1751 	 * Some RS64 machines have buggy firmware where claims up at
1752 	 * 1GB fail.  Cap at 768MB as a workaround.
1753 	 * Since 768MB is plenty of room, and we need to cap to something
1754 	 * reasonable on 32-bit, cap at 768MB on all machines.
1755 	 */
1756 	if (!rmo_top)
1757 		rmo_top = ram_top;
1758 	rmo_top = min(0x30000000ul, rmo_top);
1759 	alloc_top = rmo_top;
1760 	alloc_top_high = ram_top;
1761 
1762 	/*
1763 	 * Check if we have an initrd after the kernel but still inside
1764 	 * the RMO.  If we do move our bottom point to after it.
1765 	 */
1766 	if (prom_initrd_start &&
1767 	    prom_initrd_start < rmo_top &&
1768 	    prom_initrd_end > alloc_bottom)
1769 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1770 
1771 	prom_printf("memory layout at init:\n");
1772 	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
1773 		    prom_memory_limit);
1774 	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
1775 	prom_printf("  alloc_top    : %lx\n", alloc_top);
1776 	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
1777 	prom_printf("  rmo_top      : %lx\n", rmo_top);
1778 	prom_printf("  ram_top      : %lx\n", ram_top);
1779 }
1780 
prom_close_stdin(void)1781 static void __init prom_close_stdin(void)
1782 {
1783 	__be32 val;
1784 	ihandle stdin;
1785 
1786 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1787 		stdin = be32_to_cpu(val);
1788 		call_prom("close", 1, 0, stdin);
1789 	}
1790 }
1791 
1792 #ifdef CONFIG_PPC_SVM
prom_rtas_hcall(uint64_t args)1793 static int __init prom_rtas_hcall(uint64_t args)
1794 {
1795 	register uint64_t arg1 asm("r3") = H_RTAS;
1796 	register uint64_t arg2 asm("r4") = args;
1797 
1798 	asm volatile("sc 1\n" : "=r" (arg1) :
1799 			"r" (arg1),
1800 			"r" (arg2) :);
1801 	srr_regs_clobbered();
1802 
1803 	return arg1;
1804 }
1805 
1806 static struct rtas_args __prombss os_term_args;
1807 
prom_rtas_os_term(char * str)1808 static void __init prom_rtas_os_term(char *str)
1809 {
1810 	phandle rtas_node;
1811 	__be32 val;
1812 	u32 token;
1813 
1814 	prom_debug("%s: start...\n", __func__);
1815 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1816 	prom_debug("rtas_node: %x\n", rtas_node);
1817 	if (!PHANDLE_VALID(rtas_node))
1818 		return;
1819 
1820 	val = 0;
1821 	prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1822 	token = be32_to_cpu(val);
1823 	prom_debug("ibm,os-term: %x\n", token);
1824 	if (token == 0)
1825 		prom_panic("Could not get token for ibm,os-term\n");
1826 	os_term_args.token = cpu_to_be32(token);
1827 	os_term_args.nargs = cpu_to_be32(1);
1828 	os_term_args.nret = cpu_to_be32(1);
1829 	os_term_args.args[0] = cpu_to_be32(__pa(str));
1830 	prom_rtas_hcall((uint64_t)&os_term_args);
1831 }
1832 #endif /* CONFIG_PPC_SVM */
1833 
1834 /*
1835  * Allocate room for and instantiate RTAS
1836  */
prom_instantiate_rtas(void)1837 static void __init prom_instantiate_rtas(void)
1838 {
1839 	phandle rtas_node;
1840 	ihandle rtas_inst;
1841 	u32 base, entry = 0;
1842 	__be32 val;
1843 	u32 size = 0;
1844 
1845 	prom_debug("prom_instantiate_rtas: start...\n");
1846 
1847 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1848 	prom_debug("rtas_node: %x\n", rtas_node);
1849 	if (!PHANDLE_VALID(rtas_node))
1850 		return;
1851 
1852 	val = 0;
1853 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1854 	size = be32_to_cpu(val);
1855 	if (size == 0)
1856 		return;
1857 
1858 	base = alloc_down(size, PAGE_SIZE, 0);
1859 	if (base == 0)
1860 		prom_panic("Could not allocate memory for RTAS\n");
1861 
1862 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1863 	if (!IHANDLE_VALID(rtas_inst)) {
1864 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1865 		return;
1866 	}
1867 
1868 	prom_printf("instantiating rtas at 0x%x...", base);
1869 
1870 	if (call_prom_ret("call-method", 3, 2, &entry,
1871 			  ADDR("instantiate-rtas"),
1872 			  rtas_inst, base) != 0
1873 	    || entry == 0) {
1874 		prom_printf(" failed\n");
1875 		return;
1876 	}
1877 	prom_printf(" done\n");
1878 
1879 	reserve_mem(base, size);
1880 
1881 	val = cpu_to_be32(base);
1882 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1883 		     &val, sizeof(val));
1884 	val = cpu_to_be32(entry);
1885 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1886 		     &val, sizeof(val));
1887 
1888 	/* Check if it supports "query-cpu-stopped-state" */
1889 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1890 			 &val, sizeof(val)) != PROM_ERROR)
1891 		rtas_has_query_cpu_stopped = true;
1892 
1893 	prom_debug("rtas base     = 0x%x\n", base);
1894 	prom_debug("rtas entry    = 0x%x\n", entry);
1895 	prom_debug("rtas size     = 0x%x\n", size);
1896 
1897 	prom_debug("prom_instantiate_rtas: end...\n");
1898 }
1899 
1900 #ifdef CONFIG_PPC64
1901 /*
1902  * Allocate room for and instantiate Stored Measurement Log (SML)
1903  */
prom_instantiate_sml(void)1904 static void __init prom_instantiate_sml(void)
1905 {
1906 	phandle ibmvtpm_node;
1907 	ihandle ibmvtpm_inst;
1908 	u32 entry = 0, size = 0, succ = 0;
1909 	u64 base;
1910 	__be32 val;
1911 
1912 	prom_debug("prom_instantiate_sml: start...\n");
1913 
1914 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1915 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1916 	if (!PHANDLE_VALID(ibmvtpm_node))
1917 		return;
1918 
1919 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1920 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1921 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1922 		return;
1923 	}
1924 
1925 	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1926 			 &val, sizeof(val)) != PROM_ERROR) {
1927 		if (call_prom_ret("call-method", 2, 2, &succ,
1928 				  ADDR("reformat-sml-to-efi-alignment"),
1929 				  ibmvtpm_inst) != 0 || succ == 0) {
1930 			prom_printf("Reformat SML to EFI alignment failed\n");
1931 			return;
1932 		}
1933 
1934 		if (call_prom_ret("call-method", 2, 2, &size,
1935 				  ADDR("sml-get-allocated-size"),
1936 				  ibmvtpm_inst) != 0 || size == 0) {
1937 			prom_printf("SML get allocated size failed\n");
1938 			return;
1939 		}
1940 	} else {
1941 		if (call_prom_ret("call-method", 2, 2, &size,
1942 				  ADDR("sml-get-handover-size"),
1943 				  ibmvtpm_inst) != 0 || size == 0) {
1944 			prom_printf("SML get handover size failed\n");
1945 			return;
1946 		}
1947 	}
1948 
1949 	base = alloc_down(size, PAGE_SIZE, 0);
1950 	if (base == 0)
1951 		prom_panic("Could not allocate memory for sml\n");
1952 
1953 	prom_printf("instantiating sml at 0x%llx...", base);
1954 
1955 	memset((void *)base, 0, size);
1956 
1957 	if (call_prom_ret("call-method", 4, 2, &entry,
1958 			  ADDR("sml-handover"),
1959 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1960 		prom_printf("SML handover failed\n");
1961 		return;
1962 	}
1963 	prom_printf(" done\n");
1964 
1965 	reserve_mem(base, size);
1966 
1967 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1968 		     &base, sizeof(base));
1969 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1970 		     &size, sizeof(size));
1971 
1972 	prom_debug("sml base     = 0x%llx\n", base);
1973 	prom_debug("sml size     = 0x%x\n", size);
1974 
1975 	prom_debug("prom_instantiate_sml: end...\n");
1976 }
1977 
1978 /*
1979  * Allocate room for and initialize TCE tables
1980  */
1981 #ifdef __BIG_ENDIAN__
prom_initialize_tce_table(void)1982 static void __init prom_initialize_tce_table(void)
1983 {
1984 	phandle node;
1985 	ihandle phb_node;
1986 	char compatible[64], type[64], model[64];
1987 	char *path = prom_scratch;
1988 	u64 base, align;
1989 	u32 minalign, minsize;
1990 	u64 tce_entry, *tce_entryp;
1991 	u64 local_alloc_top, local_alloc_bottom;
1992 	u64 i;
1993 
1994 	if (prom_iommu_off)
1995 		return;
1996 
1997 	prom_debug("starting prom_initialize_tce_table\n");
1998 
1999 	/* Cache current top of allocs so we reserve a single block */
2000 	local_alloc_top = alloc_top_high;
2001 	local_alloc_bottom = local_alloc_top;
2002 
2003 	/* Search all nodes looking for PHBs. */
2004 	for (node = 0; prom_next_node(&node); ) {
2005 		compatible[0] = 0;
2006 		type[0] = 0;
2007 		model[0] = 0;
2008 		prom_getprop(node, "compatible",
2009 			     compatible, sizeof(compatible));
2010 		prom_getprop(node, "device_type", type, sizeof(type));
2011 		prom_getprop(node, "model", model, sizeof(model));
2012 
2013 		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2014 			continue;
2015 
2016 		/* Keep the old logic intact to avoid regression. */
2017 		if (compatible[0] != 0) {
2018 			if ((prom_strstr(compatible, "python") == NULL) &&
2019 			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
2020 			    (prom_strstr(compatible, "Winnipeg") == NULL))
2021 				continue;
2022 		} else if (model[0] != 0) {
2023 			if ((prom_strstr(model, "ython") == NULL) &&
2024 			    (prom_strstr(model, "peedwagon") == NULL) &&
2025 			    (prom_strstr(model, "innipeg") == NULL))
2026 				continue;
2027 		}
2028 
2029 		if (prom_getprop(node, "tce-table-minalign", &minalign,
2030 				 sizeof(minalign)) == PROM_ERROR)
2031 			minalign = 0;
2032 		if (prom_getprop(node, "tce-table-minsize", &minsize,
2033 				 sizeof(minsize)) == PROM_ERROR)
2034 			minsize = 4UL << 20;
2035 
2036 		/*
2037 		 * Even though we read what OF wants, we just set the table
2038 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
2039 		 * By doing this, we avoid the pitfalls of trying to DMA to
2040 		 * MMIO space and the DMA alias hole.
2041 		 */
2042 		minsize = 4UL << 20;
2043 
2044 		/* Align to the greater of the align or size */
2045 		align = max(minalign, minsize);
2046 		base = alloc_down(minsize, align, 1);
2047 		if (base == 0)
2048 			prom_panic("ERROR, cannot find space for TCE table.\n");
2049 		if (base < local_alloc_bottom)
2050 			local_alloc_bottom = base;
2051 
2052 		/* It seems OF doesn't null-terminate the path :-( */
2053 		memset(path, 0, sizeof(prom_scratch));
2054 		/* Call OF to setup the TCE hardware */
2055 		if (call_prom("package-to-path", 3, 1, node,
2056 			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2057 			prom_printf("package-to-path failed\n");
2058 		}
2059 
2060 		/* Save away the TCE table attributes for later use. */
2061 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2062 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2063 
2064 		prom_debug("TCE table: %s\n", path);
2065 		prom_debug("\tnode = 0x%x\n", node);
2066 		prom_debug("\tbase = 0x%llx\n", base);
2067 		prom_debug("\tsize = 0x%x\n", minsize);
2068 
2069 		/* Initialize the table to have a one-to-one mapping
2070 		 * over the allocated size.
2071 		 */
2072 		tce_entryp = (u64 *)base;
2073 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
2074 			tce_entry = (i << PAGE_SHIFT);
2075 			tce_entry |= 0x3;
2076 			*tce_entryp = tce_entry;
2077 		}
2078 
2079 		prom_printf("opening PHB %s", path);
2080 		phb_node = call_prom("open", 1, 1, path);
2081 		if (phb_node == 0)
2082 			prom_printf("... failed\n");
2083 		else
2084 			prom_printf("... done\n");
2085 
2086 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2087 			  phb_node, -1, minsize,
2088 			  (u32) base, (u32) (base >> 32));
2089 		call_prom("close", 1, 0, phb_node);
2090 	}
2091 
2092 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2093 
2094 	/* These are only really needed if there is a memory limit in
2095 	 * effect, but we don't know so export them always. */
2096 	prom_tce_alloc_start = local_alloc_bottom;
2097 	prom_tce_alloc_end = local_alloc_top;
2098 
2099 	/* Flag the first invalid entry */
2100 	prom_debug("ending prom_initialize_tce_table\n");
2101 }
2102 #endif /* __BIG_ENDIAN__ */
2103 #endif /* CONFIG_PPC64 */
2104 
2105 /*
2106  * With CHRP SMP we need to use the OF to start the other processors.
2107  * We can't wait until smp_boot_cpus (the OF is trashed by then)
2108  * so we have to put the processors into a holding pattern controlled
2109  * by the kernel (not OF) before we destroy the OF.
2110  *
2111  * This uses a chunk of low memory, puts some holding pattern
2112  * code there and sends the other processors off to there until
2113  * smp_boot_cpus tells them to do something.  The holding pattern
2114  * checks that address until its cpu # is there, when it is that
2115  * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
2116  * of setting those values.
2117  *
2118  * We also use physical address 0x4 here to tell when a cpu
2119  * is in its holding pattern code.
2120  *
2121  * -- Cort
2122  */
2123 /*
2124  * We want to reference the copy of __secondary_hold_* in the
2125  * 0 - 0x100 address range
2126  */
2127 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
2128 
prom_hold_cpus(void)2129 static void __init prom_hold_cpus(void)
2130 {
2131 	unsigned long i;
2132 	phandle node;
2133 	char type[64];
2134 	unsigned long *spinloop
2135 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
2136 	unsigned long *acknowledge
2137 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
2138 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2139 
2140 	/*
2141 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
2142 	 * we skip this stage, the CPUs will be started by the
2143 	 * kernel using RTAS.
2144 	 */
2145 	if ((of_platform == PLATFORM_PSERIES ||
2146 	     of_platform == PLATFORM_PSERIES_LPAR) &&
2147 	    rtas_has_query_cpu_stopped) {
2148 		prom_printf("prom_hold_cpus: skipped\n");
2149 		return;
2150 	}
2151 
2152 	prom_debug("prom_hold_cpus: start...\n");
2153 	prom_debug("    1) spinloop       = 0x%lx\n", (unsigned long)spinloop);
2154 	prom_debug("    1) *spinloop      = 0x%lx\n", *spinloop);
2155 	prom_debug("    1) acknowledge    = 0x%lx\n",
2156 		   (unsigned long)acknowledge);
2157 	prom_debug("    1) *acknowledge   = 0x%lx\n", *acknowledge);
2158 	prom_debug("    1) secondary_hold = 0x%lx\n", secondary_hold);
2159 
2160 	/* Set the common spinloop variable, so all of the secondary cpus
2161 	 * will block when they are awakened from their OF spinloop.
2162 	 * This must occur for both SMP and non SMP kernels, since OF will
2163 	 * be trashed when we move the kernel.
2164 	 */
2165 	*spinloop = 0;
2166 
2167 	/* look for cpus */
2168 	for (node = 0; prom_next_node(&node); ) {
2169 		unsigned int cpu_no;
2170 		__be32 reg;
2171 
2172 		type[0] = 0;
2173 		prom_getprop(node, "device_type", type, sizeof(type));
2174 		if (prom_strcmp(type, "cpu") != 0)
2175 			continue;
2176 
2177 		/* Skip non-configured cpus. */
2178 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2179 			if (prom_strcmp(type, "okay") != 0)
2180 				continue;
2181 
2182 		reg = cpu_to_be32(-1); /* make sparse happy */
2183 		prom_getprop(node, "reg", &reg, sizeof(reg));
2184 		cpu_no = be32_to_cpu(reg);
2185 
2186 		prom_debug("cpu hw idx   = %u\n", cpu_no);
2187 
2188 		/* Init the acknowledge var which will be reset by
2189 		 * the secondary cpu when it awakens from its OF
2190 		 * spinloop.
2191 		 */
2192 		*acknowledge = (unsigned long)-1;
2193 
2194 		if (cpu_no != prom.cpu) {
2195 			/* Primary Thread of non-boot cpu or any thread */
2196 			prom_printf("starting cpu hw idx %u... ", cpu_no);
2197 			call_prom("start-cpu", 3, 0, node,
2198 				  secondary_hold, cpu_no);
2199 
2200 			for (i = 0; (i < 100000000) &&
2201 			     (*acknowledge == ((unsigned long)-1)); i++ )
2202 				mb();
2203 
2204 			if (*acknowledge == cpu_no)
2205 				prom_printf("done\n");
2206 			else
2207 				prom_printf("failed: %lx\n", *acknowledge);
2208 		}
2209 #ifdef CONFIG_SMP
2210 		else
2211 			prom_printf("boot cpu hw idx %u\n", cpu_no);
2212 #endif /* CONFIG_SMP */
2213 	}
2214 
2215 	prom_debug("prom_hold_cpus: end...\n");
2216 }
2217 
2218 
prom_init_client_services(unsigned long pp)2219 static void __init prom_init_client_services(unsigned long pp)
2220 {
2221 	/* Get a handle to the prom entry point before anything else */
2222 	prom_entry = pp;
2223 
2224 	/* get a handle for the stdout device */
2225 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2226 	if (!PHANDLE_VALID(prom.chosen))
2227 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
2228 
2229 	/* get device tree root */
2230 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2231 	if (!PHANDLE_VALID(prom.root))
2232 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2233 
2234 	prom.mmumap = 0;
2235 }
2236 
2237 #ifdef CONFIG_PPC32
2238 /*
2239  * For really old powermacs, we need to map things we claim.
2240  * For that, we need the ihandle of the mmu.
2241  * Also, on the longtrail, we need to work around other bugs.
2242  */
prom_find_mmu(void)2243 static void __init prom_find_mmu(void)
2244 {
2245 	phandle oprom;
2246 	char version[64];
2247 
2248 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2249 	if (!PHANDLE_VALID(oprom))
2250 		return;
2251 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2252 		return;
2253 	version[sizeof(version) - 1] = 0;
2254 	/* XXX might need to add other versions here */
2255 	if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2256 		of_workarounds = OF_WA_CLAIM;
2257 	else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2258 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2259 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2260 	} else
2261 		return;
2262 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2263 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2264 		     sizeof(prom.mmumap));
2265 	prom.mmumap = be32_to_cpu(prom.mmumap);
2266 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2267 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
2268 }
2269 #else
2270 #define prom_find_mmu()
2271 #endif
2272 
prom_init_stdout(void)2273 static void __init prom_init_stdout(void)
2274 {
2275 	char *path = of_stdout_device;
2276 	char type[16];
2277 	phandle stdout_node;
2278 	__be32 val;
2279 
2280 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2281 		prom_panic("cannot find stdout");
2282 
2283 	prom.stdout = be32_to_cpu(val);
2284 
2285 	/* Get the full OF pathname of the stdout device */
2286 	memset(path, 0, 256);
2287 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2288 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
2289 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2290 		     path, prom_strlen(path) + 1);
2291 
2292 	/* instance-to-package fails on PA-Semi */
2293 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2294 	if (stdout_node != PROM_ERROR) {
2295 		val = cpu_to_be32(stdout_node);
2296 
2297 		/* If it's a display, note it */
2298 		memset(type, 0, sizeof(type));
2299 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2300 		if (prom_strcmp(type, "display") == 0)
2301 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2302 	}
2303 }
2304 
prom_find_machine_type(void)2305 static int __init prom_find_machine_type(void)
2306 {
2307 	static char compat[256] __prombss;
2308 	int len, i = 0;
2309 #ifdef CONFIG_PPC64
2310 	phandle rtas;
2311 	int x;
2312 #endif
2313 
2314 	/* Look for a PowerMac or a Cell */
2315 	len = prom_getprop(prom.root, "compatible",
2316 			   compat, sizeof(compat)-1);
2317 	if (len > 0) {
2318 		compat[len] = 0;
2319 		while (i < len) {
2320 			char *p = &compat[i];
2321 			int sl = prom_strlen(p);
2322 			if (sl == 0)
2323 				break;
2324 			if (prom_strstr(p, "Power Macintosh") ||
2325 			    prom_strstr(p, "MacRISC"))
2326 				return PLATFORM_POWERMAC;
2327 #ifdef CONFIG_PPC64
2328 			/* We must make sure we don't detect the IBM Cell
2329 			 * blades as pSeries due to some firmware issues,
2330 			 * so we do it here.
2331 			 */
2332 			if (prom_strstr(p, "IBM,CBEA") ||
2333 			    prom_strstr(p, "IBM,CPBW-1.0"))
2334 				return PLATFORM_GENERIC;
2335 #endif /* CONFIG_PPC64 */
2336 			i += sl + 1;
2337 		}
2338 	}
2339 #ifdef CONFIG_PPC64
2340 	/* Try to figure out if it's an IBM pSeries or any other
2341 	 * PAPR compliant platform. We assume it is if :
2342 	 *  - /device_type is "chrp" (please, do NOT use that for future
2343 	 *    non-IBM designs !
2344 	 *  - it has /rtas
2345 	 */
2346 	len = prom_getprop(prom.root, "device_type",
2347 			   compat, sizeof(compat)-1);
2348 	if (len <= 0)
2349 		return PLATFORM_GENERIC;
2350 	if (prom_strcmp(compat, "chrp"))
2351 		return PLATFORM_GENERIC;
2352 
2353 	/* Default to pSeries. We need to know if we are running LPAR */
2354 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2355 	if (!PHANDLE_VALID(rtas))
2356 		return PLATFORM_GENERIC;
2357 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2358 	if (x != PROM_ERROR) {
2359 		prom_debug("Hypertas detected, assuming LPAR !\n");
2360 		return PLATFORM_PSERIES_LPAR;
2361 	}
2362 	return PLATFORM_PSERIES;
2363 #else
2364 	return PLATFORM_GENERIC;
2365 #endif
2366 }
2367 
prom_set_color(ihandle ih,int i,int r,int g,int b)2368 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2369 {
2370 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2371 }
2372 
2373 /*
2374  * If we have a display that we don't know how to drive,
2375  * we will want to try to execute OF's open method for it
2376  * later.  However, OF will probably fall over if we do that
2377  * we've taken over the MMU.
2378  * So we check whether we will need to open the display,
2379  * and if so, open it now.
2380  */
prom_check_displays(void)2381 static void __init prom_check_displays(void)
2382 {
2383 	char type[16], *path;
2384 	phandle node;
2385 	ihandle ih;
2386 	int i;
2387 
2388 	static const unsigned char default_colors[] __initconst = {
2389 		0x00, 0x00, 0x00,
2390 		0x00, 0x00, 0xaa,
2391 		0x00, 0xaa, 0x00,
2392 		0x00, 0xaa, 0xaa,
2393 		0xaa, 0x00, 0x00,
2394 		0xaa, 0x00, 0xaa,
2395 		0xaa, 0xaa, 0x00,
2396 		0xaa, 0xaa, 0xaa,
2397 		0x55, 0x55, 0x55,
2398 		0x55, 0x55, 0xff,
2399 		0x55, 0xff, 0x55,
2400 		0x55, 0xff, 0xff,
2401 		0xff, 0x55, 0x55,
2402 		0xff, 0x55, 0xff,
2403 		0xff, 0xff, 0x55,
2404 		0xff, 0xff, 0xff
2405 	};
2406 	const unsigned char *clut;
2407 
2408 	prom_debug("Looking for displays\n");
2409 	for (node = 0; prom_next_node(&node); ) {
2410 		memset(type, 0, sizeof(type));
2411 		prom_getprop(node, "device_type", type, sizeof(type));
2412 		if (prom_strcmp(type, "display") != 0)
2413 			continue;
2414 
2415 		/* It seems OF doesn't null-terminate the path :-( */
2416 		path = prom_scratch;
2417 		memset(path, 0, sizeof(prom_scratch));
2418 
2419 		/*
2420 		 * leave some room at the end of the path for appending extra
2421 		 * arguments
2422 		 */
2423 		if (call_prom("package-to-path", 3, 1, node, path,
2424 			      sizeof(prom_scratch) - 10) == PROM_ERROR)
2425 			continue;
2426 		prom_printf("found display   : %s, opening... ", path);
2427 
2428 		ih = call_prom("open", 1, 1, path);
2429 		if (ih == 0) {
2430 			prom_printf("failed\n");
2431 			continue;
2432 		}
2433 
2434 		/* Success */
2435 		prom_printf("done\n");
2436 		prom_setprop(node, path, "linux,opened", NULL, 0);
2437 
2438 		/* Setup a usable color table when the appropriate
2439 		 * method is available. Should update this to set-colors */
2440 		clut = default_colors;
2441 		for (i = 0; i < 16; i++, clut += 3)
2442 			if (prom_set_color(ih, i, clut[0], clut[1],
2443 					   clut[2]) != 0)
2444 				break;
2445 
2446 #ifdef CONFIG_LOGO_LINUX_CLUT224
2447 		clut = PTRRELOC(logo_linux_clut224.clut);
2448 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2449 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2450 					   clut[2]) != 0)
2451 				break;
2452 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2453 
2454 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2455 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2456 		    PROM_ERROR) {
2457 			u32 width, height, pitch, addr;
2458 
2459 			prom_printf("Setting btext !\n");
2460 
2461 			if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2462 				return;
2463 
2464 			if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2465 				return;
2466 
2467 			if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2468 				return;
2469 
2470 			if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2471 				return;
2472 
2473 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2474 				    width, height, pitch, addr);
2475 			btext_setup_display(width, height, 8, pitch, addr);
2476 			btext_prepare_BAT();
2477 		}
2478 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2479 	}
2480 }
2481 
2482 
2483 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
make_room(unsigned long * mem_start,unsigned long * mem_end,unsigned long needed,unsigned long align)2484 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2485 			      unsigned long needed, unsigned long align)
2486 {
2487 	void *ret;
2488 
2489 	*mem_start = ALIGN(*mem_start, align);
2490 	while ((*mem_start + needed) > *mem_end) {
2491 		unsigned long room, chunk;
2492 
2493 		prom_debug("Chunk exhausted, claiming more at %lx...\n",
2494 			   alloc_bottom);
2495 		room = alloc_top - alloc_bottom;
2496 		if (room > DEVTREE_CHUNK_SIZE)
2497 			room = DEVTREE_CHUNK_SIZE;
2498 		if (room < PAGE_SIZE)
2499 			prom_panic("No memory for flatten_device_tree "
2500 				   "(no room)\n");
2501 		chunk = alloc_up(room, 0);
2502 		if (chunk == 0)
2503 			prom_panic("No memory for flatten_device_tree "
2504 				   "(claim failed)\n");
2505 		*mem_end = chunk + room;
2506 	}
2507 
2508 	ret = (void *)*mem_start;
2509 	*mem_start += needed;
2510 
2511 	return ret;
2512 }
2513 
2514 #define dt_push_token(token, mem_start, mem_end) do { 			\
2515 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2516 		*(__be32 *)room = cpu_to_be32(token);			\
2517 	} while(0)
2518 
dt_find_string(char * str)2519 static unsigned long __init dt_find_string(char *str)
2520 {
2521 	char *s, *os;
2522 
2523 	s = os = (char *)dt_string_start;
2524 	s += 4;
2525 	while (s <  (char *)dt_string_end) {
2526 		if (prom_strcmp(s, str) == 0)
2527 			return s - os;
2528 		s += prom_strlen(s) + 1;
2529 	}
2530 	return 0;
2531 }
2532 
2533 /*
2534  * The Open Firmware 1275 specification states properties must be 31 bytes or
2535  * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2536  */
2537 #define MAX_PROPERTY_NAME 64
2538 
scan_dt_build_strings(phandle node,unsigned long * mem_start,unsigned long * mem_end)2539 static void __init scan_dt_build_strings(phandle node,
2540 					 unsigned long *mem_start,
2541 					 unsigned long *mem_end)
2542 {
2543 	char *prev_name, *namep, *sstart;
2544 	unsigned long soff;
2545 	phandle child;
2546 
2547 	sstart =  (char *)dt_string_start;
2548 
2549 	/* get and store all property names */
2550 	prev_name = "";
2551 	for (;;) {
2552 		/* 64 is max len of name including nul. */
2553 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2554 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2555 			/* No more nodes: unwind alloc */
2556 			*mem_start = (unsigned long)namep;
2557 			break;
2558 		}
2559 
2560  		/* skip "name" */
2561 		if (prom_strcmp(namep, "name") == 0) {
2562  			*mem_start = (unsigned long)namep;
2563  			prev_name = "name";
2564  			continue;
2565  		}
2566 		/* get/create string entry */
2567 		soff = dt_find_string(namep);
2568 		if (soff != 0) {
2569 			*mem_start = (unsigned long)namep;
2570 			namep = sstart + soff;
2571 		} else {
2572 			/* Trim off some if we can */
2573 			*mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2574 			dt_string_end = *mem_start;
2575 		}
2576 		prev_name = namep;
2577 	}
2578 
2579 	/* do all our children */
2580 	child = call_prom("child", 1, 1, node);
2581 	while (child != 0) {
2582 		scan_dt_build_strings(child, mem_start, mem_end);
2583 		child = call_prom("peer", 1, 1, child);
2584 	}
2585 }
2586 
scan_dt_build_struct(phandle node,unsigned long * mem_start,unsigned long * mem_end)2587 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2588 					unsigned long *mem_end)
2589 {
2590 	phandle child;
2591 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2592 	unsigned long soff;
2593 	unsigned char *valp;
2594 	static char pname[MAX_PROPERTY_NAME] __prombss;
2595 	int l, room, has_phandle = 0;
2596 
2597 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2598 
2599 	/* get the node's full name */
2600 	namep = (char *)*mem_start;
2601 	room = *mem_end - *mem_start;
2602 	if (room > 255)
2603 		room = 255;
2604 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2605 	if (l >= 0) {
2606 		/* Didn't fit?  Get more room. */
2607 		if (l >= room) {
2608 			if (l >= *mem_end - *mem_start)
2609 				namep = make_room(mem_start, mem_end, l+1, 1);
2610 			call_prom("package-to-path", 3, 1, node, namep, l);
2611 		}
2612 		namep[l] = '\0';
2613 
2614 		/* Fixup an Apple bug where they have bogus \0 chars in the
2615 		 * middle of the path in some properties, and extract
2616 		 * the unit name (everything after the last '/').
2617 		 */
2618 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2619 			if (*p == '/')
2620 				lp = namep;
2621 			else if (*p != 0)
2622 				*lp++ = *p;
2623 		}
2624 		*lp = 0;
2625 		*mem_start = ALIGN((unsigned long)lp + 1, 4);
2626 	}
2627 
2628 	/* get it again for debugging */
2629 	path = prom_scratch;
2630 	memset(path, 0, sizeof(prom_scratch));
2631 	call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2632 
2633 	/* get and store all properties */
2634 	prev_name = "";
2635 	sstart = (char *)dt_string_start;
2636 	for (;;) {
2637 		if (call_prom("nextprop", 3, 1, node, prev_name,
2638 			      pname) != 1)
2639 			break;
2640 
2641  		/* skip "name" */
2642 		if (prom_strcmp(pname, "name") == 0) {
2643  			prev_name = "name";
2644  			continue;
2645  		}
2646 
2647 		/* find string offset */
2648 		soff = dt_find_string(pname);
2649 		if (soff == 0) {
2650 			prom_printf("WARNING: Can't find string index for"
2651 				    " <%s>, node %s\n", pname, path);
2652 			break;
2653 		}
2654 		prev_name = sstart + soff;
2655 
2656 		/* get length */
2657 		l = call_prom("getproplen", 2, 1, node, pname);
2658 
2659 		/* sanity checks */
2660 		if (l == PROM_ERROR)
2661 			continue;
2662 
2663 		/* push property head */
2664 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2665 		dt_push_token(l, mem_start, mem_end);
2666 		dt_push_token(soff, mem_start, mem_end);
2667 
2668 		/* push property content */
2669 		valp = make_room(mem_start, mem_end, l, 4);
2670 		call_prom("getprop", 4, 1, node, pname, valp, l);
2671 		*mem_start = ALIGN(*mem_start, 4);
2672 
2673 		if (!prom_strcmp(pname, "phandle"))
2674 			has_phandle = 1;
2675 	}
2676 
2677 	/* Add a "phandle" property if none already exist */
2678 	if (!has_phandle) {
2679 		soff = dt_find_string("phandle");
2680 		if (soff == 0)
2681 			prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2682 		else {
2683 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2684 			dt_push_token(4, mem_start, mem_end);
2685 			dt_push_token(soff, mem_start, mem_end);
2686 			valp = make_room(mem_start, mem_end, 4, 4);
2687 			*(__be32 *)valp = cpu_to_be32(node);
2688 		}
2689 	}
2690 
2691 	/* do all our children */
2692 	child = call_prom("child", 1, 1, node);
2693 	while (child != 0) {
2694 		scan_dt_build_struct(child, mem_start, mem_end);
2695 		child = call_prom("peer", 1, 1, child);
2696 	}
2697 
2698 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2699 }
2700 
flatten_device_tree(void)2701 static void __init flatten_device_tree(void)
2702 {
2703 	phandle root;
2704 	unsigned long mem_start, mem_end, room;
2705 	struct boot_param_header *hdr;
2706 	char *namep;
2707 	u64 *rsvmap;
2708 
2709 	/*
2710 	 * Check how much room we have between alloc top & bottom (+/- a
2711 	 * few pages), crop to 1MB, as this is our "chunk" size
2712 	 */
2713 	room = alloc_top - alloc_bottom - 0x4000;
2714 	if (room > DEVTREE_CHUNK_SIZE)
2715 		room = DEVTREE_CHUNK_SIZE;
2716 	prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2717 
2718 	/* Now try to claim that */
2719 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2720 	if (mem_start == 0)
2721 		prom_panic("Can't allocate initial device-tree chunk\n");
2722 	mem_end = mem_start + room;
2723 
2724 	/* Get root of tree */
2725 	root = call_prom("peer", 1, 1, (phandle)0);
2726 	if (root == (phandle)0)
2727 		prom_panic ("couldn't get device tree root\n");
2728 
2729 	/* Build header and make room for mem rsv map */
2730 	mem_start = ALIGN(mem_start, 4);
2731 	hdr = make_room(&mem_start, &mem_end,
2732 			sizeof(struct boot_param_header), 4);
2733 	dt_header_start = (unsigned long)hdr;
2734 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2735 
2736 	/* Start of strings */
2737 	mem_start = PAGE_ALIGN(mem_start);
2738 	dt_string_start = mem_start;
2739 	mem_start += 4; /* hole */
2740 
2741 	/* Add "phandle" in there, we'll need it */
2742 	namep = make_room(&mem_start, &mem_end, 16, 1);
2743 	prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2744 	mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2745 
2746 	/* Build string array */
2747 	prom_printf("Building dt strings...\n");
2748 	scan_dt_build_strings(root, &mem_start, &mem_end);
2749 	dt_string_end = mem_start;
2750 
2751 	/* Build structure */
2752 	mem_start = PAGE_ALIGN(mem_start);
2753 	dt_struct_start = mem_start;
2754 	prom_printf("Building dt structure...\n");
2755 	scan_dt_build_struct(root, &mem_start, &mem_end);
2756 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2757 	dt_struct_end = PAGE_ALIGN(mem_start);
2758 
2759 	/* Finish header */
2760 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2761 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2762 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2763 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2764 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2765 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2766 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2767 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2768 	/* Version 16 is not backward compatible */
2769 	hdr->last_comp_version = cpu_to_be32(0x10);
2770 
2771 	/* Copy the reserve map in */
2772 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2773 
2774 #ifdef DEBUG_PROM
2775 	{
2776 		int i;
2777 		prom_printf("reserved memory map:\n");
2778 		for (i = 0; i < mem_reserve_cnt; i++)
2779 			prom_printf("  %llx - %llx\n",
2780 				    be64_to_cpu(mem_reserve_map[i].base),
2781 				    be64_to_cpu(mem_reserve_map[i].size));
2782 	}
2783 #endif
2784 	/* Bump mem_reserve_cnt to cause further reservations to fail
2785 	 * since it's too late.
2786 	 */
2787 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2788 
2789 	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2790 		    dt_string_start, dt_string_end);
2791 	prom_printf("Device tree struct  0x%lx -> 0x%lx\n",
2792 		    dt_struct_start, dt_struct_end);
2793 }
2794 
2795 #ifdef CONFIG_PPC_MAPLE
2796 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2797  * The values are bad, and it doesn't even have the right number of cells. */
fixup_device_tree_maple(void)2798 static void __init fixup_device_tree_maple(void)
2799 {
2800 	phandle isa;
2801 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2802 	u32 isa_ranges[6];
2803 	char *name;
2804 
2805 	name = "/ht@0/isa@4";
2806 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2807 	if (!PHANDLE_VALID(isa)) {
2808 		name = "/ht@0/isa@6";
2809 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2810 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2811 	}
2812 	if (!PHANDLE_VALID(isa))
2813 		return;
2814 
2815 	if (prom_getproplen(isa, "ranges") != 12)
2816 		return;
2817 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2818 		== PROM_ERROR)
2819 		return;
2820 
2821 	if (isa_ranges[0] != 0x1 ||
2822 		isa_ranges[1] != 0xf4000000 ||
2823 		isa_ranges[2] != 0x00010000)
2824 		return;
2825 
2826 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2827 
2828 	isa_ranges[0] = 0x1;
2829 	isa_ranges[1] = 0x0;
2830 	isa_ranges[2] = rloc;
2831 	isa_ranges[3] = 0x0;
2832 	isa_ranges[4] = 0x0;
2833 	isa_ranges[5] = 0x00010000;
2834 	prom_setprop(isa, name, "ranges",
2835 			isa_ranges, sizeof(isa_ranges));
2836 }
2837 
2838 #define CPC925_MC_START		0xf8000000
2839 #define CPC925_MC_LENGTH	0x1000000
2840 /* The values for memory-controller don't have right number of cells */
fixup_device_tree_maple_memory_controller(void)2841 static void __init fixup_device_tree_maple_memory_controller(void)
2842 {
2843 	phandle mc;
2844 	u32 mc_reg[4];
2845 	char *name = "/hostbridge@f8000000";
2846 	u32 ac, sc;
2847 
2848 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2849 	if (!PHANDLE_VALID(mc))
2850 		return;
2851 
2852 	if (prom_getproplen(mc, "reg") != 8)
2853 		return;
2854 
2855 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2856 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2857 	if ((ac != 2) || (sc != 2))
2858 		return;
2859 
2860 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2861 		return;
2862 
2863 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2864 		return;
2865 
2866 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2867 
2868 	mc_reg[0] = 0x0;
2869 	mc_reg[1] = CPC925_MC_START;
2870 	mc_reg[2] = 0x0;
2871 	mc_reg[3] = CPC925_MC_LENGTH;
2872 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2873 }
2874 #else
2875 #define fixup_device_tree_maple()
2876 #define fixup_device_tree_maple_memory_controller()
2877 #endif
2878 
2879 #ifdef CONFIG_PPC_CHRP
2880 /*
2881  * Pegasos and BriQ lacks the "ranges" property in the isa node
2882  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2883  * Pegasos has the IDE configured in legacy mode, but advertised as native
2884  */
fixup_device_tree_chrp(void)2885 static void __init fixup_device_tree_chrp(void)
2886 {
2887 	phandle ph;
2888 	u32 prop[6];
2889 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2890 	char *name;
2891 	int rc;
2892 
2893 	name = "/pci@80000000/isa@c";
2894 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2895 	if (!PHANDLE_VALID(ph)) {
2896 		name = "/pci@ff500000/isa@6";
2897 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2898 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2899 	}
2900 	if (PHANDLE_VALID(ph)) {
2901 		rc = prom_getproplen(ph, "ranges");
2902 		if (rc == 0 || rc == PROM_ERROR) {
2903 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2904 
2905 			prop[0] = 0x1;
2906 			prop[1] = 0x0;
2907 			prop[2] = rloc;
2908 			prop[3] = 0x0;
2909 			prop[4] = 0x0;
2910 			prop[5] = 0x00010000;
2911 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2912 		}
2913 	}
2914 
2915 	name = "/pci@80000000/ide@C,1";
2916 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2917 	if (PHANDLE_VALID(ph)) {
2918 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2919 		prop[0] = 14;
2920 		prop[1] = 0x0;
2921 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2922 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2923 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2924 		if (rc == sizeof(u32)) {
2925 			prop[0] &= ~0x5;
2926 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2927 		}
2928 	}
2929 }
2930 #else
2931 #define fixup_device_tree_chrp()
2932 #endif
2933 
2934 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
fixup_device_tree_pmac(void)2935 static void __init fixup_device_tree_pmac(void)
2936 {
2937 	phandle u3, i2c, mpic;
2938 	u32 u3_rev;
2939 	u32 interrupts[2];
2940 	u32 parent;
2941 
2942 	/* Some G5s have a missing interrupt definition, fix it up here */
2943 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2944 	if (!PHANDLE_VALID(u3))
2945 		return;
2946 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2947 	if (!PHANDLE_VALID(i2c))
2948 		return;
2949 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2950 	if (!PHANDLE_VALID(mpic))
2951 		return;
2952 
2953 	/* check if proper rev of u3 */
2954 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2955 	    == PROM_ERROR)
2956 		return;
2957 	if (u3_rev < 0x35 || u3_rev > 0x39)
2958 		return;
2959 	/* does it need fixup ? */
2960 	if (prom_getproplen(i2c, "interrupts") > 0)
2961 		return;
2962 
2963 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2964 
2965 	/* interrupt on this revision of u3 is number 0 and level */
2966 	interrupts[0] = 0;
2967 	interrupts[1] = 1;
2968 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2969 		     &interrupts, sizeof(interrupts));
2970 	parent = (u32)mpic;
2971 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2972 		     &parent, sizeof(parent));
2973 }
2974 #else
2975 #define fixup_device_tree_pmac()
2976 #endif
2977 
2978 #ifdef CONFIG_PPC_EFIKA
2979 /*
2980  * The MPC5200 FEC driver requires an phy-handle property to tell it how
2981  * to talk to the phy.  If the phy-handle property is missing, then this
2982  * function is called to add the appropriate nodes and link it to the
2983  * ethernet node.
2984  */
fixup_device_tree_efika_add_phy(void)2985 static void __init fixup_device_tree_efika_add_phy(void)
2986 {
2987 	u32 node;
2988 	char prop[64];
2989 	int rv;
2990 
2991 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2992 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2993 	if (!PHANDLE_VALID(node))
2994 		return;
2995 
2996 	/* Check if the phy-handle property exists - bail if it does */
2997 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2998 	if (rv <= 0)
2999 		return;
3000 
3001 	/*
3002 	 * At this point the ethernet device doesn't have a phy described.
3003 	 * Now we need to add the missing phy node and linkage
3004 	 */
3005 
3006 	/* Check for an MDIO bus node - if missing then create one */
3007 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
3008 	if (!PHANDLE_VALID(node)) {
3009 		prom_printf("Adding Ethernet MDIO node\n");
3010 		call_prom("interpret", 1, 1,
3011 			" s\" /builtin\" find-device"
3012 			" new-device"
3013 				" 1 encode-int s\" #address-cells\" property"
3014 				" 0 encode-int s\" #size-cells\" property"
3015 				" s\" mdio\" device-name"
3016 				" s\" fsl,mpc5200b-mdio\" encode-string"
3017 				" s\" compatible\" property"
3018 				" 0xf0003000 0x400 reg"
3019 				" 0x2 encode-int"
3020 				" 0x5 encode-int encode+"
3021 				" 0x3 encode-int encode+"
3022 				" s\" interrupts\" property"
3023 			" finish-device");
3024 	}
3025 
3026 	/* Check for a PHY device node - if missing then create one and
3027 	 * give it's phandle to the ethernet node */
3028 	node = call_prom("finddevice", 1, 1,
3029 			 ADDR("/builtin/mdio/ethernet-phy"));
3030 	if (!PHANDLE_VALID(node)) {
3031 		prom_printf("Adding Ethernet PHY node\n");
3032 		call_prom("interpret", 1, 1,
3033 			" s\" /builtin/mdio\" find-device"
3034 			" new-device"
3035 				" s\" ethernet-phy\" device-name"
3036 				" 0x10 encode-int s\" reg\" property"
3037 				" my-self"
3038 				" ihandle>phandle"
3039 			" finish-device"
3040 			" s\" /builtin/ethernet\" find-device"
3041 				" encode-int"
3042 				" s\" phy-handle\" property"
3043 			" device-end");
3044 	}
3045 }
3046 
fixup_device_tree_efika(void)3047 static void __init fixup_device_tree_efika(void)
3048 {
3049 	int sound_irq[3] = { 2, 2, 0 };
3050 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3051 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
3052 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
3053 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3054 	u32 node;
3055 	char prop[64];
3056 	int rv, len;
3057 
3058 	/* Check if we're really running on a EFIKA */
3059 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3060 	if (!PHANDLE_VALID(node))
3061 		return;
3062 
3063 	rv = prom_getprop(node, "model", prop, sizeof(prop));
3064 	if (rv == PROM_ERROR)
3065 		return;
3066 	if (prom_strcmp(prop, "EFIKA5K2"))
3067 		return;
3068 
3069 	prom_printf("Applying EFIKA device tree fixups\n");
3070 
3071 	/* Claiming to be 'chrp' is death */
3072 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3073 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3074 	if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3075 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3076 
3077 	/* CODEGEN,description is exposed in /proc/cpuinfo so
3078 	   fix that too */
3079 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3080 	if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3081 		prom_setprop(node, "/", "CODEGEN,description",
3082 			     "Efika 5200B PowerPC System",
3083 			     sizeof("Efika 5200B PowerPC System"));
3084 
3085 	/* Fixup bestcomm interrupts property */
3086 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3087 	if (PHANDLE_VALID(node)) {
3088 		len = prom_getproplen(node, "interrupts");
3089 		if (len == 12) {
3090 			prom_printf("Fixing bestcomm interrupts property\n");
3091 			prom_setprop(node, "/builtin/bestcom", "interrupts",
3092 				     bcomm_irq, sizeof(bcomm_irq));
3093 		}
3094 	}
3095 
3096 	/* Fixup sound interrupts property */
3097 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3098 	if (PHANDLE_VALID(node)) {
3099 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3100 		if (rv == PROM_ERROR) {
3101 			prom_printf("Adding sound interrupts property\n");
3102 			prom_setprop(node, "/builtin/sound", "interrupts",
3103 				     sound_irq, sizeof(sound_irq));
3104 		}
3105 	}
3106 
3107 	/* Make sure ethernet phy-handle property exists */
3108 	fixup_device_tree_efika_add_phy();
3109 }
3110 #else
3111 #define fixup_device_tree_efika()
3112 #endif
3113 
3114 #ifdef CONFIG_PPC_PASEMI_NEMO
3115 /*
3116  * CFE supplied on Nemo is broken in several ways, biggest
3117  * problem is that it reassigns ISA interrupts to unused mpic ints.
3118  * Add an interrupt-controller property for the io-bridge to use
3119  * and correct the ints so we can attach them to an irq_domain
3120  */
fixup_device_tree_pasemi(void)3121 static void __init fixup_device_tree_pasemi(void)
3122 {
3123 	u32 interrupts[2], parent, rval, val = 0;
3124 	char *name, *pci_name;
3125 	phandle iob, node;
3126 
3127 	/* Find the root pci node */
3128 	name = "/pxp@0,e0000000";
3129 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3130 	if (!PHANDLE_VALID(iob))
3131 		return;
3132 
3133 	/* check if interrupt-controller node set yet */
3134 	if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR)
3135 		return;
3136 
3137 	prom_printf("adding interrupt-controller property for SB600...\n");
3138 
3139 	prom_setprop(iob, name, "interrupt-controller", &val, 0);
3140 
3141 	pci_name = "/pxp@0,e0000000/pci@11";
3142 	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3143 	parent = ADDR(iob);
3144 
3145 	for( ; prom_next_node(&node); ) {
3146 		/* scan each node for one with an interrupt */
3147 		if (!PHANDLE_VALID(node))
3148 			continue;
3149 
3150 		rval = prom_getproplen(node, "interrupts");
3151 		if (rval == 0 || rval == PROM_ERROR)
3152 			continue;
3153 
3154 		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3155 		if ((interrupts[0] < 212) || (interrupts[0] > 222))
3156 			continue;
3157 
3158 		/* found a node, update both interrupts and interrupt-parent */
3159 		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3160 			interrupts[0] -= 203;
3161 		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3162 			interrupts[0] -= 213;
3163 		if (interrupts[0] == 221)
3164 			interrupts[0] = 14;
3165 		if (interrupts[0] == 222)
3166 			interrupts[0] = 8;
3167 
3168 		prom_setprop(node, pci_name, "interrupts", interrupts,
3169 					sizeof(interrupts));
3170 		prom_setprop(node, pci_name, "interrupt-parent", &parent,
3171 					sizeof(parent));
3172 	}
3173 
3174 	/*
3175 	 * The io-bridge has device_type set to 'io-bridge' change it to 'isa'
3176 	 * so that generic isa-bridge code can add the SB600 and its on-board
3177 	 * peripherals.
3178 	 */
3179 	name = "/pxp@0,e0000000/io-bridge@0";
3180 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3181 	if (!PHANDLE_VALID(iob))
3182 		return;
3183 
3184 	/* device_type is already set, just change it. */
3185 
3186 	prom_printf("Changing device_type of SB600 node...\n");
3187 
3188 	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3189 }
3190 #else	/* !CONFIG_PPC_PASEMI_NEMO */
fixup_device_tree_pasemi(void)3191 static inline void fixup_device_tree_pasemi(void) { }
3192 #endif
3193 
fixup_device_tree(void)3194 static void __init fixup_device_tree(void)
3195 {
3196 	fixup_device_tree_maple();
3197 	fixup_device_tree_maple_memory_controller();
3198 	fixup_device_tree_chrp();
3199 	fixup_device_tree_pmac();
3200 	fixup_device_tree_efika();
3201 	fixup_device_tree_pasemi();
3202 }
3203 
prom_find_boot_cpu(void)3204 static void __init prom_find_boot_cpu(void)
3205 {
3206 	__be32 rval;
3207 	ihandle prom_cpu;
3208 	phandle cpu_pkg;
3209 
3210 	rval = 0;
3211 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3212 		return;
3213 	prom_cpu = be32_to_cpu(rval);
3214 
3215 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3216 
3217 	if (!PHANDLE_VALID(cpu_pkg))
3218 		return;
3219 
3220 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3221 	prom.cpu = be32_to_cpu(rval);
3222 
3223 	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3224 }
3225 
prom_check_initrd(unsigned long r3,unsigned long r4)3226 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3227 {
3228 #ifdef CONFIG_BLK_DEV_INITRD
3229 	if (r3 && r4 && r4 != 0xdeadbeef) {
3230 		__be64 val;
3231 
3232 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3233 		prom_initrd_end = prom_initrd_start + r4;
3234 
3235 		val = cpu_to_be64(prom_initrd_start);
3236 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3237 			     &val, sizeof(val));
3238 		val = cpu_to_be64(prom_initrd_end);
3239 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3240 			     &val, sizeof(val));
3241 
3242 		reserve_mem(prom_initrd_start,
3243 			    prom_initrd_end - prom_initrd_start);
3244 
3245 		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3246 		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3247 	}
3248 #endif /* CONFIG_BLK_DEV_INITRD */
3249 }
3250 
3251 #ifdef CONFIG_PPC_SVM
3252 /*
3253  * Perform the Enter Secure Mode ultracall.
3254  */
enter_secure_mode(unsigned long kbase,unsigned long fdt)3255 static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
3256 {
3257 	register unsigned long r3 asm("r3") = UV_ESM;
3258 	register unsigned long r4 asm("r4") = kbase;
3259 	register unsigned long r5 asm("r5") = fdt;
3260 
3261 	asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3262 
3263 	return r3;
3264 }
3265 
3266 /*
3267  * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3268  */
setup_secure_guest(unsigned long kbase,unsigned long fdt)3269 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3270 {
3271 	int ret;
3272 
3273 	if (!prom_svm_enable)
3274 		return;
3275 
3276 	/* Switch to secure mode. */
3277 	prom_printf("Switching to secure mode.\n");
3278 
3279 	/*
3280 	 * The ultravisor will do an integrity check of the kernel image but we
3281 	 * relocated it so the check will fail. Restore the original image by
3282 	 * relocating it back to the kernel virtual base address.
3283 	 */
3284 	relocate(KERNELBASE);
3285 
3286 	ret = enter_secure_mode(kbase, fdt);
3287 
3288 	/* Relocate the kernel again. */
3289 	relocate(kbase);
3290 
3291 	if (ret != U_SUCCESS) {
3292 		prom_printf("Returned %d from switching to secure mode.\n", ret);
3293 		prom_rtas_os_term("Switch to secure mode failed.\n");
3294 	}
3295 }
3296 #else
setup_secure_guest(unsigned long kbase,unsigned long fdt)3297 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3298 {
3299 }
3300 #endif /* CONFIG_PPC_SVM */
3301 
3302 /*
3303  * We enter here early on, when the Open Firmware prom is still
3304  * handling exceptions and the MMU hash table for us.
3305  */
3306 
prom_init(unsigned long r3,unsigned long r4,unsigned long pp,unsigned long r6,unsigned long r7,unsigned long kbase)3307 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3308 			       unsigned long pp,
3309 			       unsigned long r6, unsigned long r7,
3310 			       unsigned long kbase)
3311 {
3312 	unsigned long hdr;
3313 
3314 #ifdef CONFIG_PPC32
3315 	unsigned long offset = reloc_offset();
3316 	reloc_got2(offset);
3317 #endif
3318 
3319 	/*
3320 	 * First zero the BSS
3321 	 */
3322 	memset(&__bss_start, 0, __bss_stop - __bss_start);
3323 
3324 	/*
3325 	 * Init interface to Open Firmware, get some node references,
3326 	 * like /chosen
3327 	 */
3328 	prom_init_client_services(pp);
3329 
3330 	/*
3331 	 * See if this OF is old enough that we need to do explicit maps
3332 	 * and other workarounds
3333 	 */
3334 	prom_find_mmu();
3335 
3336 	/*
3337 	 * Init prom stdout device
3338 	 */
3339 	prom_init_stdout();
3340 
3341 	prom_printf("Preparing to boot %s", linux_banner);
3342 
3343 	/*
3344 	 * Get default machine type. At this point, we do not differentiate
3345 	 * between pSeries SMP and pSeries LPAR
3346 	 */
3347 	of_platform = prom_find_machine_type();
3348 	prom_printf("Detected machine type: %x\n", of_platform);
3349 
3350 #ifndef CONFIG_NONSTATIC_KERNEL
3351 	/* Bail if this is a kdump kernel. */
3352 	if (PHYSICAL_START > 0)
3353 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3354 #endif
3355 
3356 	/*
3357 	 * Check for an initrd
3358 	 */
3359 	prom_check_initrd(r3, r4);
3360 
3361 	/*
3362 	 * Do early parsing of command line
3363 	 */
3364 	early_cmdline_parse();
3365 
3366 #ifdef CONFIG_PPC_PSERIES
3367 	/*
3368 	 * On pSeries, inform the firmware about our capabilities
3369 	 */
3370 	if (of_platform == PLATFORM_PSERIES ||
3371 	    of_platform == PLATFORM_PSERIES_LPAR)
3372 		prom_send_capabilities();
3373 #endif
3374 
3375 	/*
3376 	 * Copy the CPU hold code
3377 	 */
3378 	if (of_platform != PLATFORM_POWERMAC)
3379 		copy_and_flush(0, kbase, 0x100, 0);
3380 
3381 	/*
3382 	 * Initialize memory management within prom_init
3383 	 */
3384 	prom_init_mem();
3385 
3386 	/*
3387 	 * Determine which cpu is actually running right _now_
3388 	 */
3389 	prom_find_boot_cpu();
3390 
3391 	/*
3392 	 * Initialize display devices
3393 	 */
3394 	prom_check_displays();
3395 
3396 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3397 	/*
3398 	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3399 	 * that uses the allocator, we need to make sure we get the top of memory
3400 	 * available for us here...
3401 	 */
3402 	if (of_platform == PLATFORM_PSERIES)
3403 		prom_initialize_tce_table();
3404 #endif
3405 
3406 	/*
3407 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3408 	 * have a usable RTAS implementation.
3409 	 */
3410 	if (of_platform != PLATFORM_POWERMAC)
3411 		prom_instantiate_rtas();
3412 
3413 #ifdef CONFIG_PPC64
3414 	/* instantiate sml */
3415 	prom_instantiate_sml();
3416 #endif
3417 
3418 	/*
3419 	 * On non-powermacs, put all CPUs in spin-loops.
3420 	 *
3421 	 * PowerMacs use a different mechanism to spin CPUs
3422 	 *
3423 	 * (This must be done after instantiating RTAS)
3424 	 */
3425 	if (of_platform != PLATFORM_POWERMAC)
3426 		prom_hold_cpus();
3427 
3428 	/*
3429 	 * Fill in some infos for use by the kernel later on
3430 	 */
3431 	if (prom_memory_limit) {
3432 		__be64 val = cpu_to_be64(prom_memory_limit);
3433 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3434 			     &val, sizeof(val));
3435 	}
3436 #ifdef CONFIG_PPC64
3437 	if (prom_iommu_off)
3438 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3439 			     NULL, 0);
3440 
3441 	if (prom_iommu_force_on)
3442 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3443 			     NULL, 0);
3444 
3445 	if (prom_tce_alloc_start) {
3446 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3447 			     &prom_tce_alloc_start,
3448 			     sizeof(prom_tce_alloc_start));
3449 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3450 			     &prom_tce_alloc_end,
3451 			     sizeof(prom_tce_alloc_end));
3452 	}
3453 #endif
3454 
3455 	/*
3456 	 * Fixup any known bugs in the device-tree
3457 	 */
3458 	fixup_device_tree();
3459 
3460 	/*
3461 	 * Now finally create the flattened device-tree
3462 	 */
3463 	prom_printf("copying OF device tree...\n");
3464 	flatten_device_tree();
3465 
3466 	/*
3467 	 * in case stdin is USB and still active on IBM machines...
3468 	 * Unfortunately quiesce crashes on some powermacs if we have
3469 	 * closed stdin already (in particular the powerbook 101).
3470 	 */
3471 	if (of_platform != PLATFORM_POWERMAC)
3472 		prom_close_stdin();
3473 
3474 	/*
3475 	 * Call OF "quiesce" method to shut down pending DMA's from
3476 	 * devices etc...
3477 	 */
3478 	prom_printf("Quiescing Open Firmware ...\n");
3479 	call_prom("quiesce", 0, 0);
3480 
3481 	/*
3482 	 * And finally, call the kernel passing it the flattened device
3483 	 * tree and NULL as r5, thus triggering the new entry point which
3484 	 * is common to us and kexec
3485 	 */
3486 	hdr = dt_header_start;
3487 
3488 	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3489 	prom_debug("->dt_header_start=0x%lx\n", hdr);
3490 
3491 #ifdef CONFIG_PPC32
3492 	reloc_got2(-offset);
3493 #endif
3494 
3495 	/* Move to secure memory if we're supposed to be secure guests. */
3496 	setup_secure_guest(kbase, hdr);
3497 
3498 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3499 
3500 	return 0;
3501 }
3502