xref: /freebsd/sys/powerpc/powerpc/machdep.c (revision 4d213c595ac3247a85cea5d3ea521db14151a427)
1 /*-
2  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3  * Copyright (C) 1995, 1996 TooLs GmbH.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by TooLs GmbH.
17  * 4. The name of TooLs GmbH may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*-
32  * Copyright (C) 2001 Benno Rice
33  * All rights reserved.
34  *
35  * Redistribution and use in source and binary forms, with or without
36  * modification, are permitted provided that the following conditions
37  * are met:
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  *
44  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55  */
56 
57 #include <sys/cdefs.h>
58 #include "opt_ddb.h"
59 #include "opt_kstack_pages.h"
60 #include "opt_platform.h"
61 
62 #include <sys/param.h>
63 #include <sys/proc.h>
64 #include <sys/systm.h>
65 #include <sys/bio.h>
66 #include <sys/buf.h>
67 #include <sys/bus.h>
68 #include <sys/cons.h>
69 #include <sys/cpu.h>
70 #include <sys/eventhandler.h>
71 #include <sys/exec.h>
72 #include <sys/imgact.h>
73 #include <sys/kdb.h>
74 #include <sys/kernel.h>
75 #include <sys/ktr.h>
76 #include <sys/linker.h>
77 #include <sys/lock.h>
78 #include <sys/malloc.h>
79 #include <sys/mbuf.h>
80 #include <sys/msgbuf.h>
81 #include <sys/mutex.h>
82 #include <sys/ptrace.h>
83 #include <sys/reboot.h>
84 #include <sys/reg.h>
85 #include <sys/rwlock.h>
86 #include <sys/signalvar.h>
87 #include <sys/syscallsubr.h>
88 #include <sys/sysctl.h>
89 #include <sys/sysent.h>
90 #include <sys/sysproto.h>
91 #include <sys/ucontext.h>
92 #include <sys/uio.h>
93 #include <sys/vmmeter.h>
94 #include <sys/vnode.h>
95 
96 #include <net/netisr.h>
97 
98 #include <vm/vm.h>
99 #include <vm/vm_extern.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_phys.h>
103 #include <vm/vm_map.h>
104 #include <vm/vm_object.h>
105 #include <vm/vm_pager.h>
106 
107 #include <machine/altivec.h>
108 #ifndef __powerpc64__
109 #include <machine/bat.h>
110 #endif
111 #include <machine/cpu.h>
112 #include <machine/elf.h>
113 #include <machine/fpu.h>
114 #include <machine/hid.h>
115 #include <machine/ifunc.h>
116 #include <machine/kdb.h>
117 #include <machine/md_var.h>
118 #include <machine/metadata.h>
119 #include <machine/mmuvar.h>
120 #include <machine/pcb.h>
121 #include <machine/sigframe.h>
122 #include <machine/spr.h>
123 #include <machine/trap.h>
124 #include <machine/vmparam.h>
125 #include <machine/ofw_machdep.h>
126 
127 #include <ddb/ddb.h>
128 
129 #include <dev/ofw/openfirm.h>
130 #include <dev/ofw/ofw_subr.h>
131 
132 int cold = 1;
133 #ifdef __powerpc64__
134 int cacheline_size = 128;
135 #else
136 int cacheline_size = 32;
137 #endif
138 #ifdef __powerpc64__
139 int hw_direct_map = -1;
140 #else
141 int hw_direct_map = 1;
142 #endif
143 
144 #ifdef BOOKE
145 extern vm_paddr_t kernload;
146 #endif
147 
148 extern void *ap_pcpu;
149 
150 struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);
151 static char init_kenv[2048];
152 
153 static struct trapframe frame0;
154 
155 const char	machine[] = "powerpc";
156 SYSCTL_CONST_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD | CTLFLAG_CAPRD,
157     machine, "Machine class");
158 
159 static void	cpu_startup(void *);
160 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
161 
162 SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
163 	   CTLFLAG_RD, &cacheline_size, 0, "");
164 
165 uintptr_t	powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
166 		    uint32_t);
167 
168 static void	fake_preload_metadata(void);
169 
170 long		Maxmem = 0;
171 long		realmem = 0;
172 
173 /* Default MSR values set in the AIM/Book-E early startup code */
174 register_t	psl_kernset;
175 register_t	psl_userset;
176 register_t	psl_userstatic;
177 #ifdef __powerpc64__
178 register_t	psl_userset32;
179 #endif
180 
181 struct kva_md_info kmi;
182 
183 static void
cpu_startup(void * dummy)184 cpu_startup(void *dummy)
185 {
186 
187 	/*
188 	 * Initialise the decrementer-based clock.
189 	 */
190 	decr_init();
191 
192 	/*
193 	 * Good {morning,afternoon,evening,night}.
194 	 */
195 	cpu_setup(PCPU_GET(cpuid));
196 
197 #ifdef PERFMON
198 	perfmon_init();
199 #endif
200 	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
201 	    ptoa((uintmax_t)physmem) / 1048576);
202 	realmem = physmem;
203 
204 	if (bootverbose)
205 		printf("available KVA = %zu (%zu MB)\n",
206 		    virtual_end - virtual_avail,
207 		    (virtual_end - virtual_avail) / 1048576);
208 
209 	/*
210 	 * Display any holes after the first chunk of extended memory.
211 	 */
212 	if (bootverbose) {
213 		int indx;
214 
215 		printf("Physical memory chunk(s):\n");
216 		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
217 			vm_paddr_t size1 =
218 			    phys_avail[indx + 1] - phys_avail[indx];
219 
220 			#ifdef __powerpc64__
221 			printf("0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
222 			#else
223 			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
224 			#endif
225 			    (uintmax_t)phys_avail[indx],
226 			    (uintmax_t)phys_avail[indx + 1] - 1,
227 			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
228 		}
229 	}
230 
231 	vm_ksubmap_init(&kmi);
232 
233 	printf("avail memory = %ju (%ju MB)\n",
234 	    ptoa((uintmax_t)vm_free_count()),
235 	    ptoa((uintmax_t)vm_free_count()) / 1048576);
236 
237 	/*
238 	 * Set up buffers, so they can be used to read disk labels.
239 	 */
240 	bufinit();
241 	vm_pager_bufferinit();
242 }
243 
244 extern vm_offset_t	__startkernel, __endkernel;
245 extern unsigned char	__bss_start[];
246 extern unsigned char	__sbss_start[];
247 extern unsigned char	__sbss_end[];
248 extern unsigned char	_end[];
249 
250 void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
251     void *mdp, uint32_t mdp_cookie);
252 void aim_cpu_init(vm_offset_t toc);
253 void booke_cpu_init(void);
254 
255 #ifdef DDB
256 static void	load_external_symtab(void);
257 #endif
258 
/*
 * Machine-dependent early boot entry point, reached from the low-level
 * startup code.  Relocates/parses loader or fake preload metadata, brings
 * up the console, platform, per-CPU data for the BSP, and the MMU, and
 * finishes setting up proc0/thread0.  Returns the address just below
 * thread0's PCB (16-byte aligned, with room for a call frame) to be used
 * as the initial kernel stack pointer.
 */
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct		pcpu *pc;
	struct cpuref	bsp;
	vm_offset_t	startkernel, endkernel;
	char		*env;
        bool		ofw_bootargs = false;
#ifdef DDB
	bool		symbols_provided = false;
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/*
	 * If the metadata pointer cookie is not set to the magic value,
	 * the number in mdp should be treated as nonsense.
	 */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * On BOOKE the BSS is already cleared and some variables
	 * initialized.  Do not wipe them out.
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	/*
	 * At this point, we are executing in our correct memory space.
	 * Book-E started there, and AIM has done an rfi and restarted
	 * execution from _start.
	 *
	 * We may still be in real mode, however. If we are running out of
	 * the direct map on 64 bit, this is possible to do.
	 */

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		/*
		 * Starting up from loader.
		 *
		 * Full metadata has been provided, but we need to figure
		 * out the correct address to relocate it to.
		 */
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelendphys;

#ifdef AIM
		/*
		 * If we are executing out of the direct map, the metadata's
		 * physical addresses must be shifted into the DMAP window.
		 */
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			/* Translate phys offset into DMAP offset. */
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}

		/* Initialize preload_kmdp */
		preload_initkmdp(true);

		boothowto = MD_FETCH(preload_kmdp, MODINFOMD_HOWTO, int);
		envp = MD_FETCH(preload_kmdp, MODINFOMD_ENVP, char *);
		if (envp != NULL)
			envp += md_offset;
		init_static_kenv(envp, 0);
		if (fdt == 0) {
			/* Fall back to the DTB handed over by the loader. */
			fdt = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, uintptr_t);
			if (fdt != 0)
				fdt += md_offset;
		}
		/* kernelstartphys is already relocated. */
		kernelendphys = MD_FETCH(preload_kmdp, MODINFOMD_KERNEND,
		    vm_offset_t);
		if (kernelendphys != 0)
			kernelendphys += md_offset;
		endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
		ksym_start = MD_FETCH(preload_kmdp, MODINFOMD_SSYM, uintptr_t);
		ksym_end = MD_FETCH(preload_kmdp, MODINFOMD_ESYM, uintptr_t);

		db_fetch_ksymtab(ksym_start, ksym_end, md_offset);
		/* Symbols provided by loader. */
		symbols_provided = true;
#endif
	} else {
		/*
		 * Self-loading kernel, we have to fake up metadata.
		 *
		 * Since we are creating the metadata from the final
		 * memory space, we don't need to call
		 * preload_bootstrap_relocate().
		 */
		fake_preload_metadata();
		/* Initialize preload_kmdp */
		preload_initkmdp(true);
		init_static_kenv(init_kenv, sizeof(init_kenv));
		ofw_bootargs = true;
	}

	/* Store boot environment state */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;
	/* Load &thread0 into r13 (64-bit) resp. r2 (32-bit). */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(&thread0));
#else
	__asm __volatile("mr 2,%0" :: "r"(&thread0));
#endif

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

#ifdef DDB
	/*
	 * No symbols from the loader: try to recover them from an
	 * initrd image (needs the direct map; see load_external_symtab).
	 */
	if (!symbols_provided && hw_direct_map)
		load_external_symtab();
#endif

	if (ofw_bootargs)
		ofw_parse_bootargs();

#ifdef AIM
	/*
	 * Early I/O map needs to be initialized before console, in order to
	 * map frame buffers properly, and after boot args have been parsed,
	 * to handle tunables properly.
	 */
	pmap_early_io_map_init();
#endif

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Set up per-cpu data for the BSP now that the platform can tell
	 * us which that is.
	 */
	if (platform_smp_get_bsp(&bsp) != 0)
		bsp.cr_cpuid = 0;
	pc = &__pcpu[bsp.cr_cpuid];
	/* Stash the pcpu pointer in SPRG0. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
	pcpu_init(pc, bsp.cr_cpuid, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	thread0.td_oncpu = bsp.cr_cpuid;
	pc->pc_cpuid = bsp.cr_cpuid;
	pc->pc_hwref = bsp.cr_hwref;

	/*
	 * Init KDB
	 */
	kdb_init();

	/*
	 * Bring up MMU
	 */
	pmap_mmu_init();
	link_elf_ireloc();
	pmap_bootstrap(startkernel, endkernel);
	mtmsr(psl_kernset & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
        env = kern_getenv("kernelname");
        if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0: place its PCB at the top of the
	 * kernel stack, 16-byte aligned.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/*
	 * Initial stack pointer: just below thread0's PCB, leaving room
	 * for a call frame, 16-byte aligned.
	 */
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}
509 
510 #ifdef DDB
511 /*
512  * On powernv and some booke systems, we might not have symbols loaded via
513  * loader. However, if the user passed the kernel in as the initrd as well,
514  * we can manually load it via reinterpreting the initrd copy of the kernel.
515  *
516  * In the BOOKE case, we don't actually have a DMAP yet, so we have to use
517  * temporary maps to inspect the memory, but write DMAP addresses to the
518  * configuration variables.
519  */
520 static void
load_external_symtab(void)521 load_external_symtab(void) {
522 	phandle_t chosen;
523 	vm_paddr_t start, end;
524 	pcell_t cell[2];
525 	ssize_t size;
526 	u_char *kernelimg;		/* Temporary map */
527 	u_char *kernelimg_final;	/* Final location */
528 
529 	int i;
530 
531 	Elf_Ehdr *ehdr;
532 	Elf_Shdr *shdr;
533 
534 	vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz,
535 	    ksym_start_final, kstr_start_final;
536 
537 	if (!hw_direct_map)
538 		return;
539 
540 	chosen = OF_finddevice("/chosen");
541 	if (chosen <= 0)
542 		return;
543 
544 	if (!OF_hasprop(chosen, "linux,initrd-start") ||
545 	    !OF_hasprop(chosen, "linux,initrd-end"))
546 		return;
547 
548 	size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell));
549 	if (size == 4)
550 		start = cell[0];
551 	else if (size == 8)
552 		start = (uint64_t)cell[0] << 32 | cell[1];
553 	else
554 		return;
555 
556 	size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell));
557 	if (size == 4)
558 		end = cell[0];
559 	else if (size == 8)
560 		end = (uint64_t)cell[0] << 32 | cell[1];
561 	else
562 		return;
563 
564 	if (!(end - start > 0))
565 		return;
566 
567 	kernelimg_final = (u_char *) PHYS_TO_DMAP(start);
568 #ifdef	AIM
569 	kernelimg = kernelimg_final;
570 #else	/* BOOKE */
571 	kernelimg = (u_char *)pmap_early_io_map(start, PAGE_SIZE);
572 #endif
573 	ehdr = (Elf_Ehdr *)kernelimg;
574 
575 	if (!IS_ELF(*ehdr)) {
576 #ifdef	BOOKE
577 		pmap_early_io_unmap(start, PAGE_SIZE);
578 #endif
579 		return;
580 	}
581 
582 #ifdef	BOOKE
583 	pmap_early_io_unmap(start, PAGE_SIZE);
584 	kernelimg = (u_char *)pmap_early_io_map(start, (end - start));
585 #endif
586 
587 	shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff);
588 
589 	ksym_start = 0;
590 	ksym_sz = 0;
591 	ksym_start_final = 0;
592 	kstr_start = 0;
593 	kstr_sz = 0;
594 	kstr_start_final = 0;
595 	for (i = 0; i < ehdr->e_shnum; i++) {
596 		if (shdr[i].sh_type == SHT_SYMTAB) {
597 			ksym_start = (vm_offset_t)(kernelimg +
598 			    shdr[i].sh_offset);
599 			ksym_start_final = (vm_offset_t)
600 			    (kernelimg_final + shdr[i].sh_offset);
601 			ksym_sz = (vm_offset_t)(shdr[i].sh_size);
602 			kstr_start = (vm_offset_t)(kernelimg +
603 			    shdr[shdr[i].sh_link].sh_offset);
604 			kstr_start_final = (vm_offset_t)
605 			    (kernelimg_final +
606 			    shdr[shdr[i].sh_link].sh_offset);
607 
608 			kstr_sz = (vm_offset_t)
609 			    (shdr[shdr[i].sh_link].sh_size);
610 		}
611 	}
612 
613 	if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 &&
614 	    kstr_sz != 0 && ksym_start < kstr_start) {
615 		/*
616 		 * We can't use db_fetch_ksymtab() here, because we need to
617 		 * feed in DMAP addresses that are not mapped yet on booke.
618 		 *
619 		 * Write the variables directly, where db_init() will pick
620 		 * them up later, after the DMAP is up.
621 		 */
622 		ksymtab = ksym_start_final;
623 		ksymtab_size = ksym_sz;
624 		kstrtab = kstr_start_final;
625 		ksymtab_relbase = (__startkernel - KERNBASE);
626 	}
627 
628 #ifdef	BOOKE
629 	pmap_early_io_unmap(start, (end - start));
630 #endif
631 
632 };
633 #endif
634 
635 /*
636  * When not being loaded from loader, we need to create our own metadata
637  * so we can interact with the kernel linker.
638  */
static void
fake_preload_metadata(void) {
	/*
	 * Minimal preload metadata: a sequence of records, each being a
	 * 32-bit tag, a 32-bit payload length, then the payload padded to
	 * a whole number of 32-bit words.  A zero tag+length terminates.
	 * We depend on dword alignment here.
	 */
	static uint32_t fake_preload[36] __aligned(8);
	int i = 0;		/* next free 32-bit slot */

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i], "kernel");
	/* ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
	i += 2;

	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen(preload_kerntype) + 1;
	strcpy((char *)&fake_preload[i], preload_kerntype);
	/* Advance past the payload, rounded up to 32-bit words. */
	i += howmany(fake_preload[i - 1], sizeof(uint32_t));

#ifdef __powerpc64__
	/* Padding -- Fields start on u_long boundaries */
	fake_preload[i++] = 0;
#endif

	/* Kernel load address (vm_offset_t-sized payload). */
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/* Kernel image size. */
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/*
	 * MODINFOMD_SSYM and MODINFOMD_ESYM cannot be provided here,
	 * as the memory comes from outside the loaded ELF sections.
	 *
	 * If the symbols are being provided by other means (MFS), the
	 * tables will be loaded into the debugger directly.
	 */

	/* Null field at end to mark end of data. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void*)fake_preload;
}
686 
687 /*
688  * Flush the D-cache for non-DMA I/O so that the I-cache can
689  * be made coherent later.
690  */
691 void
cpu_flush_dcache(void * ptr,size_t len)692 cpu_flush_dcache(void *ptr, size_t len)
693 {
694 	register_t addr, off;
695 
696 	/*
697 	 * Align the address to a cacheline and adjust the length
698 	 * accordingly. Then round the length to a multiple of the
699 	 * cacheline for easy looping.
700 	 */
701 	addr = (uintptr_t)ptr;
702 	off = addr & (cacheline_size - 1);
703 	addr -= off;
704 	len = roundup2(len + off, cacheline_size);
705 
706 	while (len > 0) {
707 		__asm __volatile ("dcbf 0,%0" :: "r"(addr));
708 		__asm __volatile ("sync");
709 		addr += cacheline_size;
710 		len -= cacheline_size;
711 	}
712 }
713 
714 int
ptrace_set_pc(struct thread * td,unsigned long addr)715 ptrace_set_pc(struct thread *td, unsigned long addr)
716 {
717 	struct trapframe *tf;
718 
719 	tf = td->td_frame;
720 	tf->srr0 = (register_t)addr;
721 
722 	return (0);
723 }
724 
725 void
spinlock_enter(void)726 spinlock_enter(void)
727 {
728 	struct thread *td;
729 	register_t msr;
730 
731 	td = curthread;
732 	if (td->td_md.md_spinlock_count == 0) {
733 		nop_prio_mhigh();
734 		msr = intr_disable();
735 		td->td_md.md_spinlock_count = 1;
736 		td->td_md.md_saved_msr = msr;
737 		critical_enter();
738 	} else
739 		td->td_md.md_spinlock_count++;
740 }
741 
742 void
spinlock_exit(void)743 spinlock_exit(void)
744 {
745 	struct thread *td;
746 	register_t msr;
747 
748 	td = curthread;
749 	msr = td->td_md.md_saved_msr;
750 	td->td_md.md_spinlock_count--;
751 	if (td->td_md.md_spinlock_count == 0) {
752 		critical_exit();
753 		intr_restore(msr);
754 		nop_prio_medium();
755 	}
756 }
757 
758 /*
759  * Simple ddb(4) command/hack to view any SPR on the running CPU.
760  * Uses a trivial asm function to perform the mfspr, and rewrites the mfspr
761  * instruction each time.
762  * XXX: Since it uses code modification, it won't work if the kernel code pages
763  * are marked RO.
764  */
765 extern register_t get_spr(int);
766 
767 #ifdef DDB
DB_SHOW_COMMAND(spr, db_show_spr)
{
	register_t spr;
	volatile uint32_t *p;
	int sprno, saved_sprno;

	if (!have_addr)
		return;

	/*
	 * mfspr encodes the SPR number with its two 5-bit halves swapped;
	 * build that form while keeping the original for display.
	 */
	saved_sprno = sprno = (intptr_t) addr;
	sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);
	p = (uint32_t *)(void *)&get_spr;
#ifdef __powerpc64__
#if defined(_CALL_ELF) && _CALL_ELF == 2
	/* Account for ELFv2 function prologue. */
	p += 2;
#else
	/* ELFv1: &get_spr points at a function descriptor; dereference it. */
	p = *(volatile uint32_t * volatile *)p;
#endif
#endif
	/*
	 * Patch the SPR field (bits 11..20) of get_spr()'s mfspr
	 * instruction in place, then flush the icache so the modified
	 * instruction is fetched.
	 */
	*p = (*p & ~0x001ff800) | (sprno << 11);
	__syncicache(__DEVOLATILE(uint32_t *, p), cacheline_size);
	spr = get_spr(sprno);

	db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno,
	    (unsigned long)spr);
}
795 
DB_SHOW_COMMAND(frame,db_show_frame)796 DB_SHOW_COMMAND(frame, db_show_frame)
797 {
798 	struct trapframe *tf;
799 	long reg;
800 	int i;
801 
802 	tf = have_addr ? (struct trapframe *)addr : curthread->td_frame;
803 
804 	/*
805 	 * Everything casts through long to simplify the printing.
806 	 * 'long' is native register size anyway.
807 	 */
808 	db_printf("trap frame %p\n", tf);
809 	for (i = 0; i < nitems(tf->fixreg); i++) {
810 		reg = tf->fixreg[i];
811 		db_printf("  r%d:\t%#lx (%ld)\n", i, reg, reg);
812 	}
813 	reg = tf->lr;
814 	db_printf("  lr:\t%#lx\n", reg);
815 	reg = tf->cr;
816 	db_printf("  cr:\t%#lx\n", reg);
817 	reg = tf->xer;
818 	db_printf("  xer:\t%#lx\n", reg);
819 	reg = tf->ctr;
820 	db_printf("  ctr:\t%#lx (%ld)\n", reg, reg);
821 	reg = tf->srr0;
822 	db_printf("  srr0:\t%#lx\n", reg);
823 	reg = tf->srr1;
824 	db_printf("  srr1:\t%#lx\n", reg);
825 	reg = tf->exc;
826 	db_printf("  exc:\t%#lx\n", reg);
827 	reg = tf->dar;
828 	db_printf("  dar:\t%#lx\n", reg);
829 #ifdef AIM
830 	reg = tf->cpu.aim.dsisr;
831 	db_printf("  dsisr:\t%#lx\n", reg);
832 #else
833 	reg = tf->cpu.booke.esr;
834 	db_printf("  esr:\t%#lx\n", reg);
835 	reg = tf->cpu.booke.dbcr0;
836 	db_printf("  dbcr0:\t%#lx\n", reg);
837 #endif
838 }
839 #endif
840 
841 /* __stack_chk_fail_local() is called in secure-plt (32-bit). */
842 #if !defined(__powerpc64__)
843 extern void __stack_chk_fail(void);
844 void __stack_chk_fail_local(void);
845 
void
__stack_chk_fail_local(void)
{

	/* Local-visibility trampoline: forward to the real handler. */
	__stack_chk_fail();
}
852 #endif
853