/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008-2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/_inttypes.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/platformvar.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/vmparam.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <powerpc/mpc85xx/mpc85xx.h>

#include "platform_if.h"

#ifdef SMP
extern void *ap_pcpu;
extern vm_paddr_t kernload;		/* Kernel physical load address */
extern uint8_t __boot_page[];		/* Boot page body */
extern vm_paddr_t bp_kernload;		/* Boot page copy of kernload */
extern vm_offset_t bp_virtaddr;		/* Virtual address of boot page */
extern vm_offset_t __startkernel;

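/*
 * ePAPR-style spin-table entry.  A secondary core parked by the loader
 * polls a structure with this layout (pointed to by its "cpu-release-addr"
 * device tree property) until the entry address is filled in.
 */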
struct cpu_release {
	uint32_t entry_h;
	uint32_t entry_l;
	uint32_t r3_h;
	uint32_t r3_l;
	uint32_t reserved;
	uint32_t pir;
};
#endif

extern uint32_t *bootinfo;
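/*
 * Location and size of the CCSR register window, once it has been found
 * and mapped in mpc85xx_attach(); shared with the rest of the mpc85xx code.
 */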
vm_paddr_t ccsrbar_pa;
vm_offset_t ccsrbar_va;
vm_size_t ccsrbar_size;

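/* CPU iterator state for the platform_smp_{first,next}_cpu methods. */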
static int cpu, maxcpu;

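/*
 * Timebase freeze hook used during timebase synchronization, together with
 * the device it is invoked on.  It stays a no-op until the RCPM or GUTS
 * driver below attaches and installs the real handler for this SoC.
 */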
static device_t rcpm_dev;
static void dummy_freeze(device_t, bool);

static void (*freeze_timebase)(device_t, bool) = dummy_freeze;

static int mpc85xx_probe(platform_t);
static void mpc85xx_mem_regions(platform_t, struct mem_region *phys,
    int *physsz, struct mem_region *avail, int *availsz);
static u_long mpc85xx_timebase_freq(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_first_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_next_cpu(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_get_bsp(platform_t, struct cpuref *cpuref);
static int mpc85xx_smp_start_cpu(platform_t, struct pcpu *cpu);
static void mpc85xx_smp_timebase_sync(platform_t, u_long tb, int ap);

static void mpc85xx_reset(platform_t);

static platform_method_t mpc85xx_methods[] = {
	PLATFORMMETHOD(platform_probe,		mpc85xx_probe),
	PLATFORMMETHOD(platform_attach,		mpc85xx_attach),
	PLATFORMMETHOD(platform_mem_regions,	mpc85xx_mem_regions),
	PLATFORMMETHOD(platform_timebase_freq,	mpc85xx_timebase_freq),

	PLATFORMMETHOD(platform_smp_first_cpu,	mpc85xx_smp_first_cpu),
	PLATFORMMETHOD(platform_smp_next_cpu,	mpc85xx_smp_next_cpu),
	PLATFORMMETHOD(platform_smp_get_bsp,	mpc85xx_smp_get_bsp),
	PLATFORMMETHOD(platform_smp_start_cpu,	mpc85xx_smp_start_cpu),
	PLATFORMMETHOD(platform_smp_timebase_sync, mpc85xx_smp_timebase_sync),

	PLATFORMMETHOD(platform_reset,		mpc85xx_reset),

	PLATFORMMETHOD_END
};

DEFINE_CLASS_0(mpc85xx, mpc85xx_platform, mpc85xx_methods, 0);

PLATFORM_DEF(mpc85xx_platform);

static int
mpc85xx_probe(platform_t plat)
{
	u_int pvr = (mfpvr() >> 16) & 0xFFFF;

	switch (pvr) {
	case FSL_E500v1:
	case FSL_E500v2:
	case FSL_E500mc:
	case FSL_E5500:
	case FSL_E6500:
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

int
mpc85xx_attach(platform_t plat)
{
	phandle_t cpus, child, ccsr;
	const char *soc_name_guesses[] = {"/soc", "soc", NULL};
	const char **name;
	pcell_t ranges[6], acells, pacells, scells;
	uint64_t ccsrbar, ccsrsize;
	int i;

	if ((cpus = OF_finddevice("/cpus")) != -1) {
		for (maxcpu = 0, child = OF_child(cpus); child != 0;
		    child = OF_peer(child), maxcpu++)
			;
	} else
		maxcpu = 1;

	/*
	 * Locate CCSR region. Irritatingly, there is no way to find it
	 * unless you already know where it is. Try to infer its location
	 * from the device tree.
	 */

	ccsr = -1;
	for (name = soc_name_guesses; *name != NULL && ccsr == -1; name++)
		ccsr = OF_finddevice(*name);
	if (ccsr == -1) {
		char type[64];

		/* That didn't work. Search for devices of type "soc" */
		child = OF_child(OF_peer(0));
		for (OF_child(child); child != 0; child = OF_peer(child)) {
			if (OF_getprop(child, "device_type", type, sizeof(type))
			    <= 0)
				continue;

			if (strcmp(type, "soc") == 0) {
				ccsr = child;
				break;
			}
		}
	}

	if (ccsr == -1)
		panic("Could not locate CCSR window!");

	OF_getprop(ccsr, "#size-cells", &scells, sizeof(scells));
	OF_getprop(ccsr, "#address-cells", &acells, sizeof(acells));
	OF_searchprop(OF_parent(ccsr), "#address-cells", &pacells,
	    sizeof(pacells));
	OF_getprop(ccsr, "ranges", ranges, sizeof(ranges));
	ccsrbar = ccsrsize = 0;
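	/*
	 * "ranges" is laid out as <child-addr parent-addr size>.  Skip the
	 * child address cells and assemble the parent (physical) CCSR base
	 * address and the window size from the remaining cells.
	 */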
	for (i = acells; i < acells + pacells; i++) {
		ccsrbar <<= 32;
		ccsrbar |= ranges[i];
	}
	for (i = acells + pacells; i < acells + pacells + scells; i++) {
		ccsrsize <<= 32;
		ccsrsize |= ranges[i];
	}
	ccsrbar_va = pmap_early_io_map(ccsrbar, ccsrsize);
	ccsrbar_pa = ccsrbar;
	ccsrbar_size = ccsrsize;

	mpc85xx_enable_l3_cache();

	return (0);
}

void
mpc85xx_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
    struct mem_region *avail, int *availsz)
{

	ofw_mem_regions(phys, physsz, avail, availsz);
}

static u_long
mpc85xx_timebase_freq(platform_t plat, struct cpuref *cpuref)
{
	u_long ticks;
	phandle_t cpus, child;
	pcell_t freq;

	if (bootinfo != NULL) {
		if (bootinfo[0] == 1) {
			/* Backward compatibility. See 8-STABLE. */
			ticks = bootinfo[3] >> 3;
		} else {
			/* Compatibility with Juniper's loader. */
			ticks = bootinfo[5] >> 3;
		}
	} else
		ticks = 0;

	if ((cpus = OF_finddevice("/cpus")) == -1)
		goto out;

	if ((child = OF_child(cpus)) == 0)
		goto out;

	switch (OF_getproplen(child, "timebase-frequency")) {
	case 4:
	{
		uint32_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	case 8:
	{
		uint64_t tbase;
		OF_getprop(child, "timebase-frequency", &tbase, sizeof(tbase));
		ticks = tbase;
		return (ticks);
	}
	default:
		break;
	}

	freq = 0;
	if (OF_getprop(child, "bus-frequency", (void *)&freq,
	    sizeof(freq)) <= 0)
		goto out;

	if (freq == 0)
		goto out;

	/*
	 * With HID0[SEL_TBCLK] = 0 the Time Base and Decrementer of classic
	 * e500 cores advance once every 8 CCB (platform) clocks.  On QorIQ
	 * parts the timebase is instead clocked at the platform clock
	 * divided by 32, matching the CCSR_CTBCKSELR setting applied in
	 * mpc85xx_smp_start_cpu().
	 */
	if (mpc85xx_is_qoriq())
		ticks = freq / 32;
	else
		ticks = freq / 8;

out:
	if (ticks <= 0)
		panic("Unable to determine timebase frequency!");

	return (ticks);
}

static int
mpc85xx_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
{

	cpu = 0;
	cpuref->cr_cpuid = cpu;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_first_cpu: cpuid %d\n", cpuref->cr_cpuid);
	cpu++;

	return (0);
}

static int
mpc85xx_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
{

	if (cpu >= maxcpu)
		return (ENOENT);

	cpuref->cr_cpuid = cpu++;
	cpuref->cr_hwref = cpuref->cr_cpuid;
	if (bootverbose)
		printf("powerpc_smp_next_cpu: cpuid %d\n", cpuref->cr_cpuid);

	return (0);
}

static int
mpc85xx_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
{

	cpuref->cr_cpuid = mfspr(SPR_PIR);
	cpuref->cr_hwref = cpuref->cr_cpuid;

	return (0);
}

#ifdef SMP
static int
mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
{
	vm_paddr_t rel_pa, bptr;
	volatile struct cpu_release *rel;
	vm_offset_t rel_va, rel_page;
	phandle_t node;
	int i;

	/* If we're calling this, the node already exists. */
	node = OF_finddevice("/cpus");
	for (i = 0, node = OF_child(node); i < pc->pc_cpuid;
	    i++, node = OF_peer(node))
		;
	if (OF_getencprop(node, "cpu-release-addr", (pcell_t *)&rel_pa,
	    sizeof(rel_pa)) == -1) {
		return (ENOENT);
	}

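	/*
	 * Temporarily map the physical page that holds this CPU's spin-table
	 * entry so it can be filled in below.
	 */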
	rel_page = kva_alloc(PAGE_SIZE);
	if (rel_page == 0)
		return (ENOMEM);

	critical_enter();
	rel_va = rel_page + (rel_pa & PAGE_MASK);
	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
	rel = (struct cpu_release *)rel_va;
	bptr = pmap_kextract((uintptr_t)__boot_page);

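	/*
	 * The parked core spins on the entry address, so fill in pir and
	 * entry_h first and store entry_l last; the syncs and cache flushes
	 * make sure the rest of the entry is visible before the core is
	 * released.
	 */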
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	rel->pir = pc->pc_cpuid; __asm __volatile("sync" ::: "memory");
	rel->entry_h = (bptr >> 32); __asm __volatile("sync" ::: "memory");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	rel->entry_l = bptr & 0xffffffff; __asm __volatile("sync" ::: "memory");
	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
	if (bootverbose)
		printf("Waking up CPU %d via CPU release page %p\n",
		    pc->pc_cpuid, rel);
	critical_exit();
	pmap_kremove(rel_page);
	kva_free(rel_page, PAGE_SIZE);

	return (0);
}
#endif

static int
mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
{
#ifdef SMP
	vm_paddr_t bptr;
	uint32_t reg;
	int timeout;
	uintptr_t brr;
	int cpuid;
	int epapr_boot = 0;
	uint32_t tgt;

	if (mpc85xx_is_qoriq()) {
		reg = ccsr_read4(OCP85XX_COREDISR);
		cpuid = pc->pc_cpuid;

		if ((reg & (1 << cpuid)) != 0) {
			printf("%s: CPU %d is disabled!\n", __func__,
			    pc->pc_cpuid);
			return (-1);
		}

		brr = OCP85XX_BRR;
	} else {
		brr = OCP85XX_EEBPCR;
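		/* Core enable/hold-off bits live in the high byte of EEBPCR. */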
		cpuid = pc->pc_cpuid + 24;
	}
	bp_kernload = kernload;
	bp_virtaddr = (vm_offset_t)&__boot_page;
	/*
	 * bp_kernload and bp_virtaddr are in the boot page. Sync the cache
	 * because ePAPR booting has the other core(s) already running.
	 */
	cpu_flush_dcache(&bp_kernload, sizeof(bp_kernload));
	cpu_flush_dcache(&bp_virtaddr, sizeof(bp_virtaddr));

	ap_pcpu = pc;
	__asm __volatile("msync; isync");

	/* First try the ePAPR way. */
	if (mpc85xx_smp_start_cpu_epapr(plat, pc) == 0) {
		epapr_boot = 1;
		goto spin_wait;
	}

	reg = ccsr_read4(brr);
	if ((reg & (1 << cpuid)) != 0) {
		printf("SMP: CPU %d already out of hold-off state!\n",
		    pc->pc_cpuid);
		return (ENXIO);
	}

	/* Flush caches to have our changes hit DRAM. */
	cpu_flush_dcache(__boot_page, 4096);

	bptr = pmap_kextract((uintptr_t)__boot_page);
	KASSERT((bptr & 0xfff) == 0,
	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
	if (mpc85xx_is_qoriq()) {
		/*
		 * Read the DDR controller configuration to select the proper
		 * BPTR target ID.
		 *
		 * On P5020 bit 29 of DDR1_CS0_CONFIG enables DDR controller
		 * interleaving. If this bit is set, we have to use
		 * OCP85XX_TGTIF_RAM_INTL as the BPTR target ID. On other
		 * QorIQ DPAA SoCs, this bit is reserved and always 0.
		 */
		reg = ccsr_read4(OCP85XX_DDR1_CS0_CONFIG);
		if (reg & (1 << 29))
			tgt = OCP85XX_TGTIF_RAM_INTL;
		else
			tgt = OCP85XX_TGTIF_RAM1;

		/*
		 * Set BSTR to the physical address of the boot page.
		 */
		ccsr_write4(OCP85XX_BSTRH, bptr >> 32);
		ccsr_write4(OCP85XX_BSTRL, bptr);
		ccsr_write4(OCP85XX_BSTAR, OCP85XX_ENA_MASK |
		    (tgt << OCP85XX_TRGT_SHIFT_QORIQ) | (ffsl(PAGE_SIZE) - 2));

		/* Read back OCP85XX_BSTAR to synchronize the write. */
		ccsr_read4(OCP85XX_BSTAR);

		/*
		 * Enable and configure the time base on the new CPU.
		 */

		/* Set TB clock source to platform clock / 32. */
		reg = ccsr_read4(CCSR_CTBCKSELR);
		ccsr_write4(CCSR_CTBCKSELR, reg & ~(1 << pc->pc_cpuid));

		/* Enable TB. */
		reg = ccsr_read4(CCSR_CTBENR);
		ccsr_write4(CCSR_CTBENR, reg | (1 << pc->pc_cpuid));
	} else {
		/*
		 * Set BPTR to the physical address of the boot page: the
		 * most significant bit enables the boot page translation,
		 * the remaining bits hold the 4 KB-aligned physical page
		 * number.
		 */
		bptr = (bptr >> 12) | 0x80000000u;
		ccsr_write4(OCP85XX_BPTR, bptr);
		__asm __volatile("isync; msync");
	}

	/*
	 * Release AP from hold-off state
	 */
	reg = ccsr_read4(brr);
	ccsr_write4(brr, reg | (1 << cpuid));
	__asm __volatile("isync; msync");

spin_wait:
	timeout = 500;
	while (!pc->pc_awake && timeout--)
		DELAY(1000);	/* wait 1ms */

	/*
	 * Disable boot page translation so that the 4K page at the default
	 * address (= 0xfffff000) isn't permanently remapped and thus not
	 * usable otherwise.
	 */
	if (!epapr_boot) {
		if (mpc85xx_is_qoriq())
			ccsr_write4(OCP85XX_BSTAR, 0);
		else
			ccsr_write4(OCP85XX_BPTR, 0);
		__asm __volatile("isync; msync");
	}

	if (!pc->pc_awake)
		panic("SMP: CPU %d didn't wake up.\n", pc->pc_cpuid);
	return ((pc->pc_awake) ? 0 : EBUSY);
#else
	/* No SMP support */
	return (ENXIO);
#endif
}

static void
mpc85xx_reset(platform_t plat)
{

	/*
	 * Try the dedicated reset register first.
	 * If the SoC doesn't have one, we'll fall
	 * back to using the debug control register.
	 */
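	/* Writing 2 sets RSTCR[HRESET_REQ], requesting a hardware reset. */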
	ccsr_write4(OCP85XX_RSTCR, 2);

	mtmsr(mfmsr() & ~PSL_DE);

	/* Enable debug interrupts and issue reset. */
	mtspr(SPR_DBCR0, DBCR0_IDM | DBCR0_RST_SYSTEM);
	__asm __volatile("isync");

	/* Enable Debug Interrupts in MSR. */
	mtmsr(mfmsr() | PSL_DE);

	printf("Reset failed...\n");
	while (1)
		;
}

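/*
 * Timebase synchronization rendezvous: the BSP freezes all timebases
 * through the RCPM/GUTS hook, every CPU loads the common value, and the
 * BSP unfreezes the timebases once all CPUs have checked in.
 */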
static void
mpc85xx_smp_timebase_sync(platform_t plat, u_long tb, int ap)
{
	static volatile bool tb_ready;
	static volatile int cpu_done;

	if (ap) {
		/* APs. Hold off until we get a stable timebase. */
		while (!tb_ready)
			atomic_thread_fence_seq_cst();
		mttb(tb);
		atomic_add_int(&cpu_done, 1);
		while (cpu_done < mp_ncpus)
			atomic_thread_fence_seq_cst();
	} else {
		/* BSP */
		freeze_timebase(rcpm_dev, true);
		tb_ready = true;
		mttb(tb);
		atomic_add_int(&cpu_done, 1);
		while (cpu_done < mp_ncpus)
			atomic_thread_fence_seq_cst();
		freeze_timebase(rcpm_dev, false);
	}
}

/* Fallback freeze. In case no real handler is found in the device tree. */
static void
dummy_freeze(device_t dev, bool freeze)
{
	/* Nothing to do here, move along. */
}

/* QorIQ Run control/power management timebase management. */

#define	RCPM_CTBENR	0x00000084
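/* Core Time Base Enable Register: one bit per core; a cleared bit freezes
 * that core's timebase. */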
struct mpc85xx_rcpm_softc {
	struct resource	*sc_mem;
};

static void
mpc85xx_rcpm_freeze_timebase(device_t dev, bool freeze)
{
	struct mpc85xx_rcpm_softc *sc;

	sc = device_get_softc(dev);

	if (freeze)
		bus_write_4(sc->sc_mem, RCPM_CTBENR, 0);
	else
		bus_write_4(sc->sc_mem, RCPM_CTBENR, (1 << maxcpu) - 1);
}

static int
mpc85xx_rcpm_probe(device_t dev)
{
	if (!ofw_bus_is_compatible(dev, "fsl,qoriq-rcpm-1.0"))
		return (ENXIO);

	device_set_desc(dev, "QorIQ Run control and power management");
	return (BUS_PROBE_GENERIC);
}

static int
mpc85xx_rcpm_attach(device_t dev)
{
	struct mpc85xx_rcpm_softc *sc;
	int rid;

	sc = device_get_softc(dev);
	freeze_timebase = mpc85xx_rcpm_freeze_timebase;
	rcpm_dev = dev;

	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_SHAREABLE);

	return (0);
}

static device_method_t mpc85xx_rcpm_methods[] = {
	DEVMETHOD(device_probe,		mpc85xx_rcpm_probe),
	DEVMETHOD(device_attach,	mpc85xx_rcpm_attach),
	DEVMETHOD_END
};

static driver_t mpc85xx_rcpm_driver = {
	"rcpm",
	mpc85xx_rcpm_methods,
	sizeof(struct mpc85xx_rcpm_softc)
};

EARLY_DRIVER_MODULE(mpc85xx_rcpm, simplebus, mpc85xx_rcpm_driver, 0, 0,
    BUS_PASS_BUS);

/* "Global utilities" power management/Timebase management. */

#define	GUTS_DEVDISR	0x00000070
#define	DEVDISR_TB0	0x00004000
#define	DEVDISR_TB1	0x00001000
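/*
 * The DEVDISR timebase bits gate the timebase of each of the two cores on
 * these dual-core parts; setting them freezes the timebases.
 */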

struct mpc85xx_guts_softc {
	struct resource	*sc_mem;
};

static void
mpc85xx_guts_freeze_timebase(device_t dev, bool freeze)
{
	struct mpc85xx_guts_softc *sc;
	uint32_t devdisr;

	sc = device_get_softc(dev);

	devdisr = bus_read_4(sc->sc_mem, GUTS_DEVDISR);
	if (freeze)
		bus_write_4(sc->sc_mem, GUTS_DEVDISR,
		    devdisr | (DEVDISR_TB0 | DEVDISR_TB1));
	else
		bus_write_4(sc->sc_mem, GUTS_DEVDISR,
		    devdisr & ~(DEVDISR_TB0 | DEVDISR_TB1));
}

static int
mpc85xx_guts_probe(device_t dev)
{
	if (!ofw_bus_is_compatible(dev, "fsl,mpc8572-guts") &&
	    !ofw_bus_is_compatible(dev, "fsl,p1020-guts") &&
	    !ofw_bus_is_compatible(dev, "fsl,p1021-guts") &&
	    !ofw_bus_is_compatible(dev, "fsl,p1022-guts") &&
	    !ofw_bus_is_compatible(dev, "fsl,p1023-guts") &&
	    !ofw_bus_is_compatible(dev, "fsl,p2020-guts"))
		return (ENXIO);

	device_set_desc(dev, "MPC85xx Global Utilities");
	return (BUS_PROBE_GENERIC);
}

static int
mpc85xx_guts_attach(device_t dev)
{
	struct mpc85xx_guts_softc *sc;
	int rid;

	sc = device_get_softc(dev);
	freeze_timebase = mpc85xx_guts_freeze_timebase;
	rcpm_dev = dev;

	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_SHAREABLE);

	return (0);
}

static device_method_t mpc85xx_guts_methods[] = {
	DEVMETHOD(device_probe,		mpc85xx_guts_probe),
	DEVMETHOD(device_attach,	mpc85xx_guts_attach),
	DEVMETHOD_END
};

static driver_t mpc85xx_guts_driver = {
	"guts",
	mpc85xx_guts_methods,
	sizeof(struct mpc85xx_guts_softc)
};

EARLY_DRIVER_MODULE(mpc85xx_guts, simplebus, mpc85xx_guts_driver, 0, 0,
    BUS_PASS_BUS);