1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2011 The FreeBSD Foundation
5 * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
6 * All rights reserved.
7 *
8 * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 * endorse or promote products derived from this software without specific
20 * prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 /**
36 * Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37 */
38
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/rman.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 #include <sys/smp.h>
52 #include <sys/vdso.h>
53 #include <sys/watchdog.h>
54
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 #include <machine/machdep.h>
59 #include <machine/md_var.h>
60
61 #if defined(__aarch64__)
62 #include <machine/undefined.h>
63 #include <machine/cpufunc.h>
64 #include <machine/cpu_feat.h>
65 #endif
66
67 #ifdef FDT
68 #include <dev/ofw/openfirm.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 #endif
72
73 #ifdef DEV_ACPI
74 #include <contrib/dev/acpica/include/acpi.h>
75 #include <dev/acpica/acpivar.h>
76 #endif
77
/* Timer interrupt indexes; order matches the FDT interrupt list and GTDT. */
#define GT_PHYS_SECURE 0
#define GT_PHYS_NONSECURE 1
#define GT_VIRT 2
#define GT_HYP_PHYS 3
#define GT_HYP_VIRT 4
#define GT_IRQ_COUNT 5

/* CNTx_CTL control register bits and register selector indexes. */
#define GT_CTRL_ENABLE (1 << 0)
#define GT_CTRL_INT_MASK (1 << 1)
#define GT_CTRL_INT_STAT (1 << 2)
#define GT_REG_CTRL 0
#define GT_REG_TVAL 1

/* CNTKCTL bits controlling EL0/userspace access to the timer registers. */
#define GT_CNTKCTL_PL0PTEN (1 << 9) /* PL0 Physical timer reg access */
#define GT_CNTKCTL_PL0VTEN (1 << 8) /* PL0 Virtual timer reg access */
#define GT_CNTKCTL_EVNTI (0xf << 4) /* Virtual counter event bits */
#define GT_CNTKCTL_EVNTDIR (1 << 3) /* Virtual counter event transition */
#define GT_CNTKCTL_EVNTEN (1 << 2) /* Enables virtual counter events */
#define GT_CNTKCTL_PL0VCTEN (1 << 1) /* PL0 CNTVCT and CNTFRQ access */
#define GT_CNTKCTL_PL0PCTEN (1 << 0) /* PL0 CNTPCT and CNTFRQ access */
98
#if defined(__aarch64__)
/* Set once FEAT_WFxT is detected; lets the delay loop sleep in wfet. */
static bool __read_mostly enable_wfxt = false;
#endif
102
struct arm_tmr_softc;

/* One allocated timer interrupt. */
struct arm_tmr_irq {
	struct resource *res;	/* bus IRQ resource */
	void *ihl;		/* cookie returned by bus_setup_intr() */
	int rid;		/* resource id the IRQ was allocated with */
	int idx;		/* GT_* index identifying the timer line */
};

/* Per-device state for the generic timer. */
struct arm_tmr_softc {
	struct arm_tmr_irq irqs[GT_IRQ_COUNT];
	uint64_t (*get_cntxct)(bool);	/* counter read hook (quirkable) */
	uint32_t clkfreq;		/* counter frequency, Hz */
	int irq_count;			/* valid entries in irqs[] */
	struct eventtimer et;
	bool physical_sys;	/* kernel drives the physical timer */
	bool physical_user;	/* userspace may read the physical counter */
};

/* Singleton; set by arm_tmr_attach() on the first successful attach. */
static struct arm_tmr_softc *arm_tmr_sc = NULL;
123
/*
 * Static description of the possible timer interrupts.  Non-optional
 * entries must be allocatable for attach to succeed; RF_OPTIONAL ones
 * may be absent on a given platform.
 */
static const struct arm_tmr_irq_defs {
	int idx;		/* GT_* index */
	const char *name;	/* FDT interrupt-names entry */
	int flags;		/* resource allocation flags */
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};
155
156 static int arm_tmr_attach(device_t);
157
158 static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
159 struct timecounter *tc);
160 static void arm_tmr_do_delay(int usec, void *);
161
162 static timecounter_get_t arm_tmr_get_timecount;
163
/*
 * System timecounter backed by the generic timer counter; tc_frequency
 * is filled in at attach time once the clock frequency is known.
 */
static struct timecounter arm_tmr_timecount = {
	.tc_name = "ARM MPCore Timecounter",
	.tc_get_timecount = arm_tmr_get_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,
	.tc_quality = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};
173
174 #ifdef __arm__
175 #define get_el0(x) cp15_## x ##_get()
176 #define get_el1(x) cp15_## x ##_get()
177 #define set_el0(x, val) cp15_## x ##_set(val)
178 #define set_el1(x, val) cp15_## x ##_set(val)
179 #define HAS_PHYS true
180 #define IN_VHE false
181 #else /* __aarch64__ */
182 #define get_el0(x) READ_SPECIALREG(x ##_el0)
183 #define get_el1(x) READ_SPECIALREG(x ##_el1)
184 #define set_el0(x, val) WRITE_SPECIALREG(x ##_el0, val)
185 #define set_el1(x, val) WRITE_SPECIALREG(x ##_el1, val)
186 #define HAS_PHYS has_hyp()
187 #define IN_VHE in_vhe()
188 #endif
189
/* Read the counter frequency (Hz) advertised by the CNTFRQ register. */
static int
get_freq(void)
{
	return (get_el0(cntfrq));
}
195
196 #ifdef FDT
197 static uint64_t
get_cntxct_a64_unstable(bool physical)198 get_cntxct_a64_unstable(bool physical)
199 {
200 uint64_t val;
201
202 isb();
203 if (physical) {
204 do {
205 val = get_el0(cntpct);
206 }
207 while (((val + 1) & 0x7FF) <= 1);
208 }
209 else {
210 do {
211 val = get_el0(cntvct);
212 }
213 while (((val + 1) & 0x7FF) <= 1);
214 }
215
216 return (val);
217 }
218 #endif
219
220 static uint64_t
get_cntxct(bool physical)221 get_cntxct(bool physical)
222 {
223 uint64_t val;
224
225 isb();
226 if (physical)
227 val = get_el0(cntpct);
228 else
229 val = get_el0(cntvct);
230
231 return (val);
232 }
233
234 static int
set_ctrl(uint32_t val,bool physical)235 set_ctrl(uint32_t val, bool physical)
236 {
237
238 if (physical)
239 set_el0(cntp_ctl, val);
240 else
241 set_el0(cntv_ctl, val);
242 isb();
243
244 return (0);
245 }
246
247 static int
set_tval(uint32_t val,bool physical)248 set_tval(uint32_t val, bool physical)
249 {
250
251 if (physical)
252 set_el0(cntp_tval, val);
253 else
254 set_el0(cntv_tval, val);
255 isb();
256
257 return (0);
258 }
259
260 static int
get_ctrl(bool physical)261 get_ctrl(bool physical)
262 {
263 uint32_t val;
264
265 if (physical)
266 val = get_el0(cntp_ctl);
267 else
268 val = get_el0(cntv_ctl);
269
270 return (val);
271 }
272
/*
 * Per-CPU hook (run via smp_rendezvous) programming CNTKCTL, which
 * gates EL0/userspace access to the counter and timer registers.
 */
static void
setup_user_access(void *arg __unused)
{
	uint32_t cntkctl;

	cntkctl = get_el1(cntkctl);
	/* Revoke userspace timer-register access and counter events. */
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always allow userspace to read the virtual counter (CNTVCT). */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Also allow physical counter reads when deemed safe at attach. */
	if (arm_tmr_sc->physical_user) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	}
	set_el1(cntkctl, cntkctl);
	isb();
}
290
291 #ifdef __aarch64__
/*
 * Trap handler emulating userspace reads of CNTPCT_EL0 by returning
 * CNTVCT_EL0 instead; installed when the hw.emulate_phys_counter
 * tunable is set.  Returns true when the faulting instruction was
 * handled (and skipped), false to let other handlers run.
 */
static bool
cntpct_handler(uint64_t esr, struct trapframe *frame)
{
	uint64_t val;
	int reg;

	/* Only MSR/MRS-class exceptions... */
	if (ESR_ELx_EXCEPTION(esr) != EXCP_MSR)
		return (false);

	/* ...that are register reads... */
	if ((esr & ISS_MSR_DIR) == 0)
		return (false);

	/* ...targeting the physical counter register. */
	if ((esr & ISS_MSR_REG_MASK) != CNTPCT_EL0_ISS)
		return (false);

	reg = ISS_MSR_Rt(esr);
	val = READ_SPECIALREG(cntvct_el0);
	/* Any other Rt (e.g. xzr) discards the result. */
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {
		frame->tf_lr = val;
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	return (true);
}
323 #endif
324
/*
 * Run once all CPUs are up: program CNTKCTL on every CPU and,
 * optionally (hw.emulate_phys_counter tunable), install the trap
 * handler that emulates userspace CNTPCT_EL0 reads.
 */
static void
tmr_setup_user_access(void *arg __unused)
{
#ifdef __aarch64__
	int emulate;
#endif

	/* Nothing to do if the timer never attached. */
	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
#ifdef __aarch64__
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
		    emulate != 0) {
			install_sys_handler(cntpct_handler);
		}
#endif
	}
}
SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
343
/*
 * timecounter(4) read method: return the system counter (truncated to
 * the counter mask by the timecounter framework).
 */
static unsigned
arm_tmr_get_timecount(struct timecounter *tc)
{

	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
}
350
/*
 * eventtimer(4) start method.  Arms a one-shot expiry 'first' from
 * now; periodic mode is not supported (ET_FLAGS_ONESHOT only), so a
 * zero 'first' is rejected.
 */
static int
arm_tmr_start(struct eventtimer *et, sbintime_t first,
    sbintime_t period __unused)
{
	struct arm_tmr_softc *sc;
	int counts, ctrl;

	sc = (struct arm_tmr_softc *)et->et_priv;

	if (first != 0) {
		/* Convert 32.32 fixed-point sbintime to counter ticks. */
		counts = ((uint32_t)et->et_frequency * first) >> 32;
		ctrl = get_ctrl(sc->physical_sys);
		ctrl &= ~GT_CTRL_INT_MASK;	/* unmask the interrupt */
		ctrl |= GT_CTRL_ENABLE;
		/* Load the countdown before enabling. */
		set_tval(counts, sc->physical_sys);
		set_ctrl(ctrl, sc->physical_sys);
		return (0);
	}

	return (EINVAL);

}
373
374 static void
arm_tmr_disable(bool physical)375 arm_tmr_disable(bool physical)
376 {
377 int ctrl;
378
379 ctrl = get_ctrl(physical);
380 ctrl &= ~GT_CTRL_ENABLE;
381 set_ctrl(ctrl, physical);
382 }
383
384 static int
arm_tmr_stop(struct eventtimer * et)385 arm_tmr_stop(struct eventtimer *et)
386 {
387 struct arm_tmr_softc *sc;
388
389 sc = (struct arm_tmr_softc *)et->et_priv;
390 arm_tmr_disable(sc->physical_sys);
391
392 return (0);
393 }
394
/*
 * Timer interrupt filter: acknowledge the timer by masking its
 * interrupt (it is re-unmasked by the next arm_tmr_start()) and
 * dispatch the registered eventtimer callback.
 */
static int
arm_tmr_intr(void *arg)
{
	struct arm_tmr_softc *sc;
	int ctrl;

	sc = (struct arm_tmr_softc *)arg;
	ctrl = get_ctrl(sc->physical_sys);
	if (ctrl & GT_CTRL_INT_STAT) {
		/* Mask until rearmed, so the line stops asserting. */
		ctrl |= GT_CTRL_INT_MASK;
		set_ctrl(ctrl, sc->physical_sys);
	}

	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}
413
/*
 * Allocate the bus IRQ resource for one timer interrupt and record it
 * in the softc.  Failure to allocate an RF_OPTIONAL interrupt is not
 * fatal; a required one returns ENXIO.
 */
static int
arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
    const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
{
	struct arm_tmr_irq *irq;

	irq = &sc->irqs[sc->irq_count];
	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, flags);
	if (irq->res == NULL) {
		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
			device_printf(dev,
			    "could not allocate irq for %s interrupt '%s'\n",
			    (flags & RF_OPTIONAL) != 0 ? "optional" :
			    "required", irq_def->name);
		}

		if ((flags & RF_OPTIONAL) == 0)
			return (ENXIO);
	} else {
		if (bootverbose)
			device_printf(dev, "allocated irq for '%s'\n",
			    irq_def->name);
		/* Record only on success; irq_count indexes valid slots. */
		irq->rid = rid;
		irq->idx = irq_def->idx;
		sc->irq_count++;
	}

	return (0);
}
444
445 #ifdef FDT
446 static int
arm_tmr_fdt_probe(device_t dev)447 arm_tmr_fdt_probe(device_t dev)
448 {
449
450 if (!ofw_bus_status_okay(dev))
451 return (ENXIO);
452
453 if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
454 device_set_desc(dev, "ARMv8 Generic Timer");
455 return (BUS_PROBE_DEFAULT);
456 } else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
457 device_set_desc(dev, "ARMv7 Generic Timer");
458 return (BUS_PROBE_DEFAULT);
459 }
460
461 return (ENXIO);
462 }
463
/*
 * FDT attach: resolve each timer interrupt either by the
 * "interrupt-names" property or, when absent, by its conventional
 * position, allocate the resources and hand off to the common attach.
 * On any failure every IRQ allocated so far is released.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ acquired before the failure. */
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
539 #endif
540
541 #ifdef DEV_ACPI
/* Record a GTDT-described interrupt as a SYS_RES_IRQ resource on the child. */
static void
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
{

	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
}
548
/*
 * ACPI identify: create the generic_timer child from the GTDT table
 * and attach the interrupts it describes as bus resources.  Note that
 * only four interrupts are set here; GT_HYP_VIRT (optional) has no
 * GTDT field used by this code.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
	    gtdt->NonSecureEl2Interrupt);

out:
	acpi_unmap_table(gtdt);
}
585
/* ACPI probe: the device was created by identify, so always match it. */
static int
arm_tmr_acpi_probe(device_t dev)
{

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
}
593
/*
 * ACPI attach: allocate the interrupts that identify stored (rid ==
 * GT_* index) and hand off to the common attach code, releasing any
 * acquired resources on failure.
 */
static int
arm_tmr_acpi_attach(device_t dev)
{
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;
	int error;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
		    irq_def->flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ acquired before the failure. */
		for (int i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->irqs[i].rid, sc->irqs[i].res);
		}
	}
	return (error);
}
620 #endif
621
622 static int
arm_tmr_attach(device_t dev)623 arm_tmr_attach(device_t dev)
624 {
625 struct arm_tmr_softc *sc;
626 #ifdef INVARIANTS
627 const struct arm_tmr_irq_defs *irq_def;
628 #endif
629 #ifdef FDT
630 phandle_t node;
631 pcell_t clock;
632 #endif
633 #ifdef __aarch64__
634 int user_phys;
635 #endif
636 int error;
637 int i, first_timer, last_timer;
638
639 sc = device_get_softc(dev);
640 if (arm_tmr_sc)
641 return (ENXIO);
642
643 sc->get_cntxct = &get_cntxct;
644 #ifdef FDT
645 /* Get the base clock frequency */
646 node = ofw_bus_get_node(dev);
647 if (node > 0) {
648 error = OF_getencprop(node, "clock-frequency", &clock,
649 sizeof(clock));
650 if (error > 0)
651 sc->clkfreq = clock;
652
653 if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
654 sc->get_cntxct = &get_cntxct_a64_unstable;
655 if (bootverbose)
656 device_printf(dev,
657 "Enabling allwinner unstable timer workaround\n");
658 }
659 }
660 #endif
661
662 if (sc->clkfreq == 0) {
663 /* Try to get clock frequency from timer */
664 sc->clkfreq = get_freq();
665 }
666
667 if (sc->clkfreq == 0) {
668 device_printf(dev, "No clock frequency specified\n");
669 return (ENXIO);
670 }
671
672 #ifdef INVARIANTS
673 /* Confirm that non-optional irqs were allocated before coming in. */
674 for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
675 int j;
676
677 irq_def = &arm_tmr_irq_defs[i];
678
679 /* Skip optional interrupts */
680 if ((irq_def->flags & RF_OPTIONAL) != 0)
681 continue;
682
683 for (j = 0; j < sc->irq_count; j++) {
684 if (sc->irqs[j].idx == irq_def->idx)
685 break;
686 }
687 KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
688 __func__, irq_def->name));
689 }
690 #endif
691
692 #ifdef __aarch64__
693 if (IN_VHE) {
694 /*
695 * The kernel is running at EL2. The EL0 timer registers are
696 * re-mapped to the EL2 version. Because of this we need to
697 * use the EL2 interrupt.
698 */
699 sc->physical_sys = true;
700 first_timer = GT_HYP_PHYS;
701 last_timer = GT_HYP_PHYS;
702 } else if (!HAS_PHYS) {
703 /*
704 * Use the virtual timer when we can't use the hypervisor.
705 * A hypervisor guest may change the virtual timer registers
706 * while executing so any use of the virtual timer interrupt
707 * needs to be coordinated with the virtual machine manager.
708 */
709 sc->physical_sys = false;
710 first_timer = GT_VIRT;
711 last_timer = GT_VIRT;
712 } else
713 #endif
714 /* Otherwise set up the secure and non-secure physical timers. */
715 {
716 sc->physical_sys = true;
717 first_timer = GT_PHYS_SECURE;
718 last_timer = GT_PHYS_NONSECURE;
719 }
720
721 #ifdef __aarch64__
722 /*
723 * The virtual timer is always available on arm and arm64, tell
724 * userspace to use it.
725 */
726 sc->physical_user = false;
727 /* Allow use of the physical counter in userspace when available */
728 if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
729 user_phys != 0)
730 sc->physical_user = sc->physical_sys;
731 #else
732 /*
733 * The virtual timer depends on setting cntvoff from the hypervisor
734 * privilege level/el2, however this is only set on arm64.
735 */
736 sc->physical_user = true;
737 #endif
738
739 arm_tmr_sc = sc;
740
741 /* Setup secure, non-secure and virtual IRQs handler */
742 for (i = 0; i < sc->irq_count; i++) {
743 /* Only enable IRQs on timers we expect to use */
744 if (sc->irqs[i].idx < first_timer ||
745 sc->irqs[i].idx > last_timer)
746 continue;
747 error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
748 arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
749 if (error) {
750 device_printf(dev, "Unable to alloc int resource.\n");
751 for (int j = 0; j < i; j++)
752 bus_teardown_intr(dev, sc->irqs[j].res,
753 &sc->irqs[j].ihl);
754 return (ENXIO);
755 }
756 }
757
758 /* Disable the timers until we are ready */
759 arm_tmr_disable(false);
760 if (HAS_PHYS)
761 arm_tmr_disable(true);
762
763 arm_tmr_timecount.tc_frequency = sc->clkfreq;
764 tc_init(&arm_tmr_timecount);
765
766 sc->et.et_name = "ARM MPCore Eventtimer";
767 sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
768 sc->et.et_quality = 1000;
769
770 sc->et.et_frequency = sc->clkfreq;
771 sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
772 sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
773 sc->et.et_start = arm_tmr_start;
774 sc->et.et_stop = arm_tmr_stop;
775 sc->et.et_priv = sc;
776 et_register(&sc->et);
777
778 #if defined(__arm__)
779 arm_set_delay(arm_tmr_do_delay, sc);
780 #endif
781
782 return (0);
783 }
784
785 #ifdef FDT
/* FDT front end: registered on both simplebus and ofwbus at timer pass. */
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe, arm_tmr_fdt_probe),
	DEVMETHOD(device_attach, arm_tmr_fdt_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
799 #endif
800
801 #ifdef DEV_ACPI
/* ACPI front end: identify creates the child from the GTDT table. */
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify, arm_tmr_acpi_identify),
	DEVMETHOD(device_probe, arm_tmr_acpi_probe),
	DEVMETHOD(device_attach, arm_tmr_acpi_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
814 #endif
815
816 static int64_t
arm_tmr_get_counts(int usec)817 arm_tmr_get_counts(int usec)
818 {
819 int64_t counts, counts_per_usec;
820
821 /* Get the number of times to count */
822 counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
823
824 /*
825 * Clamp the timeout at a maximum value (about 32 seconds with
826 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
827 * near that length of time and if they are, they should be hung
828 * out to dry.
829 */
830 if (usec >= (0x80000000U / counts_per_usec))
831 counts = (0x80000000U / counts_per_usec) - 1;
832 else
833 counts = usec * counts_per_usec;
834
835 return counts;
836 }
837
/*
 * Busy-wait for 'usec' microseconds against the system counter.  On
 * arm64 with FEAT_WFxT the core waits in wfet until the deadline
 * instead of pure spinning.
 */
static void
arm_tmr_do_delay(int usec, void *arg)
{
	struct arm_tmr_softc *sc = arg;
	int64_t counts;
	uint64_t first;
#if defined(__aarch64__)
	int64_t end;
#endif

	counts = arm_tmr_get_counts(usec);
	first = sc->get_cntxct(sc->physical_sys);
#if defined(__aarch64__)
	end = first + counts;
#endif

	/* Unsigned subtraction keeps this correct across counter wrap. */
	while ((sc->get_cntxct(sc->physical_sys) - first) < counts) {
#if defined(__aarch64__)
		if (enable_wfxt)
			wfet(end);
#endif
	}
}
861
862 #if defined(__aarch64__)
/*
 * arm64 DELAY(9): spin for 'usec' microseconds.  Uses the generic
 * timer once attached; before that, falls back to a crude calibrated
 * nop loop.
 */
void
DELAY(int usec)
{
	int32_t counts;

	TSENTER();
	/*
	 * We have two options for a delay: using the timer, or using the wfet
	 * instruction. However, both of these are dependent on timers being
	 * setup, and if they're not just use a loop for the meantime.
	 */
	if (arm_tmr_sc != NULL) {
		arm_tmr_do_delay(usec, arm_tmr_sc);
	} else {
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/* Prevent the compiler from optimizing out the loop */
				cpufunc_nullop();
	}
	TSEXIT();
}
884
/*
 * cpu_feat check hook: report whether the CPU implements FEAT_WFxT
 * per the ID_AA64ISAR2_EL1 register.
 */
static bool
wfxt_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	uint64_t id_aa64isar2;

	if (!get_kernel_reg(ID_AA64ISAR2_EL1, &id_aa64isar2))
		return (false);
	return (ID_AA64ISAR2_WFxT_VAL(id_aa64isar2) != ID_AA64ISAR2_WFxT_NONE);
}
894
/* cpu_feat enable hook: record that wfet may be used for delays. */
static void
wfxt_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* will be called if wfxt_check returns true */
	enable_wfxt = true;
}
903
/* Register FEAT_WFxT detection with the CPU feature framework. */
static struct cpu_feat feat_wfxt = {
	.feat_name = "FEAT_WFXT",
	.feat_check = wfxt_check,
	.feat_enable = wfxt_enable,
	.feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM,
};
DATA_SET(cpu_feat_set, feat_wfxt);
911 #endif
912
/*
 * Publish generic-timer parameters to the shared page so userspace
 * (via the VDSO) can read the counter directly; th_physical tells it
 * whether the physical counter is accessible from EL0.
 */
static uint32_t
arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
	vdso_th->th_physical = arm_tmr_sc->physical_user;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}
923