xref: /freebsd/sys/arm/arm/generic_timer.c (revision 41dfdf04e2b1e39634603370299b15634a17e4a7)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
6  * All rights reserved.
7  *
8  * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the company nor the name of the author may be used to
19  *    endorse or promote products derived from this software without specific
20  *    prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /**
36  *      Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37  */
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/rman.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 #include <sys/smp.h>
52 #include <sys/vdso.h>
53 #include <sys/watchdog.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 #include <machine/machdep.h>
59 #include <machine/md_var.h>
60 
61 #if defined(__aarch64__)
62 #include <machine/undefined.h>
63 #endif
64 
65 #ifdef FDT
66 #include <dev/ofw/openfirm.h>
67 #include <dev/ofw/ofw_bus.h>
68 #include <dev/ofw/ofw_bus_subr.h>
69 #endif
70 
71 #ifdef DEV_ACPI
72 #include <contrib/dev/acpica/include/acpi.h>
73 #include <dev/acpica/acpivar.h>
74 #endif
75 
76 #define	GT_PHYS_SECURE		0
77 #define	GT_PHYS_NONSECURE	1
78 #define	GT_VIRT			2
79 #define	GT_HYP_PHYS		3
80 #define	GT_HYP_VIRT		4
81 #define	GT_IRQ_COUNT		5
82 
83 #define	GT_CTRL_ENABLE		(1 << 0)
84 #define	GT_CTRL_INT_MASK	(1 << 1)
85 #define	GT_CTRL_INT_STAT	(1 << 2)
86 #define	GT_REG_CTRL		0
87 #define	GT_REG_TVAL		1
88 
89 #define	GT_CNTKCTL_PL0PTEN	(1 << 9) /* PL0 Physical timer reg access */
90 #define	GT_CNTKCTL_PL0VTEN	(1 << 8) /* PL0 Virtual timer reg access */
91 #define	GT_CNTKCTL_EVNTI	(0xf << 4) /* Virtual counter event bits */
92 #define	GT_CNTKCTL_EVNTDIR	(1 << 3) /* Virtual counter event transition */
93 #define	GT_CNTKCTL_EVNTEN	(1 << 2) /* Enables virtual counter events */
94 #define	GT_CNTKCTL_PL0VCTEN	(1 << 1) /* PL0 CNTVCT and CNTFRQ access */
95 #define	GT_CNTKCTL_PL0PCTEN	(1 << 0) /* PL0 CNTPCT and CNTFRQ access */
96 
97 struct arm_tmr_softc;
98 
99 struct arm_tmr_irq {
100 	struct resource	*res;
101 	void		*ihl;
102 	int		 rid;
103 	int		 idx;
104 };
105 
106 struct arm_tmr_softc {
107 	struct arm_tmr_irq	irqs[GT_IRQ_COUNT];
108 	uint64_t		(*get_cntxct)(bool);
109 	uint32_t		clkfreq;
110 	int			irq_count;
111 	struct eventtimer	et;
112 	bool			physical_sys;
113 	bool			physical_user;
114 };
115 
116 static struct arm_tmr_softc *arm_tmr_sc = NULL;
117 
118 static const struct arm_tmr_irq_defs {
119 	int idx;
120 	const char *name;
121 	int flags;
122 } arm_tmr_irq_defs[] = {
123 	{
124 		.idx = GT_PHYS_SECURE,
125 		.name = "sec-phys",
126 		.flags = RF_ACTIVE | RF_OPTIONAL,
127 	},
128 	{
129 		.idx = GT_PHYS_NONSECURE,
130 		.name = "phys",
131 		.flags = RF_ACTIVE,
132 	},
133 	{
134 		.idx = GT_VIRT,
135 		.name = "virt",
136 		.flags = RF_ACTIVE,
137 	},
138 	{
139 		.idx = GT_HYP_PHYS,
140 		.name = "hyp-phys",
141 		.flags = RF_ACTIVE | RF_OPTIONAL,
142 	},
143 	{
144 		.idx = GT_HYP_VIRT,
145 		.name = "hyp-virt",
146 		.flags = RF_ACTIVE | RF_OPTIONAL,
147 	},
148 };
149 
150 static int arm_tmr_attach(device_t);
151 
152 static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
153     struct timecounter *tc);
154 static void arm_tmr_do_delay(int usec, void *);
155 
156 static timecounter_get_t arm_tmr_get_timecount;
157 
158 static struct timecounter arm_tmr_timecount = {
159 	.tc_name           = "ARM MPCore Timecounter",
160 	.tc_get_timecount  = arm_tmr_get_timecount,
161 	.tc_poll_pps       = NULL,
162 	.tc_counter_mask   = ~0u,
163 	.tc_frequency      = 0,
164 	.tc_quality        = 1000,
165 	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
166 };
167 
168 #ifdef __arm__
169 #define	get_el0(x)	cp15_## x ##_get()
170 #define	get_el1(x)	cp15_## x ##_get()
171 #define	set_el0(x, val)	cp15_## x ##_set(val)
172 #define	set_el1(x, val)	cp15_## x ##_set(val)
173 #define	HAS_PHYS	true
174 #define	IN_VHE		false
175 #else /* __aarch64__ */
176 #define	get_el0(x)	READ_SPECIALREG(x ##_el0)
177 #define	get_el1(x)	READ_SPECIALREG(x ##_el1)
178 #define	set_el0(x, val)	WRITE_SPECIALREG(x ##_el0, val)
179 #define	set_el1(x, val)	WRITE_SPECIALREG(x ##_el1, val)
180 #define	HAS_PHYS	has_hyp()
181 #define	IN_VHE		in_vhe()
182 #endif
183 
184 static int
185 get_freq(void)
186 {
187 	return (get_el0(cntfrq));
188 }
189 
190 #ifdef FDT
191 static uint64_t
192 get_cntxct_a64_unstable(bool physical)
193 {
194 	uint64_t val;
195 
196 	isb();
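	/*
	 * The A64 counter can return bogus values as its low 11 bits
	 * roll over; if a read lands on that boundary (low bits are
	 * 0x7ff or 0x000), discard it and read again.
	 */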
197 	if (physical) {
198 		do {
199 			val = get_el0(cntpct);
200 		}
201 		while (((val + 1) & 0x7FF) <= 1);
202 	}
203 	else {
204 		do {
205 			val = get_el0(cntvct);
206 		}
207 		while (((val + 1) & 0x7FF) <= 1);
208 	}
209 
210 	return (val);
211 }
212 #endif
213 
214 static uint64_t
215 get_cntxct(bool physical)
216 {
217 	uint64_t val;
218 
219 	isb();
220 	if (physical)
221 		val = get_el0(cntpct);
222 	else
223 		val = get_el0(cntvct);
224 
225 	return (val);
226 }
227 
228 static int
229 set_ctrl(uint32_t val, bool physical)
230 {
231 
232 	if (physical)
233 		set_el0(cntp_ctl, val);
234 	else
235 		set_el0(cntv_ctl, val);
236 	isb();
237 
238 	return (0);
239 }
240 
241 static int
242 set_tval(uint32_t val, bool physical)
243 {
244 
245 	if (physical)
246 		set_el0(cntp_tval, val);
247 	else
248 		set_el0(cntv_tval, val);
249 	isb();
250 
251 	return (0);
252 }
253 
254 static int
255 get_ctrl(bool physical)
256 {
257 	uint32_t val;
258 
259 	if (physical)
260 		val = get_el0(cntp_ctl);
261 	else
262 		val = get_el0(cntv_ctl);
263 
264 	return (val);
265 }
266 
267 static void
268 setup_user_access(void *arg __unused)
269 {
270 	uint32_t cntkctl;
271 
272 	cntkctl = get_el1(cntkctl);
273 	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
274 	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
275 	/* Always allow userspace access to the virtual counter */
276 	cntkctl |= GT_CNTKCTL_PL0VCTEN;
277 	/* Also allow physical counter access when usable from userspace */
278 	if (arm_tmr_sc->physical_user) {
279 		cntkctl |= GT_CNTKCTL_PL0PCTEN;
280 	}
281 	set_el1(cntkctl, cntkctl);
282 	isb();
283 }
284 
285 #ifdef __aarch64__
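/*
 * Undefined-instruction handler installed when the hw.emulate_phys_counter
 * tunable is set: userspace reads of CNTPCT_EL0 trap here and are satisfied
 * from the virtual counter instead.
 */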
286 static int
287 cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
288     uint32_t esr)
289 {
290 	uint64_t val;
291 	int reg;
292 
293 	if ((insn & MRS_MASK) != MRS_VALUE)
294 		return (0);
295 
296 	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))
297 		return (0);
298 
299 	reg = MRS_REGISTER(insn);
300 	val = READ_SPECIALREG(cntvct_el0);
301 	if (reg < nitems(frame->tf_x)) {
302 		frame->tf_x[reg] = val;
303 	} else if (reg == 30) {
304 		frame->tf_lr = val;
305 	}
306 
307 	/*
308 	 * We will handle this instruction, move to the next so we
309 	 * don't trap here again.
310 	 */
311 	frame->tf_elr += INSN_SIZE;
312 
313 	return (1);
314 }
315 #endif
316 
317 static void
318 tmr_setup_user_access(void *arg __unused)
319 {
320 #ifdef __aarch64__
321 	int emulate;
322 #endif
323 
324 	if (arm_tmr_sc != NULL) {
325 		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
326 #ifdef __aarch64__
327 		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
328 		    emulate != 0) {
329 			install_undef_handler(true, cntpct_handler);
330 		}
331 #endif
332 	}
333 }
334 SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
335 
336 static unsigned
337 arm_tmr_get_timecount(struct timecounter *tc)
338 {
339 
340 	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
341 }
342 
343 static int
344 arm_tmr_start(struct eventtimer *et, sbintime_t first,
345     sbintime_t period __unused)
346 {
347 	struct arm_tmr_softc *sc;
348 	int counts, ctrl;
349 
350 	sc = (struct arm_tmr_softc *)et->et_priv;
351 
352 	if (first != 0) {
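		/*
		 * first is a 32.32 fixed-point interval (sbintime_t);
		 * frequency * first >> 32 converts it to timer ticks.
		 */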
353 		counts = ((uint32_t)et->et_frequency * first) >> 32;
354 		ctrl = get_ctrl(sc->physical_sys);
355 		ctrl &= ~GT_CTRL_INT_MASK;
356 		ctrl |= GT_CTRL_ENABLE;
357 		set_tval(counts, sc->physical_sys);
358 		set_ctrl(ctrl, sc->physical_sys);
359 		return (0);
360 	}
361 
362 	return (EINVAL);
363 
364 }
365 
366 static void
367 arm_tmr_disable(bool physical)
368 {
369 	int ctrl;
370 
371 	ctrl = get_ctrl(physical);
372 	ctrl &= ~GT_CTRL_ENABLE;
373 	set_ctrl(ctrl, physical);
374 }
375 
376 static int
377 arm_tmr_stop(struct eventtimer *et)
378 {
379 	struct arm_tmr_softc *sc;
380 
381 	sc = (struct arm_tmr_softc *)et->et_priv;
382 	arm_tmr_disable(sc->physical_sys);
383 
384 	return (0);
385 }
386 
387 static int
388 arm_tmr_intr(void *arg)
389 {
390 	struct arm_tmr_softc *sc;
391 	int ctrl;
392 
393 	sc = (struct arm_tmr_softc *)arg;
394 	ctrl = get_ctrl(sc->physical_sys);
395 	if (ctrl & GT_CTRL_INT_STAT) {
396 		ctrl |= GT_CTRL_INT_MASK;
397 		set_ctrl(ctrl, sc->physical_sys);
398 	}
399 
400 	if (sc->et.et_active)
401 		sc->et.et_event_cb(&sc->et, sc->et.et_arg);
402 
403 	return (FILTER_HANDLED);
404 }
405 
406 static int
407 arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
408     const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
409 {
410 	struct arm_tmr_irq *irq;
411 
412 	irq = &sc->irqs[sc->irq_count];
413 	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
414 	    &rid, flags);
415 	if (irq->res == NULL) {
416 		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
417 			device_printf(dev,
418 			    "could not allocate irq for %s interrupt '%s'\n",
419 			    (flags & RF_OPTIONAL) != 0 ? "optional" :
420 			    "required", irq_def->name);
421 		}
422 
423 		if ((flags & RF_OPTIONAL) == 0)
424 			return (ENXIO);
425 	} else {
426 		if (bootverbose)
427 			device_printf(dev, "allocated irq for '%s'\n",
428 			    irq_def->name);
429 		irq->rid = rid;
430 		irq->idx = irq_def->idx;
431 		sc->irq_count++;
432 	}
433 
434 	return (0);
435 }
436 
437 #ifdef FDT
438 static int
439 arm_tmr_fdt_probe(device_t dev)
440 {
441 
442 	if (!ofw_bus_status_okay(dev))
443 		return (ENXIO);
444 
445 	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
446 		device_set_desc(dev, "ARMv8 Generic Timer");
447 		return (BUS_PROBE_DEFAULT);
448 	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
449 		device_set_desc(dev, "ARMv7 Generic Timer");
450 		return (BUS_PROBE_DEFAULT);
451 	}
452 
453 	return (ENXIO);
454 }
455 
456 static int
457 arm_tmr_fdt_attach(device_t dev)
458 {
459 	struct arm_tmr_softc *sc;
460 	const struct arm_tmr_irq_defs *irq_def;
461 	size_t i;
462 	phandle_t node;
463 	int error, rid;
464 	bool has_names;
465 
466 	sc = device_get_softc(dev);
467 	node = ofw_bus_get_node(dev);
468 
469 	has_names = OF_hasprop(node, "interrupt-names");
470 	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
471 		int flags;
472 
473 		/*
474 		 * If we don't have names to go off of, we assume that they're
475 		 * in the "usual" order with sec-phys first and allocate by idx.
476 		 */
477 		irq_def = &arm_tmr_irq_defs[i];
478 		rid = irq_def->idx;
479 		flags = irq_def->flags;
480 		if (has_names) {
481 			error = ofw_bus_find_string_index(node,
482 			    "interrupt-names", irq_def->name, &rid);
483 
484 			/*
485 			 * If we have names, missing a name means we don't
486 			 * have it.
487 			 */
488 			if (error != 0) {
489 				/*
490 				 * Could be noisy on a lot of platforms for no
491 				 * good cause.
492 				 */
493 				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
494 					device_printf(dev,
495 					    "could not find irq for %s interrupt '%s'\n",
496 					    (flags & RF_OPTIONAL) != 0 ?
497 					    "optional" : "required",
498 					    irq_def->name);
499 				}
500 
501 				if ((flags & RF_OPTIONAL) == 0)
502 					goto out;
503 
504 				continue;
505 			}
506 
507 			/*
508 			 * Warn about failing to activate if we did actually
509 			 * have the name present.
510 			 */
511 			flags &= ~RF_OPTIONAL;
512 		}
513 
514 		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
515 		if (error != 0)
516 			goto out;
517 	}
518 
519 	error = arm_tmr_attach(dev);
520 out:
521 	if (error != 0) {
522 		for (i = 0; i < sc->irq_count; i++) {
523 			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
524 			    sc->irqs[i].res);
525 		}
526 	}
527 
528 	return (error);
529 
530 }
531 #endif
532 
533 #ifdef DEV_ACPI
534 static void
535 arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
536 {
537 
538 	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
539 }
540 
541 static void
542 arm_tmr_acpi_identify(driver_t *driver, device_t parent)
543 {
544 	ACPI_TABLE_GTDT *gtdt;
545 	vm_paddr_t physaddr;
546 	device_t dev;
547 
548 	physaddr = acpi_find_table(ACPI_SIG_GTDT);
549 	if (physaddr == 0)
550 		return;
551 
552 	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
553 	if (gtdt == NULL) {
554 		device_printf(parent, "gic: Unable to map the GTDT\n");
555 		return;
556 	}
557 
558 	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
559 	    "generic_timer", -1);
560 	if (dev == NULL) {
561 		device_printf(parent, "add gic child failed\n");
562 		goto out;
563 	}
564 
565 	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
566 	    gtdt->SecureEl1Interrupt);
567 	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
568 	    gtdt->NonSecureEl1Interrupt);
569 	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
570 	    gtdt->VirtualTimerInterrupt);
571 	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
572 	    gtdt->NonSecureEl2Interrupt);
573 
574 out:
575 	acpi_unmap_table(gtdt);
576 }
577 
578 static int
579 arm_tmr_acpi_probe(device_t dev)
580 {
581 
582 	device_set_desc(dev, "ARM Generic Timer");
583 	return (BUS_PROBE_NOWILDCARD);
584 }
585 
586 static int
587 arm_tmr_acpi_attach(device_t dev)
588 {
589 	const struct arm_tmr_irq_defs *irq_def;
590 	struct arm_tmr_softc *sc;
591 	int error;
592 
593 	sc = device_get_softc(dev);
594 	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
595 		irq_def = &arm_tmr_irq_defs[i];
596 		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
597 		    irq_def->flags);
598 		if (error != 0)
599 			goto out;
600 	}
601 
602 	error = arm_tmr_attach(dev);
603 out:
604 	if (error != 0) {
605 		for (int i = 0; i < sc->irq_count; i++) {
606 			bus_release_resource(dev, SYS_RES_IRQ,
607 			    sc->irqs[i].rid, sc->irqs[i].res);
608 		}
609 	}
610 	return (error);
611 }
612 #endif
613 
614 static int
615 arm_tmr_attach(device_t dev)
616 {
617 	struct arm_tmr_softc *sc;
618 #ifdef INVARIANTS
619 	const struct arm_tmr_irq_defs *irq_def;
620 #endif
621 #ifdef FDT
622 	phandle_t node;
623 	pcell_t clock;
624 #endif
625 #ifdef __aarch64__
626 	int user_phys;
627 #endif
628 	int error;
629 	int i, first_timer, last_timer;
630 
631 	sc = device_get_softc(dev);
632 	if (arm_tmr_sc)
633 		return (ENXIO);
634 
635 	sc->get_cntxct = &get_cntxct;
636 #ifdef FDT
637 	/* Get the base clock frequency */
638 	node = ofw_bus_get_node(dev);
639 	if (node > 0) {
640 		error = OF_getencprop(node, "clock-frequency", &clock,
641 		    sizeof(clock));
642 		if (error > 0)
643 			sc->clkfreq = clock;
644 
645 		if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
646 			sc->get_cntxct = &get_cntxct_a64_unstable;
647 			if (bootverbose)
648 				device_printf(dev,
649 				    "Enabling allwinner unstable timer workaround\n");
650 		}
651 	}
652 #endif
653 
654 	if (sc->clkfreq == 0) {
655 		/* Try to get clock frequency from timer */
656 		sc->clkfreq = get_freq();
657 	}
658 
659 	if (sc->clkfreq == 0) {
660 		device_printf(dev, "No clock frequency specified\n");
661 		return (ENXIO);
662 	}
663 
664 #ifdef INVARIANTS
665 	/* Confirm that non-optional irqs were allocated before coming in. */
666 	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
667 		int j;
668 
669 		irq_def = &arm_tmr_irq_defs[i];
670 
671 		/* Skip optional interrupts */
672 		if ((irq_def->flags & RF_OPTIONAL) != 0)
673 			continue;
674 
675 		for (j = 0; j < sc->irq_count; j++) {
676 			if (sc->irqs[j].idx == irq_def->idx)
677 				break;
678 		}
679 		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
680 		    __func__, irq_def->name));
681 	}
682 #endif
683 
684 #ifdef __aarch64__
685 	if (IN_VHE) {
686 		/*
687 		 * The kernel is running at EL2. The EL0 timer registers are
688 		 * re-mapped to the EL2 version. Because of this we need to
689 		 * use the EL2 interrupt.
690 		 */
691 		sc->physical_sys = true;
692 		first_timer = GT_HYP_PHYS;
693 		last_timer = GT_HYP_PHYS;
694 	} else if (!HAS_PHYS) {
695 		/*
696 		 * Use the virtual timer when we can't use the hypervisor.
697 		 * A hypervisor guest may change the virtual timer registers
698 		 * while executing so any use of the virtual timer interrupt
699 		 * needs to be coordinated with the virtual machine manager.
700 		 */
701 		sc->physical_sys = false;
702 		first_timer = GT_VIRT;
703 		last_timer = GT_VIRT;
704 	} else
705 #endif
706 	/* Otherwise set up the secure and non-secure physical timers. */
707 	{
708 		sc->physical_sys = true;
709 		first_timer = GT_PHYS_SECURE;
710 		last_timer = GT_PHYS_NONSECURE;
711 	}
712 
713 #ifdef __aarch64__
714 	/*
715 	 * The virtual timer is always available on arm and arm64, tell
716 	 * userspace to use it.
717 	 */
718 	sc->physical_user = false;
719 	/* Allow use of the physical counter in userspace when available */
720 	if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
721 	    user_phys != 0)
722 		sc->physical_user = sc->physical_sys;
723 #else
724 	/*
725 	 * The virtual timer depends on setting cntvoff from the hypervisor
726 	 * privilege level/el2, however this is only set on arm64.
727 	 */
728 	sc->physical_user = true;
729 #endif
730 
731 	arm_tmr_sc = sc;
732 
733 	/* Setup secure, non-secure and virtual IRQs handler */
734 	for (i = 0; i < sc->irq_count; i++) {
735 		/* Only enable IRQs on timers we expect to use */
736 		if (sc->irqs[i].idx < first_timer ||
737 		    sc->irqs[i].idx > last_timer)
738 			continue;
739 		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
740 		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
741 		if (error) {
742 			device_printf(dev, "Unable to alloc int resource.\n");
743 			for (int j = 0; j < i; j++)
744 				bus_teardown_intr(dev, sc->irqs[j].res,
745 				    &sc->irqs[j].ihl);
746 			return (ENXIO);
747 		}
748 	}
749 
750 	/* Disable the timers until we are ready */
751 	arm_tmr_disable(false);
752 	if (HAS_PHYS)
753 		arm_tmr_disable(true);
754 
755 	arm_tmr_timecount.tc_frequency = sc->clkfreq;
756 	tc_init(&arm_tmr_timecount);
757 
758 	sc->et.et_name = "ARM MPCore Eventtimer";
759 	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
760 	sc->et.et_quality = 1000;
761 
762 	sc->et.et_frequency = sc->clkfreq;
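	/*
	 * Convert the 16-tick minimum and (2^32 - 2)-tick maximum
	 * programmable intervals into 32.32 sbintime_t periods.
	 */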
763 	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
764 	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
765 	sc->et.et_start = arm_tmr_start;
766 	sc->et.et_stop = arm_tmr_stop;
767 	sc->et.et_priv = sc;
768 	et_register(&sc->et);
769 
770 #if defined(__arm__)
771 	arm_set_delay(arm_tmr_do_delay, sc);
772 #endif
773 
774 	return (0);
775 }
776 
777 #ifdef FDT
778 static device_method_t arm_tmr_fdt_methods[] = {
779 	DEVMETHOD(device_probe,		arm_tmr_fdt_probe),
780 	DEVMETHOD(device_attach,	arm_tmr_fdt_attach),
781 	{ 0, 0 }
782 };
783 
784 static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
785     sizeof(struct arm_tmr_softc));
786 
787 EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
788     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
789 EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
790     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
791 #endif
792 
793 #ifdef DEV_ACPI
794 static device_method_t arm_tmr_acpi_methods[] = {
795 	DEVMETHOD(device_identify,	arm_tmr_acpi_identify),
796 	DEVMETHOD(device_probe,		arm_tmr_acpi_probe),
797 	DEVMETHOD(device_attach,	arm_tmr_acpi_attach),
798 	{ 0, 0 }
799 };
800 
801 static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
802     sizeof(struct arm_tmr_softc));
803 
804 EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
805     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
806 #endif
807 
808 static void
809 arm_tmr_do_delay(int usec, void *arg)
810 {
811 	struct arm_tmr_softc *sc = arg;
812 	int32_t counts, counts_per_usec;
813 	uint32_t first, last;
814 
815 	/* Get the number of times to count */
816 	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
817 
818 	/*
819 	 * Clamp the timeout at a maximum value (about 32 seconds with
820 	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
821 	 * near that length of time and if they are, they should be hung
822 	 * out to dry.
823 	 */
824 	if (usec >= (0x80000000U / counts_per_usec))
825 		counts = (0x80000000U / counts_per_usec) - 1;
826 	else
827 		counts = usec * counts_per_usec;
828 
829 	first = sc->get_cntxct(sc->physical_sys);
830 
831 	while (counts > 0) {
832 		last = sc->get_cntxct(sc->physical_sys);
833 		counts -= (int32_t)(last - first);
834 		first = last;
835 	}
836 }
837 
838 #if defined(__aarch64__)
839 void
840 DELAY(int usec)
841 {
842 	int32_t counts;
843 
844 	TSENTER();
845 	/*
846 	 * Check the timers are setup, if not just
847 	 * Check that the timers are set up; if not, just
848 	 * use a for loop in the meantime
849 	if (arm_tmr_sc == NULL) {
850 		for (; usec > 0; usec--)
851 			for (counts = 200; counts > 0; counts--)
852 				/*
853 				 * Prevent the compiler from optimizing
854 				 * out the loop
855 				 */
856 				cpufunc_nullop();
857 	} else
858 		arm_tmr_do_delay(usec, arm_tmr_sc);
859 	TSEXIT();
860 }
861 #endif
862 
863 static uint32_t
864 arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
865     struct timecounter *tc)
866 {
867 
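	/*
	 * Publish counter parameters so the vdso time functions can read
	 * the counter directly from userspace; th_physical tells them
	 * whether to use the physical or the virtual counter.
	 */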
868 	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
869 	vdso_th->th_physical = arm_tmr_sc->physical_user;
870 	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
871 	return (1);
872 }
873