xref: /freebsd/sys/arm/arm/generic_timer.c (revision 43b3e755d07576da8a169a2d000d0f4b4ce33f19)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
6  * All rights reserved.
7  *
8  * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the company nor the name of the author may be used to
19  *    endorse or promote products derived from this software without specific
20  *    prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /**
36  *      Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37  */
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/rman.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 #include <sys/smp.h>
52 #include <sys/vdso.h>
53 #include <sys/watchdog.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 #include <machine/machdep.h>
59 #include <machine/md_var.h>
60 
61 #if defined(__aarch64__)
62 #include <machine/undefined.h>
63 #include <machine/cpufunc.h>
64 #include <machine/cpu_feat.h>
65 #endif
66 
67 #ifdef FDT
68 #include <dev/ofw/openfirm.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 #endif
72 
73 #ifdef DEV_ACPI
74 #include <contrib/dev/acpica/include/acpi.h>
75 #include <dev/acpica/acpivar.h>
76 #endif
77 
78 #define	GT_PHYS_SECURE		0
79 #define	GT_PHYS_NONSECURE	1
80 #define	GT_VIRT			2
81 #define	GT_HYP_PHYS		3
82 #define	GT_HYP_VIRT		4
83 #define	GT_IRQ_COUNT		5
84 
85 #define	GT_CTRL_ENABLE		(1 << 0)
86 #define	GT_CTRL_INT_MASK	(1 << 1)
87 #define	GT_CTRL_INT_STAT	(1 << 2)
88 #define	GT_REG_CTRL		0
89 #define	GT_REG_TVAL		1
90 
91 #define	GT_CNTKCTL_PL0PTEN	(1 << 9) /* PL0 Physical timer reg access */
92 #define	GT_CNTKCTL_PL0VTEN	(1 << 8) /* PL0 Virtual timer reg access */
93 #define	GT_CNTKCTL_EVNTI	(0xf << 4) /* Virtual counter event bits */
94 #define	GT_CNTKCTL_EVNTDIR	(1 << 3) /* Virtual counter event transition */
95 #define	GT_CNTKCTL_EVNTEN	(1 << 2) /* Enables virtual counter events */
96 #define	GT_CNTKCTL_PL0VCTEN	(1 << 1) /* PL0 CNTVCT and CNTFRQ access */
97 #define	GT_CNTKCTL_PL0PCTEN	(1 << 0) /* PL0 CNTPCT and CNTFRQ access */
98 
99 #if defined(__aarch64__)
100 static bool __read_mostly enable_wfxt = false;
101 #endif
102 
103 struct arm_tmr_softc;
104 
/* State for one of the timer's interrupt resources. */
struct arm_tmr_irq {
	struct resource	*res;	/* Allocated IRQ resource */
	void		*ihl;	/* Cookie returned by bus_setup_intr() */
	int		 rid;	/* Resource ID the IRQ was allocated with */
	int		 idx;	/* GT_* index identifying which timer line */
};

/* Per-device softc for the generic timer. */
struct arm_tmr_softc {
	struct arm_tmr_irq	irqs[GT_IRQ_COUNT];	/* Allocated IRQs, packed */
	uint64_t		(*get_cntxct)(bool);	/* Counter read routine */
	uint32_t		clkfreq;		/* Counter frequency, Hz */
	int			irq_count;		/* Valid entries in irqs[] */
	struct eventtimer	et;			/* Registered event timer */
	bool			physical_sys;		/* Kernel uses physical timer */
	bool			physical_user;		/* Userspace may read CNTPCT */
};
121 
122 static struct arm_tmr_softc *arm_tmr_sc = NULL;
123 
/*
 * Descriptions of the timer interrupt lines.  Entries without RF_OPTIONAL
 * must be present for attach to succeed.  For FDT, the order here matches
 * the conventional "interrupts" ordering used when no "interrupt-names"
 * property exists.
 */
static const struct arm_tmr_irq_defs {
	int idx;		/* GT_* index */
	const char *name;	/* FDT interrupt-names entry */
	int flags;		/* Resource flags for allocation */
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};
155 
156 static int arm_tmr_attach(device_t);
157 
158 static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
159     struct timecounter *tc);
160 static void arm_tmr_do_delay(int usec, void *);
161 
162 static timecounter_get_t arm_tmr_get_timecount;
163 
/*
 * Timecounter backed by the generic timer's system counter.  The real
 * frequency is filled in during attach.
 */
static struct timecounter arm_tmr_timecount = {
	.tc_name           = "ARM MPCore Timecounter",
	.tc_get_timecount  = arm_tmr_get_timecount,
	.tc_poll_pps       = NULL,
	.tc_counter_mask   = ~0u,
	.tc_frequency      = 0,	/* Set from sc->clkfreq in arm_tmr_attach() */
	.tc_quality        = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};
173 
174 #ifdef __arm__
175 #define	get_el0(x)	cp15_## x ##_get()
176 #define	get_el1(x)	cp15_## x ##_get()
177 #define	set_el0(x, val)	cp15_## x ##_set(val)
178 #define	set_el1(x, val)	cp15_## x ##_set(val)
179 #define	HAS_PHYS	true
180 #define	IN_VHE		false
181 #else /* __aarch64__ */
182 #define	get_el0(x)	READ_SPECIALREG(x ##_el0)
183 #define	get_el1(x)	READ_SPECIALREG(x ##_el1)
184 #define	set_el0(x, val)	WRITE_SPECIALREG(x ##_el0, val)
185 #define	set_el1(x, val)	WRITE_SPECIALREG(x ##_el1, val)
186 #define	HAS_PHYS	has_hyp()
187 #define	IN_VHE		in_vhe()
188 #endif
189 
/* Return the counter frequency reported by the CNTFRQ register. */
static int
get_freq(void)
{
	return (get_el0(cntfrq));
}
195 
196 #ifdef FDT
197 static uint64_t
get_cntxct_a64_unstable(bool physical)198 get_cntxct_a64_unstable(bool physical)
199 {
200 	uint64_t val;
201 
202 	isb();
203 	if (physical) {
204 		do {
205 			val = get_el0(cntpct);
206 		}
207 		while (((val + 1) & 0x7FF) <= 1);
208 	}
209 	else {
210 		do {
211 			val = get_el0(cntvct);
212 		}
213 		while (((val + 1) & 0x7FF) <= 1);
214 	}
215 
216 	return (val);
217 }
218 #endif
219 
220 static uint64_t
get_cntxct(bool physical)221 get_cntxct(bool physical)
222 {
223 	uint64_t val;
224 
225 	isb();
226 	if (physical)
227 		val = get_el0(cntpct);
228 	else
229 		val = get_el0(cntvct);
230 
231 	return (val);
232 }
233 
234 static int
set_ctrl(uint32_t val,bool physical)235 set_ctrl(uint32_t val, bool physical)
236 {
237 
238 	if (physical)
239 		set_el0(cntp_ctl, val);
240 	else
241 		set_el0(cntv_ctl, val);
242 	isb();
243 
244 	return (0);
245 }
246 
247 static int
set_tval(uint32_t val,bool physical)248 set_tval(uint32_t val, bool physical)
249 {
250 
251 	if (physical)
252 		set_el0(cntp_tval, val);
253 	else
254 		set_el0(cntv_tval, val);
255 	isb();
256 
257 	return (0);
258 }
259 
260 static int
get_ctrl(bool physical)261 get_ctrl(bool physical)
262 {
263 	uint32_t val;
264 
265 	if (physical)
266 		val = get_el0(cntp_ctl);
267 	else
268 		val = get_el0(cntv_ctl);
269 
270 	return (val);
271 }
272 
/*
 * Per-CPU smp_rendezvous() callback: configure CNTKCTL to control EL0
 * (userspace) access to the counter/timer registers.  Timer register
 * access and counter events are disabled; virtual counter reads are
 * always allowed, physical counter reads only when physical_user is set.
 */
static void
setup_user_access(void *arg __unused)
{
	uint32_t cntkctl;

	cntkctl = get_el1(cntkctl);
	/* Disable EL0 timer register access and counter event streams. */
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always allow EL0 reads of the virtual counter (CNTVCT/CNTFRQ). */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Also allow EL0 reads of the physical counter if supported */
	if (arm_tmr_sc->physical_user) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	}
	set_el1(cntkctl, cntkctl);
	isb();
}
290 
291 #ifdef __aarch64__
/*
 * Undefined-instruction handler emulating EL0 reads of CNTPCT_EL0 (which
 * trap when PL0PCTEN is clear).  The emulation returns the *virtual*
 * counter value instead, then skips the trapped instruction.  Returns 1
 * if the instruction was handled, 0 to let other handlers try.
 */
static int
cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t val;
	int reg;

	/* Only handle MRS instructions... */
	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/* ...that read CNTPCT_EL0. */
	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))
		return (0);

	/*
	 * Write the virtual counter into the destination register:
	 * x0-x29 live in tf_x, x30 is the link register.  A destination
	 * of xzr (31) is discarded, matching architectural behaviour.
	 */
	reg = MRS_REGISTER(insn);
	val = READ_SPECIALREG(cntvct_el0);
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {
		frame->tf_lr = val;
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	return (1);
}
321 #endif
322 
/*
 * Late (SI_SUB_SMP) initialization: run setup_user_access() on every CPU,
 * and on arm64 optionally install the CNTPCT emulation handler when the
 * hw.emulate_phys_counter tunable is set.  A no-op if no generic timer
 * attached.
 */
static void
tmr_setup_user_access(void *arg __unused)
{
#ifdef __aarch64__
	int emulate;
#endif

	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
#ifdef __aarch64__
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
		    emulate != 0) {
			install_undef_handler(true, cntpct_handler);
		}
#endif
	}
}
SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
341 
/*
 * Timecounter read method: return the low 32 bits of whichever counter
 * (physical or virtual) the kernel selected at attach time.
 */
static unsigned
arm_tmr_get_timecount(struct timecounter *tc)
{

	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
}
348 
/*
 * Event timer start method.  Program a one-shot expiry 'first' into the
 * system timer's TVAL register and enable/unmask it.  Only one-shot mode
 * is supported; a zero 'first' returns EINVAL.
 */
static int
arm_tmr_start(struct eventtimer *et, sbintime_t first,
    sbintime_t period __unused)
{
	struct arm_tmr_softc *sc;
	int counts, ctrl;

	sc = (struct arm_tmr_softc *)et->et_priv;

	if (first != 0) {
		/* Convert the 32.32 fixed-point sbintime_t to timer ticks. */
		counts = ((uint32_t)et->et_frequency * first) >> 32;
		ctrl = get_ctrl(sc->physical_sys);
		ctrl &= ~GT_CTRL_INT_MASK;	/* Unmask the interrupt */
		ctrl |= GT_CTRL_ENABLE;		/* and enable the timer. */
		set_tval(counts, sc->physical_sys);
		set_ctrl(ctrl, sc->physical_sys);
		return (0);
	}

	return (EINVAL);

}
371 
372 static void
arm_tmr_disable(bool physical)373 arm_tmr_disable(bool physical)
374 {
375 	int ctrl;
376 
377 	ctrl = get_ctrl(physical);
378 	ctrl &= ~GT_CTRL_ENABLE;
379 	set_ctrl(ctrl, physical);
380 }
381 
382 static int
arm_tmr_stop(struct eventtimer * et)383 arm_tmr_stop(struct eventtimer *et)
384 {
385 	struct arm_tmr_softc *sc;
386 
387 	sc = (struct arm_tmr_softc *)et->et_priv;
388 	arm_tmr_disable(sc->physical_sys);
389 
390 	return (0);
391 }
392 
/*
 * Timer interrupt filter.  Mask the interrupt (it stays pending in the
 * control register until re-armed by arm_tmr_start()) and deliver the
 * event timer callback if one is active.
 */
static int
arm_tmr_intr(void *arg)
{
	struct arm_tmr_softc *sc;
	int ctrl;

	sc = (struct arm_tmr_softc *)arg;
	ctrl = get_ctrl(sc->physical_sys);
	if (ctrl & GT_CTRL_INT_STAT) {
		/* Mask the interrupt so it does not fire again. */
		ctrl |= GT_CTRL_INT_MASK;
		set_ctrl(ctrl, sc->physical_sys);
	}

	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}
411 
/*
 * Allocate one timer IRQ resource and record it in the next free slot of
 * sc->irqs[].  Failure to allocate an RF_OPTIONAL interrupt is not an
 * error (it is merely logged under bootverbose); a missing required
 * interrupt returns ENXIO.
 */
static int
arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
    const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
{
	struct arm_tmr_irq *irq;

	irq = &sc->irqs[sc->irq_count];
	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, flags);
	if (irq->res == NULL) {
		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
			device_printf(dev,
			    "could not allocate irq for %s interrupt '%s'\n",
			    (flags & RF_OPTIONAL) != 0 ? "optional" :
			    "required", irq_def->name);
		}

		if ((flags & RF_OPTIONAL) == 0)
			return (ENXIO);
	} else {
		if (bootverbose)
			device_printf(dev, "allocated irq for '%s'\n",
			    irq_def->name);
		/* Record the allocation; irq_count only grows on success. */
		irq->rid = rid;
		irq->idx = irq_def->idx;
		sc->irq_count++;
	}

	return (0);
}
442 
443 #ifdef FDT
444 static int
arm_tmr_fdt_probe(device_t dev)445 arm_tmr_fdt_probe(device_t dev)
446 {
447 
448 	if (!ofw_bus_status_okay(dev))
449 		return (ENXIO);
450 
451 	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
452 		device_set_desc(dev, "ARMv8 Generic Timer");
453 		return (BUS_PROBE_DEFAULT);
454 	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
455 		device_set_desc(dev, "ARMv7 Generic Timer");
456 		return (BUS_PROBE_DEFAULT);
457 	}
458 
459 	return (ENXIO);
460 }
461 
/*
 * FDT attach: allocate the timer interrupts (by "interrupt-names" when
 * the property exists, otherwise by conventional index) and hand off to
 * the common arm_tmr_attach().  On failure, release any IRQ resources
 * already allocated.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ recorded so far. */
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
537 #endif
538 
539 #ifdef DEV_ACPI
/*
 * Record one GTDT interrupt as a SYS_RES_IRQ resource on the child
 * device, so the later bus_alloc_resource_any() calls can find it.
 */
static void
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
{

	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
}
546 
/*
 * ACPI identify: if the firmware provides a GTDT table, create a
 * "generic_timer" child and populate its IRQ resources from the table's
 * secure/non-secure EL1, virtual, and EL2 interrupt fields.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
	    gtdt->NonSecureEl2Interrupt);

out:
	/* The table mapping is only needed during identify. */
	acpi_unmap_table(gtdt);
}
583 
/*
 * ACPI probe: the device was created by our own identify routine, so
 * only match when explicitly named (BUS_PROBE_NOWILDCARD).
 */
static int
arm_tmr_acpi_probe(device_t dev)
{

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
}
591 
/*
 * ACPI attach: allocate the IRQs recorded by identify (rid == GT_* index)
 * and hand off to the common arm_tmr_attach().  On failure, release any
 * IRQ resources already allocated.
 */
static int
arm_tmr_acpi_attach(device_t dev)
{
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;
	int error;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
		    irq_def->flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ recorded so far. */
		for (int i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->irqs[i].rid, sc->irqs[i].res);
		}
	}
	return (error);
}
618 #endif
619 
/*
 * Common attach, called from the FDT and ACPI front ends after the IRQ
 * resources are allocated.  Determines the counter frequency, picks the
 * physical or virtual timer for kernel use, wires up interrupt handlers,
 * and registers both the timecounter and the per-CPU event timer.  Only
 * one instance may attach (arm_tmr_sc is a singleton).
 */
static int
arm_tmr_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
#ifdef INVARIANTS
	const struct arm_tmr_irq_defs *irq_def;
#endif
#ifdef FDT
	phandle_t node;
	pcell_t clock;
#endif
#ifdef __aarch64__
	int user_phys;
#endif
	int error;
	int i, first_timer, last_timer;

	sc = device_get_softc(dev);
	/* Refuse a second instance; the driver state is global. */
	if (arm_tmr_sc)
		return (ENXIO);

	sc->get_cntxct = &get_cntxct;
#ifdef FDT
	/* Get the base clock frequency */
	node = ofw_bus_get_node(dev);
	if (node > 0) {
		error = OF_getencprop(node, "clock-frequency", &clock,
		    sizeof(clock));
		if (error > 0)
			sc->clkfreq = clock;

		/* Select the re-read workaround on Allwinner A64 hardware. */
		if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
			sc->get_cntxct = &get_cntxct_a64_unstable;
			if (bootverbose)
				device_printf(dev,
				    "Enabling allwinner unstable timer workaround\n");
		}
	}
#endif

	if (sc->clkfreq == 0) {
		/* Try to get clock frequency from timer */
		sc->clkfreq = get_freq();
	}

	if (sc->clkfreq == 0) {
		device_printf(dev, "No clock frequency specified\n");
		return (ENXIO);
	}

#ifdef INVARIANTS
	/* Confirm that non-optional irqs were allocated before coming in. */
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int j;

		irq_def = &arm_tmr_irq_defs[i];

		/* Skip optional interrupts */
		if ((irq_def->flags & RF_OPTIONAL) != 0)
			continue;

		for (j = 0; j < sc->irq_count; j++) {
			if (sc->irqs[j].idx == irq_def->idx)
				break;
		}
		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
		    __func__, irq_def->name));
	}
#endif

#ifdef __aarch64__
	if (IN_VHE) {
		/*
		 * The kernel is running at EL2. The EL0 timer registers are
		 * re-mapped to the EL2 version. Because of this we need to
		 * use the EL2 interrupt.
		 */
		sc->physical_sys = true;
		first_timer = GT_HYP_PHYS;
		last_timer = GT_HYP_PHYS;
	} else if (!HAS_PHYS) {
		/*
		 * Use the virtual timer when we can't use the hypervisor.
		 * A hypervisor guest may change the virtual timer registers
		 * while executing so any use of the virtual timer interrupt
		 * needs to be coordinated with the virtual machine manager.
		 */
		sc->physical_sys = false;
		first_timer = GT_VIRT;
		last_timer = GT_VIRT;
	} else
#endif
	/* Otherwise set up the secure and non-secure physical timers. */
	{
		sc->physical_sys = true;
		first_timer = GT_PHYS_SECURE;
		last_timer = GT_PHYS_NONSECURE;
	}

#ifdef __aarch64__
	/*
	 * The virtual timer is always available on arm and arm64, tell
	 * userspace to use it.
	 */
	sc->physical_user = false;
	/* Allow use of the physical counter in userspace when available */
	if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
	    user_phys != 0)
		sc->physical_user = sc->physical_sys;
#else
	/*
	 * The virtual timer depends on setting cntvoff from the hypervisor
	 * privilege level/el2, however this is only set on arm64.
	 */
	sc->physical_user = true;
#endif

	arm_tmr_sc = sc;

	/* Setup secure, non-secure and virtual IRQs handler */
	for (i = 0; i < sc->irq_count; i++) {
		/* Only enable IRQs on timers we expect to use */
		if (sc->irqs[i].idx < first_timer ||
		    sc->irqs[i].idx > last_timer)
			continue;
		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
		if (error) {
			device_printf(dev, "Unable to alloc int resource.\n");
			/* Tear down any handlers already installed. */
			for (int j = 0; j < i; j++)
				bus_teardown_intr(dev, sc->irqs[j].res,
				    &sc->irqs[j].ihl);
			return (ENXIO);
		}
	}

	/* Disable the timers until we are ready */
	arm_tmr_disable(false);
	if (HAS_PHYS)
		arm_tmr_disable(true);

	arm_tmr_timecount.tc_frequency = sc->clkfreq;
	tc_init(&arm_tmr_timecount);

	sc->et.et_name = "ARM MPCore Eventtimer";
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_quality = 1000;

	sc->et.et_frequency = sc->clkfreq;
	/* Min/max periods expressed as 32.32 fixed-point sbintime_t. */
	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = arm_tmr_start;
	sc->et.et_stop = arm_tmr_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

#if defined(__arm__)
	/* On arm32, route DELAY() through the counter-based delay loop. */
	arm_set_delay(arm_tmr_do_delay, sc);
#endif

	return (0);
}
782 
783 #ifdef FDT
784 static device_method_t arm_tmr_fdt_methods[] = {
785 	DEVMETHOD(device_probe,		arm_tmr_fdt_probe),
786 	DEVMETHOD(device_attach,	arm_tmr_fdt_attach),
787 	{ 0, 0 }
788 };
789 
790 static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
791     sizeof(struct arm_tmr_softc));
792 
793 EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
794     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
795 EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
796     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
797 #endif
798 
799 #ifdef DEV_ACPI
800 static device_method_t arm_tmr_acpi_methods[] = {
801 	DEVMETHOD(device_identify,	arm_tmr_acpi_identify),
802 	DEVMETHOD(device_probe,		arm_tmr_acpi_probe),
803 	DEVMETHOD(device_attach,	arm_tmr_acpi_attach),
804 	{ 0, 0 }
805 };
806 
807 static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
808     sizeof(struct arm_tmr_softc));
809 
810 EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
811     BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
812 #endif
813 
814 static int64_t
arm_tmr_get_counts(int usec)815 arm_tmr_get_counts(int usec)
816 {
817 	int64_t counts, counts_per_usec;
818 
819 	/* Get the number of times to count */
820 	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
821 
822 	/*
823 	 * Clamp the timeout at a maximum value (about 32 seconds with
824 	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
825 	 * near that length of time and if they are, they should be hung
826 	 * out to dry.
827 	 */
828 	if (usec >= (0x80000000U / counts_per_usec))
829 		counts = (0x80000000U / counts_per_usec) - 1;
830 	else
831 		counts = usec * counts_per_usec;
832 
833 	return counts;
834 }
835 
/*
 * Counter-based busy-wait for 'usec' microseconds.  Spins until the
 * counter has advanced by the computed tick count; on arm64 with
 * FEAT_WFXT enabled, each iteration sleeps in wfet() until the absolute
 * deadline instead of spinning hot.
 */
static void
arm_tmr_do_delay(int usec, void *arg)
{
	struct arm_tmr_softc *sc = arg;
	int64_t counts;
	uint64_t first;
#if defined(__aarch64__)
	int64_t end;
#endif

	counts = arm_tmr_get_counts(usec);
	first = sc->get_cntxct(sc->physical_sys);
#if defined(__aarch64__)
	/* Absolute counter deadline for wfet(). */
	end = first + counts;
#endif

	/* Unsigned subtraction handles counter wraparound. */
	while ((sc->get_cntxct(sc->physical_sys) - first) < counts) {
#if defined(__aarch64__)
		if (enable_wfxt)
			wfet(end);
#endif
	}
}
859 
860 #if defined(__aarch64__)
/*
 * arm64 DELAY(9) implementation.  Uses the generic timer once it has
 * attached; before that, falls back to a crude calibration-free busy
 * loop of 200 no-op calls per microsecond.
 */
void
DELAY(int usec)
{
	int32_t counts;

	TSENTER();
	/*
	 * We have two options for a delay: using the timer, or using the wfet
	 * instruction. However, both of these are dependent on timers being
	 * setup, and if they're not just use a loop for the meantime.
	*/
	if (arm_tmr_sc != NULL) {
		arm_tmr_do_delay(usec, arm_tmr_sc);
	} else {
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/* Prevent the compiler from optimizing out the loop */
				cpufunc_nullop();
	}
	TSEXIT();
}
882 
/*
 * CPU feature check for FEAT_WFXT: report true when ID_AA64ISAR2_EL1
 * advertises the WFET/WFIT instructions.
 */
static bool
wfxt_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	uint64_t id_aa64isar2;

	if (!get_kernel_reg(ID_AA64ISAR2_EL1, &id_aa64isar2))
		return (false);
	return (ID_AA64ISAR2_WFxT_VAL(id_aa64isar2) != ID_AA64ISAR2_WFxT_NONE);
}
892 
/*
 * CPU feature enable hook for FEAT_WFXT: allow arm_tmr_do_delay() to use
 * wfet() instead of spinning.
 */
static void
wfxt_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* will be called if wfxt_check returns true */
	enable_wfxt = true;
}
901 
/*
 * Register FEAT_WFXT with the CPU feature framework: system-wide,
 * evaluated after device attach.
 */
static struct cpu_feat feat_wfxt = {
	.feat_name		= "FEAT_WFXT",
	.feat_check		= wfxt_check,
	.feat_enable		= wfxt_enable,
	.feat_flags		= CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM,
};
DATA_SET(cpu_feat_set, feat_wfxt);
909 #endif
910 
/*
 * Export timecounter state to the shared page so userspace (libc/vdso)
 * can read the counter directly: select the generic-timer algorithm and
 * tell userspace whether to read the physical or virtual counter.
 * Returns 1 to indicate the timehands are usable from userspace.
 */
static uint32_t
arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
	vdso_th->th_physical = arm_tmr_sc->physical_user;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}
921