xref: /freebsd/sys/arm/arm/generic_timer.c (revision 0efa0fe26b9d980b2862bb58f8484f0123cff19f)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
6  * All rights reserved.
7  *
8  * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the company nor the name of the author may be used to
19  *    endorse or promote products derived from this software without specific
20  *    prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /**
36  *      Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37  */
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/rman.h>
49 #include <sys/timeet.h>
50 #include <sys/timetc.h>
51 #include <sys/smp.h>
52 #include <sys/vdso.h>
53 #include <sys/watchdog.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 #include <machine/machdep.h>
59 #include <machine/md_var.h>
60 
61 #if defined(__aarch64__)
62 #include <machine/undefined.h>
63 #include <machine/cpufunc.h>
64 #include <machine/cpu_feat.h>
65 #endif
66 
67 #ifdef FDT
68 #include <dev/ofw/openfirm.h>
69 #include <dev/ofw/ofw_bus.h>
70 #include <dev/ofw/ofw_bus_subr.h>
71 #endif
72 
73 #ifdef DEV_ACPI
74 #include <contrib/dev/acpica/include/acpi.h>
75 #include <dev/acpica/acpivar.h>
76 #endif
77 
78 #define	GT_PHYS_SECURE		0
79 #define	GT_PHYS_NONSECURE	1
80 #define	GT_VIRT			2
81 #define	GT_HYP_PHYS		3
82 #define	GT_HYP_VIRT		4
83 #define	GT_IRQ_COUNT		5
84 
85 #define	GT_CTRL_ENABLE		(1 << 0)
86 #define	GT_CTRL_INT_MASK	(1 << 1)
87 #define	GT_CTRL_INT_STAT	(1 << 2)
88 #define	GT_REG_CTRL		0
89 #define	GT_REG_TVAL		1
90 
91 #define	GT_CNTKCTL_PL0PTEN	(1 << 9) /* PL0 Physical timer reg access */
92 #define	GT_CNTKCTL_PL0VTEN	(1 << 8) /* PL0 Virtual timer reg access */
93 #define	GT_CNTKCTL_EVNTI	(0xf << 4) /* Virtual counter event bits */
94 #define	GT_CNTKCTL_EVNTDIR	(1 << 3) /* Virtual counter event transition */
95 #define	GT_CNTKCTL_EVNTEN	(1 << 2) /* Enables virtual counter events */
96 #define	GT_CNTKCTL_PL0VCTEN	(1 << 1) /* PL0 CNTVCT and CNTFRQ access */
97 #define	GT_CNTKCTL_PL0PCTEN	(1 << 0) /* PL0 CNTPCT and CNTFRQ access */
98 
99 #if defined(__aarch64__)
100 static bool __read_mostly enable_wfxt = false;
101 #endif
102 
struct arm_tmr_softc;

/* One allocated timer interrupt and its installed handler. */
struct arm_tmr_irq {
	struct resource	*res;	/* IRQ resource from bus_alloc_resource_any */
	void		*ihl;	/* cookie returned by bus_setup_intr */
	int		 rid;	/* resource id the IRQ was allocated with */
	int		 idx;	/* GT_* timer index this IRQ serves */
};

/* Per-device state for the generic timer. */
struct arm_tmr_softc {
	struct arm_tmr_irq	irqs[GT_IRQ_COUNT];
	uint64_t		(*get_cntxct)(bool);	/* counter read routine */
	uint32_t		clkfreq;	/* counter frequency, Hz */
	int			irq_count;	/* valid entries in irqs[] */
	struct eventtimer	et;
	bool			physical_sys;	/* kernel uses the physical timer */
	bool			physical_user;	/* userspace may read CNTPCT */
};

/* Singleton; set once by arm_tmr_attach(). */
static struct arm_tmr_softc *arm_tmr_sc = NULL;

/*
 * Description of every timer interrupt the hardware may provide.
 * idx doubles as the positional rid when no "interrupt-names" exist.
 */
static const struct arm_tmr_irq_defs {
	int idx;		/* GT_* timer index */
	const char *name;	/* FDT interrupt-names entry */
	int flags;		/* resource flags; RF_OPTIONAL if absent is OK */
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};
155 
static int arm_tmr_attach(device_t);

static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
static void arm_tmr_do_delay(int usec, void *);

static timecounter_get_t arm_tmr_get_timecount;

/* timecounter(9) descriptor; tc_frequency is filled in at attach time. */
static struct timecounter arm_tmr_timecount = {
	.tc_name           = "ARM MPCore Timecounter",
	.tc_get_timecount  = arm_tmr_get_timecount,
	.tc_poll_pps       = NULL,
	.tc_counter_mask   = ~0u,
	.tc_frequency      = 0,
	.tc_quality        = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};
173 
174 #ifdef __arm__
175 #define	get_el0(x)	cp15_## x ##_get()
176 #define	get_el1(x)	cp15_## x ##_get()
177 #define	set_el0(x, val)	cp15_## x ##_set(val)
178 #define	set_el1(x, val)	cp15_## x ##_set(val)
179 #define	HAS_PHYS	true
180 #define	IN_VHE		false
181 #else /* __aarch64__ */
182 #define	get_el0(x)	READ_SPECIALREG(x ##_el0)
183 #define	get_el1(x)	READ_SPECIALREG(x ##_el1)
184 #define	set_el0(x, val)	WRITE_SPECIALREG(x ##_el0, val)
185 #define	set_el1(x, val)	WRITE_SPECIALREG(x ##_el1, val)
186 #define	HAS_PHYS	has_hyp()
187 #define	IN_VHE		in_vhe()
188 #endif
189 
190 static int
get_freq(void)191 get_freq(void)
192 {
193 	return (get_el0(cntfrq));
194 }
195 
196 #ifdef FDT
197 static uint64_t
get_cntxct_a64_unstable(bool physical)198 get_cntxct_a64_unstable(bool physical)
199 {
200 	uint64_t val;
201 
202 	isb();
203 	if (physical) {
204 		do {
205 			val = get_el0(cntpct);
206 		}
207 		while (((val + 1) & 0x7FF) <= 1);
208 	}
209 	else {
210 		do {
211 			val = get_el0(cntvct);
212 		}
213 		while (((val + 1) & 0x7FF) <= 1);
214 	}
215 
216 	return (val);
217 }
218 #endif
219 
220 static uint64_t
get_cntxct(bool physical)221 get_cntxct(bool physical)
222 {
223 	uint64_t val;
224 
225 	isb();
226 	if (physical)
227 		val = get_el0(cntpct);
228 	else
229 		val = get_el0(cntvct);
230 
231 	return (val);
232 }
233 
234 #ifdef __aarch64__
235 /*
236  * Read the self-syncronized counter. These cannot be read speculatively so
237  * don't need an isb before them.
238  */
239 static uint64_t
get_cntxctss(bool physical)240 get_cntxctss(bool physical)
241 {
242 	uint64_t val;
243 
244 	if (physical)
245 		val = READ_SPECIALREG(CNTPCTSS_EL0_REG);
246 	else
247 		val = READ_SPECIALREG(CNTVCTSS_EL0_REG);
248 
249 	return (val);
250 }
251 #endif
252 
/*
 * Write the control register (CNTP_CTL or CNTV_CTL) of the selected
 * timer.  The isb() makes the write visible before returning.
 */
static int
set_ctrl(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_ctl, val);
	else
		set_el0(cntv_ctl, val);
	isb();

	return (0);
}
265 
/*
 * Program the timer countdown value (CNTP_TVAL or CNTV_TVAL); the
 * timer fires after this many ticks.  The isb() makes the write
 * visible before returning.
 */
static int
set_tval(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_tval, val);
	else
		set_el0(cntv_tval, val);
	isb();

	return (0);
}
278 
279 static int
get_ctrl(bool physical)280 get_ctrl(bool physical)
281 {
282 	uint32_t val;
283 
284 	if (physical)
285 		val = get_el0(cntp_ctl);
286 	else
287 		val = get_el0(cntv_ctl);
288 
289 	return (val);
290 }
291 
/*
 * Per-CPU rendezvous callback: program CNTKCTL_EL1 to control which
 * counter/timer registers userspace (EL0/PL0) may access directly.
 */
static void
setup_user_access(void *arg __unused)
{
	uint32_t cntkctl;

	cntkctl = get_el1(cntkctl);
	/* Revoke EL0 timer-register access and counter event streams. */
	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
	/* Always allow EL0 reads of the virtual counter. */
	cntkctl |= GT_CNTKCTL_PL0VCTEN;
	/* Allow EL0 reads of the physical counter when supported. */
	if (arm_tmr_sc->physical_user) {
		cntkctl |= GT_CNTKCTL_PL0PCTEN;
	}
	set_el1(cntkctl, cntkctl);
	isb();
}
309 
310 #ifdef __aarch64__
/*
 * System-register trap handler that emulates userspace reads of
 * CNTPCT_EL0 (trapped when PL0 physical counter access is disabled)
 * by supplying the virtual counter value instead.  Returns true when
 * the faulting instruction was handled.
 */
static bool
cntpct_handler(uint64_t esr, struct trapframe *frame)
{
	uint64_t val;
	int reg;

	/* Only trapped MSR/MRS system register accesses are ours. */
	if (ESR_ELx_EXCEPTION(esr) != EXCP_MSR)
		return (false);

	/* Must be a read (MRS), not a write. */
	if ((esr & ISS_MSR_DIR) == 0)
		return (false);

	/* Must target CNTPCT_EL0. */
	if ((esr & ISS_MSR_REG_MASK) != CNTPCT_EL0_ISS)
		return (false);

	/* Write the virtual counter into the destination register. */
	reg = ISS_MSR_Rt(esr);
	val = READ_SPECIALREG(cntvct_el0);
	if (reg < nitems(frame->tf_x)) {
		frame->tf_x[reg] = val;
	} else if (reg == 30) {
		frame->tf_lr = val;
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	return (true);
}
342 #endif
343 
/*
 * Once SMP is up, configure EL0 counter access on every CPU and,
 * if the hw.emulate_phys_counter tunable is set, install the trap
 * handler that emulates userspace CNTPCT_EL0 reads.
 */
static void
tmr_setup_user_access(void *arg __unused)
{
#ifdef __aarch64__
	int emulate;
#endif

	if (arm_tmr_sc != NULL) {
		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
#ifdef __aarch64__
		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
		    emulate != 0) {
			install_sys_handler(cntpct_handler);
		}
#endif
	}
}
SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
362 
363 static unsigned
arm_tmr_get_timecount(struct timecounter * tc)364 arm_tmr_get_timecount(struct timecounter *tc)
365 {
366 
367 	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
368 }
369 
/*
 * eventtimer(9) start method.  Only one-shot mode is supported; a
 * request without a first interval returns EINVAL.
 */
static int
arm_tmr_start(struct eventtimer *et, sbintime_t first,
    sbintime_t period __unused)
{
	struct arm_tmr_softc *sc;
	int counts, ctrl;

	sc = (struct arm_tmr_softc *)et->et_priv;

	if (first != 0) {
		/* Convert the sbintime_t (32.32 fixed point) to ticks. */
		counts = ((uint32_t)et->et_frequency * first) >> 32;
		/* Unmask the interrupt and enable the timer. */
		ctrl = get_ctrl(sc->physical_sys);
		ctrl &= ~GT_CTRL_INT_MASK;
		ctrl |= GT_CTRL_ENABLE;
		set_tval(counts, sc->physical_sys);
		set_ctrl(ctrl, sc->physical_sys);
		return (0);
	}

	return (EINVAL);

}
392 
393 static void
arm_tmr_disable(bool physical)394 arm_tmr_disable(bool physical)
395 {
396 	int ctrl;
397 
398 	ctrl = get_ctrl(physical);
399 	ctrl &= ~GT_CTRL_ENABLE;
400 	set_ctrl(ctrl, physical);
401 }
402 
403 static int
arm_tmr_stop(struct eventtimer * et)404 arm_tmr_stop(struct eventtimer *et)
405 {
406 	struct arm_tmr_softc *sc;
407 
408 	sc = (struct arm_tmr_softc *)et->et_priv;
409 	arm_tmr_disable(sc->physical_sys);
410 
411 	return (0);
412 }
413 
/*
 * Timer interrupt filter: acknowledge the timer by masking its
 * interrupt (it stays masked until arm_tmr_start reprograms it) and
 * dispatch the event timer callback.
 */
static int
arm_tmr_intr(void *arg)
{
	struct arm_tmr_softc *sc;
	int ctrl;

	sc = (struct arm_tmr_softc *)arg;
	ctrl = get_ctrl(sc->physical_sys);
	if (ctrl & GT_CTRL_INT_STAT) {
		/* Mask the interrupt so it does not fire again. */
		ctrl |= GT_CTRL_INT_MASK;
		set_ctrl(ctrl, sc->physical_sys);
	}

	if (sc->et.et_active)
		sc->et.et_event_cb(&sc->et, sc->et.et_arg);

	return (FILTER_HANDLED);
}
432 
433 static int
arm_tmr_attach_irq(device_t dev,struct arm_tmr_softc * sc,const struct arm_tmr_irq_defs * irq_def,int rid,int flags)434 arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
435     const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
436 {
437 	struct arm_tmr_irq *irq;
438 
439 	irq = &sc->irqs[sc->irq_count];
440 	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
441 	    &rid, flags);
442 	if (irq->res == NULL) {
443 		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
444 			device_printf(dev,
445 			    "could not allocate irq for %s interrupt '%s'\n",
446 			    (flags & RF_OPTIONAL) != 0 ? "optional" :
447 			    "required", irq_def->name);
448 		}
449 
450 		if ((flags & RF_OPTIONAL) == 0)
451 			return (ENXIO);
452 	} else {
453 		if (bootverbose)
454 			device_printf(dev, "allocated irq for '%s'\n",
455 			    irq_def->name);
456 		irq->rid = rid;
457 		irq->idx = irq_def->idx;
458 		sc->irq_count++;
459 	}
460 
461 	return (0);
462 }
463 
464 #ifdef FDT
465 static int
arm_tmr_fdt_probe(device_t dev)466 arm_tmr_fdt_probe(device_t dev)
467 {
468 
469 	if (!ofw_bus_status_okay(dev))
470 		return (ENXIO);
471 
472 	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
473 		device_set_desc(dev, "ARMv8 Generic Timer");
474 		return (BUS_PROBE_DEFAULT);
475 	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
476 		device_set_desc(dev, "ARMv7 Generic Timer");
477 		return (BUS_PROBE_DEFAULT);
478 	}
479 
480 	return (ENXIO);
481 }
482 
/*
 * FDT attach: locate the timer interrupts either by name (via the
 * "interrupt-names" property) or positionally, allocate them, then
 * hand off to the common attach code.  All IRQ resources allocated so
 * far are released on failure.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ allocated before the failure. */
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
558 #endif
559 
560 #ifdef DEV_ACPI
/* Record one GTDT interrupt as an IRQ resource on the child device. */
static void
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
{

	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
}
567 
/*
 * ACPI identify: if the firmware provides a GTDT table, add a
 * generic_timer child in the timer bus pass and record the interrupt
 * numbers the table describes as resources on that child.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	/* Pass the GTDT interrupt numbers along to the child. */
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
	    gtdt->NonSecureEl2Interrupt);

out:
	acpi_unmap_table(gtdt);
}
604 
/*
 * ACPI probe: NOWILDCARD so we only attach to the child created by
 * arm_tmr_acpi_identify.
 */
static int
arm_tmr_acpi_probe(device_t dev)
{

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
}
612 
/*
 * ACPI attach: allocate the interrupts recorded by identify and hand
 * off to the common attach code, releasing the resources on failure.
 */
static int
arm_tmr_acpi_attach(device_t dev)
{
	const struct arm_tmr_irq_defs *irq_def;
	struct arm_tmr_softc *sc;
	int error;

	sc = device_get_softc(dev);
	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		irq_def = &arm_tmr_irq_defs[i];
		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
		    irq_def->flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	if (error != 0) {
		/* Unwind: release every IRQ allocated before the failure. */
		for (int i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->irqs[i].rid, sc->irqs[i].res);
		}
	}
	return (error);
}
639 #endif
640 
/*
 * Common attach path shared by the FDT and ACPI front ends: determine
 * the counter frequency, choose between the physical and virtual
 * timers, install the timer interrupt handlers, and register the
 * timecounter and event timer with the system.
 */
static int
arm_tmr_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
#ifdef INVARIANTS
	const struct arm_tmr_irq_defs *irq_def;
#endif
#ifdef FDT
	phandle_t node;
	pcell_t clock;
#endif
#ifdef __aarch64__
	uint64_t id_aa64mmfr0_el1;
	int user_phys;
#endif
	int error;
	int i, first_timer, last_timer;

	sc = device_get_softc(dev);
	/* Only one generic timer instance may attach. */
	if (arm_tmr_sc)
		return (ENXIO);

	sc->get_cntxct = &get_cntxct;
#ifdef __aarch64__
	/* Prefer the self-synchronized counter registers (FEAT_ECV). */
	if (get_kernel_reg(ID_AA64MMFR0_EL1, &id_aa64mmfr0_el1) &&
	    ID_AA64MMFR0_ECV_VAL(id_aa64mmfr0_el1) >= ID_AA64MMFR0_ECV_IMPL)
		sc->get_cntxct = &get_cntxctss;
#endif
#ifdef FDT
	/* Get the base clock frequency */
	node = ofw_bus_get_node(dev);
	if (node > 0) {
		error = OF_getencprop(node, "clock-frequency", &clock,
		    sizeof(clock));
		if (error > 0)
			sc->clkfreq = clock;

		if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
			sc->get_cntxct = &get_cntxct_a64_unstable;
			if (bootverbose)
				device_printf(dev,
				    "Enabling allwinner unstable timer workaround\n");
		}
	}
#endif

	if (sc->clkfreq == 0) {
		/* Try to get clock frequency from timer */
		sc->clkfreq = get_freq();
	}

	if (sc->clkfreq == 0) {
		device_printf(dev, "No clock frequency specified\n");
		return (ENXIO);
	}

#ifdef INVARIANTS
	/* Confirm that non-optional irqs were allocated before coming in. */
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int j;

		irq_def = &arm_tmr_irq_defs[i];

		/* Skip optional interrupts */
		if ((irq_def->flags & RF_OPTIONAL) != 0)
			continue;

		for (j = 0; j < sc->irq_count; j++) {
			if (sc->irqs[j].idx == irq_def->idx)
				break;
		}
		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
		    __func__, irq_def->name));
	}
#endif

#ifdef __aarch64__
	if (IN_VHE) {
		/*
		 * The kernel is running at EL2. The EL0 timer registers are
		 * re-mapped to the EL2 version. Because of this we need to
		 * use the EL2 interrupt.
		 */
		sc->physical_sys = true;
		first_timer = GT_HYP_PHYS;
		last_timer = GT_HYP_PHYS;
	} else if (!HAS_PHYS) {
		/*
		 * Use the virtual timer when we can't use the hypervisor.
		 * A hypervisor guest may change the virtual timer registers
		 * while executing so any use of the virtual timer interrupt
		 * needs to be coordinated with the virtual machine manager.
		 */
		sc->physical_sys = false;
		first_timer = GT_VIRT;
		last_timer = GT_VIRT;
	} else
#endif
	/* Otherwise set up the secure and non-secure physical timers. */
	{
		sc->physical_sys = true;
		first_timer = GT_PHYS_SECURE;
		last_timer = GT_PHYS_NONSECURE;
	}

#ifdef __aarch64__
	/*
	 * The virtual timer is always available on arm and arm64, tell
	 * userspace to use it.
	 */
	sc->physical_user = false;
	/* Allow use of the physical counter in userspace when available */
	if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
	    user_phys != 0)
		sc->physical_user = sc->physical_sys;
#else
	/*
	 * The virtual timer depends on setting cntvoff from the hypervisor
	 * privilege level/el2, however this is only set on arm64.
	 */
	sc->physical_user = true;
#endif

	arm_tmr_sc = sc;

	/* Setup secure, non-secure and virtual IRQs handler */
	for (i = 0; i < sc->irq_count; i++) {
		/* Only enable IRQs on timers we expect to use */
		if (sc->irqs[i].idx < first_timer ||
		    sc->irqs[i].idx > last_timer)
			continue;
		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
		if (error) {
			device_printf(dev, "Unable to alloc int resource.\n");
			/*
			 * NOTE(review): bus_teardown_intr(9) takes the cookie
			 * itself, but &sc->irqs[j].ihl (a pointer to it) is
			 * passed here — verify this unwind path is correct.
			 */
			for (int j = 0; j < i; j++)
				bus_teardown_intr(dev, sc->irqs[j].res,
				    &sc->irqs[j].ihl);
			return (ENXIO);
		}
	}

	/* Disable the timers until we are ready */
	arm_tmr_disable(false);
	if (HAS_PHYS)
		arm_tmr_disable(true);

	arm_tmr_timecount.tc_frequency = sc->clkfreq;
	tc_init(&arm_tmr_timecount);

	sc->et.et_name = "ARM MPCore Eventtimer";
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_quality = 1000;

	sc->et.et_frequency = sc->clkfreq;
	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = arm_tmr_start;
	sc->et.et_stop = arm_tmr_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

#if defined(__arm__)
	/* Route DELAY() through the timer instead of a calibrated loop. */
	arm_set_delay(arm_tmr_do_delay, sc);
#endif

	return (0);
}
809 
810 #ifdef FDT
/* FDT front-end registration for the generic timer. */
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe,		arm_tmr_fdt_probe),
	DEVMETHOD(device_attach,	arm_tmr_fdt_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early, during the timer bus pass, on simplebus and ofwbus. */
EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
824 #endif
825 
826 #ifdef DEV_ACPI
/* ACPI front-end registration for the generic timer. */
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify,	arm_tmr_acpi_identify),
	DEVMETHOD(device_probe,		arm_tmr_acpi_probe),
	DEVMETHOD(device_attach,	arm_tmr_acpi_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early, during the timer bus pass, under acpi. */
EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
839 #endif
840 
841 static int64_t
arm_tmr_get_counts(int usec)842 arm_tmr_get_counts(int usec)
843 {
844 	int64_t counts, counts_per_usec;
845 
846 	/* Get the number of times to count */
847 	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);
848 
849 	/*
850 	 * Clamp the timeout at a maximum value (about 32 seconds with
851 	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
852 	 * near that length of time and if they are, they should be hung
853 	 * out to dry.
854 	 */
855 	if (usec >= (0x80000000U / counts_per_usec))
856 		counts = (0x80000000U / counts_per_usec) - 1;
857 	else
858 		counts = usec * counts_per_usec;
859 
860 	return counts;
861 }
862 
/*
 * Busy-wait for usec microseconds using the system counter.  On arm64
 * with FEAT_WFxT enabled, wait with WFET up to the deadline instead of
 * spinning continuously.
 */
static void
arm_tmr_do_delay(int usec, void *arg)
{
	struct arm_tmr_softc *sc = arg;
	int64_t counts;
	uint64_t first;
#if defined(__aarch64__)
	int64_t end;
#endif

	counts = arm_tmr_get_counts(usec);
	first = sc->get_cntxct(sc->physical_sys);
#if defined(__aarch64__)
	/* Absolute counter value at which the delay expires. */
	end = first + counts;
#endif

	while ((sc->get_cntxct(sc->physical_sys) - first) < counts) {
#if defined(__aarch64__)
		if (enable_wfxt)
			wfet(end);
#endif
	}
}
886 
887 #if defined(__aarch64__)
/*
 * arm64 DELAY() implementation: use the generic timer once it has
 * attached, otherwise fall back to an uncalibrated spin loop.
 */
void
DELAY(int usec)
{
	int32_t counts;

	TSENTER();
	/*
	 * We have two options for a delay: using the timer, or using the wfet
	 * instruction. However, both of these are dependent on timers being
	 * setup, and if they're not just use a loop for the meantime.
	 */
	if (arm_tmr_sc != NULL) {
		arm_tmr_do_delay(usec, arm_tmr_sc);
	} else {
		for (; usec > 0; usec--)
			for (counts = 200; counts > 0; counts--)
				/* Prevent the compiler from optimizing out the loop */
				cpufunc_nullop();
	}
	TSEXIT();
}
909 
910 static cpu_feat_en
wfxt_check(const struct cpu_feat * feat __unused,u_int midr __unused)911 wfxt_check(const struct cpu_feat *feat __unused, u_int midr __unused)
912 {
913 	uint64_t id_aa64isar2;
914 
915 	if (!get_kernel_reg(ID_AA64ISAR2_EL1, &id_aa64isar2))
916 		return (FEAT_ALWAYS_DISABLE);
917 	if (ID_AA64ISAR2_WFxT_VAL(id_aa64isar2) >= ID_AA64ISAR2_WFxT_IMPL)
918 		return (FEAT_DEFAULT_ENABLE);
919 
920 	return (FEAT_ALWAYS_DISABLE);
921 }
922 
/* cpu_feat enable hook: record that WFET may be used (see DELAY). */
static bool
wfxt_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* will be called if wfxt_check returns true */
	enable_wfxt = true;
	return (true);
}
932 
/*
 * cpu_feat disabled hook: clear the WFxT field in the user-visible
 * copy of ID_AA64ISAR2_EL1 (once, from the boot CPU).
 */
static void
wfxt_disabled(const struct cpu_feat *feat __unused)
{
	if (PCPU_GET(cpuid) == 0)
		update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFxT_MASK, 0);
}

CPU_FEAT(feat_wfxt, "WFE and WFI instructions with timeout",
    wfxt_check, NULL, wfxt_enable, wfxt_disabled,
    CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM);
943 #endif
944 
/*
 * Fill the vdso timehands so userspace can read the counter without a
 * system call.  Returns 1 to mark the timehands usable from userspace.
 */
static uint32_t
arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
	/* Tell userspace whether to read the physical or virtual counter. */
	vdso_th->th_physical = arm_tmr_sc->physical_user;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}
955