/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/bus.h>
#include <machine/machdep.h>
#include <machine/vmm.h>

#include <arm64/vmm/arm64.h>

#include <dev/vmm/vmm_vm.h>

#include "vgic.h"
#include "vtimer.h"

#define RES1    0xffffffffffffffffUL

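/*
 * Helpers for inspecting a saved CNTx_CTL_EL0 value: a timer can raise an
 * interrupt only when it is enabled and not masked, and its output is
 * asserted once ISTATUS reports that the timer condition has been met.
 */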
#define timer_enabled(ctl)      \
    (!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))

static uint32_t tmr_frq;

#define timer_condition_met(ctl)        ((ctl) & CNTP_CTL_ISTATUS)

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);

static bool allow_ecv_phys = false;
SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
    &allow_ecv_phys, 0,
    "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");

static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);

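/*
 * Interrupt filter for the virtual timer IRQ.  It runs when the hardware
 * virtual timer fires while a guest vcpu is (or was just) running: the
 * interrupt is forwarded to the guest through the vgic, and the hardware
 * timer is then disabled so the level-triggered interrupt is not immediately
 * re-asserted, which would wedge the host in an interrupt loop.
 */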
static int
vtimer_virtual_timer_intr(void *arg)
{
        struct hypctx *hypctx;
        uint64_t cntpct_el0;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);

        if (!hypctx) {
                /* vm_destroy() was called. */
                eprintf("No active vcpu\n");
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                goto out;
        }
        if (!timer_enabled(cntv_ctl)) {
                eprintf("Timer not enabled\n");
                goto out;
        }
        if (!timer_condition_met(cntv_ctl)) {
                eprintf("Timer condition not met\n");
                goto out;
        }

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0)
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    GT_VIRT_IRQ, true);

        cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0;

out:
        /*
         * Disable the timer interrupt.  This prevents the interrupt from
         * being re-asserted as soon as we exit the handler, which would get
         * the host stuck in an interrupt loop.
         *
         * This is safe to do because the guest disables the timer and then
         * re-enables it as part of its interrupt handling routine.
         */
        cntv_ctl &= ~CNTP_CTL_ENABLE;
        WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);

        return (FILTER_HANDLED);
}

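/*
 * Module-wide initialisation: record the host counter frequency so that
 * guest-programmed deadlines can later be converted to sbintime_t values for
 * the callout subsystem.
 */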
int
vtimer_init(void)
{
        /*
         * The guest *MUST* use the same timer frequency as the host.  The
         * CNTFRQ_EL0 register is accessible to the guest and a different
         * value in the guest dts file might have unforeseen consequences.
         */
        tmr_frq = READ_SPECIALREG(cntfrq_el0);

        return (0);
}

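/*
 * Per-VM initialisation: select the CNTHCTL_EL2 trapping configuration for
 * the VM (optionally letting the guest access the physical timer directly
 * when FEAT_ECV_POFF is available and permitted by the sysctl) and record
 * the current physical count as the virtual counter offset, so the guest's
 * virtual count effectively starts near zero.
 */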
void
vtimer_vminit(struct hyp *hyp)
{
        uint64_t now;
        bool ecv_poff;

        ecv_poff = false;

        if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
                ecv_poff = true;

        /*
         * Configure the Counter-timer Hypervisor Control Register for the VM.
         */
        if (in_vhe()) {
                /*
                 * CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
                 *     CNTV{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
                 *     CNTV_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL0PTEN: trap EL0 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
                 * CNTHCTL_E2H_EL1PTEN: trap access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
                 *     CNTV{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 *
                 * TODO: Don't trap when FEAT_ECV is present
                 */
                hyp->vtimer.cnthctl_el2 =
                    CNTHCTL_E2H_EL0VCTEN_NOTRAP |
                    CNTHCTL_E2H_EL0VTEN_NOTRAP;
                if (ecv_poff) {
                        hyp->vtimer.cnthctl_el2 |=
                            CNTHCTL_E2H_EL0PCTEN_NOTRAP |
                            CNTHCTL_E2H_EL0PTEN_NOTRAP |
                            CNTHCTL_E2H_EL1PCTEN_NOTRAP |
                            CNTHCTL_E2H_EL1PTEN_NOTRAP;
                } else {
                        hyp->vtimer.cnthctl_el2 |=
                            CNTHCTL_E2H_EL0PCTEN_TRAP |
                            CNTHCTL_E2H_EL0PTEN_TRAP |
                            CNTHCTL_E2H_EL1PCTEN_TRAP |
                            CNTHCTL_E2H_EL1PTEN_TRAP;
                }
        } else {
                /*
                 * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
                 *     from EL1
                 * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
                 */
                if (ecv_poff) {
                        hyp->vtimer.cnthctl_el2 =
                            CNTHCTL_EL1PCTEN_NOTRAP |
                            CNTHCTL_EL1PCEN_NOTRAP;
                } else {
                        hyp->vtimer.cnthctl_el2 =
                            CNTHCTL_EL1PCTEN_TRAP |
                            CNTHCTL_EL1PCEN_TRAP;
                }
        }

        if (ecv_poff)
                hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;

        now = READ_SPECIALREG(cntpct_el0);
        hyp->vtimer.cntvoff_el2 = now;

        return;
}

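/*
 * Per-vcpu initialisation: start with the emulated physical timer masked and
 * disabled, and set up the per-timer callouts and interrupt numbers used
 * when injecting emulated timer interrupts into the guest.
 */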
void
vtimer_cpuinit(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        /*
         * Configure physical timer interrupts for the VCPU.
         *
         * CNTP_CTL_IMASK: mask interrupts
         * ~CNTP_CTL_ENABLE: disable the timer
         */
        vtimer_cpu->phys_timer.cntx_ctl_el0 = CNTP_CTL_IMASK & ~CNTP_CTL_ENABLE;

        mtx_init(&vtimer_cpu->phys_timer.mtx, "vtimer phys callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->phys_timer.callout,
            &vtimer_cpu->phys_timer.mtx, 0);
        vtimer_cpu->phys_timer.irqid = GT_PHYS_NS_IRQ;

        mtx_init(&vtimer_cpu->virt_timer.mtx, "vtimer virt callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->virt_timer.callout,
            &vtimer_cpu->virt_timer.mtx, 0);
        vtimer_cpu->virt_timer.irqid = GT_VIRT_IRQ;
}

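/*
 * Per-vcpu teardown: make sure no callout is still in flight before the
 * mutexes protecting them are destroyed.
 */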
void
vtimer_cpucleanup(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        callout_drain(&vtimer_cpu->phys_timer.callout);
        callout_drain(&vtimer_cpu->virt_timer.callout);
        mtx_destroy(&vtimer_cpu->phys_timer.mtx);
        mtx_destroy(&vtimer_cpu->virt_timer.mtx);
}

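/*
 * Per-VM teardown.  If this CPU no longer has an active vcpu, the hardware
 * virtual timer may still be armed on behalf of the destroyed VM, so disable
 * it here.
 */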
void
vtimer_vmcleanup(struct hyp *hyp)
{
        struct hypctx *hypctx;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        if (!hypctx) {
                /* The active VM was destroyed, stop the timer. */
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                cntv_ctl &= ~CNTP_CTL_ENABLE;
                WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
        }
}

void
vtimer_cleanup(void)
{
}

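/*
 * Bring the vgic level of a guest timer interrupt in line with the saved
 * timer state: lower the line if the timer is disabled, raise it if the
 * deadline has already passed, and otherwise lower it and schedule a callout
 * to inject it once the deadline expires.
 */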
static void
vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
    uint64_t cntpct_el0)
{
        if (!timer_enabled(timer->cntx_ctl_el0)) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
        } else if (timer->cntx_cval_el0 < cntpct_el0) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
                vtimer_schedule_irq(hypctx, false);
        }
}

void
vtimer_sync_hwstate(struct hypctx *hypctx)
{
        uint64_t cntpct_el0;

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
        /* If FEAT_ECV_POFF is in use then we need to sync the physical timer. */
        if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
                vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
                    cntpct_el0);
        }
}

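/*
 * Callout handlers: once an emulated timer deadline expires, assert the
 * corresponding interrupt in the vgic so it is delivered to the guest.
 */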
static void
vtimer_inject_irq_callout_phys(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.phys_timer.irqid, true);
}

static void
vtimer_inject_irq_callout_virt(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.virt_timer.irqid, true);
}

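/*
 * Arm a callout for the selected timer.  If the compare value is already in
 * the past the interrupt is injected immediately; otherwise the remaining
 * number of counter ticks is converted to an sbintime_t using the host timer
 * frequency and a one-shot callout is scheduled for that point.
 */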
static void
vtimer_schedule_irq(struct hypctx *hypctx, bool phys)
{
        sbintime_t time;
        struct vtimer_timer *timer;
        uint64_t cntpct_el0;
        uint64_t diff;

        if (phys)
                timer = &hypctx->vtimer_cpu.phys_timer;
        else
                timer = &hypctx->vtimer_cpu.virt_timer;
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (timer->cntx_cval_el0 < cntpct_el0) {
                /* Timer set in the past, trigger interrupt */
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                diff = timer->cntx_cval_el0 - cntpct_el0;
                time = diff * SBT_1S / tmr_frq;
                if (phys)
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_phys, hypctx, 0);
                else
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_virt, hypctx, 0);
        }
}

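/*
 * Cancel a pending emulated physical timer interrupt: stop the callout and
 * lower the interrupt line in the vgic.
 */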
static void
vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu)
{
        struct vtimer_cpu *vtimer_cpu;
        struct vtimer_timer *timer;

        vtimer_cpu = &hypctx->vtimer_cpu;
        timer = &vtimer_cpu->phys_timer;

        callout_drain(&timer->callout);
        /*
         * The interrupt needs to be deactivated here regardless of whether
         * the callout function has executed.  The timer interrupt can be
         * masked with the CNTP_CTL_EL0.IMASK bit instead of reading the IAR
         * register, and masking the interrupt doesn't remove it from the
         * list registers.
         */
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(vcpu), timer->irqid, false);
}

/*
 * Timer emulation functions.
 *
 * The guest should use the virtual timer; however, some software, e.g.
 * u-boot, uses the physical timer.  Emulate it in software for the guest.
 *
 * Adjust for cntvoff_el2 so the physical and virtual timers are at similar
 * times.  This simplifies interrupt handling in the virtual timer as the
 * adjustment will have already happened.
 */

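/*
 * Read of the emulated CNTP_CTL_EL0.  The ISTATUS bit is not stored; it is
 * recomputed on every read by comparing the saved compare value against the
 * (offset-adjusted) physical count.
 */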
int
vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        if (vtimer_cpu->phys_timer.cntx_cval_el0 < cntpct_el0)
                /* Timer condition met */
                *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 | CNTP_CTL_ISTATUS;
        else
                *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 & ~CNTP_CTL_ISTATUS;

        return (0);
}

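/*
 * Write to the emulated CNTP_CTL_EL0.  Detect enable/disable transitions so
 * that a pending interrupt is cancelled when the timer is switched off and a
 * new callout is armed when it is switched on.
 */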
int
vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t ctl_el0;
        bool timer_toggled_on;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        timer_toggled_on = false;
        ctl_el0 = vtimer_cpu->phys_timer.cntx_ctl_el0;

        if (!timer_enabled(ctl_el0) && timer_enabled(wval))
                timer_toggled_on = true;
        else if (timer_enabled(ctl_el0) && !timer_enabled(wval))
                vtimer_remove_irq(hypctx, vcpu);

        vtimer_cpu->phys_timer.cntx_ctl_el0 = wval;

        if (timer_toggled_on)
                vtimer_schedule_irq(hypctx, true);

        return (0);
}

int
vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct vm *vm;
        struct hyp *hyp;

        vm = vcpu_vm(vcpu);
        hyp = vm_get_cookie(vm);
        *rval = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        return (0);
}

int
vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        return (0);
}

int
vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        *rval = vtimer_cpu->phys_timer.cntx_cval_el0;

        return (0);
}

int
vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        vtimer_cpu->phys_timer.cntx_cval_el0 = wval;

        vtimer_remove_irq(hypctx, vcpu);
        if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
                vtimer_schedule_irq(hypctx, true);
        }

        return (0);
}

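/*
 * CNTP_TVAL_EL0 is a signed 32-bit view of the timer: reading it returns
 * CVAL minus the current count, and writing it sets CVAL to the current
 * count plus the (sign-extended) written value.
 */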
int
vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint32_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

        if (!(vtimer_cpu->phys_timer.cntx_ctl_el0 & CNTP_CTL_ENABLE)) {
                /*
                 * ARMv8 Architecture Manual, p. D7-2702: the result of
                 * reading TVAL when the timer is disabled is UNKNOWN.  We
                 * choose to return the maximum 32-bit value, which means the
                 * timer would fire very far in the future.
                 */
                *rval = (uint32_t)RES1;
        } else {
                cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
                    hyp->vtimer.cntvoff_el2;
                *rval = vtimer_cpu->phys_timer.cntx_cval_el0 - cntpct_el0;
        }

        return (0);
}

int
vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        vtimer_cpu->phys_timer.cntx_cval_el0 = (int32_t)wval + cntpct_el0;

        vtimer_remove_irq(hypctx, vcpu);
        if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
                vtimer_schedule_irq(hypctx, true);
        }

        return (0);
}

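/*
 * newbus glue: the vtimer device attaches as a child of the generic_timer
 * driver, which supplies the IRQ resource that the interrupt filter above is
 * installed on.
 */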
struct vtimer_softc {
        struct resource *res;
        void *ihl;
        int rid;
};

static int
vtimer_probe(device_t dev)
{
        device_set_desc(dev, "Virtual timer");
        return (BUS_PROBE_DEFAULT);
}

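/*
 * Allocate the timer interrupt and install vtimer_virtual_timer_intr() as a
 * filter-only handler for it.
 */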
static int
vtimer_attach(device_t dev)
{
        struct vtimer_softc *sc;

        sc = device_get_softc(dev);

        sc->rid = 0;
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid, RF_ACTIVE);
        if (sc->res == NULL)
                return (ENXIO);

        bus_setup_intr(dev, sc->res, INTR_TYPE_CLK, vtimer_virtual_timer_intr,
            NULL, NULL, &sc->ihl);

        return (0);
}

static device_method_t vtimer_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         vtimer_probe),
        DEVMETHOD(device_attach,        vtimer_attach),

        /* End */
        DEVMETHOD_END
};

DEFINE_CLASS_0(vtimer, vtimer_driver, vtimer_methods,
    sizeof(struct vtimer_softc));

DRIVER_MODULE(vtimer, generic_timer, vtimer_driver, 0, 0);