1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2017 The FreeBSD Foundation
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the company nor the name of the author may be used to
15 * endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 #include <sys/types.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/rman.h>
39 #include <sys/sysctl.h>
40 #include <sys/time.h>
41 #include <sys/timeet.h>
42 #include <sys/timetc.h>
43
44 #include <machine/bus.h>
45 #include <machine/machdep.h>
46 #include <machine/vmm.h>
47
48 #include <arm64/vmm/arm64.h>
49
50 #include "vgic.h"
51 #include "vtimer.h"
52
/* All-ones value; used as the UNKNOWN result for TVAL reads of a disabled timer. */
#define RES1 0xffffffffffffffffUL

/* A timer can deliver an interrupt only when enabled and not masked. */
#define timer_enabled(ctl) \
    (!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))

/* Host timer frequency (CNTFRQ_EL0); the guest must use the same value. */
static uint32_t tmr_frq;

/* ISTATUS reflects whether the timer condition is met. */
#define timer_condition_met(ctl) ((ctl) & CNTP_CTL_ISTATUS)

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);

/* Tunable: allow guests direct physical-timer access when FEAT_ECV_POFF exists. */
static bool allow_ecv_phys = false;
SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
    &allow_ecv_phys, 0,
    "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");

static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);
71
72 static int
vtimer_virtual_timer_intr(void * arg)73 vtimer_virtual_timer_intr(void *arg)
74 {
75 struct hypctx *hypctx;
76 uint64_t cntpct_el0;
77 uint32_t cntv_ctl;
78
79 hypctx = arm64_get_active_vcpu();
80 cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
81
82 if (!hypctx) {
83 /* vm_destroy() was called. */
84 eprintf("No active vcpu\n");
85 cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
86 goto out;
87 }
88 if (!timer_enabled(cntv_ctl)) {
89 eprintf("Timer not enabled\n");
90 goto out;
91 }
92 if (!timer_condition_met(cntv_ctl)) {
93 eprintf("Timer condition not met\n");
94 goto out;
95 }
96
97 cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
98 hypctx->hyp->vtimer.cntvoff_el2;
99 if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0)
100 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
101 GT_VIRT_IRQ, true);
102
103 cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0;
104
105 out:
106 /*
107 * Disable the timer interrupt. This will prevent the interrupt from
108 * being reasserted as soon as we exit the handler and getting stuck
109 * in an infinite loop.
110 *
111 * This is safe to do because the guest disabled the timer, and then
112 * enables it as part of the interrupt handling routine.
113 */
114 cntv_ctl &= ~CNTP_CTL_ENABLE;
115 WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
116
117 return (FILTER_HANDLED);
118 }
119
/* Module-wide initialisation: record the host timer frequency. */
int
vtimer_init(void)
{
	/*
	 * The guest *MUST* use the same timer frequency as the host. The
	 * register CNTFRQ_EL0 is accessible to the guest and a different value
	 * in the guest dts file might have unforeseen consequences.
	 */
	tmr_frq = READ_SPECIALREG(cntfrq_el0);

	return (0);
}
132
133 void
vtimer_vminit(struct hyp * hyp)134 vtimer_vminit(struct hyp *hyp)
135 {
136 uint64_t now;
137 bool ecv_poff;
138
139 ecv_poff = false;
140
141 if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
142 ecv_poff = true;
143
144 /*
145 * Configure the Counter-timer Hypervisor Control Register for the VM.
146 */
147 if (in_vhe()) {
148 /*
149 * CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
150 * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
151 * CNTV{CT,CTXX}_EL0
152 * CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
153 * CNTV_{CTL,CVAL,TVAL}_EL0
154 * CNTHCTL_E2H_EL0PTEN: trap EL0 access to
155 * CNTP_{CTL,CVAL,TVAL}_EL0
156 * CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
157 * CNTHCTL_E2H_EL1PTEN: trap access to
158 * CNTP_{CTL,CVAL,TVAL}_EL0
159 * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
160 * CNTV{CT,CTSS}_EL0
161 * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
162 * CNTP_{CTL,CVAL,TVAL}_EL0
163 *
164 * TODO: Don't trap when FEAT_ECV is present
165 */
166 hyp->vtimer.cnthctl_el2 =
167 CNTHCTL_E2H_EL0VCTEN_NOTRAP |
168 CNTHCTL_E2H_EL0VTEN_NOTRAP;
169 if (ecv_poff) {
170 hyp->vtimer.cnthctl_el2 |=
171 CNTHCTL_E2H_EL0PCTEN_NOTRAP |
172 CNTHCTL_E2H_EL0PTEN_NOTRAP |
173 CNTHCTL_E2H_EL1PCTEN_NOTRAP |
174 CNTHCTL_E2H_EL1PTEN_NOTRAP;
175 } else {
176 hyp->vtimer.cnthctl_el2 |=
177 CNTHCTL_E2H_EL0PCTEN_TRAP |
178 CNTHCTL_E2H_EL0PTEN_TRAP |
179 CNTHCTL_E2H_EL1PCTEN_TRAP |
180 CNTHCTL_E2H_EL1PTEN_TRAP;
181 }
182 } else {
183 /*
184 * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
185 * from EL1
186 * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
187 */
188 if (ecv_poff) {
189 hyp->vtimer.cnthctl_el2 =
190 CNTHCTL_EL1PCTEN_NOTRAP |
191 CNTHCTL_EL1PCEN_NOTRAP;
192 } else {
193 hyp->vtimer.cnthctl_el2 =
194 CNTHCTL_EL1PCTEN_TRAP |
195 CNTHCTL_EL1PCEN_TRAP;
196 }
197 }
198
199 if (ecv_poff)
200 hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;
201
202 now = READ_SPECIALREG(cntpct_el0);
203 hyp->vtimer.cntvoff_el2 = now;
204
205 return;
206 }
207
/*
 * Per-vCPU timer initialisation: start with the emulated physical timer
 * masked and disabled, and set up the callouts used to deliver emulated
 * timer interrupts for both the physical and virtual timers.
 */
void
vtimer_cpuinit(struct hypctx *hypctx)
{
	struct vtimer_cpu *vtimer_cpu;

	vtimer_cpu = &hypctx->vtimer_cpu;
	/*
	 * Configure physical timer interrupts for the VCPU.
	 *
	 * CNTP_CTL_IMASK: mask interrupts
	 * ~CNTP_CTL_ENABLE: disable the timer
	 */
	vtimer_cpu->phys_timer.cntx_ctl_el0 = CNTP_CTL_IMASK & ~CNTP_CTL_ENABLE;

	/* Each timer's callout is protected by its own mutex. */
	mtx_init(&vtimer_cpu->phys_timer.mtx, "vtimer phys callout mutex", NULL,
	    MTX_DEF);
	callout_init_mtx(&vtimer_cpu->phys_timer.callout,
	    &vtimer_cpu->phys_timer.mtx, 0);
	vtimer_cpu->phys_timer.irqid = GT_PHYS_NS_IRQ;

	mtx_init(&vtimer_cpu->virt_timer.mtx, "vtimer virt callout mutex", NULL,
	    MTX_DEF);
	callout_init_mtx(&vtimer_cpu->virt_timer.callout,
	    &vtimer_cpu->virt_timer.mtx, 0);
	vtimer_cpu->virt_timer.irqid = GT_VIRT_IRQ;
}
234
235 void
vtimer_cpucleanup(struct hypctx * hypctx)236 vtimer_cpucleanup(struct hypctx *hypctx)
237 {
238 struct vtimer_cpu *vtimer_cpu;
239
240 vtimer_cpu = &hypctx->vtimer_cpu;
241 callout_drain(&vtimer_cpu->phys_timer.callout);
242 callout_drain(&vtimer_cpu->virt_timer.callout);
243 mtx_destroy(&vtimer_cpu->phys_timer.mtx);
244 mtx_destroy(&vtimer_cpu->virt_timer.mtx);
245 }
246
247 void
vtimer_vmcleanup(struct hyp * hyp)248 vtimer_vmcleanup(struct hyp *hyp)
249 {
250 struct hypctx *hypctx;
251 uint32_t cntv_ctl;
252
253 hypctx = arm64_get_active_vcpu();
254 if (!hypctx) {
255 /* The active VM was destroyed, stop the timer. */
256 cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
257 cntv_ctl &= ~CNTP_CTL_ENABLE;
258 WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
259 }
260 }
261
/* Module-wide teardown; nothing from vtimer_init() needs to be undone. */
void
vtimer_cleanup(void)
{
}
266
267 static void
vtime_sync_timer(struct hypctx * hypctx,struct vtimer_timer * timer,uint64_t cntpct_el0)268 vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
269 uint64_t cntpct_el0)
270 {
271 if (!timer_enabled(timer->cntx_ctl_el0)) {
272 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
273 timer->irqid, false);
274 } else if (timer->cntx_cval_el0 < cntpct_el0) {
275 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
276 timer->irqid, true);
277 } else {
278 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
279 timer->irqid, false);
280 vtimer_schedule_irq(hypctx, false);
281 }
282 }
283
/*
 * Synchronise emulated timer interrupt state with current guest time:
 * always sync the virtual timer, and the physical timer too when the VM
 * runs with FEAT_ECV_POFF enabled (CNTHCTL_ECV_EN set in cnthctl_el2).
 */
void
vtimer_sync_hwstate(struct hypctx *hypctx)
{
	uint64_t cntpct_el0;

	/* Guest-visible count: host counter minus the per-VM virtual offset. */
	cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
	    hypctx->hyp->vtimer.cntvoff_el2;
	vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
	/* If FEAT_ECV_POFF is in use then we need to sync the physical timer */
	if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
		vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
		    cntpct_el0);
	}
}
298
299 static void
vtimer_inject_irq_callout_phys(void * context)300 vtimer_inject_irq_callout_phys(void *context)
301 {
302 struct hypctx *hypctx;
303
304 hypctx = context;
305 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
306 hypctx->vtimer_cpu.phys_timer.irqid, true);
307 }
308
309 static void
vtimer_inject_irq_callout_virt(void * context)310 vtimer_inject_irq_callout_virt(void *context)
311 {
312 struct hypctx *hypctx;
313
314 hypctx = context;
315 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
316 hypctx->vtimer_cpu.virt_timer.irqid, true);
317 }
318
319 static void
vtimer_schedule_irq(struct hypctx * hypctx,bool phys)320 vtimer_schedule_irq(struct hypctx *hypctx, bool phys)
321 {
322 sbintime_t time;
323 struct vtimer_timer *timer;
324 uint64_t cntpct_el0;
325 uint64_t diff;
326
327 if (phys)
328 timer = &hypctx->vtimer_cpu.phys_timer;
329 else
330 timer = &hypctx->vtimer_cpu.virt_timer;
331 cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
332 hypctx->hyp->vtimer.cntvoff_el2;
333 if (timer->cntx_cval_el0 < cntpct_el0) {
334 /* Timer set in the past, trigger interrupt */
335 vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
336 timer->irqid, true);
337 } else {
338 diff = timer->cntx_cval_el0 - cntpct_el0;
339 time = diff * SBT_1S / tmr_frq;
340 if (phys)
341 callout_reset_sbt(&timer->callout, time, 0,
342 vtimer_inject_irq_callout_phys, hypctx, 0);
343 else
344 callout_reset_sbt(&timer->callout, time, 0,
345 vtimer_inject_irq_callout_virt, hypctx, 0);
346 }
347 }
348
/*
 * Cancel a pending emulated physical timer interrupt: stop the callout and
 * deassert the IRQ in the vgic.  Only the physical timer is handled here;
 * the vcpu argument names the target vCPU for the deassertion.
 */
static void
vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu)
{
	struct vtimer_cpu *vtimer_cpu;
	struct vtimer_timer *timer;

	vtimer_cpu = &hypctx->vtimer_cpu;
	timer = &vtimer_cpu->phys_timer;

	callout_drain(&timer->callout);
	/*
	 * The interrupt needs to be deactivated here regardless of the callout
	 * function having been executed. The timer interrupt can be masked with
	 * the CNTP_CTL_EL0.IMASK bit instead of reading the IAR register.
	 * Masking the interrupt doesn't remove it from the list registers.
	 */
	vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(vcpu), timer->irqid, false);
}
367
368 /*
369 * Timer emulation functions.
370 *
 * The guest should use the virtual timer, however some software, e.g. u-boot,
 * uses the physical timer. Emulate this in software for the guest to use.
373 *
374 * Adjust for cntvoff_el2 so the physical and virtual timers are at similar
375 * times. This simplifies interrupt handling in the virtual timer as the
376 * adjustment will have already happened.
377 */
378
379 int
vtimer_phys_ctl_read(struct vcpu * vcpu,uint64_t * rval,void * arg)380 vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
381 {
382 struct hyp *hyp;
383 struct hypctx *hypctx;
384 struct vtimer_cpu *vtimer_cpu;
385 uint64_t cntpct_el0;
386
387 hypctx = vcpu_get_cookie(vcpu);
388 hyp = hypctx->hyp;
389 vtimer_cpu = &hypctx->vtimer_cpu;
390
391 cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
392 if (vtimer_cpu->phys_timer.cntx_cval_el0 < cntpct_el0)
393 /* Timer condition met */
394 *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 | CNTP_CTL_ISTATUS;
395 else
396 *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 & ~CNTP_CTL_ISTATUS;
397
398 return (0);
399 }
400
401 int
vtimer_phys_ctl_write(struct vcpu * vcpu,uint64_t wval,void * arg)402 vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg)
403 {
404 struct hypctx *hypctx;
405 struct vtimer_cpu *vtimer_cpu;
406 uint64_t ctl_el0;
407 bool timer_toggled_on;
408
409 hypctx = vcpu_get_cookie(vcpu);
410 vtimer_cpu = &hypctx->vtimer_cpu;
411
412 timer_toggled_on = false;
413 ctl_el0 = vtimer_cpu->phys_timer.cntx_ctl_el0;
414
415 if (!timer_enabled(ctl_el0) && timer_enabled(wval))
416 timer_toggled_on = true;
417 else if (timer_enabled(ctl_el0) && !timer_enabled(wval))
418 vtimer_remove_irq(hypctx, vcpu);
419
420 vtimer_cpu->phys_timer.cntx_ctl_el0 = wval;
421
422 if (timer_toggled_on)
423 vtimer_schedule_irq(hypctx, true);
424
425 return (0);
426 }
427
428 int
vtimer_phys_cnt_read(struct vcpu * vcpu,uint64_t * rval,void * arg)429 vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
430 {
431 struct vm *vm;
432 struct hyp *hyp;
433
434 vm = vcpu_vm(vcpu);
435 hyp = vm_get_cookie(vm);
436 *rval = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
437 return (0);
438 }
439
/*
 * Write of the emulated CNTPCT_EL0: the guest cannot change the counter,
 * so the value is silently discarded.
 */
int
vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	return (0);
}
445
446 int
vtimer_phys_cval_read(struct vcpu * vcpu,uint64_t * rval,void * arg)447 vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
448 {
449 struct hypctx *hypctx;
450 struct vtimer_cpu *vtimer_cpu;
451
452 hypctx = vcpu_get_cookie(vcpu);
453 vtimer_cpu = &hypctx->vtimer_cpu;
454
455 *rval = vtimer_cpu->phys_timer.cntx_cval_el0;
456
457 return (0);
458 }
459
460 int
vtimer_phys_cval_write(struct vcpu * vcpu,uint64_t wval,void * arg)461 vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
462 {
463 struct hypctx *hypctx;
464 struct vtimer_cpu *vtimer_cpu;
465
466 hypctx = vcpu_get_cookie(vcpu);
467 vtimer_cpu = &hypctx->vtimer_cpu;
468
469 vtimer_cpu->phys_timer.cntx_cval_el0 = wval;
470
471 vtimer_remove_irq(hypctx, vcpu);
472 if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
473 vtimer_schedule_irq(hypctx, true);
474 }
475
476 return (0);
477 }
478
/*
 * Read of the emulated CNTP_TVAL_EL0: the downward count, i.e. the stored
 * compare value minus the current guest-visible counter.
 */
int
vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	/* NOTE(review): 32-bit here, unlike the 64-bit count used elsewhere —
	 * consistent with TVAL being a 32-bit view; confirm intended. */
	uint32_t cntpct_el0;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vtimer_cpu = &hypctx->vtimer_cpu;

	if (!(vtimer_cpu->phys_timer.cntx_ctl_el0 & CNTP_CTL_ENABLE)) {
		/*
		 * ARMv8 Architecture Manual, p. D7-2702: the result of reading
		 * TVAL when the timer is disabled is UNKNOWN. I have chosen to
		 * return the maximum value possible on 32 bits which means the
		 * timer will fire very far into the future.
		 */
		*rval = (uint32_t)RES1;
	} else {
		cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
		    hyp->vtimer.cntvoff_el2;
		*rval = vtimer_cpu->phys_timer.cntx_cval_el0 - cntpct_el0;
	}

	return (0);
}
507
/*
 * Write of the emulated CNTP_TVAL_EL0: convert the (signed) 32-bit timer
 * value to an absolute compare value relative to the guest-visible counter,
 * then cancel and, if the timer is enabled, rearm the emulated interrupt.
 */
int
vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vtimer_cpu *vtimer_cpu;
	uint64_t cntpct_el0;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vtimer_cpu = &hypctx->vtimer_cpu;

	cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
	/* The int32_t cast sign-extends wval so negative TVALs work. */
	vtimer_cpu->phys_timer.cntx_cval_el0 = (int32_t)wval + cntpct_el0;

	vtimer_remove_irq(hypctx, vcpu);
	if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
		vtimer_schedule_irq(hypctx, true);
	}

	return (0);
}
530
/* Softc for the vtimer newbus device attached below generic_timer. */
struct vtimer_softc {
	struct resource *res;	/* IRQ resource for the virtual timer */
	void *ihl;		/* interrupt handler cookie from bus_setup_intr() */
	int rid;		/* resource id used for the IRQ allocation */
};
536
537 static int
vtimer_probe(device_t dev)538 vtimer_probe(device_t dev)
539 {
540 device_set_desc(dev, "Virtual timer");
541 return (BUS_PROBE_DEFAULT);
542 }
543
544 static int
vtimer_attach(device_t dev)545 vtimer_attach(device_t dev)
546 {
547 struct vtimer_softc *sc;
548
549 sc = device_get_softc(dev);
550
551 sc->rid = 0;
552 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid, RF_ACTIVE);
553 if (sc->res == NULL)
554 return (ENXIO);
555
556 bus_setup_intr(dev, sc->res, INTR_TYPE_CLK, vtimer_virtual_timer_intr,
557 NULL, NULL, &sc->ihl);
558
559 return (0);
560 }
561
static device_method_t vtimer_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, vtimer_probe),
	DEVMETHOD(device_attach, vtimer_attach),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(vtimer, vtimer_driver, vtimer_methods,
    sizeof(struct vtimer_softc));

/* Attach the vtimer driver as a child of the generic_timer device. */
DRIVER_MODULE(vtimer, generic_timer, vtimer_driver, 0, 0);
575