/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/bus.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
#include <machine/armreg.h>

#include <arm64/vmm/arm64.h>

#include "vgic.h"
#include "vtimer.h"

#define RES1            0xffffffffffffffffUL

#define timer_enabled(ctl)      \
    (!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))

static uint32_t tmr_frq;

#define timer_condition_met(ctl)        ((ctl) & CNTP_CTL_ISTATUS)

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);

static bool allow_ecv_phys = false;
SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
    &allow_ecv_phys, 0,
    "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");

static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);

static int
vtimer_virtual_timer_intr(void *arg)
{
        struct hypctx *hypctx;
        uint64_t cntpct_el0;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);

        if (!hypctx) {
                /* vm_destroy() was called. */
                eprintf("No active vcpu\n");
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                goto out;
        }
        if (!timer_enabled(cntv_ctl)) {
                eprintf("Timer not enabled\n");
                goto out;
        }
        if (!timer_condition_met(cntv_ctl)) {
                eprintf("Timer condition not met\n");
                goto out;
        }

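        /*
         * Compare against the guest's view of the counter: subtract the
         * virtual offset from the host physical count before checking the
         * programmed compare value.
         */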
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0)
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    GT_VIRT_IRQ, true);

        cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0;

out:
        /*
         * Disable the timer. This prevents the interrupt from being
         * reasserted as soon as we exit the handler, which would leave us
         * stuck in an interrupt loop.
         *
         * This is safe to do because the guest disables the timer and then
         * re-enables it as part of its interrupt handling routine.
         */
        cntv_ctl &= ~CNTP_CTL_ENABLE;
        WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);

        return (FILTER_HANDLED);
}

int
vtimer_init(void)
{
        /*
         * The guest *MUST* use the same timer frequency as the host. The
         * register CNTFRQ_EL0 is accessible to the guest and a different
         * value in the guest dts file might have unforeseen consequences.
         */
        tmr_frq = READ_SPECIALREG(cntfrq_el0);

        return (0);
}

void
vtimer_vminit(struct hyp *hyp)
{
        uint64_t now;
        bool ecv_poff;

        ecv_poff = false;

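        /*
         * With FEAT_ECV_POFF the hardware can apply an offset to the
         * physical counter (CNTPOFF_EL2), so the guest may be given
         * untrapped access to the physical timer registers below.
         */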
        if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
                ecv_poff = true;

        /*
         * Configure the Counter-timer Hypervisor Control Register for the VM.
         */
        if (in_vhe()) {
                /*
                 * CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
                 *     CNTV{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
                 *     CNTV_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL0PTEN: trap EL0 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
                 * CNTHCTL_E2H_EL1PTEN: trap access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
                 *     CNTV{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 *
                 * TODO: Don't trap when FEAT_ECV is present
                 */
                hyp->vtimer.cnthctl_el2 =
                    CNTHCTL_E2H_EL0VCTEN_NOTRAP |
                    CNTHCTL_E2H_EL0VTEN_NOTRAP;
                if (ecv_poff) {
                        hyp->vtimer.cnthctl_el2 |=
                            CNTHCTL_E2H_EL0PCTEN_NOTRAP |
                            CNTHCTL_E2H_EL0PTEN_NOTRAP |
                            CNTHCTL_E2H_EL1PCTEN_NOTRAP |
                            CNTHCTL_E2H_EL1PTEN_NOTRAP;
                } else {
                        hyp->vtimer.cnthctl_el2 |=
                            CNTHCTL_E2H_EL0PCTEN_TRAP |
                            CNTHCTL_E2H_EL0PTEN_TRAP |
                            CNTHCTL_E2H_EL1PCTEN_TRAP |
                            CNTHCTL_E2H_EL1PTEN_TRAP;
                }
        } else {
                /*
                 * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
                 *     from EL1
                 * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
                 */
                if (ecv_poff) {
                        hyp->vtimer.cnthctl_el2 =
                            CNTHCTL_EL1PCTEN_NOTRAP |
                            CNTHCTL_EL1PCEN_NOTRAP;
                } else {
                        hyp->vtimer.cnthctl_el2 =
                            CNTHCTL_EL1PCTEN_TRAP |
                            CNTHCTL_EL1PCEN_TRAP;
                }
        }

        if (ecv_poff)
                hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;

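        /*
         * Use the current physical count as the virtual offset so that the
         * guest's virtual counter (CNTVCT = CNTPCT - CNTVOFF) starts at
         * zero when the VM is created.
         */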
        now = READ_SPECIALREG(cntpct_el0);
        hyp->vtimer.cntvoff_el2 = now;

        return;
}

void
vtimer_cpuinit(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        /*
         * Configure physical timer interrupts for the VCPU.
         *
         * CNTP_CTL_IMASK: mask interrupts
         * ~CNTP_CTL_ENABLE: disable the timer
         */
        vtimer_cpu->phys_timer.cntx_ctl_el0 = CNTP_CTL_IMASK & ~CNTP_CTL_ENABLE;

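        /*
         * Each timer is backed by a callout, protected by its own mutex,
         * that injects the timer interrupt through the vGIC when an
         * emulated timer deadline expires.
         */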
        mtx_init(&vtimer_cpu->phys_timer.mtx, "vtimer phys callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->phys_timer.callout,
            &vtimer_cpu->phys_timer.mtx, 0);
        vtimer_cpu->phys_timer.irqid = GT_PHYS_NS_IRQ;

        mtx_init(&vtimer_cpu->virt_timer.mtx, "vtimer virt callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->virt_timer.callout,
            &vtimer_cpu->virt_timer.mtx, 0);
        vtimer_cpu->virt_timer.irqid = GT_VIRT_IRQ;
}

void
vtimer_cpucleanup(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        callout_drain(&vtimer_cpu->phys_timer.callout);
        callout_drain(&vtimer_cpu->virt_timer.callout);
        mtx_destroy(&vtimer_cpu->phys_timer.mtx);
        mtx_destroy(&vtimer_cpu->virt_timer.mtx);
}

void
vtimer_vmcleanup(struct hyp *hyp)
{
        struct hypctx *hypctx;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        if (!hypctx) {
                /* The active VM was destroyed, stop the timer. */
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                cntv_ctl &= ~CNTP_CTL_ENABLE;
                WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
        }
}

void
vtimer_cleanup(void)
{
}

static void
vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
    uint64_t cntpct_el0)
{
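        /*
         * If the timer is disabled, make sure its interrupt is lowered. If
         * the compare value has already passed, assert the interrupt now.
         * Otherwise lower it and schedule a callout for the future expiry.
         */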
        if (!timer_enabled(timer->cntx_ctl_el0)) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
        } else if (timer->cntx_cval_el0 < cntpct_el0) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
                vtimer_schedule_irq(hypctx, false);
        }
}

void
vtimer_sync_hwstate(struct hypctx *hypctx)
{
        uint64_t cntpct_el0;

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
        /* If FEAT_ECV_POFF is in use then we need to sync the physical timer */
        if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
                vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
                    cntpct_el0);
        }
}

static void
vtimer_inject_irq_callout_phys(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.phys_timer.irqid, true);
}

static void
vtimer_inject_irq_callout_virt(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.virt_timer.irqid, true);
}

static void
vtimer_schedule_irq(struct hypctx *hypctx, bool phys)
{
        sbintime_t time;
        struct vtimer_timer *timer;
        uint64_t cntpct_el0;
        uint64_t diff;

        if (phys)
                timer = &hypctx->vtimer_cpu.phys_timer;
        else
                timer = &hypctx->vtimer_cpu.virt_timer;
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (timer->cntx_cval_el0 < cntpct_el0) {
                /* Timer set in the past, trigger interrupt */
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                diff = timer->cntx_cval_el0 - cntpct_el0;
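                /*
                 * Convert the remaining counter ticks to sbintime:
                 * diff / tmr_frq seconds, scaled by SBT_1S.
                 */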
                time = diff * SBT_1S / tmr_frq;
                if (phys)
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_phys, hypctx, 0);
                else
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_virt, hypctx, 0);
        }
}

static void
vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu)
{
        struct vtimer_cpu *vtimer_cpu;
        struct vtimer_timer *timer;

        vtimer_cpu = &hypctx->vtimer_cpu;
        timer = &vtimer_cpu->phys_timer;

        callout_drain(&timer->callout);
        /*
         * The interrupt needs to be deactivated here regardless of the callout
         * function having been executed. The timer interrupt can be masked with
         * the CNTP_CTL_EL0.IMASK bit instead of reading the IAR register.
         * Masking the interrupt doesn't remove it from the list registers.
         */
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(vcpu), timer->irqid, false);
}

/*
 * Timer emulation functions.
 *
 * The guest should use the virtual timer; however, some software, e.g.
 * u-boot, uses the physical timer. Emulate it in software for the guest.
 *
 * Adjust for cntvoff_el2 so the physical and virtual timers are at similar
 * times. This simplifies interrupt handling in the virtual timer as the
 * adjustment will have already happened.
 */

int
vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

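        /*
         * CNTP_CTL_EL0.ISTATUS is emulated: report the condition as met
         * once the offset-adjusted count has passed the compare value.
         */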
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        if (vtimer_cpu->phys_timer.cntx_cval_el0 < cntpct_el0)
                /* Timer condition met */
                *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 | CNTP_CTL_ISTATUS;
        else
                *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 & ~CNTP_CTL_ISTATUS;

        return (0);
}

int
vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t ctl_el0;
        bool timer_toggled_on;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        timer_toggled_on = false;
        ctl_el0 = vtimer_cpu->phys_timer.cntx_ctl_el0;

        if (!timer_enabled(ctl_el0) && timer_enabled(wval))
                timer_toggled_on = true;
        else if (timer_enabled(ctl_el0) && !timer_enabled(wval))
                vtimer_remove_irq(hypctx, vcpu);

        vtimer_cpu->phys_timer.cntx_ctl_el0 = wval;

        if (timer_toggled_on)
                vtimer_schedule_irq(hypctx, true);

        return (0);
}

int
vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct vm *vm;
        struct hyp *hyp;

        vm = vcpu_vm(vcpu);
        hyp = vm_get_cookie(vm);
        *rval = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        return (0);
}

int
vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
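        /* The physical count register is read-only; discard guest writes. */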
        return (0);
}

int
vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        *rval = vtimer_cpu->phys_timer.cntx_cval_el0;

        return (0);
}

int
vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;

        hypctx = vcpu_get_cookie(vcpu);
        vtimer_cpu = &hypctx->vtimer_cpu;

        vtimer_cpu->phys_timer.cntx_cval_el0 = wval;

        vtimer_remove_irq(hypctx, vcpu);
        if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
                vtimer_schedule_irq(hypctx, true);
        }

        return (0);
}

int
vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint32_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

        if (!(vtimer_cpu->phys_timer.cntx_ctl_el0 & CNTP_CTL_ENABLE)) {
                /*
                 * ARMv8 Architecture Manual, p. D7-2702: the result of reading
                 * TVAL when the timer is disabled is UNKNOWN. I have chosen to
                 * return the maximum value possible in 32 bits, which means
                 * the timer will fire very far into the future.
                 */
                *rval = (uint32_t)RES1;
        } else {
                cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
                    hyp->vtimer.cntvoff_el2;
                *rval = vtimer_cpu->phys_timer.cntx_cval_el0 - cntpct_el0;
        }

        return (0);
}

int
vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg)
{
        struct hyp *hyp;
        struct hypctx *hypctx;
        struct vtimer_cpu *vtimer_cpu;
        uint64_t cntpct_el0;

        hypctx = vcpu_get_cookie(vcpu);
        hyp = hypctx->hyp;
        vtimer_cpu = &hypctx->vtimer_cpu;

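        /*
         * TVAL is a signed 32-bit down-counter: writing it sets
         * CVAL = current (offset-adjusted) count + sign-extended TVAL.
         */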
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2;
        vtimer_cpu->phys_timer.cntx_cval_el0 = (int32_t)wval + cntpct_el0;

        vtimer_remove_irq(hypctx, vcpu);
        if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) {
                vtimer_schedule_irq(hypctx, true);
        }

        return (0);
}

struct vtimer_softc {
        struct resource *res;
        void            *ihl;
        int             rid;
};

static int
vtimer_probe(device_t dev)
{
        device_set_desc(dev, "Virtual timer");
        return (BUS_PROBE_DEFAULT);
}

static int
vtimer_attach(device_t dev)
{
        struct vtimer_softc *sc;

        sc = device_get_softc(dev);

        sc->rid = 0;
        sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid, RF_ACTIVE);
        if (sc->res == NULL)
                return (ENXIO);

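        /*
         * Install the interrupt filter that forwards expired guest virtual
         * timer interrupts to the vGIC.
         */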
        bus_setup_intr(dev, sc->res, INTR_TYPE_CLK, vtimer_virtual_timer_intr,
            NULL, NULL, &sc->ihl);

        return (0);
}

static device_method_t vtimer_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         vtimer_probe),
        DEVMETHOD(device_attach,        vtimer_attach),

        /* End */
        DEVMETHOD_END
};

DEFINE_CLASS_0(vtimer, vtimer_driver, vtimer_methods,
    sizeof(struct vtimer_softc));

DRIVER_MODULE(vtimer, generic_timer, vtimer_driver, 0, 0);