/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012-2016 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RME HDSPe driver for FreeBSD.
 * Supported cards: AIO, RayDAT.
 */

#include <sys/types.h>
#include <sys/sysctl.h>

#include <dev/sound/pcm/sound.h>
#include <dev/sound/pci/hdspe.h>
#include <dev/sound/chip.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <mixer_if.h>

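/*
 * Clock source tables, per card model: name, clock preference setting
 * (as stored in the settings register), the matching status register
 * value, and the lock / sync bits reported in the status register.
 */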
static struct hdspe_clock_source hdspe_clock_source_table_rd[] = {
	{ "internal", 0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",     0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",      1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1),  1 << 0,  1 << 8 },
	{ "spdif",    2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2),  1 << 1,  1 << 9 },
	{ "adat1",    3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3),  1 << 2, 1 << 10 },
	{ "adat2",    4 << 1 | 0, HDSPE_STATUS1_CLOCK( 4),  1 << 3, 1 << 11 },
	{ "adat3",    5 << 1 | 0, HDSPE_STATUS1_CLOCK( 5),  1 << 4, 1 << 12 },
	{ "adat4",    6 << 1 | 0, HDSPE_STATUS1_CLOCK( 6),  1 << 5, 1 << 13 },
	{ "tco",      9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in", 10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,       0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};

static struct hdspe_clock_source hdspe_clock_source_table_aio[] = {
	{ "internal", 0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",     0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",      1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1),  1 << 0,  1 << 8 },
	{ "spdif",    2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2),  1 << 1,  1 << 9 },
	{ "adat",     3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3),  1 << 2, 1 << 10 },
	{ "tco",      9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in", 10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,       0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};

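/* Physical port maps for the pcm channels exposed by each card model. */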
static struct hdspe_channel chan_map_aio[] = {
	{ HDSPE_CHAN_AIO_LINE,    "line" },
	{ HDSPE_CHAN_AIO_PHONE,  "phone" },
	{ HDSPE_CHAN_AIO_AES,      "aes" },
	{ HDSPE_CHAN_AIO_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_AIO_ADAT,    "adat" },
	{ 0,                        NULL },
};

static struct hdspe_channel chan_map_rd[] = {
	{ HDSPE_CHAN_RAY_AES,      "aes" },
	{ HDSPE_CHAN_RAY_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_RAY_ADAT1,  "adat1" },
	{ HDSPE_CHAN_RAY_ADAT2,  "adat2" },
	{ HDSPE_CHAN_RAY_ADAT3,  "adat3" },
	{ HDSPE_CHAN_RAY_ADAT4,  "adat4" },
	{ 0,                        NULL },
};

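/*
 * Audio interrupt handler: dispatch to the pcm child devices and
 * acknowledge the interrupt on the card.
 */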
static void
hdspe_intr(void *p)
{
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	device_t *devlist;
	int devcount;
	int status;
	int err;
	int i;

	sc = (struct sc_info *)p;

	snd_mtxlock(sc->lock);

	status = hdspe_read_1(sc, HDSPE_STATUS_REG);
	if (status & HDSPE_AUDIO_IRQ_PENDING) {
		if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) {
			/* Do not return with the softc mutex held. */
			snd_mtxunlock(sc->lock);
			return;
		}

		for (i = 0; i < devcount; i++) {
			scp = device_get_ivars(devlist[i]);
			if (scp->ih != NULL)
				scp->ih(scp);
		}

		hdspe_write_1(sc, HDSPE_INTERRUPT_ACK, 0);
		free(devlist, M_TEMP);
	}

	snd_mtxunlock(sc->lock);
}

static void
hdspe_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
#if 0
	struct sc_info *sc = arg;

	device_printf(sc->dev, "hdspe_dmapsetmap()\n");
#endif
}

static int
hdspe_alloc_resources(struct sc_info *sc)
{

	/* Allocate resource. */
	sc->csid = PCIR_BAR(0);
	sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->csid, RF_ACTIVE);

	if (!sc->cs) {
		device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n");
		return (ENXIO);
	}

	sc->cst = rman_get_bustag(sc->cs);
	sc->csh = rman_get_bushandle(sc->cs);

	/* Allocate interrupt resource. */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);

	if (!sc->irq ||
	    bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV,
		NULL, hdspe_intr, sc, &sc->ih)) {
		device_printf(sc->dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	/* Allocate DMA resources. */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev),
		/*alignment*/4,
		/*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL,
		/*filterarg*/NULL,
		/*maxsize*/2 * HDSPE_DMASEGSIZE,
		/*nsegments*/2,
		/*maxsegsz*/HDSPE_DMASEGSIZE,
		/*flags*/0,
		/*lockfunc*/NULL,
		/*lockarg*/NULL,
		/*dmatag*/&sc->dmat) != 0) {
		device_printf(sc->dev, "Unable to create dma tag.\n");
		return (ENXIO);
	}

	sc->bufsize = HDSPE_DMASEGSIZE;

	/* pbuf (play buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_WAITOK,
	    &sc->pmap)) {
		device_printf(sc->dev, "Can't alloc pbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load pbuf.\n");
		return (ENXIO);
	}

	/* rbuf (rec buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_WAITOK,
	    &sc->rmap)) {
		device_printf(sc->dev, "Can't alloc rbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load rbuf.\n");
		return (ENXIO);
	}

	bzero(sc->pbuf, sc->bufsize);
	bzero(sc->rbuf, sc->bufsize);

	return (0);
}

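/*
 * Program the card's page tables with the physical addresses of the
 * playback and record buffers, one 4KB page per entry.
 */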
static void
hdspe_map_dmabuf(struct sc_info *sc)
{
	uint32_t paddr, raddr;
	int i;

	paddr = vtophys(sc->pbuf);
	raddr = vtophys(sc->rbuf);

	for (i = 0; i < HDSPE_MAX_SLOTS * 16; i++) {
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_OUT + 4 * i,
		    paddr + i * 4096);
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_IN + 4 * i,
		    raddr + i * 4096);
	}
}

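/*
 * Sysctl handler for dev.hdspe.<unit>.clock_preference: get or set the
 * preferred clock source ("internal" or an autosync input) by name.
 */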
static int
hdspe_sysctl_clock_preference(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	int error;
	uint32_t setting;

	sc = oidp->oid_arg1;

	/* Select clock source table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Extract preferred clock source from settings register. */
	setting = sc->settings_register & HDSPE_SETTING_CLOCK_MASK;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->setting == setting)
			break;
	}
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));

	/* Process sysctl string request. */
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Find clock source matching the sysctl string. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (strncasecmp(buf, clock->name, sizeof(buf)) == 0)
			break;
	}

	/* Set preferred clock source in settings register. */
	if (clock->name != NULL) {
		setting = clock->setting & HDSPE_SETTING_CLOCK_MASK;
		snd_mtxlock(sc->lock);
		sc->settings_register &= ~HDSPE_SETTING_CLOCK_MASK;
		sc->settings_register |= setting;
		hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
		snd_mtxunlock(sc->lock);
	}
	return (0);
}

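/*
 * Sysctl handler for dev.hdspe.<unit>.clock_source: report the clock
 * source currently in effect, either the internal clock in master mode
 * or the autosync source indicated by the status register.
 */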
static int
hdspe_sysctl_clock_source(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	uint32_t status;

	sc = oidp->oid_arg1;

	/* Select clock source table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current (autosync) clock source from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	status &= HDSPE_STATUS1_CLOCK_MASK;
	snd_mtxunlock(sc->lock);

	/* Translate status register value to clock source. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		/* In clock master mode, override with internal clock source. */
		if (sc->settings_register & HDSPE_SETTING_MASTER) {
			if (clock->setting & HDSPE_SETTING_MASTER)
				break;
		} else if (clock->status == status)
			break;
	}

	/* Process sysctl string request. */
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

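/*
 * Sysctl handler for dev.hdspe.<unit>.clock_list: list the clock sources
 * supported by this card model as a comma-separated string.
 */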
static int
hdspe_sysctl_clock_list(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	int n;

	sc = oidp->oid_arg1;
	n = 0;

	/* Select clock source table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* List available clock sources. */
	buf[0] = 0;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (n > 0)
			n += strlcpy(buf + n, ",", sizeof(buf) - n);
		n += strlcpy(buf + n, clock->name, sizeof(buf) - n);
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

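/*
 * Sysctl handler for dev.hdspe.<unit>.sync_status: report the lock and
 * sync state of each external clock source, e.g. "word(sync),aes(lock)".
 */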
static int
hdspe_sysctl_sync_status(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	char *state;
	int n;
	uint32_t status;

	sc = oidp->oid_arg1;
	n = 0;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current lock and sync bits from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	snd_mtxunlock(sc->lock);

	/* List clock sources with lock and sync state. */
	buf[0] = 0;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->sync_bit != 0) {
			if (n > 0)
				n += strlcpy(buf + n, ",", sizeof(buf) - n);
			state = "none";
			if ((clock->sync_bit & status) != 0)
				state = "sync";
			else if ((clock->lock_bit & status) != 0)
				state = "lock";
			n += snprintf(buf + n, sizeof(buf) - n, "%s(%s)",
			    clock->name, state);
		}
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_probe(device_t dev)
{
	uint32_t rev;

	if (pci_get_vendor(dev) == PCI_VENDOR_XILINX &&
	    pci_get_device(dev) == PCI_DEVICE_XILINX_HDSPE) {
		rev = pci_get_revid(dev);
		switch (rev) {
		case PCI_REVISION_AIO:
			device_set_desc(dev, "RME HDSPe AIO");
			return (0);
		case PCI_REVISION_RAYDAT:
			device_set_desc(dev, "RME HDSPe RayDAT");
			return (0);
		}
	}

	return (ENXIO);
}

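/*
 * Bring the card to a default configuration: period (latency), sample
 * rate, DDS frequency register, and settings register.
 */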
static int
hdspe_init(struct sc_info *sc)
{
	long long period;

	/* Set latency. */
	sc->period = 32;
	sc->ctrl_register = hdspe_encode_latency(7);

	/* Set rate. */
	sc->speed = HDSPE_SPEED_DEFAULT;
	sc->ctrl_register &= ~HDSPE_FREQ_MASK;
	sc->ctrl_register |= HDSPE_FREQ_MASK_DEFAULT;
	hdspe_write_4(sc, HDSPE_CONTROL_REG, sc->ctrl_register);

	switch (sc->type) {
	case HDSPE_RAYDAT:
	case HDSPE_AIO:
		period = HDSPE_FREQ_AIO;
		break;
	default:
		return (ENXIO);
	}

	/* Set DDS value. */
	period /= sc->speed;
	hdspe_write_4(sc, HDSPE_FREQ_REG, period);

	/* Other settings. */
	sc->settings_register = 0;
	hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);

	return (0);
}

static int
hdspe_attach(device_t dev)
{
	struct hdspe_channel *chan_map;
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	uint32_t rev;
	int i, err;

#if 0
	device_printf(dev, "hdspe_attach()\n");
#endif

	sc = device_get_softc(dev);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "snd_hdspe softc");
	sc->dev = dev;

	pci_enable_busmaster(dev);
	rev = pci_get_revid(dev);
	switch (rev) {
	case PCI_REVISION_AIO:
		sc->type = HDSPE_AIO;
		chan_map = chan_map_aio;
		break;
	case PCI_REVISION_RAYDAT:
		sc->type = HDSPE_RAYDAT;
		chan_map = chan_map_rd;
		break;
	default:
		return (ENXIO);
	}

	/* Allocate resources. */
	err = hdspe_alloc_resources(sc);
	if (err) {
		device_printf(dev, "Unable to allocate system resources.\n");
		return (ENXIO);
	}

	if (hdspe_init(sc) != 0)
		return (ENXIO);

	for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
		scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
		scp->hc = &chan_map[i];
		scp->sc = sc;
		scp->dev = device_add_child(dev, "pcm", -1);
		device_set_ivars(scp->dev, scp);
	}

	hdspe_map_dmabuf(sc);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "sync_status", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_sync_status, "A",
	    "List clock source signal lock and sync status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_source", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_source, "A",
	    "Currently effective clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_preference", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_preference, "A",
	    "Set 'internal' (master) or preferred autosync clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_list", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_list, "A",
	    "List of supported clock sources");

	return (bus_generic_attach(dev));
}

static void
hdspe_dmafree(struct sc_info *sc)
{

	bus_dmamap_unload(sc->dmat, sc->rmap);
	bus_dmamap_unload(sc->dmat, sc->pmap);
	bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap);
	bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap);
	sc->rbuf = sc->pbuf = NULL;
}

static int
hdspe_detach(device_t dev)
{
	struct sc_info *sc;
	int err;

	sc = device_get_softc(dev);
	if (sc == NULL) {
		device_printf(dev, "Can't detach: softc is null.\n");
		return (0);
	}

	err = device_delete_children(dev);
	if (err)
		return (err);

	hdspe_dmafree(sc);

	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->cs)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->cs);
	if (sc->lock)
		snd_mtxfree(sc->lock);

	return (0);
}

static device_method_t hdspe_methods[] = {
	DEVMETHOD(device_probe,     hdspe_probe),
	DEVMETHOD(device_attach,    hdspe_attach),
	DEVMETHOD(device_detach,    hdspe_detach),
	{ 0, 0 }
};

static driver_t hdspe_driver = {
	"hdspe",
	hdspe_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(snd_hdspe, pci, hdspe_driver, 0, 0);