xref: /freebsd/sys/dev/sound/pci/hdspe.c (revision a64729f5077d77e13b9497cb33ecb3c82e606ee8)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012-2016 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RME HDSPe driver for FreeBSD.
 * Supported cards: AIO, RayDAT.
 */

#include <sys/types.h>
#include <sys/sysctl.h>

#include <dev/sound/pcm/sound.h>
#include <dev/sound/pci/hdspe.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <mixer_if.h>

static bool hdspe_unified_pcm = false;

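/*
 * Tunable and sysctl hw.hdspe.unified_pcm: when set before the device
 * attaches (e.g. hw.hdspe.unified_pcm=1 in loader.conf), all physical ports
 * of a card are combined into a single pcm device instead of one pcm device
 * per port.
 */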
static SYSCTL_NODE(_hw, OID_AUTO, hdspe, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PCI HDSPe");

SYSCTL_BOOL(_hw_hdspe, OID_AUTO, unified_pcm, CTLFLAG_RWTUN,
    &hdspe_unified_pcm, 0, "Combine physical ports in one unified pcm device");

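/*
 * Clock source tables, one per supported card type. Each entry gives the
 * clock source name, its settings register value, the matching status
 * register clock encoding, and the lock / sync status bits of that input.
 */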
static struct hdspe_clock_source hdspe_clock_source_table_rd[] = {
	{ "internal", 0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",     0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",      1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1),  1 << 0,  1 << 8 },
	{ "spdif",    2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2),  1 << 1,  1 << 9 },
	{ "adat1",    3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3),  1 << 2, 1 << 10 },
	{ "adat2",    4 << 1 | 0, HDSPE_STATUS1_CLOCK( 4),  1 << 3, 1 << 11 },
	{ "adat3",    5 << 1 | 0, HDSPE_STATUS1_CLOCK( 5),  1 << 4, 1 << 12 },
	{ "adat4",    6 << 1 | 0, HDSPE_STATUS1_CLOCK( 6),  1 << 5, 1 << 13 },
	{ "tco",      9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in", 10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,       0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};

static struct hdspe_clock_source hdspe_clock_source_table_aio[] = {
	{ "internal", 0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",     0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",      1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1),  1 << 0,  1 << 8 },
	{ "spdif",    2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2),  1 << 1,  1 << 9 },
	{ "adat",     3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3),  1 << 2, 1 << 10 },
	{ "tco",      9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in", 10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,       0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};

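/*
 * Channel maps describe the physical ports of a card. Each entry becomes a
 * separate pcm device, except for the unified maps which expose all ports of
 * a card through a single pcm device.
 */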
static struct hdspe_channel chan_map_aio[] = {
	{ HDSPE_CHAN_AIO_LINE,    "line" },
	{ HDSPE_CHAN_AIO_EXT,      "ext" },
	{ HDSPE_CHAN_AIO_PHONE,  "phone" },
	{ HDSPE_CHAN_AIO_AES,      "aes" },
	{ HDSPE_CHAN_AIO_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_AIO_ADAT,    "adat" },
	{ 0,                        NULL },
};

static struct hdspe_channel chan_map_aio_uni[] = {
	{ HDSPE_CHAN_AIO_ALL, "all" },
	{ 0,                   NULL },
};

static struct hdspe_channel chan_map_rd[] = {
	{ HDSPE_CHAN_RAY_AES,      "aes" },
	{ HDSPE_CHAN_RAY_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_RAY_ADAT1,  "adat1" },
	{ HDSPE_CHAN_RAY_ADAT2,  "adat2" },
	{ HDSPE_CHAN_RAY_ADAT3,  "adat3" },
	{ HDSPE_CHAN_RAY_ADAT4,  "adat4" },
	{ 0,                        NULL },
};

static struct hdspe_channel chan_map_rd_uni[] = {
	{ HDSPE_CHAN_RAY_ALL, "all" },
	{ 0,                   NULL },
};

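/*
 * Interrupt handler: on an audio interrupt, run the registered handler of
 * every pcm child device, then acknowledge the interrupt to the hardware.
 */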
static void
hdspe_intr(void *p)
{
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	device_t *devlist;
	int devcount;
	int status;
	int err;
	int i;

	sc = (struct sc_info *)p;

	snd_mtxlock(sc->lock);

	status = hdspe_read_1(sc, HDSPE_STATUS_REG);
	if (status & HDSPE_AUDIO_IRQ_PENDING) {
		if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) {
			snd_mtxunlock(sc->lock);
			return;
		}

		for (i = 0; i < devcount; i++) {
			scp = device_get_ivars(devlist[i]);
			if (scp->ih != NULL)
				scp->ih(scp);
		}

		hdspe_write_1(sc, HDSPE_INTERRUPT_ACK, 0);
		free(devlist, M_TEMP);
	}

	snd_mtxunlock(sc->lock);
}

static void
hdspe_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
#if 0
	device_printf(sc->dev, "hdspe_dmapsetmap()\n");
#endif
}

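/*
 * Allocate the register memory (BAR 0), the interrupt, and the DMA playback
 * and record buffers of the card.
 */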
static int
hdspe_alloc_resources(struct sc_info *sc)
{

	/* Allocate resource. */
	sc->csid = PCIR_BAR(0);
	sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->csid, RF_ACTIVE);

	if (!sc->cs) {
		device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n");
		return (ENXIO);
	}

	sc->cst = rman_get_bustag(sc->cs);
	sc->csh = rman_get_bushandle(sc->cs);

	/* Allocate interrupt resource. */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);

	if (!sc->irq ||
	    bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV,
		NULL, hdspe_intr, sc, &sc->ih)) {
		device_printf(sc->dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	/* Allocate DMA resources. */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev),
		/*alignment*/4,
		/*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL,
		/*filterarg*/NULL,
		/*maxsize*/2 * HDSPE_DMASEGSIZE,
		/*nsegments*/2,
		/*maxsegsz*/HDSPE_DMASEGSIZE,
		/*flags*/0,
		/*lockfunc*/NULL,
		/*lockarg*/NULL,
		/*dmatag*/&sc->dmat) != 0) {
		device_printf(sc->dev, "Unable to create dma tag.\n");
		return (ENXIO);
	}

	sc->bufsize = HDSPE_DMASEGSIZE;

	/* pbuf (play buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_WAITOK,
	    &sc->pmap)) {
		device_printf(sc->dev, "Can't alloc pbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load pbuf.\n");
		return (ENXIO);
	}

	/* rbuf (rec buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_WAITOK,
	    &sc->rmap)) {
		device_printf(sc->dev, "Can't alloc rbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load rbuf.\n");
		return (ENXIO);
	}

	bzero(sc->pbuf, sc->bufsize);
	bzero(sc->rbuf, sc->bufsize);

	return (0);
}

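/*
 * Program the card's page tables: write the physical address of every 4 KB
 * page of the playback and record DMA buffers into the output and input
 * page address registers.
 */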
static void
hdspe_map_dmabuf(struct sc_info *sc)
{
	uint32_t paddr, raddr;
	int i;

	paddr = vtophys(sc->pbuf);
	raddr = vtophys(sc->rbuf);

	for (i = 0; i < HDSPE_MAX_SLOTS * 16; i++) {
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_OUT + 4 * i,
		    paddr + i * 4096);
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_IN + 4 * i,
		    raddr + i * 4096);
	}
}

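/*
 * Sysctl handler for the forced sample rate (dev.hdspe.N.sample_rate).
 * A value of 0 defers to the pcm channel speed setting; any other value is
 * mapped to the closest supported rate from 32000 to 192000.
 */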
static int
hdspe_sysctl_sample_rate(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc = oidp->oid_arg1;
	int error;
	unsigned int speed, multiplier;

	speed = sc->force_speed;

	/* Process sysctl (unsigned) integer request. */
	error = sysctl_handle_int(oidp, &speed, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Speed from 32000 to 192000, 0 falls back to pcm speed setting. */
	sc->force_speed = 0;
	if (speed > 0) {
		multiplier = 1;
		if (speed > (96000 + 128000) / 2)
			multiplier = 4;
		else if (speed > (48000 + 64000) / 2)
			multiplier = 2;

		if (speed < ((32000 + 44100) / 2) * multiplier)
			sc->force_speed = 32000 * multiplier;
		else if (speed < ((44100 + 48000) / 2) * multiplier)
			sc->force_speed = 44100 * multiplier;
		else
			sc->force_speed = 48000 * multiplier;
	}

	return (0);
}

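/*
 * Sysctl handler for the forced period size (dev.hdspe.N.period). A value of
 * 0 defers to the pcm latency settings; any other value is rounded up to the
 * next power of two between 32 and 4096 frames per interrupt.
 */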
static int
hdspe_sysctl_period(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc = oidp->oid_arg1;
	int error;
	unsigned int period;

	period = sc->force_period;

	/* Process sysctl (unsigned) integer request. */
	error = sysctl_handle_int(oidp, &period, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Period is from 2^5 to 2^12, 0 falls back to pcm latency settings. */
	sc->force_period = 0;
	if (period > 0) {
		sc->force_period = 32;
		while (sc->force_period < period && sc->force_period < 4096)
			sc->force_period <<= 1;
	}

	return (0);
}

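/*
 * Sysctl handler for the preferred clock source
 * (dev.hdspe.N.clock_preference). Reports and sets the clock source bits in
 * the settings register, either "internal" (clock master) or one of the
 * autosync inputs.
 */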
static int
hdspe_sysctl_clock_preference(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	int error;
	uint32_t setting;

	sc = oidp->oid_arg1;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Extract preferred clock source from settings register. */
	setting = sc->settings_register & HDSPE_SETTING_CLOCK_MASK;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->setting == setting)
			break;
	}
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));

	/* Process sysctl string request. */
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Find clock source matching the sysctl string. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (strncasecmp(buf, clock->name, sizeof(buf)) == 0)
			break;
	}

	/* Set preferred clock source in settings register. */
	if (clock->name != NULL) {
		setting = clock->setting & HDSPE_SETTING_CLOCK_MASK;
		snd_mtxlock(sc->lock);
		sc->settings_register &= ~HDSPE_SETTING_CLOCK_MASK;
		sc->settings_register |= setting;
		hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
		snd_mtxunlock(sc->lock);
	}
	return (0);
}

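/*
 * Sysctl handler for the currently effective clock source
 * (dev.hdspe.N.clock_source), read-only. In clock master mode this is the
 * internal clock, otherwise the autosync source from the status register.
 */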
static int
hdspe_sysctl_clock_source(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	uint32_t status;

	sc = oidp->oid_arg1;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current (autosync) clock source from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	status &= HDSPE_STATUS1_CLOCK_MASK;
	snd_mtxunlock(sc->lock);

	/* Translate status register value to clock source. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		/* In clock master mode, override with internal clock source. */
		if (sc->settings_register & HDSPE_SETTING_MASTER) {
			if (clock->setting & HDSPE_SETTING_MASTER)
				break;
		} else if (clock->status == status)
			break;
	}

	/* Process sysctl string request. */
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_sysctl_clock_list(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	int n;

	sc = oidp->oid_arg1;
	n = 0;

	/* Select clock source table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* List available clock sources. */
	buf[0] = 0;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (n > 0)
			n += strlcpy(buf + n, ",", sizeof(buf) - n);
		n += strlcpy(buf + n, clock->name, sizeof(buf) - n);
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

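/*
 * Sysctl handler for the sync status list (dev.hdspe.N.sync_status),
 * read-only. Reports "none", "lock" or "sync" for each external clock
 * source input, based on the status register bits.
 */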
static int
hdspe_sysctl_sync_status(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	char *state;
	int n;
	uint32_t status;

	sc = oidp->oid_arg1;
	n = 0;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current lock and sync bits from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	snd_mtxunlock(sc->lock);

	/* List clock sources with lock and sync state. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->sync_bit != 0) {
			if (n > 0)
				n += strlcpy(buf + n, ",", sizeof(buf) - n);
			state = "none";
			if ((clock->sync_bit & status) != 0)
				state = "sync";
			else if ((clock->lock_bit & status) != 0)
				state = "lock";
			n += snprintf(buf + n, sizeof(buf) - n, "%s(%s)",
			    clock->name, state);
		}
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_probe(device_t dev)
{
	uint32_t rev;

	if ((pci_get_vendor(dev) == PCI_VENDOR_XILINX ||
	    pci_get_vendor(dev) == PCI_VENDOR_RME) &&
	    pci_get_device(dev) == PCI_DEVICE_XILINX_HDSPE) {
		rev = pci_get_revid(dev);
		switch (rev) {
		case PCI_REVISION_AIO:
			device_set_desc(dev, "RME HDSPe AIO");
			return (0);
		case PCI_REVISION_RAYDAT:
			device_set_desc(dev, "RME HDSPe RayDAT");
			return (0);
		}
	}

	return (ENXIO);
}

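/*
 * Initial hardware setup: default latency and sample rate, DDS frequency
 * register, and a cleared settings register.
 */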
static int
hdspe_init(struct sc_info *sc)
{
	long long period;

	/* Set latency. */
	sc->period = 32;
	/*
	 * The pcm channel latency settings propagate unreliable blocksizes,
	 * different for recording and playback, and skewed due to rounding
	 * and total buffer size limits.
	 * Force period to a consistent default until these issues are fixed.
	 */
	sc->force_period = 256;
	sc->ctrl_register = hdspe_encode_latency(7);

	/* Set rate. */
	sc->speed = HDSPE_SPEED_DEFAULT;
	sc->force_speed = 0;
	sc->ctrl_register &= ~HDSPE_FREQ_MASK;
	sc->ctrl_register |= HDSPE_FREQ_MASK_DEFAULT;
	hdspe_write_4(sc, HDSPE_CONTROL_REG, sc->ctrl_register);

	switch (sc->type) {
	case HDSPE_RAYDAT:
	case HDSPE_AIO:
		period = HDSPE_FREQ_AIO;
		break;
	default:
		return (ENXIO);
	}

	/* Set DDS value. */
	period /= sc->speed;
	hdspe_write_4(sc, HDSPE_FREQ_REG, period);

	/* Other settings. */
	sc->settings_register = 0;
	hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);

	return (0);
}

static int
hdspe_attach(device_t dev)
{
	struct hdspe_channel *chan_map;
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	uint32_t rev;
	int i, err;

#if 0
	device_printf(dev, "hdspe_attach()\n");
#endif

	sc = device_get_softc(dev);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "snd_hdspe softc");
	sc->dev = dev;

	pci_enable_busmaster(dev);
	rev = pci_get_revid(dev);
	switch (rev) {
	case PCI_REVISION_AIO:
		sc->type = HDSPE_AIO;
		chan_map = hdspe_unified_pcm ? chan_map_aio_uni : chan_map_aio;
		break;
	case PCI_REVISION_RAYDAT:
		sc->type = HDSPE_RAYDAT;
		chan_map = hdspe_unified_pcm ? chan_map_rd_uni : chan_map_rd;
		break;
	default:
		return (ENXIO);
	}

	/* Allocate resources. */
	err = hdspe_alloc_resources(sc);
	if (err) {
		device_printf(dev, "Unable to allocate system resources.\n");
		return (ENXIO);
	}

	if (hdspe_init(sc) != 0)
		return (ENXIO);

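	/* Create a pcm child device for each entry of the channel map. */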
	for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
		scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
		scp->hc = &chan_map[i];
		scp->sc = sc;
		scp->dev = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
		device_set_ivars(scp->dev, scp);
	}

	hdspe_map_dmabuf(sc);

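	/* Register sysctls for clock control, status and buffer configuration. */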
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "sync_status", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_sync_status, "A",
	    "List clock source signal lock and sync status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_source", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_source, "A",
	    "Currently effective clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_preference", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_preference, "A",
	    "Set 'internal' (master) or preferred autosync clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_list", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_list, "A",
	    "List of supported clock sources");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_period, "A",
	    "Force period of samples per interrupt (32, 64, ... 4096)");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "sample_rate", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_sample_rate, "A",
	    "Force sample rate (32000, 44100, 48000, ... 192000)");

	return (bus_generic_attach(dev));
}

static void
hdspe_dmafree(struct sc_info *sc)
{

	bus_dmamap_unload(sc->dmat, sc->rmap);
	bus_dmamap_unload(sc->dmat, sc->pmap);
	bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap);
	bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap);
	sc->rbuf = sc->pbuf = NULL;
}

static int
hdspe_detach(device_t dev)
{
	struct sc_info *sc;
	int err;

	sc = device_get_softc(dev);
	if (sc == NULL) {
		device_printf(dev, "Can't detach: softc is null.\n");
		return (0);
	}

	err = device_delete_children(dev);
	if (err)
		return (err);

	hdspe_dmafree(sc);

	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->cs)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->cs);
	if (sc->lock)
		snd_mtxfree(sc->lock);

	return (0);
}

static device_method_t hdspe_methods[] = {
	DEVMETHOD(device_probe,     hdspe_probe),
	DEVMETHOD(device_attach,    hdspe_attach),
	DEVMETHOD(device_detach,    hdspe_detach),
	{ 0, 0 }
};

static driver_t hdspe_driver = {
	"hdspe",
	hdspe_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(snd_hdspe, pci, hdspe_driver, 0, 0);