/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012-2016 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RME HDSPe driver for FreeBSD.
 * Supported cards: AIO, RayDAT.
 */

#include <sys/types.h>
#include <sys/sysctl.h>

#include <dev/sound/pcm/sound.h>
#include <dev/sound/pci/hdspe.h>
#include <dev/sound/chip.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <mixer_if.h>

static bool hdspe_unified_pcm = false;

static SYSCTL_NODE(_hw, OID_AUTO, hdspe, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "PCI HDSPe");

SYSCTL_BOOL(_hw_hdspe, OID_AUTO, unified_pcm, CTLFLAG_RWTUN,
    &hdspe_unified_pcm, 0, "Combine physical ports in one unified pcm device");

static struct hdspe_clock_source hdspe_clock_source_table_rd[] = {
	{ "internal",  0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",      0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",       1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1), 1 <<  0, 1 <<  8 },
	{ "spdif",     2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2), 1 <<  1, 1 <<  9 },
	{ "adat1",     3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3), 1 <<  2, 1 << 10 },
	{ "adat2",     4 << 1 | 0, HDSPE_STATUS1_CLOCK( 4), 1 <<  3, 1 << 11 },
	{ "adat3",     5 << 1 | 0, HDSPE_STATUS1_CLOCK( 5), 1 <<  4, 1 << 12 },
	{ "adat4",     6 << 1 | 0, HDSPE_STATUS1_CLOCK( 6), 1 <<  5, 1 << 13 },
	{ "tco",       9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in",  10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,        0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};

static struct hdspe_clock_source hdspe_clock_source_table_aio[] = {
	{ "internal",  0 << 1 | 1, HDSPE_STATUS1_CLOCK(15),       0,       0 },
	{ "word",      0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0), 1 << 24, 1 << 25 },
	{ "aes",       1 << 1 | 0, HDSPE_STATUS1_CLOCK( 1), 1 <<  0, 1 <<  8 },
	{ "spdif",     2 << 1 | 0, HDSPE_STATUS1_CLOCK( 2), 1 <<  1, 1 <<  9 },
	{ "adat",      3 << 1 | 0, HDSPE_STATUS1_CLOCK( 3), 1 <<  2, 1 << 10 },
	{ "tco",       9 << 1 | 0, HDSPE_STATUS1_CLOCK( 9), 1 << 26, 1 << 27 },
	{ "sync_in",  10 << 1 | 0, HDSPE_STATUS1_CLOCK(10),       0,       0 },
	{ NULL,        0 << 1 | 0, HDSPE_STATUS1_CLOCK( 0),       0,       0 },
};
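
/*
 * Channel maps list the physical ports exposed as pcm(4) devices, one child
 * device per entry. The *_uni variants combine all ports of a card into a
 * single pcm device and are selected via the hw.hdspe.unified_pcm tunable.
 */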

static struct hdspe_channel chan_map_aio[] = {
	{ HDSPE_CHAN_AIO_LINE, "line" },
	{ HDSPE_CHAN_AIO_PHONE, "phone" },
	{ HDSPE_CHAN_AIO_AES, "aes" },
	{ HDSPE_CHAN_AIO_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_AIO_ADAT, "adat" },
	{ 0, NULL },
};

static struct hdspe_channel chan_map_aio_uni[] = {
	{ HDSPE_CHAN_AIO_ALL, "all" },
	{ 0, NULL },
};

static struct hdspe_channel chan_map_rd[] = {
	{ HDSPE_CHAN_RAY_AES, "aes" },
	{ HDSPE_CHAN_RAY_SPDIF, "s/pdif" },
	{ HDSPE_CHAN_RAY_ADAT1, "adat1" },
	{ HDSPE_CHAN_RAY_ADAT2, "adat2" },
	{ HDSPE_CHAN_RAY_ADAT3, "adat3" },
	{ HDSPE_CHAN_RAY_ADAT4, "adat4" },
	{ 0, NULL },
};

static struct hdspe_channel chan_map_rd_uni[] = {
	{ HDSPE_CHAN_RAY_ALL, "all" },
	{ 0, NULL },
};

static void
hdspe_intr(void *p)
{
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	device_t *devlist;
	int devcount;
	int status;
	int err;
	int i;

	sc = (struct sc_info *)p;

	snd_mtxlock(sc->lock);

	status = hdspe_read_1(sc, HDSPE_STATUS_REG);
	if (status & HDSPE_AUDIO_IRQ_PENDING) {
		err = device_get_children(sc->dev, &devlist, &devcount);
		if (err != 0) {
			snd_mtxunlock(sc->lock);
			return;
		}

		for (i = 0; i < devcount; i++) {
			scp = device_get_ivars(devlist[i]);
			if (scp->ih != NULL)
				scp->ih(scp);
		}

		hdspe_write_1(sc, HDSPE_INTERRUPT_ACK, 0);
		free(devlist, M_TEMP);
	}

	snd_mtxunlock(sc->lock);
}

static void
hdspe_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
#if 0
	device_printf(sc->dev, "hdspe_dmapsetmap()\n");
#endif
}

static int
hdspe_alloc_resources(struct sc_info *sc)
{

	/* Allocate resource. */
	sc->csid = PCIR_BAR(0);
	sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->csid, RF_ACTIVE);

	if (!sc->cs) {
		device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n");
		return (ENXIO);
	}

	sc->cst = rman_get_bustag(sc->cs);
	sc->csh = rman_get_bushandle(sc->cs);

	/* Allocate interrupt resource. */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);

	if (!sc->irq ||
	    bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV,
	    NULL, hdspe_intr, sc, &sc->ih)) {
		device_printf(sc->dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	/* Allocate DMA resources. */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev),
	    /*alignment*/4,
	    /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL,
	    /*filterarg*/NULL,
	    /*maxsize*/2 * HDSPE_DMASEGSIZE,
	    /*nsegments*/2,
	    /*maxsegsz*/HDSPE_DMASEGSIZE,
	    /*flags*/0,
	    /*lockfunc*/NULL,
	    /*lockarg*/NULL,
	    /*dmatag*/&sc->dmat) != 0) {
		device_printf(sc->dev, "Unable to create dma tag.\n");
		return (ENXIO);
	}

	sc->bufsize = HDSPE_DMASEGSIZE;

	/* pbuf (play buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_WAITOK,
	    &sc->pmap)) {
		device_printf(sc->dev, "Can't alloc pbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load pbuf.\n");
		return (ENXIO);
	}

	/* rbuf (rec buffer). */
	if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_WAITOK,
	    &sc->rmap)) {
		device_printf(sc->dev, "Can't alloc rbuf.\n");
		return (ENXIO);
	}

	if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->bufsize,
	    hdspe_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
		device_printf(sc->dev, "Can't load rbuf.\n");
		return (ENXIO);
	}

	bzero(sc->pbuf, sc->bufsize);
	bzero(sc->rbuf, sc->bufsize);

	return (0);
}

static void
hdspe_map_dmabuf(struct sc_info *sc)
{
	uint32_t paddr, raddr;
	int i;

	paddr = vtophys(sc->pbuf);
	raddr = vtophys(sc->rbuf);

	for (i = 0; i < HDSPE_MAX_SLOTS * 16; i++) {
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_OUT + 4 * i,
		    paddr + i * 4096);
		hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_IN + 4 * i,
		    raddr + i * 4096);
	}
}

static int
hdspe_sysctl_sample_rate(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc = oidp->oid_arg1;
	int error;
	unsigned int speed, multiplier;

	speed = sc->force_speed;

	/* Process sysctl (unsigned) integer request. */
	error = sysctl_handle_int(oidp, &speed, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Speed from 32000 to 192000, 0 falls back to pcm speed setting. */
	sc->force_speed = 0;
	if (speed > 0) {
		multiplier = 1;
		if (speed > (96000 + 128000) / 2)
			multiplier = 4;
		else if (speed > (48000 + 64000) / 2)
			multiplier = 2;

		if (speed < ((32000 + 44100) / 2) * multiplier)
			sc->force_speed = 32000 * multiplier;
		else if (speed < ((44100 + 48000) / 2) * multiplier)
			sc->force_speed = 44100 * multiplier;
		else
			sc->force_speed = 48000 * multiplier;
	}

	return (0);
}

static int
hdspe_sysctl_period(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc = oidp->oid_arg1;
	int error;
	unsigned int period;

	period = sc->force_period;

	/* Process sysctl (unsigned) integer request. */
	error = sysctl_handle_int(oidp, &period, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Period is from 2^5 to 2^12, 0 falls back to pcm latency settings. */
	sc->force_period = 0;
	if (period > 0) {
		sc->force_period = 32;
		while (sc->force_period < period && sc->force_period < 4096)
			sc->force_period <<= 1;
	}

	return (0);
}

static int
hdspe_sysctl_clock_preference(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	int error;
	uint32_t setting;

	sc = oidp->oid_arg1;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Extract preferred clock source from settings register. */
	setting = sc->settings_register & HDSPE_SETTING_CLOCK_MASK;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->setting == setting)
			break;
	}
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));

	/* Process sysctl string request. */
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Find clock source matching the sysctl string. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (strncasecmp(buf, clock->name, sizeof(buf)) == 0)
			break;
	}

	/* Set preferred clock source in settings register. */
	if (clock->name != NULL) {
		setting = clock->setting & HDSPE_SETTING_CLOCK_MASK;
		snd_mtxlock(sc->lock);
		sc->settings_register &= ~HDSPE_SETTING_CLOCK_MASK;
		sc->settings_register |= setting;
		hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
		snd_mtxunlock(sc->lock);
	}
	return (0);
}

static int
hdspe_sysctl_clock_source(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[16] = "invalid";
	uint32_t status;

	sc = oidp->oid_arg1;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current (autosync) clock source from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	status &= HDSPE_STATUS1_CLOCK_MASK;
	snd_mtxunlock(sc->lock);

	/* Translate status register value to clock source. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		/* In clock master mode, override with internal clock source. */
		if (sc->settings_register & HDSPE_SETTING_MASTER) {
			if (clock->setting & HDSPE_SETTING_MASTER)
				break;
		} else if (clock->status == status)
			break;
	}

	/* Process sysctl string request. */
	if (clock->name != NULL)
		strlcpy(buf, clock->name, sizeof(buf));
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_sysctl_clock_list(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	int n;

	sc = oidp->oid_arg1;
	n = 0;

	/* Select clock source table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* List available clock sources. */
	buf[0] = 0;
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (n > 0)
			n += strlcpy(buf + n, ",", sizeof(buf) - n);
		n += strlcpy(buf + n, clock->name, sizeof(buf) - n);
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_sysctl_sync_status(SYSCTL_HANDLER_ARGS)
{
	struct sc_info *sc;
	struct hdspe_clock_source *clock_table, *clock;
	char buf[256];
	char *state;
	int n;
	uint32_t status;

	sc = oidp->oid_arg1;
	n = 0;
	buf[0] = 0;

	/* Select sync ports table for device type. */
	if (sc->type == HDSPE_AIO)
		clock_table = hdspe_clock_source_table_aio;
	else if (sc->type == HDSPE_RAYDAT)
		clock_table = hdspe_clock_source_table_rd;
	else
		return (ENXIO);

	/* Read current lock and sync bits from status register. */
	snd_mtxlock(sc->lock);
	status = hdspe_read_4(sc, HDSPE_STATUS1_REG);
	snd_mtxunlock(sc->lock);

	/* List clock sources with lock and sync state. */
	for (clock = clock_table; clock->name != NULL; ++clock) {
		if (clock->sync_bit != 0) {
			if (n > 0)
				n += strlcpy(buf + n, ",", sizeof(buf) - n);
			state = "none";
			if ((clock->sync_bit & status) != 0)
				state = "sync";
			else if ((clock->lock_bit & status) != 0)
				state = "lock";
			n += snprintf(buf + n, sizeof(buf) - n, "%s(%s)",
			    clock->name, state);
		}
	}
	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
hdspe_probe(device_t dev)
{
	uint32_t rev;

	if (pci_get_vendor(dev) == PCI_VENDOR_XILINX &&
	    pci_get_device(dev) == PCI_DEVICE_XILINX_HDSPE) {
		rev = pci_get_revid(dev);
		switch (rev) {
		case PCI_REVISION_AIO:
			device_set_desc(dev, "RME HDSPe AIO");
			return (0);
		case PCI_REVISION_RAYDAT:
			device_set_desc(dev, "RME HDSPe RayDAT");
			return (0);
		}
	}

	return (ENXIO);
}

static int
hdspe_init(struct sc_info *sc)
{
	long long period;

	/* Set latency. */
	sc->period = 32;
	/*
	 * The pcm channel latency settings propagate unreliable blocksizes,
	 * different for recording and playback, and skewed due to rounding
	 * and total buffer size limits.
	 * Force period to a consistent default until these issues are fixed.
	 */
	sc->force_period = 256;
	sc->ctrl_register = hdspe_encode_latency(7);

	/* Set rate. */
	sc->speed = HDSPE_SPEED_DEFAULT;
	sc->force_speed = 0;
	sc->ctrl_register &= ~HDSPE_FREQ_MASK;
	sc->ctrl_register |= HDSPE_FREQ_MASK_DEFAULT;
	hdspe_write_4(sc, HDSPE_CONTROL_REG, sc->ctrl_register);

	switch (sc->type) {
	case HDSPE_RAYDAT:
	case HDSPE_AIO:
		period = HDSPE_FREQ_AIO;
		break;
	default:
		return (ENXIO);
	}

	/* Set DDS value. */
	period /= sc->speed;
	hdspe_write_4(sc, HDSPE_FREQ_REG, period);

	/* Other settings. */
	sc->settings_register = 0;
	hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);

	return (0);
}

static int
hdspe_attach(device_t dev)
{
	struct hdspe_channel *chan_map;
	struct sc_pcminfo *scp;
	struct sc_info *sc;
	uint32_t rev;
	int i, err;

#if 0
	device_printf(dev, "hdspe_attach()\n");
#endif

	sc = device_get_softc(dev);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "snd_hdspe softc");
	sc->dev = dev;

	pci_enable_busmaster(dev);
	rev = pci_get_revid(dev);
	switch (rev) {
	case PCI_REVISION_AIO:
		sc->type = HDSPE_AIO;
		chan_map = hdspe_unified_pcm ? chan_map_aio_uni : chan_map_aio;
		break;
	case PCI_REVISION_RAYDAT:
		sc->type = HDSPE_RAYDAT;
		chan_map = hdspe_unified_pcm ? chan_map_rd_uni : chan_map_rd;
		break;
	default:
		return (ENXIO);
	}

	/* Allocate resources. */
	err = hdspe_alloc_resources(sc);
	if (err) {
		device_printf(dev, "Unable to allocate system resources.\n");
		return (ENXIO);
	}

	if (hdspe_init(sc) != 0)
		return (ENXIO);

	for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
		scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		scp->hc = &chan_map[i];
		scp->sc = sc;
		scp->dev = device_add_child(dev, "pcm", -1);
		device_set_ivars(scp->dev, scp);
	}

	hdspe_map_dmabuf(sc);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "sync_status", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_sync_status, "A",
	    "List clock source signal lock and sync status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_source", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_source, "A",
	    "Currently effective clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_preference", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_preference, "A",
	    "Set 'internal' (master) or preferred autosync clock source");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "clock_list", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_clock_list, "A",
	    "List of supported clock sources");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_period, "IU",
	    "Force period of samples per interrupt (32, 64, ... 4096)");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "sample_rate", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    sc, 0, hdspe_sysctl_sample_rate, "IU",
	    "Force sample rate (32000, 44100, 48000, ... 192000)");

	return (bus_generic_attach(dev));
}

static void
hdspe_dmafree(struct sc_info *sc)
{

	bus_dmamap_unload(sc->dmat, sc->rmap);
	bus_dmamap_unload(sc->dmat, sc->pmap);
	bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap);
	bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap);
	sc->rbuf = sc->pbuf = NULL;
}

static int
hdspe_detach(device_t dev)
{
	struct sc_info *sc;
	int err;

	sc = device_get_softc(dev);
	if (sc == NULL) {
		device_printf(dev, "Can't detach: softc is null.\n");
		return (0);
	}

	err = device_delete_children(dev);
	if (err)
		return (err);

	hdspe_dmafree(sc);

	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->cs)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->cs);
	if (sc->lock)
		snd_mtxfree(sc->lock);

	return (0);
}

static device_method_t hdspe_methods[] = {
	DEVMETHOD(device_probe, hdspe_probe),
	DEVMETHOD(device_attach, hdspe_attach),
	DEVMETHOD(device_detach, hdspe_detach),
	{ 0, 0 }
};

static driver_t hdspe_driver = {
	"hdspe",
	hdspe_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(snd_hdspe, pci, hdspe_driver, 0, 0);
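
/*
 * Example usage (a sketch, assuming an AIO card attached as unit 0): force a
 * period of 64 samples per interrupt and prefer the ADAT input as autosync
 * clock source.
 *
 *   sysctl dev.hdspe.0.period=64
 *   sysctl dev.hdspe.0.clock_preference=adat
 *
 * The hw.hdspe.unified_pcm tunable can be set in loader.conf(5) to expose
 * all physical ports of a card as one unified pcm device.
 */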