/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel High Definition Audio (Controller) driver for FreeBSD.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include <dev/sound/pcm/sound.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <dev/sound/pci/hda/hdac_private.h>
#include <dev/sound/pci/hda/hdac_reg.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>

#define HDA_DRV_TEST_REV	"20120126_0002"

SND_DECLARE_FILE("$FreeBSD$");

#define hdac_lock(sc)		snd_mtxlock((sc)->lock)
#define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
#define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)
#define hdac_lockowned(sc)	mtx_owned((sc)->lock)

#define HDAC_QUIRK_64BIT	(1 << 0)
#define HDAC_QUIRK_DMAPOS	(1 << 1)
#define HDAC_QUIRK_MSI		(1 << 2)

static const struct {
	const char *key;
	uint32_t value;
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");

static const struct {
	uint32_t model;
	const char *desc;
	char quirks_on;
	char quirks_off;
} hdac_devices[] = {
	{ HDA_INTEL_OAK,	"Intel Oaktrail",	0, 0 },
	{ HDA_INTEL_BAY,	"Intel BayTrail",	0, 0 },
	{ HDA_INTEL_HSW1,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW2,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW3,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_BDW1,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_BDW2,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_CPT,	"Intel Cougar Point",	0, 0 },
	{ HDA_INTEL_PATSBURG,	"Intel Patsburg",	0, 0 },
	{ HDA_INTEL_PPT1,	"Intel Panther Point",	0, 0 },
	{ HDA_INTEL_LPT1,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_LPT2,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_WCPT,	"Intel Wildcat Point",	0, 0 },
	{
HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 96 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 97 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 98 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 99 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 100 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 101 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 102 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 103 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 104 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 105 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 106 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 107 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 108 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 109 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 110 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 111 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 112 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 113 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 114 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 115 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 116 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 117 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 118 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 119 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 120 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 121 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 122 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 123 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 124 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 125 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 126 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 127 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 128 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 129 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 130 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 131 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 132 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 133 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 134 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 135 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 136 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 137 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 138 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 139 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 140 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 141 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 142 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 143 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 144 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 145 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 146 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 147 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 148 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 149 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 150 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 151 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 152 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 153 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 154 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 155 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 156 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 157 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 158 { HDA_ATI_R600, "ATI R600", 0, 0 }, 159 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 160 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 161 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 162 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 163 { HDA_ATI_RV710, 
"ATI RV710", 0, 0 }, 164 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 165 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 166 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 167 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 168 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 169 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 170 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 171 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 172 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 173 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 174 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 175 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 176 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 177 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 178 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 179 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 180 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 181 /* Unknown */ 182 { HDA_INTEL_ALL, "Intel", 0, 0 }, 183 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 184 { HDA_ATI_ALL, "ATI", 0, 0 }, 185 { HDA_AMD_ALL, "AMD", 0, 0 }, 186 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 187 { HDA_VIA_ALL, "VIA", 0, 0 }, 188 { HDA_SIS_ALL, "SiS", 0, 0 }, 189 { HDA_ULI_ALL, "ULI", 0, 0 }, 190 }; 191 192 static const struct { 193 uint16_t vendor; 194 uint8_t reg; 195 uint8_t mask; 196 uint8_t enable; 197 } hdac_pcie_snoop[] = { 198 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 199 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 200 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 201 }; 202 203 /**************************************************************************** 204 * Function prototypes 205 ****************************************************************************/ 206 static void hdac_intr_handler(void *); 207 static int hdac_reset(struct hdac_softc *, int); 208 static int hdac_get_capabilities(struct hdac_softc *); 209 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 210 static int hdac_dma_alloc(struct hdac_softc *, 211 struct hdac_dma *, bus_size_t); 212 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 213 static int hdac_mem_alloc(struct hdac_softc *); 214 static void hdac_mem_free(struct hdac_softc *); 215 static int hdac_irq_alloc(struct hdac_softc *); 216 static void hdac_irq_free(struct hdac_softc *); 217 static void hdac_corb_init(struct hdac_softc *); 218 static void hdac_rirb_init(struct hdac_softc *); 219 static void hdac_corb_start(struct hdac_softc *); 220 static void hdac_rirb_start(struct hdac_softc *); 221 222 static void hdac_attach2(void *); 223 224 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 225 226 static int hdac_probe(device_t); 227 static int hdac_attach(device_t); 228 static int hdac_detach(device_t); 229 static int hdac_suspend(device_t); 230 static int hdac_resume(device_t); 231 232 static int hdac_rirb_flush(struct hdac_softc *sc); 233 static int hdac_unsolq_flush(struct hdac_softc *sc); 234 235 #define hdac_command(a1, a2, a3) \ 236 hdac_send_command(a1, a3, a2) 237 238 /* This function surely going to make its way into upper level someday. 
 */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}

/****************************************************************************
 * void hdac_intr_handler(void *)
 *
 * Interrupt handler. Processes interrupts received from the hdac.
 ****************************************************************************/
static void
hdac_intr_handler(void *context)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t intsts;
	uint8_t rirbsts;
	int i;

	sc = (struct hdac_softc *)context;
	hdac_lock(sc);

	/* Do we have anything to do? */
	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	if ((intsts & HDAC_INTSTS_GIS) == 0) {
		hdac_unlock(sc);
		return;
	}

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		/* Get as many responses as we can. */
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}

	HDAC_WRITE_4(&sc->mem, HDAC_INTSTS, intsts);
	hdac_unlock(sc);
}

static void
hdac_poll_callback(void *arg)
{
	struct hdac_softc *sc = arg;

	if (sc == NULL)
		return;

	hdac_lock(sc);
	if (sc->polling == 0) {
		hdac_unlock(sc);
		return;
	}
	callout_reset(&sc->poll_callout, sc->poll_ival,
	    hdac_poll_callback, sc);
	hdac_unlock(sc);

	hdac_intr_handler(sc);
}

/****************************************************************************
 * int hdac_reset(hdac_softc *, int)
 *
 * Reset the hdac to a quiescent and known state.
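 *
 * In outline, the sequence implemented below is: stop every stream DMA
 * engine, stop the CORB and RIRB DMA engines, clear the DMA position
 * buffer base registers, then deassert GCTL.CRST and wait for it to read
 * back as zero.  If a wakeup was requested, CRST is asserted again and we
 * wait for the controller to come out of reset before returning.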
374 ****************************************************************************/ 375 static int 376 hdac_reset(struct hdac_softc *sc, int wakeup) 377 { 378 uint32_t gctl; 379 int count, i; 380 381 /* 382 * Stop all Streams DMA engine 383 */ 384 for (i = 0; i < sc->num_iss; i++) 385 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 386 for (i = 0; i < sc->num_oss; i++) 387 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 388 for (i = 0; i < sc->num_bss; i++) 389 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 390 391 /* 392 * Stop Control DMA engines. 393 */ 394 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 395 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 396 397 /* 398 * Reset DMA position buffer. 399 */ 400 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 401 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 402 403 /* 404 * Reset the controller. The reset must remain asserted for 405 * a minimum of 100us. 406 */ 407 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 408 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 409 count = 10000; 410 do { 411 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 412 if (!(gctl & HDAC_GCTL_CRST)) 413 break; 414 DELAY(10); 415 } while (--count); 416 if (gctl & HDAC_GCTL_CRST) { 417 device_printf(sc->dev, "Unable to put hdac in reset\n"); 418 return (ENXIO); 419 } 420 421 /* If wakeup is not requested - leave the controller in reset state. */ 422 if (!wakeup) 423 return (0); 424 425 DELAY(100); 426 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 427 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 428 count = 10000; 429 do { 430 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 431 if (gctl & HDAC_GCTL_CRST) 432 break; 433 DELAY(10); 434 } while (--count); 435 if (!(gctl & HDAC_GCTL_CRST)) { 436 device_printf(sc->dev, "Device stuck in reset\n"); 437 return (ENXIO); 438 } 439 440 /* 441 * Wait for codecs to finish their own reset sequence. The delay here 442 * should be of 250us but for some reasons, it's not enough on my 443 * computer. Let's use twice as much as necessary to make sure that 444 * it's reset properly. 
	 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established. We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a DMA region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
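 *
 * For illustration, the CORB buffer set up in hdac_attach() uses this
 * pair of helpers roughly as follows (the size depends on what
 * hdac_get_capabilities() read from the hardware):
 *
 *	result = hdac_dma_alloc(sc, &sc->corb_dma,
 *	    sc->corb_size * sizeof(uint32_t));
 *	...
 *	hdac_dma_free(sc, &sc->corb_dma);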
548 ****************************************************************************/ 549 static int 550 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 551 { 552 bus_size_t roundsz; 553 int result; 554 555 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 556 bzero(dma, sizeof(*dma)); 557 558 /* 559 * Create a DMA tag 560 */ 561 result = bus_dma_tag_create( 562 bus_get_dma_tag(sc->dev), /* parent */ 563 HDA_DMA_ALIGNMENT, /* alignment */ 564 0, /* boundary */ 565 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 566 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 567 BUS_SPACE_MAXADDR, /* highaddr */ 568 NULL, /* filtfunc */ 569 NULL, /* fistfuncarg */ 570 roundsz, /* maxsize */ 571 1, /* nsegments */ 572 roundsz, /* maxsegsz */ 573 0, /* flags */ 574 NULL, /* lockfunc */ 575 NULL, /* lockfuncarg */ 576 &dma->dma_tag); /* dmat */ 577 if (result != 0) { 578 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 579 __func__, result); 580 goto hdac_dma_alloc_fail; 581 } 582 583 /* 584 * Allocate DMA memory 585 */ 586 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 587 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 588 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 589 BUS_DMA_COHERENT), 590 &dma->dma_map); 591 if (result != 0) { 592 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 593 __func__, result); 594 goto hdac_dma_alloc_fail; 595 } 596 597 dma->dma_size = roundsz; 598 599 /* 600 * Map the memory 601 */ 602 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 603 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 604 if (result != 0 || dma->dma_paddr == 0) { 605 if (result == 0) 606 result = ENOMEM; 607 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 608 __func__, result); 609 goto hdac_dma_alloc_fail; 610 } 611 612 HDA_BOOTHVERBOSE( 613 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 614 __func__, (uintmax_t)size, (uintmax_t)roundsz); 615 ); 616 617 return (0); 618 619 hdac_dma_alloc_fail: 620 hdac_dma_free(sc, dma); 621 622 return (result); 623 } 624 625 626 /**************************************************************************** 627 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 628 * 629 * Free a struct dhac_dma that has been previously allocated via the 630 * hdac_dma_alloc function. 631 ****************************************************************************/ 632 static void 633 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 634 { 635 if (dma->dma_paddr != 0) { 636 /* Flush caches */ 637 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 638 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 639 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 640 dma->dma_paddr = 0; 641 } 642 if (dma->dma_vaddr != NULL) { 643 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 644 dma->dma_vaddr = NULL; 645 } 646 if (dma->dma_tag != NULL) { 647 bus_dma_tag_destroy(dma->dma_tag); 648 dma->dma_tag = NULL; 649 } 650 dma->dma_size = 0; 651 } 652 653 /**************************************************************************** 654 * int hdac_mem_alloc(struct hdac_softc *) 655 * 656 * Allocate all the bus resources necessary to speak with the physical 657 * controller. 
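 *
 * The only resource involved is the memory-mapped register window behind
 * PCI BAR 0 (PCIR_BAR(0)); its bus tag and handle are kept in sc->mem for
 * the HDAC_READ_*()/HDAC_WRITE_*() accessors used throughout this file.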
658 ****************************************************************************/ 659 static int 660 hdac_mem_alloc(struct hdac_softc *sc) 661 { 662 struct hdac_mem *mem; 663 664 mem = &sc->mem; 665 mem->mem_rid = PCIR_BAR(0); 666 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 667 &mem->mem_rid, RF_ACTIVE); 668 if (mem->mem_res == NULL) { 669 device_printf(sc->dev, 670 "%s: Unable to allocate memory resource\n", __func__); 671 return (ENOMEM); 672 } 673 mem->mem_tag = rman_get_bustag(mem->mem_res); 674 mem->mem_handle = rman_get_bushandle(mem->mem_res); 675 676 return (0); 677 } 678 679 /**************************************************************************** 680 * void hdac_mem_free(struct hdac_softc *) 681 * 682 * Free up resources previously allocated by hdac_mem_alloc. 683 ****************************************************************************/ 684 static void 685 hdac_mem_free(struct hdac_softc *sc) 686 { 687 struct hdac_mem *mem; 688 689 mem = &sc->mem; 690 if (mem->mem_res != NULL) 691 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 692 mem->mem_res); 693 mem->mem_res = NULL; 694 } 695 696 /**************************************************************************** 697 * int hdac_irq_alloc(struct hdac_softc *) 698 * 699 * Allocate and setup the resources necessary for interrupt handling. 700 ****************************************************************************/ 701 static int 702 hdac_irq_alloc(struct hdac_softc *sc) 703 { 704 struct hdac_irq *irq; 705 int result; 706 707 irq = &sc->irq; 708 irq->irq_rid = 0x0; 709 710 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 711 (result = pci_msi_count(sc->dev)) == 1 && 712 pci_alloc_msi(sc->dev, &result) == 0) 713 irq->irq_rid = 0x1; 714 715 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 716 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 717 if (irq->irq_res == NULL) { 718 device_printf(sc->dev, "%s: Unable to allocate irq\n", 719 __func__); 720 goto hdac_irq_alloc_fail; 721 } 722 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 723 NULL, hdac_intr_handler, sc, &irq->irq_handle); 724 if (result != 0) { 725 device_printf(sc->dev, 726 "%s: Unable to setup interrupt handler (%d)\n", 727 __func__, result); 728 goto hdac_irq_alloc_fail; 729 } 730 731 return (0); 732 733 hdac_irq_alloc_fail: 734 hdac_irq_free(sc); 735 736 return (ENXIO); 737 } 738 739 /**************************************************************************** 740 * void hdac_irq_free(struct hdac_softc *) 741 * 742 * Free up resources previously allocated by hdac_irq_alloc. 743 ****************************************************************************/ 744 static void 745 hdac_irq_free(struct hdac_softc *sc) 746 { 747 struct hdac_irq *irq; 748 749 irq = &sc->irq; 750 if (irq->irq_res != NULL && irq->irq_handle != NULL) 751 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 752 if (irq->irq_res != NULL) 753 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 754 irq->irq_res); 755 if (irq->irq_rid == 0x1) 756 pci_release_msi(sc->dev); 757 irq->irq_handle = NULL; 758 irq->irq_res = NULL; 759 irq->irq_rid = 0x0; 760 } 761 762 /**************************************************************************** 763 * void hdac_corb_init(struct hdac_softc *) 764 * 765 * Initialize the corb registers for operations but do not start it up yet. 766 * The CORB engine must not be running when this function is called. 
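 *
 * Once hdac_corb_start() sets CORBRUN, commands are queued by advancing
 * the software write pointer, storing the verb, and handing the new
 * write pointer to the hardware; hdac_send_command() does roughly:
 *
 *	sc->corb_wp = (sc->corb_wp + 1) % sc->corb_size;
 *	corb[sc->corb_wp] = htole32(verb);
 *	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);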
767 ****************************************************************************/ 768 static void 769 hdac_corb_init(struct hdac_softc *sc) 770 { 771 uint8_t corbsize; 772 uint64_t corbpaddr; 773 774 /* Setup the CORB size. */ 775 switch (sc->corb_size) { 776 case 256: 777 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 778 break; 779 case 16: 780 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 781 break; 782 case 2: 783 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 784 break; 785 default: 786 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 787 } 788 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 789 790 /* Setup the CORB Address in the hdac */ 791 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 792 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 793 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 794 795 /* Set the WP and RP */ 796 sc->corb_wp = 0; 797 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 798 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 799 /* 800 * The HDA specification indicates that the CORBRPRST bit will always 801 * read as zero. Unfortunately, it seems that at least the 82801G 802 * doesn't reset the bit to zero, which stalls the corb engine. 803 * manually reset the bit to zero before continuing. 804 */ 805 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 806 807 /* Enable CORB error reporting */ 808 #if 0 809 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 810 #endif 811 } 812 813 /**************************************************************************** 814 * void hdac_rirb_init(struct hdac_softc *) 815 * 816 * Initialize the rirb registers for operations but do not start it up yet. 817 * The RIRB engine must not be running when this function is called. 818 ****************************************************************************/ 819 static void 820 hdac_rirb_init(struct hdac_softc *sc) 821 { 822 uint8_t rirbsize; 823 uint64_t rirbpaddr; 824 825 /* Setup the RIRB size. */ 826 switch (sc->rirb_size) { 827 case 256: 828 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 829 break; 830 case 16: 831 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 832 break; 833 case 2: 834 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 835 break; 836 default: 837 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 838 } 839 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 840 841 /* Setup the RIRB Address in the hdac */ 842 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 843 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 844 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 845 846 /* Setup the WP and RP */ 847 sc->rirb_rp = 0; 848 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 849 850 /* Setup the interrupt threshold */ 851 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 852 853 /* Enable Overrun and response received reporting */ 854 #if 0 855 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 856 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 857 #else 858 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 859 #endif 860 861 /* 862 * Make sure that the Host CPU cache doesn't contain any dirty 863 * cache lines that falls in the rirb. If I understood correctly, it 864 * should be sufficient to do this only once as the rirb is purely 865 * read-only from now on. 
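	 *
	 * Each RIRB entry is a pair of little-endian 32-bit words: the
	 * response itself and an extended word carrying the codec address
	 * plus the unsolicited-response flag; hdac_rirb_flush() decodes
	 * them with le32toh() and the HDAC_RIRB_RESPONSE_EX_* macros.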
866 */ 867 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 868 BUS_DMASYNC_PREREAD); 869 } 870 871 /**************************************************************************** 872 * void hdac_corb_start(hdac_softc *) 873 * 874 * Startup the corb DMA engine 875 ****************************************************************************/ 876 static void 877 hdac_corb_start(struct hdac_softc *sc) 878 { 879 uint32_t corbctl; 880 881 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 882 corbctl |= HDAC_CORBCTL_CORBRUN; 883 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 884 } 885 886 /**************************************************************************** 887 * void hdac_rirb_start(hdac_softc *) 888 * 889 * Startup the rirb DMA engine 890 ****************************************************************************/ 891 static void 892 hdac_rirb_start(struct hdac_softc *sc) 893 { 894 uint32_t rirbctl; 895 896 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 897 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 898 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 899 } 900 901 static int 902 hdac_rirb_flush(struct hdac_softc *sc) 903 { 904 struct hdac_rirb *rirb_base, *rirb; 905 nid_t cad; 906 uint32_t resp, resp_ex; 907 uint8_t rirbwp; 908 int ret; 909 910 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 911 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 912 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 913 BUS_DMASYNC_POSTREAD); 914 915 ret = 0; 916 while (sc->rirb_rp != rirbwp) { 917 sc->rirb_rp++; 918 sc->rirb_rp %= sc->rirb_size; 919 rirb = &rirb_base[sc->rirb_rp]; 920 resp = le32toh(rirb->response); 921 resp_ex = le32toh(rirb->response_ex); 922 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 923 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 924 sc->unsolq[sc->unsolq_wp++] = resp; 925 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 926 sc->unsolq[sc->unsolq_wp++] = cad; 927 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 928 } else if (sc->codecs[cad].pending <= 0) { 929 device_printf(sc->dev, "Unexpected unsolicited " 930 "response from address %d: %08x\n", cad, resp); 931 } else { 932 sc->codecs[cad].response = resp; 933 sc->codecs[cad].pending--; 934 } 935 ret++; 936 } 937 938 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 939 BUS_DMASYNC_PREREAD); 940 return (ret); 941 } 942 943 static int 944 hdac_unsolq_flush(struct hdac_softc *sc) 945 { 946 device_t child; 947 nid_t cad; 948 uint32_t resp; 949 int ret = 0; 950 951 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 952 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 953 while (sc->unsolq_rp != sc->unsolq_wp) { 954 resp = sc->unsolq[sc->unsolq_rp++]; 955 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 956 cad = sc->unsolq[sc->unsolq_rp++]; 957 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 958 if ((child = sc->codecs[cad].dev) != NULL) 959 HDAC_UNSOL_INTR(child, resp); 960 ret++; 961 } 962 sc->unsolq_st = HDAC_UNSOLQ_READY; 963 } 964 965 return (ret); 966 } 967 968 /**************************************************************************** 969 * uint32_t hdac_command_sendone_internal 970 * 971 * Wrapper function that sends only one command to a given codec 972 ****************************************************************************/ 973 static uint32_t 974 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 975 { 976 int timeout; 977 uint32_t *corb; 978 979 if (!hdac_lockowned(sc)) 980 device_printf(sc->dev, "WARNING!!!! 
mtx not owned!!!!\n"); 981 verb &= ~HDA_CMD_CAD_MASK; 982 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 983 sc->codecs[cad].response = HDA_INVALID; 984 985 sc->codecs[cad].pending++; 986 sc->corb_wp++; 987 sc->corb_wp %= sc->corb_size; 988 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 989 bus_dmamap_sync(sc->corb_dma.dma_tag, 990 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 991 corb[sc->corb_wp] = htole32(verb); 992 bus_dmamap_sync(sc->corb_dma.dma_tag, 993 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 994 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 995 996 timeout = 10000; 997 do { 998 if (hdac_rirb_flush(sc) == 0) 999 DELAY(10); 1000 } while (sc->codecs[cad].pending != 0 && --timeout); 1001 1002 if (sc->codecs[cad].pending != 0) { 1003 device_printf(sc->dev, "Command timeout on address %d\n", cad); 1004 sc->codecs[cad].pending = 0; 1005 } 1006 1007 if (sc->unsolq_rp != sc->unsolq_wp) 1008 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1009 return (sc->codecs[cad].response); 1010 } 1011 1012 /**************************************************************************** 1013 * Device Methods 1014 ****************************************************************************/ 1015 1016 /**************************************************************************** 1017 * int hdac_probe(device_t) 1018 * 1019 * Probe for the presence of an hdac. If none is found, check for a generic 1020 * match using the subclass of the device. 1021 ****************************************************************************/ 1022 static int 1023 hdac_probe(device_t dev) 1024 { 1025 int i, result; 1026 uint32_t model; 1027 uint16_t class, subclass; 1028 char desc[64]; 1029 1030 model = (uint32_t)pci_get_device(dev) << 16; 1031 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1032 class = pci_get_class(dev); 1033 subclass = pci_get_subclass(dev); 1034 1035 bzero(desc, sizeof(desc)); 1036 result = ENXIO; 1037 for (i = 0; i < nitems(hdac_devices); i++) { 1038 if (hdac_devices[i].model == model) { 1039 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1040 result = BUS_PROBE_DEFAULT; 1041 break; 1042 } 1043 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1044 class == PCIC_MULTIMEDIA && 1045 subclass == PCIS_MULTIMEDIA_HDA) { 1046 snprintf(desc, sizeof(desc), 1047 "%s (0x%04x)", 1048 hdac_devices[i].desc, pci_get_device(dev)); 1049 result = BUS_PROBE_GENERIC; 1050 break; 1051 } 1052 } 1053 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1054 subclass == PCIS_MULTIMEDIA_HDA) { 1055 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1056 result = BUS_PROBE_GENERIC; 1057 } 1058 if (result != ENXIO) { 1059 strlcat(desc, " HDA Controller", sizeof(desc)); 1060 device_set_desc_copy(dev, desc); 1061 } 1062 1063 return (result); 1064 } 1065 1066 static void 1067 hdac_unsolq_task(void *context, int pending) 1068 { 1069 struct hdac_softc *sc; 1070 1071 sc = (struct hdac_softc *)context; 1072 1073 hdac_lock(sc); 1074 hdac_unsolq_flush(sc); 1075 hdac_unlock(sc); 1076 } 1077 1078 /**************************************************************************** 1079 * int hdac_attach(device_t) 1080 * 1081 * Attach the device into the kernel. Interrupts usually won't be enabled 1082 * when this function is called. Setup everything that doesn't require 1083 * interrupts and defer probing of codecs until interrupts are enabled. 
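 *
 * The deferral is done with a config_intrhook: hdac_attach2() runs from
 * the hook once interrupts are enabled, or is called directly when the
 * hook cannot be established or the system is already up.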
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
		/*
		 * Try to enable PCIe snoop to avoid messing around with
		 * uncacheable DMA attribute. Since PCIe snoop register
		 * config is pretty much vendor specific, there are no
		 * general solutions on how to enable it, forcing us (even
		 * Microsoft) to enable uncacheable or write combined DMA
		 * by default.
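		 *
		 * For the chipsets listed in hdac_pcie_snoop[] the snoop
		 * control lives in a vendor-specific config register; it is
		 * enabled by a read-modify-write of that register (clear
		 * everything outside .mask, OR in .enable) and the result
		 * is read back to verify.  An entry with .reg == 0 means
		 * nothing needs to be programmed.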
1185 * 1186 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1187 */ 1188 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1189 if (hdac_pcie_snoop[i].vendor != vendor) 1190 continue; 1191 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1192 if (hdac_pcie_snoop[i].reg == 0x00) 1193 break; 1194 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1195 if ((v & hdac_pcie_snoop[i].enable) == 1196 hdac_pcie_snoop[i].enable) 1197 break; 1198 v &= hdac_pcie_snoop[i].mask; 1199 v |= hdac_pcie_snoop[i].enable; 1200 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1201 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1202 if ((v & hdac_pcie_snoop[i].enable) != 1203 hdac_pcie_snoop[i].enable) { 1204 HDA_BOOTVERBOSE( 1205 device_printf(dev, 1206 "WARNING: Failed to enable PCIe " 1207 "snoop!\n"); 1208 ); 1209 #if defined(__i386__) || defined(__amd64__) 1210 sc->flags |= HDAC_F_DMA_NOCACHE; 1211 #endif 1212 } 1213 break; 1214 } 1215 #if defined(__i386__) || defined(__amd64__) 1216 } 1217 #endif 1218 1219 HDA_BOOTHVERBOSE( 1220 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1221 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1222 "Uncacheable" : "PCIe snoop", vendor); 1223 ); 1224 1225 /* Allocate resources */ 1226 result = hdac_mem_alloc(sc); 1227 if (result != 0) 1228 goto hdac_attach_fail; 1229 result = hdac_irq_alloc(sc); 1230 if (result != 0) 1231 goto hdac_attach_fail; 1232 1233 /* Get Capabilities */ 1234 result = hdac_get_capabilities(sc); 1235 if (result != 0) 1236 goto hdac_attach_fail; 1237 1238 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1239 result = hdac_dma_alloc(sc, &sc->corb_dma, 1240 sc->corb_size * sizeof(uint32_t)); 1241 if (result != 0) 1242 goto hdac_attach_fail; 1243 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1244 sc->rirb_size * sizeof(struct hdac_rirb)); 1245 if (result != 0) 1246 goto hdac_attach_fail; 1247 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1248 M_HDAC, M_ZERO | M_WAITOK); 1249 for (i = 0; i < sc->num_ss; i++) { 1250 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1251 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1252 if (result != 0) 1253 goto hdac_attach_fail; 1254 } 1255 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1256 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1257 HDA_BOOTVERBOSE( 1258 device_printf(dev, "Failed to " 1259 "allocate DMA pos buffer " 1260 "(non-fatal)\n"); 1261 ); 1262 } else { 1263 uint64_t addr = sc->pos_dma.dma_paddr; 1264 1265 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1266 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1267 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1268 HDAC_DPLBASE_DPLBASE_DMAPBE); 1269 } 1270 } 1271 1272 result = bus_dma_tag_create( 1273 bus_get_dma_tag(sc->dev), /* parent */ 1274 HDA_DMA_ALIGNMENT, /* alignment */ 1275 0, /* boundary */ 1276 (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : 1277 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1278 BUS_SPACE_MAXADDR, /* highaddr */ 1279 NULL, /* filtfunc */ 1280 NULL, /* fistfuncarg */ 1281 HDA_BUFSZ_MAX, /* maxsize */ 1282 1, /* nsegments */ 1283 HDA_BUFSZ_MAX, /* maxsegsz */ 1284 0, /* flags */ 1285 NULL, /* lockfunc */ 1286 NULL, /* lockfuncarg */ 1287 &sc->chan_dmat); /* dmat */ 1288 if (result != 0) { 1289 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1290 __func__, result); 1291 goto hdac_attach_fail; 1292 } 1293 1294 /* Quiesce everything */ 1295 HDA_BOOTHVERBOSE( 1296 device_printf(dev, "Reset controller...\n"); 1297 ); 1298 hdac_reset(sc, 1); 1299 1300 /* Initialize the CORB and RIRB */ 1301 hdac_corb_init(sc); 1302 hdac_rirb_init(sc); 1303 1304 /* Defer remaining of initialization until interrupts are enabled */ 1305 sc->intrhook.ich_func = hdac_attach2; 1306 sc->intrhook.ich_arg = (void *)sc; 1307 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1308 sc->intrhook.ich_func = NULL; 1309 hdac_attach2((void *)sc); 1310 } 1311 1312 return (0); 1313 1314 hdac_attach_fail: 1315 hdac_irq_free(sc); 1316 if (sc->streams != NULL) 1317 for (i = 0; i < sc->num_ss; i++) 1318 hdac_dma_free(sc, &sc->streams[i].bdl); 1319 free(sc->streams, M_HDAC); 1320 hdac_dma_free(sc, &sc->rirb_dma); 1321 hdac_dma_free(sc, &sc->corb_dma); 1322 hdac_mem_free(sc); 1323 snd_mtxfree(sc->lock); 1324 1325 return (ENXIO); 1326 } 1327 1328 static int 1329 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1330 { 1331 struct hdac_softc *sc; 1332 device_t *devlist; 1333 device_t dev; 1334 int devcount, i, err, val; 1335 1336 dev = oidp->oid_arg1; 1337 sc = device_get_softc(dev); 1338 if (sc == NULL) 1339 return (EINVAL); 1340 val = 0; 1341 err = sysctl_handle_int(oidp, &val, 0, req); 1342 if (err != 0 || req->newptr == NULL || val == 0) 1343 return (err); 1344 1345 /* XXX: Temporary. For debugging. 
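	 * Writing 100 or 101 here triggers hdac_suspend()/hdac_resume()
	 * respectively; any other non-zero value asks every child codec
	 * device to dump its pin state via HDAC_PINDUMP().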
*/ 1346 if (val == 100) { 1347 hdac_suspend(dev); 1348 return (0); 1349 } else if (val == 101) { 1350 hdac_resume(dev); 1351 return (0); 1352 } 1353 1354 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) 1355 return (err); 1356 hdac_lock(sc); 1357 for (i = 0; i < devcount; i++) 1358 HDAC_PINDUMP(devlist[i]); 1359 hdac_unlock(sc); 1360 free(devlist, M_TEMP); 1361 return (0); 1362 } 1363 1364 static int 1365 hdac_mdata_rate(uint16_t fmt) 1366 { 1367 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1368 int rate, bits; 1369 1370 if (fmt & (1 << 14)) 1371 rate = 44100; 1372 else 1373 rate = 48000; 1374 rate *= ((fmt >> 11) & 0x07) + 1; 1375 rate /= ((fmt >> 8) & 0x07) + 1; 1376 bits = mbits[(fmt >> 4) & 0x03]; 1377 bits *= (fmt & 0x0f) + 1; 1378 return (rate * bits); 1379 } 1380 1381 static int 1382 hdac_bdata_rate(uint16_t fmt, int output) 1383 { 1384 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1385 int rate, bits; 1386 1387 rate = 48000; 1388 rate *= ((fmt >> 11) & 0x07) + 1; 1389 bits = bbits[(fmt >> 4) & 0x03]; 1390 bits *= (fmt & 0x0f) + 1; 1391 if (!output) 1392 bits = ((bits + 7) & ~0x07) + 10; 1393 return (rate * bits); 1394 } 1395 1396 static void 1397 hdac_poll_reinit(struct hdac_softc *sc) 1398 { 1399 int i, pollticks, min = 1000000; 1400 struct hdac_stream *s; 1401 1402 if (sc->polling == 0) 1403 return; 1404 if (sc->unsol_registered > 0) 1405 min = hz / 2; 1406 for (i = 0; i < sc->num_ss; i++) { 1407 s = &sc->streams[i]; 1408 if (s->running == 0) 1409 continue; 1410 pollticks = ((uint64_t)hz * s->blksz) / 1411 (hdac_mdata_rate(s->format) / 8); 1412 pollticks >>= 1; 1413 if (pollticks > hz) 1414 pollticks = hz; 1415 if (pollticks < 1) { 1416 HDA_BOOTVERBOSE( 1417 device_printf(sc->dev, 1418 "poll interval < 1 tick !\n"); 1419 ); 1420 pollticks = 1; 1421 } 1422 if (min > pollticks) 1423 min = pollticks; 1424 } 1425 HDA_BOOTVERBOSE( 1426 device_printf(sc->dev, 1427 "poll interval %d -> %d ticks\n", 1428 sc->poll_ival, min); 1429 ); 1430 sc->poll_ival = min; 1431 if (min == 1000000) 1432 callout_stop(&sc->poll_callout); 1433 else 1434 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1435 } 1436 1437 static int 1438 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1439 { 1440 struct hdac_softc *sc; 1441 device_t dev; 1442 uint32_t ctl; 1443 int err, val; 1444 1445 dev = oidp->oid_arg1; 1446 sc = device_get_softc(dev); 1447 if (sc == NULL) 1448 return (EINVAL); 1449 hdac_lock(sc); 1450 val = sc->polling; 1451 hdac_unlock(sc); 1452 err = sysctl_handle_int(oidp, &val, 0, req); 1453 1454 if (err != 0 || req->newptr == NULL) 1455 return (err); 1456 if (val < 0 || val > 1) 1457 return (EINVAL); 1458 1459 hdac_lock(sc); 1460 if (val != sc->polling) { 1461 if (val == 0) { 1462 callout_stop(&sc->poll_callout); 1463 hdac_unlock(sc); 1464 callout_drain(&sc->poll_callout); 1465 hdac_lock(sc); 1466 sc->polling = 0; 1467 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1468 ctl |= HDAC_INTCTL_GIE; 1469 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1470 } else { 1471 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1472 ctl &= ~HDAC_INTCTL_GIE; 1473 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1474 sc->polling = 1; 1475 hdac_poll_reinit(sc); 1476 } 1477 } 1478 hdac_unlock(sc); 1479 1480 return (err); 1481 } 1482 1483 static void 1484 hdac_attach2(void *arg) 1485 { 1486 struct hdac_softc *sc; 1487 device_t child; 1488 uint32_t vendorid, revisionid; 1489 int i; 1490 uint16_t statests; 1491 1492 sc = (struct hdac_softc *)arg; 1493 1494 hdac_lock(sc); 1495 1496 /* Remove 
ourselves from the config hooks */ 1497 if (sc->intrhook.ich_func != NULL) { 1498 config_intrhook_disestablish(&sc->intrhook); 1499 sc->intrhook.ich_func = NULL; 1500 } 1501 1502 HDA_BOOTHVERBOSE( 1503 device_printf(sc->dev, "Starting CORB Engine...\n"); 1504 ); 1505 hdac_corb_start(sc); 1506 HDA_BOOTHVERBOSE( 1507 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1508 ); 1509 hdac_rirb_start(sc); 1510 HDA_BOOTHVERBOSE( 1511 device_printf(sc->dev, 1512 "Enabling controller interrupt...\n"); 1513 ); 1514 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1515 HDAC_GCTL_UNSOL); 1516 if (sc->polling == 0) { 1517 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1518 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1519 } 1520 DELAY(1000); 1521 1522 HDA_BOOTHVERBOSE( 1523 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1524 ); 1525 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1526 hdac_unlock(sc); 1527 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1528 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1529 HDA_BOOTHVERBOSE( 1530 device_printf(sc->dev, 1531 "Found CODEC at address %d\n", i); 1532 ); 1533 hdac_lock(sc); 1534 vendorid = hdac_send_command(sc, i, 1535 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1536 revisionid = hdac_send_command(sc, i, 1537 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1538 hdac_unlock(sc); 1539 if (vendorid == HDA_INVALID && 1540 revisionid == HDA_INVALID) { 1541 device_printf(sc->dev, 1542 "CODEC is not responding!\n"); 1543 continue; 1544 } 1545 sc->codecs[i].vendor_id = 1546 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1547 sc->codecs[i].device_id = 1548 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1549 sc->codecs[i].revision_id = 1550 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1551 sc->codecs[i].stepping_id = 1552 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1553 child = device_add_child(sc->dev, "hdacc", -1); 1554 if (child == NULL) { 1555 device_printf(sc->dev, 1556 "Failed to add CODEC device\n"); 1557 continue; 1558 } 1559 device_set_ivars(child, (void *)(intptr_t)i); 1560 sc->codecs[i].dev = child; 1561 } 1562 } 1563 bus_generic_attach(sc->dev); 1564 1565 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1566 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1567 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), 1568 sysctl_hdac_pindump, "I", "Dump pin states/data"); 1569 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1570 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1571 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), 1572 sysctl_hdac_polling, "I", "Enable polling mode"); 1573 } 1574 1575 /**************************************************************************** 1576 * int hdac_suspend(device_t) 1577 * 1578 * Suspend and power down HDA bus and codecs. 
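 *
 * Children are suspended first via bus_generic_suspend(); the controller
 * is then held in reset while the polling callout and the unsolicited
 * response task are drained.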
1579 ****************************************************************************/ 1580 static int 1581 hdac_suspend(device_t dev) 1582 { 1583 struct hdac_softc *sc = device_get_softc(dev); 1584 1585 HDA_BOOTHVERBOSE( 1586 device_printf(dev, "Suspend...\n"); 1587 ); 1588 bus_generic_suspend(dev); 1589 1590 hdac_lock(sc); 1591 HDA_BOOTHVERBOSE( 1592 device_printf(dev, "Reset controller...\n"); 1593 ); 1594 callout_stop(&sc->poll_callout); 1595 hdac_reset(sc, 0); 1596 hdac_unlock(sc); 1597 callout_drain(&sc->poll_callout); 1598 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1599 HDA_BOOTHVERBOSE( 1600 device_printf(dev, "Suspend done\n"); 1601 ); 1602 return (0); 1603 } 1604 1605 /**************************************************************************** 1606 * int hdac_resume(device_t) 1607 * 1608 * Powerup and restore HDA bus and codecs state. 1609 ****************************************************************************/ 1610 static int 1611 hdac_resume(device_t dev) 1612 { 1613 struct hdac_softc *sc = device_get_softc(dev); 1614 int error; 1615 1616 HDA_BOOTHVERBOSE( 1617 device_printf(dev, "Resume...\n"); 1618 ); 1619 hdac_lock(sc); 1620 1621 /* Quiesce everything */ 1622 HDA_BOOTHVERBOSE( 1623 device_printf(dev, "Reset controller...\n"); 1624 ); 1625 hdac_reset(sc, 1); 1626 1627 /* Initialize the CORB and RIRB */ 1628 hdac_corb_init(sc); 1629 hdac_rirb_init(sc); 1630 1631 HDA_BOOTHVERBOSE( 1632 device_printf(dev, "Starting CORB Engine...\n"); 1633 ); 1634 hdac_corb_start(sc); 1635 HDA_BOOTHVERBOSE( 1636 device_printf(dev, "Starting RIRB Engine...\n"); 1637 ); 1638 hdac_rirb_start(sc); 1639 HDA_BOOTHVERBOSE( 1640 device_printf(dev, "Enabling controller interrupt...\n"); 1641 ); 1642 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1643 HDAC_GCTL_UNSOL); 1644 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1645 DELAY(1000); 1646 hdac_poll_reinit(sc); 1647 hdac_unlock(sc); 1648 1649 error = bus_generic_resume(dev); 1650 HDA_BOOTHVERBOSE( 1651 device_printf(dev, "Resume done\n"); 1652 ); 1653 return (error); 1654 } 1655 1656 /**************************************************************************** 1657 * int hdac_detach(device_t) 1658 * 1659 * Detach and free up resources utilized by the hdac device. 
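 *
 * Codec child devices are deleted first; the controller is then held in
 * reset before the interrupt, stream BDLs, CORB/RIRB/position DMA
 * buffers, channel DMA tag, register mapping and mutex are released.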
1660 ****************************************************************************/ 1661 static int 1662 hdac_detach(device_t dev) 1663 { 1664 struct hdac_softc *sc = device_get_softc(dev); 1665 device_t *devlist; 1666 int cad, i, devcount, error; 1667 1668 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1669 return (error); 1670 for (i = 0; i < devcount; i++) { 1671 cad = (intptr_t)device_get_ivars(devlist[i]); 1672 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1673 free(devlist, M_TEMP); 1674 return (error); 1675 } 1676 sc->codecs[cad].dev = NULL; 1677 } 1678 free(devlist, M_TEMP); 1679 1680 hdac_lock(sc); 1681 hdac_reset(sc, 0); 1682 hdac_unlock(sc); 1683 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1684 hdac_irq_free(sc); 1685 1686 for (i = 0; i < sc->num_ss; i++) 1687 hdac_dma_free(sc, &sc->streams[i].bdl); 1688 free(sc->streams, M_HDAC); 1689 hdac_dma_free(sc, &sc->pos_dma); 1690 hdac_dma_free(sc, &sc->rirb_dma); 1691 hdac_dma_free(sc, &sc->corb_dma); 1692 if (sc->chan_dmat != NULL) { 1693 bus_dma_tag_destroy(sc->chan_dmat); 1694 sc->chan_dmat = NULL; 1695 } 1696 hdac_mem_free(sc); 1697 snd_mtxfree(sc->lock); 1698 return (0); 1699 } 1700 1701 static bus_dma_tag_t 1702 hdac_get_dma_tag(device_t dev, device_t child) 1703 { 1704 struct hdac_softc *sc = device_get_softc(dev); 1705 1706 return (sc->chan_dmat); 1707 } 1708 1709 static int 1710 hdac_print_child(device_t dev, device_t child) 1711 { 1712 int retval; 1713 1714 retval = bus_print_child_header(dev, child); 1715 retval += printf(" at cad %d", 1716 (int)(intptr_t)device_get_ivars(child)); 1717 retval += bus_print_child_footer(dev, child); 1718 1719 return (retval); 1720 } 1721 1722 static int 1723 hdac_child_location_str(device_t dev, device_t child, char *buf, 1724 size_t buflen) 1725 { 1726 1727 snprintf(buf, buflen, "cad=%d", 1728 (int)(intptr_t)device_get_ivars(child)); 1729 return (0); 1730 } 1731 1732 static int 1733 hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, 1734 size_t buflen) 1735 { 1736 struct hdac_softc *sc = device_get_softc(dev); 1737 nid_t cad = (uintptr_t)device_get_ivars(child); 1738 1739 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x revision=0x%02x " 1740 "stepping=0x%02x", 1741 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1742 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1743 return (0); 1744 } 1745 1746 static int 1747 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1748 { 1749 struct hdac_softc *sc = device_get_softc(dev); 1750 nid_t cad = (uintptr_t)device_get_ivars(child); 1751 1752 switch (which) { 1753 case HDA_IVAR_CODEC_ID: 1754 *result = cad; 1755 break; 1756 case HDA_IVAR_VENDOR_ID: 1757 *result = sc->codecs[cad].vendor_id; 1758 break; 1759 case HDA_IVAR_DEVICE_ID: 1760 *result = sc->codecs[cad].device_id; 1761 break; 1762 case HDA_IVAR_REVISION_ID: 1763 *result = sc->codecs[cad].revision_id; 1764 break; 1765 case HDA_IVAR_STEPPING_ID: 1766 *result = sc->codecs[cad].stepping_id; 1767 break; 1768 case HDA_IVAR_SUBVENDOR_ID: 1769 *result = pci_get_subvendor(dev); 1770 break; 1771 case HDA_IVAR_SUBDEVICE_ID: 1772 *result = pci_get_subdevice(dev); 1773 break; 1774 case HDA_IVAR_DMA_NOCACHE: 1775 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1776 break; 1777 case HDA_IVAR_STRIPES_MASK: 1778 *result = (1 << (1 << sc->num_sdo)) - 1; 1779 break; 1780 default: 1781 return (ENOENT); 1782 } 1783 return (0); 1784 } 1785 1786 static struct mtx * 1787 hdac_get_mtx(device_t dev, device_t child) 
1788 { 1789 struct hdac_softc *sc = device_get_softc(dev); 1790 1791 return (sc->lock); 1792 } 1793 1794 static uint32_t 1795 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1796 { 1797 1798 return (hdac_send_command(device_get_softc(dev), 1799 (intptr_t)device_get_ivars(child), verb)); 1800 } 1801 1802 static int 1803 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1804 { 1805 int i, ss; 1806 1807 ss = -1; 1808 /* Allocate ISS/OSS first. */ 1809 if (dir == 0) { 1810 for (i = 0; i < sc->num_iss; i++) { 1811 if (sc->streams[i].stream == stream) { 1812 ss = i; 1813 break; 1814 } 1815 } 1816 } else { 1817 for (i = 0; i < sc->num_oss; i++) { 1818 if (sc->streams[i + sc->num_iss].stream == stream) { 1819 ss = i + sc->num_iss; 1820 break; 1821 } 1822 } 1823 } 1824 /* Fallback to BSS. */ 1825 if (ss == -1) { 1826 for (i = 0; i < sc->num_bss; i++) { 1827 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1828 == stream) { 1829 ss = i + sc->num_iss + sc->num_oss; 1830 break; 1831 } 1832 } 1833 } 1834 return (ss); 1835 } 1836 1837 static int 1838 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1839 uint32_t **dmapos) 1840 { 1841 struct hdac_softc *sc = device_get_softc(dev); 1842 nid_t cad = (uintptr_t)device_get_ivars(child); 1843 int stream, ss, bw, maxbw, prevbw; 1844 1845 /* Look for empty stream. */ 1846 ss = hdac_find_stream(sc, dir, 0); 1847 1848 /* Return if found nothing. */ 1849 if (ss < 0) 1850 return (0); 1851 1852 /* Check bus bandwidth. */ 1853 bw = hdac_bdata_rate(format, dir); 1854 if (dir == 1) { 1855 bw *= 1 << (sc->num_sdo - stripe); 1856 prevbw = sc->sdo_bw_used; 1857 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1858 } else { 1859 prevbw = sc->codecs[cad].sdi_bw_used; 1860 maxbw = 48000 * 464; 1861 } 1862 HDA_BOOTHVERBOSE( 1863 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1864 (bw + prevbw) / 1000, maxbw / 1000, 1865 bw + prevbw > maxbw ? " -- OVERFLOW!" 
static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if found nothing. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/* Allocate stream number */
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int ss;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
	if (dir == 1)
		sc->sdo_bw_used -= sc->streams[ss].bw;
	else
		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
	sc->streams[ss].stream = 0;
	sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child,
    int dir, int stream, bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}

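/*
 * Stop a running stream: clear its interrupt enables and RUN bit, and
 * drop it from the controller interrupt mask before marking it idle.
 */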
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}

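/*
 * Controller method table: the standard device/bus interface plus the
 * HDA-specific stream and codec-command methods used by codec children.
 */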
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location_str, hdac_child_location_str),
	DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

static devclass_t hdac_devclass;

DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);