/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel High Definition Audio (Controller) driver for FreeBSD.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include <dev/sound/pcm/sound.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <dev/sound/pci/hda/hdac_private.h>
#include <dev/sound/pci/hda/hdac_reg.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>

#define HDA_DRV_TEST_REV	"20120126_0002"

SND_DECLARE_FILE("$FreeBSD$");

#define hdac_lock(sc)		snd_mtxlock((sc)->lock)
#define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
#define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)
#define hdac_lockowned(sc)	mtx_owned((sc)->lock)

#define HDAC_QUIRK_64BIT	(1 << 0)
#define HDAC_QUIRK_DMAPOS	(1 << 1)
#define HDAC_QUIRK_MSI		(1 << 2)

static const struct {
	const char *key;
	uint32_t value;
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");

static const struct {
	uint32_t model;
	const char *desc;
	char quirks_on;
	char quirks_off;
} hdac_devices[] = {
	{ HDA_INTEL_OAK,     "Intel Oaktrail",		0, 0 },
	{ HDA_INTEL_BAY,     "Intel BayTrail",		0, 0 },
	{ HDA_INTEL_HSW1,    "Intel Haswell",		0, 0 },
	{ HDA_INTEL_HSW2,    "Intel Haswell",		0, 0 },
	{ HDA_INTEL_HSW3,    "Intel Haswell",		0, 0 },
	{ HDA_INTEL_BDW1,    "Intel Broadwell",		0, 0 },
	{ HDA_INTEL_BDW2,    "Intel Broadwell",		0, 0 },
	{ HDA_INTEL_CPT,     "Intel Cougar Point",	0, 0 },
	{ HDA_INTEL_PATSBURG,"Intel Patsburg",		0, 0 },
	{ HDA_INTEL_PPT1,    "Intel Panther Point",	0, 0 },
	{ HDA_INTEL_LPT1,    "Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_LPT2,    "Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_WCPT,    "Intel Wildcat Point",	0, 0 },
	{ 
HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 96 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 97 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 98 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 99 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 100 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 101 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 102 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 103 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 104 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 105 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 106 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 107 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 108 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 109 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 110 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 111 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 112 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 113 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 114 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 115 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 116 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 117 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 118 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 119 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 120 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 121 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 122 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 123 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 124 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 125 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 126 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 127 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 128 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 129 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 130 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 131 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 132 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 133 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 134 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 135 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 136 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 137 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 138 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 139 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 140 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 141 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 142 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 143 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 144 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 145 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 146 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 147 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 148 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 149 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 150 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 151 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 152 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 153 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 154 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 155 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 156 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 157 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 158 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 159 { HDA_ATI_R600, "ATI R600", 0, 0 }, 160 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 161 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 162 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 163 { 
HDA_ATI_RV635, "ATI RV635", 0, 0 }, 164 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 165 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 166 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 167 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 168 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 169 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 170 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 171 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 172 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 173 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 174 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 175 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 176 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 177 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 178 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 179 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 180 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 181 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 182 /* Unknown */ 183 { HDA_INTEL_ALL, "Intel", 0, 0 }, 184 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 185 { HDA_ATI_ALL, "ATI", 0, 0 }, 186 { HDA_AMD_ALL, "AMD", 0, 0 }, 187 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 188 { HDA_VIA_ALL, "VIA", 0, 0 }, 189 { HDA_SIS_ALL, "SiS", 0, 0 }, 190 { HDA_ULI_ALL, "ULI", 0, 0 }, 191 }; 192 193 static const struct { 194 uint16_t vendor; 195 uint8_t reg; 196 uint8_t mask; 197 uint8_t enable; 198 } hdac_pcie_snoop[] = { 199 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 200 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 201 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 202 }; 203 204 /**************************************************************************** 205 * Function prototypes 206 ****************************************************************************/ 207 static void hdac_intr_handler(void *); 208 static int hdac_reset(struct hdac_softc *, int); 209 static int hdac_get_capabilities(struct hdac_softc *); 210 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 211 static int hdac_dma_alloc(struct hdac_softc *, 212 struct hdac_dma *, bus_size_t); 213 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 214 static int hdac_mem_alloc(struct hdac_softc *); 215 static void hdac_mem_free(struct hdac_softc *); 216 static int hdac_irq_alloc(struct hdac_softc *); 217 static void hdac_irq_free(struct hdac_softc *); 218 static void hdac_corb_init(struct hdac_softc *); 219 static void hdac_rirb_init(struct hdac_softc *); 220 static void hdac_corb_start(struct hdac_softc *); 221 static void hdac_rirb_start(struct hdac_softc *); 222 223 static void hdac_attach2(void *); 224 225 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 226 227 static int hdac_probe(device_t); 228 static int hdac_attach(device_t); 229 static int hdac_detach(device_t); 230 static int hdac_suspend(device_t); 231 static int hdac_resume(device_t); 232 233 static int hdac_rirb_flush(struct hdac_softc *sc); 234 static int hdac_unsolq_flush(struct hdac_softc *sc); 235 236 #define hdac_command(a1, a2, a3) \ 237 hdac_send_command(a1, a3, a2) 238 239 /* This function surely going to make its way into upper level someday. 
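 *
 * The "config" string parsed below is a comma/whitespace separated list of
 * the quirk names from hdac_quirks_tab above ("64bit", "dmapos", "msi");
 * a "no" prefix clears the quirk instead of setting it.  Purely as an
 * illustration (the unit number depends on which controller instance is
 * meant), a /boot/device.hints entry could look like:
 *
 *	hint.hdac.0.config="dmapos,nomsi"
 *
 * which would request the DMA position buffer and disable MSI for hdac0.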
 */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}

/****************************************************************************
 * void hdac_intr_handler(void *)
 *
 * Interrupt handler. Processes interrupts received from the hdac.
 ****************************************************************************/
static void
hdac_intr_handler(void *context)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t intsts;
	uint8_t rirbsts;
	int i;

	sc = (struct hdac_softc *)context;
	hdac_lock(sc);

	/* Do we have anything to do? */
	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	if ((intsts & HDAC_INTSTS_GIS) == 0) {
		hdac_unlock(sc);
		return;
	}

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		/* Get as many responses as we can */
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}

	HDAC_WRITE_4(&sc->mem, HDAC_INTSTS, intsts);
	hdac_unlock(sc);
}

static void
hdac_poll_callback(void *arg)
{
	struct hdac_softc *sc = arg;

	if (sc == NULL)
		return;

	hdac_lock(sc);
	if (sc->polling == 0) {
		hdac_unlock(sc);
		return;
	}
	callout_reset(&sc->poll_callout, sc->poll_ival,
	    hdac_poll_callback, sc);
	hdac_unlock(sc);

	hdac_intr_handler(sc);
}

/****************************************************************************
 * int hdac_reset(hdac_softc *, int)
 *
 * Reset the hdac to a quiescent and known state.
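 *
 * A rough outline of the sequence implemented below (for wakeup != 0):
 *
 *	1. Stop every stream DMA engine and the CORB/RIRB engines.
 *	2. Clear GCTL.CRST and poll (up to 10000 * 10us, i.e. about 100ms)
 *	   until it reads back as zero, meaning the controller is in reset.
 *	3. Hold the reset for at least 100us, set CRST again and poll until
 *	   it reads back as one.
 *	4. Wait roughly 1ms so the codecs on the link can run their own
 *	   reset sequence.
 *
 * This list is only a summary of the code that follows; the register and
 * timing details live in the function body.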
375 ****************************************************************************/ 376 static int 377 hdac_reset(struct hdac_softc *sc, int wakeup) 378 { 379 uint32_t gctl; 380 int count, i; 381 382 /* 383 * Stop all Streams DMA engine 384 */ 385 for (i = 0; i < sc->num_iss; i++) 386 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 387 for (i = 0; i < sc->num_oss; i++) 388 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 389 for (i = 0; i < sc->num_bss; i++) 390 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 391 392 /* 393 * Stop Control DMA engines. 394 */ 395 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 396 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 397 398 /* 399 * Reset DMA position buffer. 400 */ 401 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 402 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 403 404 /* 405 * Reset the controller. The reset must remain asserted for 406 * a minimum of 100us. 407 */ 408 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 409 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 410 count = 10000; 411 do { 412 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 413 if (!(gctl & HDAC_GCTL_CRST)) 414 break; 415 DELAY(10); 416 } while (--count); 417 if (gctl & HDAC_GCTL_CRST) { 418 device_printf(sc->dev, "Unable to put hdac in reset\n"); 419 return (ENXIO); 420 } 421 422 /* If wakeup is not requested - leave the controller in reset state. */ 423 if (!wakeup) 424 return (0); 425 426 DELAY(100); 427 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 428 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 429 count = 10000; 430 do { 431 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 432 if (gctl & HDAC_GCTL_CRST) 433 break; 434 DELAY(10); 435 } while (--count); 436 if (!(gctl & HDAC_GCTL_CRST)) { 437 device_printf(sc->dev, "Device stuck in reset\n"); 438 return (ENXIO); 439 } 440 441 /* 442 * Wait for codecs to finish their own reset sequence. The delay here 443 * should be of 250us but for some reasons, it's not enough on my 444 * computer. Let's use twice as much as necessary to make sure that 445 * it's reset properly. 
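	 *
	 * Once this delay has elapsed, codecs that came up on the link
	 * announce themselves through the STATESTS wake bits, which
	 * hdac_attach2() later scans roughly like this (a sketch of the
	 * code further down in this file, not a new interface):
	 *
	 *	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
	 *	for (i = 0; i < HDAC_CODEC_MAX; i++)
	 *		if (HDAC_STATESTS_SDIWAKE(statests, i))
	 *			(probe the codec at address i)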
446 */ 447 DELAY(1000); 448 449 return (0); 450 } 451 452 453 /**************************************************************************** 454 * int hdac_get_capabilities(struct hdac_softc *); 455 * 456 * Retreive the general capabilities of the hdac; 457 * Number of Input Streams 458 * Number of Output Streams 459 * Number of bidirectional Streams 460 * 64bit ready 461 * CORB and RIRB sizes 462 ****************************************************************************/ 463 static int 464 hdac_get_capabilities(struct hdac_softc *sc) 465 { 466 uint16_t gcap; 467 uint8_t corbsize, rirbsize; 468 469 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 470 sc->num_iss = HDAC_GCAP_ISS(gcap); 471 sc->num_oss = HDAC_GCAP_OSS(gcap); 472 sc->num_bss = HDAC_GCAP_BSS(gcap); 473 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 474 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 475 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 476 if (sc->quirks_on & HDAC_QUIRK_64BIT) 477 sc->support_64bit = 1; 478 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 479 sc->support_64bit = 0; 480 481 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 482 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 483 HDAC_CORBSIZE_CORBSZCAP_256) 484 sc->corb_size = 256; 485 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 486 HDAC_CORBSIZE_CORBSZCAP_16) 487 sc->corb_size = 16; 488 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 489 HDAC_CORBSIZE_CORBSZCAP_2) 490 sc->corb_size = 2; 491 else { 492 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 493 __func__, corbsize); 494 return (ENXIO); 495 } 496 497 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 498 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 499 HDAC_RIRBSIZE_RIRBSZCAP_256) 500 sc->rirb_size = 256; 501 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 502 HDAC_RIRBSIZE_RIRBSZCAP_16) 503 sc->rirb_size = 16; 504 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 505 HDAC_RIRBSIZE_RIRBSZCAP_2) 506 sc->rirb_size = 2; 507 else { 508 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 509 __func__, rirbsize); 510 return (ENXIO); 511 } 512 513 HDA_BOOTVERBOSE( 514 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 515 "NSDO %d%s, CORB %d, RIRB %d\n", 516 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 517 sc->support_64bit ? ", 64bit" : "", 518 sc->corb_size, sc->rirb_size); 519 ); 520 521 return (0); 522 } 523 524 525 /**************************************************************************** 526 * void hdac_dma_cb 527 * 528 * This function is called by bus_dmamap_load when the mapping has been 529 * established. We just record the physical address of the mapping into 530 * the struct hdac_dma passed in. 531 ****************************************************************************/ 532 static void 533 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 534 { 535 struct hdac_dma *dma; 536 537 if (error == 0) { 538 dma = (struct hdac_dma *)callback_arg; 539 dma->dma_paddr = segs[0].ds_addr; 540 } 541 } 542 543 544 /**************************************************************************** 545 * int hdac_dma_alloc 546 * 547 * This function allocate and setup a dma region (struct hdac_dma). 548 * It must be freed by a corresponding hdac_dma_free. 
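 *
 * Typical usage, taken from hdac_attach() below and shown here only as an
 * example of the calling convention:
 *
 *	result = hdac_dma_alloc(sc, &sc->corb_dma,
 *	    sc->corb_size * sizeof(uint32_t));
 *	...
 *	hdac_dma_free(sc, &sc->corb_dma);
 *
 * On success dma->dma_vaddr and dma->dma_paddr hold the kernel virtual and
 * bus addresses of a buffer of at least the requested size, rounded up to
 * HDA_DMA_ALIGNMENT.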
549 ****************************************************************************/ 550 static int 551 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 552 { 553 bus_size_t roundsz; 554 int result; 555 556 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 557 bzero(dma, sizeof(*dma)); 558 559 /* 560 * Create a DMA tag 561 */ 562 result = bus_dma_tag_create( 563 bus_get_dma_tag(sc->dev), /* parent */ 564 HDA_DMA_ALIGNMENT, /* alignment */ 565 0, /* boundary */ 566 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 567 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 568 BUS_SPACE_MAXADDR, /* highaddr */ 569 NULL, /* filtfunc */ 570 NULL, /* fistfuncarg */ 571 roundsz, /* maxsize */ 572 1, /* nsegments */ 573 roundsz, /* maxsegsz */ 574 0, /* flags */ 575 NULL, /* lockfunc */ 576 NULL, /* lockfuncarg */ 577 &dma->dma_tag); /* dmat */ 578 if (result != 0) { 579 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 580 __func__, result); 581 goto hdac_dma_alloc_fail; 582 } 583 584 /* 585 * Allocate DMA memory 586 */ 587 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 588 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 589 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 590 BUS_DMA_COHERENT), 591 &dma->dma_map); 592 if (result != 0) { 593 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 594 __func__, result); 595 goto hdac_dma_alloc_fail; 596 } 597 598 dma->dma_size = roundsz; 599 600 /* 601 * Map the memory 602 */ 603 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 604 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 605 if (result != 0 || dma->dma_paddr == 0) { 606 if (result == 0) 607 result = ENOMEM; 608 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 609 __func__, result); 610 goto hdac_dma_alloc_fail; 611 } 612 613 HDA_BOOTHVERBOSE( 614 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 615 __func__, (uintmax_t)size, (uintmax_t)roundsz); 616 ); 617 618 return (0); 619 620 hdac_dma_alloc_fail: 621 hdac_dma_free(sc, dma); 622 623 return (result); 624 } 625 626 627 /**************************************************************************** 628 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 629 * 630 * Free a struct dhac_dma that has been previously allocated via the 631 * hdac_dma_alloc function. 632 ****************************************************************************/ 633 static void 634 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 635 { 636 if (dma->dma_paddr != 0) { 637 /* Flush caches */ 638 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 639 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 640 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 641 dma->dma_paddr = 0; 642 } 643 if (dma->dma_vaddr != NULL) { 644 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 645 dma->dma_vaddr = NULL; 646 } 647 if (dma->dma_tag != NULL) { 648 bus_dma_tag_destroy(dma->dma_tag); 649 dma->dma_tag = NULL; 650 } 651 dma->dma_size = 0; 652 } 653 654 /**************************************************************************** 655 * int hdac_mem_alloc(struct hdac_softc *) 656 * 657 * Allocate all the bus resources necessary to speak with the physical 658 * controller. 
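 *
 * The controller exposes all of its registers through the single memory
 * BAR at PCIR_BAR(0); the bus tag and handle saved here are what the
 * HDAC_READ_*()/HDAC_WRITE_*() accessors used throughout this file operate
 * on.  Presumably (the macros live in hdac_private.h, so this is only an
 * assumption for illustration) a 32-bit read amounts to little more than:
 *
 *	bus_space_read_4(mem->mem_tag, mem->mem_handle, offset);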
659 ****************************************************************************/ 660 static int 661 hdac_mem_alloc(struct hdac_softc *sc) 662 { 663 struct hdac_mem *mem; 664 665 mem = &sc->mem; 666 mem->mem_rid = PCIR_BAR(0); 667 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 668 &mem->mem_rid, RF_ACTIVE); 669 if (mem->mem_res == NULL) { 670 device_printf(sc->dev, 671 "%s: Unable to allocate memory resource\n", __func__); 672 return (ENOMEM); 673 } 674 mem->mem_tag = rman_get_bustag(mem->mem_res); 675 mem->mem_handle = rman_get_bushandle(mem->mem_res); 676 677 return (0); 678 } 679 680 /**************************************************************************** 681 * void hdac_mem_free(struct hdac_softc *) 682 * 683 * Free up resources previously allocated by hdac_mem_alloc. 684 ****************************************************************************/ 685 static void 686 hdac_mem_free(struct hdac_softc *sc) 687 { 688 struct hdac_mem *mem; 689 690 mem = &sc->mem; 691 if (mem->mem_res != NULL) 692 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 693 mem->mem_res); 694 mem->mem_res = NULL; 695 } 696 697 /**************************************************************************** 698 * int hdac_irq_alloc(struct hdac_softc *) 699 * 700 * Allocate and setup the resources necessary for interrupt handling. 701 ****************************************************************************/ 702 static int 703 hdac_irq_alloc(struct hdac_softc *sc) 704 { 705 struct hdac_irq *irq; 706 int result; 707 708 irq = &sc->irq; 709 irq->irq_rid = 0x0; 710 711 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 712 (result = pci_msi_count(sc->dev)) == 1 && 713 pci_alloc_msi(sc->dev, &result) == 0) 714 irq->irq_rid = 0x1; 715 716 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 717 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 718 if (irq->irq_res == NULL) { 719 device_printf(sc->dev, "%s: Unable to allocate irq\n", 720 __func__); 721 goto hdac_irq_alloc_fail; 722 } 723 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 724 NULL, hdac_intr_handler, sc, &irq->irq_handle); 725 if (result != 0) { 726 device_printf(sc->dev, 727 "%s: Unable to setup interrupt handler (%d)\n", 728 __func__, result); 729 goto hdac_irq_alloc_fail; 730 } 731 732 return (0); 733 734 hdac_irq_alloc_fail: 735 hdac_irq_free(sc); 736 737 return (ENXIO); 738 } 739 740 /**************************************************************************** 741 * void hdac_irq_free(struct hdac_softc *) 742 * 743 * Free up resources previously allocated by hdac_irq_alloc. 744 ****************************************************************************/ 745 static void 746 hdac_irq_free(struct hdac_softc *sc) 747 { 748 struct hdac_irq *irq; 749 750 irq = &sc->irq; 751 if (irq->irq_res != NULL && irq->irq_handle != NULL) 752 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 753 if (irq->irq_res != NULL) 754 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 755 irq->irq_res); 756 if (irq->irq_rid == 0x1) 757 pci_release_msi(sc->dev); 758 irq->irq_handle = NULL; 759 irq->irq_res = NULL; 760 irq->irq_rid = 0x0; 761 } 762 763 /**************************************************************************** 764 * void hdac_corb_init(struct hdac_softc *) 765 * 766 * Initialize the corb registers for operations but do not start it up yet. 767 * The CORB engine must not be running when this function is called. 
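 *
 * For reference (this is HDA specification layout, not something defined
 * in this file): each CORB entry is one 32-bit command word with the codec
 * address in bits 31:28, the widget NID in bits 27:20 and the verb plus
 * payload in bits 19:0.  hdac_send_command() below fills in the codec
 * address field of such a word, stores it at the next write-pointer slot
 * and then tells the hardware about it, roughly:
 *
 *	sc->corb_wp = (sc->corb_wp + 1) % sc->corb_size;
 *	corb[sc->corb_wp] = htole32(verb);
 *	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);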
768 ****************************************************************************/ 769 static void 770 hdac_corb_init(struct hdac_softc *sc) 771 { 772 uint8_t corbsize; 773 uint64_t corbpaddr; 774 775 /* Setup the CORB size. */ 776 switch (sc->corb_size) { 777 case 256: 778 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 779 break; 780 case 16: 781 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 782 break; 783 case 2: 784 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 785 break; 786 default: 787 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 788 } 789 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 790 791 /* Setup the CORB Address in the hdac */ 792 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 793 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 794 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 795 796 /* Set the WP and RP */ 797 sc->corb_wp = 0; 798 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 799 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 800 /* 801 * The HDA specification indicates that the CORBRPRST bit will always 802 * read as zero. Unfortunately, it seems that at least the 82801G 803 * doesn't reset the bit to zero, which stalls the corb engine. 804 * manually reset the bit to zero before continuing. 805 */ 806 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 807 808 /* Enable CORB error reporting */ 809 #if 0 810 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 811 #endif 812 } 813 814 /**************************************************************************** 815 * void hdac_rirb_init(struct hdac_softc *) 816 * 817 * Initialize the rirb registers for operations but do not start it up yet. 818 * The RIRB engine must not be running when this function is called. 819 ****************************************************************************/ 820 static void 821 hdac_rirb_init(struct hdac_softc *sc) 822 { 823 uint8_t rirbsize; 824 uint64_t rirbpaddr; 825 826 /* Setup the RIRB size. */ 827 switch (sc->rirb_size) { 828 case 256: 829 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 830 break; 831 case 16: 832 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 833 break; 834 case 2: 835 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 836 break; 837 default: 838 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 839 } 840 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 841 842 /* Setup the RIRB Address in the hdac */ 843 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 844 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 845 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 846 847 /* Setup the WP and RP */ 848 sc->rirb_rp = 0; 849 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 850 851 /* Setup the interrupt threshold */ 852 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 853 854 /* Enable Overrun and response received reporting */ 855 #if 0 856 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 857 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 858 #else 859 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 860 #endif 861 862 /* 863 * Make sure that the Host CPU cache doesn't contain any dirty 864 * cache lines that falls in the rirb. If I understood correctly, it 865 * should be sufficient to do this only once as the rirb is purely 866 * read-only from now on. 
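	 *
	 * For reference, each RIRB entry written by the controller is a
	 * pair of little-endian 32-bit words (struct hdac_rirb): the
	 * response itself and an "extended" word whose low four bits carry
	 * the responding codec address and whose bit 4 flags an unsolicited
	 * response.  That is what hdac_rirb_flush() decodes with
	 * HDAC_RIRB_RESPONSE_EX_SDATA_IN() and
	 * HDAC_RIRB_RESPONSE_EX_UNSOLICITED.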
867 */ 868 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 869 BUS_DMASYNC_PREREAD); 870 } 871 872 /**************************************************************************** 873 * void hdac_corb_start(hdac_softc *) 874 * 875 * Startup the corb DMA engine 876 ****************************************************************************/ 877 static void 878 hdac_corb_start(struct hdac_softc *sc) 879 { 880 uint32_t corbctl; 881 882 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 883 corbctl |= HDAC_CORBCTL_CORBRUN; 884 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 885 } 886 887 /**************************************************************************** 888 * void hdac_rirb_start(hdac_softc *) 889 * 890 * Startup the rirb DMA engine 891 ****************************************************************************/ 892 static void 893 hdac_rirb_start(struct hdac_softc *sc) 894 { 895 uint32_t rirbctl; 896 897 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 898 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 899 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 900 } 901 902 static int 903 hdac_rirb_flush(struct hdac_softc *sc) 904 { 905 struct hdac_rirb *rirb_base, *rirb; 906 nid_t cad; 907 uint32_t resp, resp_ex; 908 uint8_t rirbwp; 909 int ret; 910 911 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 912 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 913 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 914 BUS_DMASYNC_POSTREAD); 915 916 ret = 0; 917 while (sc->rirb_rp != rirbwp) { 918 sc->rirb_rp++; 919 sc->rirb_rp %= sc->rirb_size; 920 rirb = &rirb_base[sc->rirb_rp]; 921 resp = le32toh(rirb->response); 922 resp_ex = le32toh(rirb->response_ex); 923 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 924 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 925 sc->unsolq[sc->unsolq_wp++] = resp; 926 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 927 sc->unsolq[sc->unsolq_wp++] = cad; 928 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 929 } else if (sc->codecs[cad].pending <= 0) { 930 device_printf(sc->dev, "Unexpected unsolicited " 931 "response from address %d: %08x\n", cad, resp); 932 } else { 933 sc->codecs[cad].response = resp; 934 sc->codecs[cad].pending--; 935 } 936 ret++; 937 } 938 939 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 940 BUS_DMASYNC_PREREAD); 941 return (ret); 942 } 943 944 static int 945 hdac_unsolq_flush(struct hdac_softc *sc) 946 { 947 device_t child; 948 nid_t cad; 949 uint32_t resp; 950 int ret = 0; 951 952 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 953 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 954 while (sc->unsolq_rp != sc->unsolq_wp) { 955 resp = sc->unsolq[sc->unsolq_rp++]; 956 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 957 cad = sc->unsolq[sc->unsolq_rp++]; 958 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 959 if ((child = sc->codecs[cad].dev) != NULL) 960 HDAC_UNSOL_INTR(child, resp); 961 ret++; 962 } 963 sc->unsolq_st = HDAC_UNSOLQ_READY; 964 } 965 966 return (ret); 967 } 968 969 /**************************************************************************** 970 * uint32_t hdac_command_sendone_internal 971 * 972 * Wrapper function that sends only one command to a given codec 973 ****************************************************************************/ 974 static uint32_t 975 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 976 { 977 int timeout; 978 uint32_t *corb; 979 980 if (!hdac_lockowned(sc)) 981 device_printf(sc->dev, "WARNING!!!! 
mtx not owned!!!!\n"); 982 verb &= ~HDA_CMD_CAD_MASK; 983 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 984 sc->codecs[cad].response = HDA_INVALID; 985 986 sc->codecs[cad].pending++; 987 sc->corb_wp++; 988 sc->corb_wp %= sc->corb_size; 989 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 990 bus_dmamap_sync(sc->corb_dma.dma_tag, 991 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 992 corb[sc->corb_wp] = htole32(verb); 993 bus_dmamap_sync(sc->corb_dma.dma_tag, 994 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 995 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 996 997 timeout = 10000; 998 do { 999 if (hdac_rirb_flush(sc) == 0) 1000 DELAY(10); 1001 } while (sc->codecs[cad].pending != 0 && --timeout); 1002 1003 if (sc->codecs[cad].pending != 0) { 1004 device_printf(sc->dev, "Command timeout on address %d\n", cad); 1005 sc->codecs[cad].pending = 0; 1006 } 1007 1008 if (sc->unsolq_rp != sc->unsolq_wp) 1009 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1010 return (sc->codecs[cad].response); 1011 } 1012 1013 /**************************************************************************** 1014 * Device Methods 1015 ****************************************************************************/ 1016 1017 /**************************************************************************** 1018 * int hdac_probe(device_t) 1019 * 1020 * Probe for the presence of an hdac. If none is found, check for a generic 1021 * match using the subclass of the device. 1022 ****************************************************************************/ 1023 static int 1024 hdac_probe(device_t dev) 1025 { 1026 int i, result; 1027 uint32_t model; 1028 uint16_t class, subclass; 1029 char desc[64]; 1030 1031 model = (uint32_t)pci_get_device(dev) << 16; 1032 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1033 class = pci_get_class(dev); 1034 subclass = pci_get_subclass(dev); 1035 1036 bzero(desc, sizeof(desc)); 1037 result = ENXIO; 1038 for (i = 0; i < nitems(hdac_devices); i++) { 1039 if (hdac_devices[i].model == model) { 1040 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1041 result = BUS_PROBE_DEFAULT; 1042 break; 1043 } 1044 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1045 class == PCIC_MULTIMEDIA && 1046 subclass == PCIS_MULTIMEDIA_HDA) { 1047 snprintf(desc, sizeof(desc), 1048 "%s (0x%04x)", 1049 hdac_devices[i].desc, pci_get_device(dev)); 1050 result = BUS_PROBE_GENERIC; 1051 break; 1052 } 1053 } 1054 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1055 subclass == PCIS_MULTIMEDIA_HDA) { 1056 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1057 result = BUS_PROBE_GENERIC; 1058 } 1059 if (result != ENXIO) { 1060 strlcat(desc, " HDA Controller", sizeof(desc)); 1061 device_set_desc_copy(dev, desc); 1062 } 1063 1064 return (result); 1065 } 1066 1067 static void 1068 hdac_unsolq_task(void *context, int pending) 1069 { 1070 struct hdac_softc *sc; 1071 1072 sc = (struct hdac_softc *)context; 1073 1074 hdac_lock(sc); 1075 hdac_unsolq_flush(sc); 1076 hdac_unlock(sc); 1077 } 1078 1079 /**************************************************************************** 1080 * int hdac_attach(device_t) 1081 * 1082 * Attach the device into the kernel. Interrupts usually won't be enabled 1083 * when this function is called. Setup everything that doesn't require 1084 * interrupts and defer probing of codecs until interrupts are enabled. 
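 *
 * Controller quirks are accumulated in three steps below: the defaults for
 * the matched hdac_devices[] entry, then the per-device "msi" hint, and
 * finally the free-form "config" hint parsed by hdac_config_fetch().  For
 * example (illustrative hint only, any unit number works the same way):
 *
 *	hint.hdac.0.msi="0"
 *
 * forces the driver to fall back to a legacy interrupt for hdac0.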
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
	/*
	 * Try to enable PCIe snoop to avoid messing around with
	 * uncacheable DMA attribute. Since PCIe snoop register
	 * config is pretty much vendor specific, there are no
	 * general solutions on how to enable it, forcing us (even
	 * Microsoft) to enable uncacheable or write combined DMA
	 * by default.
1186 * 1187 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1188 */ 1189 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1190 if (hdac_pcie_snoop[i].vendor != vendor) 1191 continue; 1192 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1193 if (hdac_pcie_snoop[i].reg == 0x00) 1194 break; 1195 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1196 if ((v & hdac_pcie_snoop[i].enable) == 1197 hdac_pcie_snoop[i].enable) 1198 break; 1199 v &= hdac_pcie_snoop[i].mask; 1200 v |= hdac_pcie_snoop[i].enable; 1201 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1202 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1203 if ((v & hdac_pcie_snoop[i].enable) != 1204 hdac_pcie_snoop[i].enable) { 1205 HDA_BOOTVERBOSE( 1206 device_printf(dev, 1207 "WARNING: Failed to enable PCIe " 1208 "snoop!\n"); 1209 ); 1210 #if defined(__i386__) || defined(__amd64__) 1211 sc->flags |= HDAC_F_DMA_NOCACHE; 1212 #endif 1213 } 1214 break; 1215 } 1216 #if defined(__i386__) || defined(__amd64__) 1217 } 1218 #endif 1219 1220 HDA_BOOTHVERBOSE( 1221 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1222 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1223 "Uncacheable" : "PCIe snoop", vendor); 1224 ); 1225 1226 /* Allocate resources */ 1227 result = hdac_mem_alloc(sc); 1228 if (result != 0) 1229 goto hdac_attach_fail; 1230 result = hdac_irq_alloc(sc); 1231 if (result != 0) 1232 goto hdac_attach_fail; 1233 1234 /* Get Capabilities */ 1235 result = hdac_get_capabilities(sc); 1236 if (result != 0) 1237 goto hdac_attach_fail; 1238 1239 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1240 result = hdac_dma_alloc(sc, &sc->corb_dma, 1241 sc->corb_size * sizeof(uint32_t)); 1242 if (result != 0) 1243 goto hdac_attach_fail; 1244 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1245 sc->rirb_size * sizeof(struct hdac_rirb)); 1246 if (result != 0) 1247 goto hdac_attach_fail; 1248 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1249 M_HDAC, M_ZERO | M_WAITOK); 1250 for (i = 0; i < sc->num_ss; i++) { 1251 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1252 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1253 if (result != 0) 1254 goto hdac_attach_fail; 1255 } 1256 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1257 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1258 HDA_BOOTVERBOSE( 1259 device_printf(dev, "Failed to " 1260 "allocate DMA pos buffer " 1261 "(non-fatal)\n"); 1262 ); 1263 } else { 1264 uint64_t addr = sc->pos_dma.dma_paddr; 1265 1266 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1267 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1268 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1269 HDAC_DPLBASE_DPLBASE_DMAPBE); 1270 } 1271 } 1272 1273 result = bus_dma_tag_create( 1274 bus_get_dma_tag(sc->dev), /* parent */ 1275 HDA_DMA_ALIGNMENT, /* alignment */ 1276 0, /* boundary */ 1277 (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : 1278 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1279 BUS_SPACE_MAXADDR, /* highaddr */ 1280 NULL, /* filtfunc */ 1281 NULL, /* fistfuncarg */ 1282 HDA_BUFSZ_MAX, /* maxsize */ 1283 1, /* nsegments */ 1284 HDA_BUFSZ_MAX, /* maxsegsz */ 1285 0, /* flags */ 1286 NULL, /* lockfunc */ 1287 NULL, /* lockfuncarg */ 1288 &sc->chan_dmat); /* dmat */ 1289 if (result != 0) { 1290 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1291 __func__, result); 1292 goto hdac_attach_fail; 1293 } 1294 1295 /* Quiesce everything */ 1296 HDA_BOOTHVERBOSE( 1297 device_printf(dev, "Reset controller...\n"); 1298 ); 1299 hdac_reset(sc, 1); 1300 1301 /* Initialize the CORB and RIRB */ 1302 hdac_corb_init(sc); 1303 hdac_rirb_init(sc); 1304 1305 /* Defer remaining of initialization until interrupts are enabled */ 1306 sc->intrhook.ich_func = hdac_attach2; 1307 sc->intrhook.ich_arg = (void *)sc; 1308 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1309 sc->intrhook.ich_func = NULL; 1310 hdac_attach2((void *)sc); 1311 } 1312 1313 return (0); 1314 1315 hdac_attach_fail: 1316 hdac_irq_free(sc); 1317 if (sc->streams != NULL) 1318 for (i = 0; i < sc->num_ss; i++) 1319 hdac_dma_free(sc, &sc->streams[i].bdl); 1320 free(sc->streams, M_HDAC); 1321 hdac_dma_free(sc, &sc->rirb_dma); 1322 hdac_dma_free(sc, &sc->corb_dma); 1323 hdac_mem_free(sc); 1324 snd_mtxfree(sc->lock); 1325 1326 return (ENXIO); 1327 } 1328 1329 static int 1330 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1331 { 1332 struct hdac_softc *sc; 1333 device_t *devlist; 1334 device_t dev; 1335 int devcount, i, err, val; 1336 1337 dev = oidp->oid_arg1; 1338 sc = device_get_softc(dev); 1339 if (sc == NULL) 1340 return (EINVAL); 1341 val = 0; 1342 err = sysctl_handle_int(oidp, &val, 0, req); 1343 if (err != 0 || req->newptr == NULL || val == 0) 1344 return (err); 1345 1346 /* XXX: Temporary. For debugging. 
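	 *
	 * From userland this handler is reachable through the per-device
	 * sysctl tree, e.g. (hypothetical unit number):
	 *
	 *	sysctl dev.hdac.0.pindump=1
	 *
	 * Any other non-zero value triggers the pin dump for all codec
	 * children as well, while the magic values 100 and 101 below force
	 * a suspend or a resume respectively.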
*/ 1347 if (val == 100) { 1348 hdac_suspend(dev); 1349 return (0); 1350 } else if (val == 101) { 1351 hdac_resume(dev); 1352 return (0); 1353 } 1354 1355 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) 1356 return (err); 1357 hdac_lock(sc); 1358 for (i = 0; i < devcount; i++) 1359 HDAC_PINDUMP(devlist[i]); 1360 hdac_unlock(sc); 1361 free(devlist, M_TEMP); 1362 return (0); 1363 } 1364 1365 static int 1366 hdac_mdata_rate(uint16_t fmt) 1367 { 1368 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1369 int rate, bits; 1370 1371 if (fmt & (1 << 14)) 1372 rate = 44100; 1373 else 1374 rate = 48000; 1375 rate *= ((fmt >> 11) & 0x07) + 1; 1376 rate /= ((fmt >> 8) & 0x07) + 1; 1377 bits = mbits[(fmt >> 4) & 0x03]; 1378 bits *= (fmt & 0x0f) + 1; 1379 return (rate * bits); 1380 } 1381 1382 static int 1383 hdac_bdata_rate(uint16_t fmt, int output) 1384 { 1385 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1386 int rate, bits; 1387 1388 rate = 48000; 1389 rate *= ((fmt >> 11) & 0x07) + 1; 1390 bits = bbits[(fmt >> 4) & 0x03]; 1391 bits *= (fmt & 0x0f) + 1; 1392 if (!output) 1393 bits = ((bits + 7) & ~0x07) + 10; 1394 return (rate * bits); 1395 } 1396 1397 static void 1398 hdac_poll_reinit(struct hdac_softc *sc) 1399 { 1400 int i, pollticks, min = 1000000; 1401 struct hdac_stream *s; 1402 1403 if (sc->polling == 0) 1404 return; 1405 if (sc->unsol_registered > 0) 1406 min = hz / 2; 1407 for (i = 0; i < sc->num_ss; i++) { 1408 s = &sc->streams[i]; 1409 if (s->running == 0) 1410 continue; 1411 pollticks = ((uint64_t)hz * s->blksz) / 1412 (hdac_mdata_rate(s->format) / 8); 1413 pollticks >>= 1; 1414 if (pollticks > hz) 1415 pollticks = hz; 1416 if (pollticks < 1) { 1417 HDA_BOOTVERBOSE( 1418 device_printf(sc->dev, 1419 "poll interval < 1 tick !\n"); 1420 ); 1421 pollticks = 1; 1422 } 1423 if (min > pollticks) 1424 min = pollticks; 1425 } 1426 HDA_BOOTVERBOSE( 1427 device_printf(sc->dev, 1428 "poll interval %d -> %d ticks\n", 1429 sc->poll_ival, min); 1430 ); 1431 sc->poll_ival = min; 1432 if (min == 1000000) 1433 callout_stop(&sc->poll_callout); 1434 else 1435 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1436 } 1437 1438 static int 1439 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1440 { 1441 struct hdac_softc *sc; 1442 device_t dev; 1443 uint32_t ctl; 1444 int err, val; 1445 1446 dev = oidp->oid_arg1; 1447 sc = device_get_softc(dev); 1448 if (sc == NULL) 1449 return (EINVAL); 1450 hdac_lock(sc); 1451 val = sc->polling; 1452 hdac_unlock(sc); 1453 err = sysctl_handle_int(oidp, &val, 0, req); 1454 1455 if (err != 0 || req->newptr == NULL) 1456 return (err); 1457 if (val < 0 || val > 1) 1458 return (EINVAL); 1459 1460 hdac_lock(sc); 1461 if (val != sc->polling) { 1462 if (val == 0) { 1463 callout_stop(&sc->poll_callout); 1464 hdac_unlock(sc); 1465 callout_drain(&sc->poll_callout); 1466 hdac_lock(sc); 1467 sc->polling = 0; 1468 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1469 ctl |= HDAC_INTCTL_GIE; 1470 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1471 } else { 1472 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1473 ctl &= ~HDAC_INTCTL_GIE; 1474 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1475 sc->polling = 1; 1476 hdac_poll_reinit(sc); 1477 } 1478 } 1479 hdac_unlock(sc); 1480 1481 return (err); 1482 } 1483 1484 static void 1485 hdac_attach2(void *arg) 1486 { 1487 struct hdac_softc *sc; 1488 device_t child; 1489 uint32_t vendorid, revisionid; 1490 int i; 1491 uint16_t statests; 1492 1493 sc = (struct hdac_softc *)arg; 1494 1495 hdac_lock(sc); 1496 1497 /* Remove 
ourselves from the config hooks */ 1498 if (sc->intrhook.ich_func != NULL) { 1499 config_intrhook_disestablish(&sc->intrhook); 1500 sc->intrhook.ich_func = NULL; 1501 } 1502 1503 HDA_BOOTHVERBOSE( 1504 device_printf(sc->dev, "Starting CORB Engine...\n"); 1505 ); 1506 hdac_corb_start(sc); 1507 HDA_BOOTHVERBOSE( 1508 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1509 ); 1510 hdac_rirb_start(sc); 1511 HDA_BOOTHVERBOSE( 1512 device_printf(sc->dev, 1513 "Enabling controller interrupt...\n"); 1514 ); 1515 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1516 HDAC_GCTL_UNSOL); 1517 if (sc->polling == 0) { 1518 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1519 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1520 } 1521 DELAY(1000); 1522 1523 HDA_BOOTHVERBOSE( 1524 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1525 ); 1526 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1527 hdac_unlock(sc); 1528 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1529 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1530 HDA_BOOTHVERBOSE( 1531 device_printf(sc->dev, 1532 "Found CODEC at address %d\n", i); 1533 ); 1534 hdac_lock(sc); 1535 vendorid = hdac_send_command(sc, i, 1536 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1537 revisionid = hdac_send_command(sc, i, 1538 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1539 hdac_unlock(sc); 1540 if (vendorid == HDA_INVALID && 1541 revisionid == HDA_INVALID) { 1542 device_printf(sc->dev, 1543 "CODEC is not responding!\n"); 1544 continue; 1545 } 1546 sc->codecs[i].vendor_id = 1547 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1548 sc->codecs[i].device_id = 1549 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1550 sc->codecs[i].revision_id = 1551 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1552 sc->codecs[i].stepping_id = 1553 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1554 child = device_add_child(sc->dev, "hdacc", -1); 1555 if (child == NULL) { 1556 device_printf(sc->dev, 1557 "Failed to add CODEC device\n"); 1558 continue; 1559 } 1560 device_set_ivars(child, (void *)(intptr_t)i); 1561 sc->codecs[i].dev = child; 1562 } 1563 } 1564 bus_generic_attach(sc->dev); 1565 1566 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1567 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1568 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), 1569 sysctl_hdac_pindump, "I", "Dump pin states/data"); 1570 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1571 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1572 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), 1573 sysctl_hdac_polling, "I", "Enable polling mode"); 1574 } 1575 1576 /**************************************************************************** 1577 * int hdac_suspend(device_t) 1578 * 1579 * Suspend and power down HDA bus and codecs. 
1580 ****************************************************************************/ 1581 static int 1582 hdac_suspend(device_t dev) 1583 { 1584 struct hdac_softc *sc = device_get_softc(dev); 1585 1586 HDA_BOOTHVERBOSE( 1587 device_printf(dev, "Suspend...\n"); 1588 ); 1589 bus_generic_suspend(dev); 1590 1591 hdac_lock(sc); 1592 HDA_BOOTHVERBOSE( 1593 device_printf(dev, "Reset controller...\n"); 1594 ); 1595 callout_stop(&sc->poll_callout); 1596 hdac_reset(sc, 0); 1597 hdac_unlock(sc); 1598 callout_drain(&sc->poll_callout); 1599 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1600 HDA_BOOTHVERBOSE( 1601 device_printf(dev, "Suspend done\n"); 1602 ); 1603 return (0); 1604 } 1605 1606 /**************************************************************************** 1607 * int hdac_resume(device_t) 1608 * 1609 * Powerup and restore HDA bus and codecs state. 1610 ****************************************************************************/ 1611 static int 1612 hdac_resume(device_t dev) 1613 { 1614 struct hdac_softc *sc = device_get_softc(dev); 1615 int error; 1616 1617 HDA_BOOTHVERBOSE( 1618 device_printf(dev, "Resume...\n"); 1619 ); 1620 hdac_lock(sc); 1621 1622 /* Quiesce everything */ 1623 HDA_BOOTHVERBOSE( 1624 device_printf(dev, "Reset controller...\n"); 1625 ); 1626 hdac_reset(sc, 1); 1627 1628 /* Initialize the CORB and RIRB */ 1629 hdac_corb_init(sc); 1630 hdac_rirb_init(sc); 1631 1632 HDA_BOOTHVERBOSE( 1633 device_printf(dev, "Starting CORB Engine...\n"); 1634 ); 1635 hdac_corb_start(sc); 1636 HDA_BOOTHVERBOSE( 1637 device_printf(dev, "Starting RIRB Engine...\n"); 1638 ); 1639 hdac_rirb_start(sc); 1640 HDA_BOOTHVERBOSE( 1641 device_printf(dev, "Enabling controller interrupt...\n"); 1642 ); 1643 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1644 HDAC_GCTL_UNSOL); 1645 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1646 DELAY(1000); 1647 hdac_poll_reinit(sc); 1648 hdac_unlock(sc); 1649 1650 error = bus_generic_resume(dev); 1651 HDA_BOOTHVERBOSE( 1652 device_printf(dev, "Resume done\n"); 1653 ); 1654 return (error); 1655 } 1656 1657 /**************************************************************************** 1658 * int hdac_detach(device_t) 1659 * 1660 * Detach and free up resources utilized by the hdac device. 
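 *
 * Codec children are deleted first; each child's ivars hold its codec
 * address (set in hdac_attach2() via device_set_ivars()), which is why the
 * loop below can clear sc->codecs[cad].dev as it goes.  Only then are the
 * controller itself and its IRQ, DMA and register resources torn down.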
1661 ****************************************************************************/ 1662 static int 1663 hdac_detach(device_t dev) 1664 { 1665 struct hdac_softc *sc = device_get_softc(dev); 1666 device_t *devlist; 1667 int cad, i, devcount, error; 1668 1669 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1670 return (error); 1671 for (i = 0; i < devcount; i++) { 1672 cad = (intptr_t)device_get_ivars(devlist[i]); 1673 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1674 free(devlist, M_TEMP); 1675 return (error); 1676 } 1677 sc->codecs[cad].dev = NULL; 1678 } 1679 free(devlist, M_TEMP); 1680 1681 hdac_lock(sc); 1682 hdac_reset(sc, 0); 1683 hdac_unlock(sc); 1684 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1685 hdac_irq_free(sc); 1686 1687 for (i = 0; i < sc->num_ss; i++) 1688 hdac_dma_free(sc, &sc->streams[i].bdl); 1689 free(sc->streams, M_HDAC); 1690 hdac_dma_free(sc, &sc->pos_dma); 1691 hdac_dma_free(sc, &sc->rirb_dma); 1692 hdac_dma_free(sc, &sc->corb_dma); 1693 if (sc->chan_dmat != NULL) { 1694 bus_dma_tag_destroy(sc->chan_dmat); 1695 sc->chan_dmat = NULL; 1696 } 1697 hdac_mem_free(sc); 1698 snd_mtxfree(sc->lock); 1699 return (0); 1700 } 1701 1702 static bus_dma_tag_t 1703 hdac_get_dma_tag(device_t dev, device_t child) 1704 { 1705 struct hdac_softc *sc = device_get_softc(dev); 1706 1707 return (sc->chan_dmat); 1708 } 1709 1710 static int 1711 hdac_print_child(device_t dev, device_t child) 1712 { 1713 int retval; 1714 1715 retval = bus_print_child_header(dev, child); 1716 retval += printf(" at cad %d", 1717 (int)(intptr_t)device_get_ivars(child)); 1718 retval += bus_print_child_footer(dev, child); 1719 1720 return (retval); 1721 } 1722 1723 static int 1724 hdac_child_location_str(device_t dev, device_t child, char *buf, 1725 size_t buflen) 1726 { 1727 1728 snprintf(buf, buflen, "cad=%d", 1729 (int)(intptr_t)device_get_ivars(child)); 1730 return (0); 1731 } 1732 1733 static int 1734 hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, 1735 size_t buflen) 1736 { 1737 struct hdac_softc *sc = device_get_softc(dev); 1738 nid_t cad = (uintptr_t)device_get_ivars(child); 1739 1740 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x revision=0x%02x " 1741 "stepping=0x%02x", 1742 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1743 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1744 return (0); 1745 } 1746 1747 static int 1748 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1749 { 1750 struct hdac_softc *sc = device_get_softc(dev); 1751 nid_t cad = (uintptr_t)device_get_ivars(child); 1752 1753 switch (which) { 1754 case HDA_IVAR_CODEC_ID: 1755 *result = cad; 1756 break; 1757 case HDA_IVAR_VENDOR_ID: 1758 *result = sc->codecs[cad].vendor_id; 1759 break; 1760 case HDA_IVAR_DEVICE_ID: 1761 *result = sc->codecs[cad].device_id; 1762 break; 1763 case HDA_IVAR_REVISION_ID: 1764 *result = sc->codecs[cad].revision_id; 1765 break; 1766 case HDA_IVAR_STEPPING_ID: 1767 *result = sc->codecs[cad].stepping_id; 1768 break; 1769 case HDA_IVAR_SUBVENDOR_ID: 1770 *result = pci_get_subvendor(dev); 1771 break; 1772 case HDA_IVAR_SUBDEVICE_ID: 1773 *result = pci_get_subdevice(dev); 1774 break; 1775 case HDA_IVAR_DMA_NOCACHE: 1776 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1777 break; 1778 case HDA_IVAR_STRIPES_MASK: 1779 *result = (1 << (1 << sc->num_sdo)) - 1; 1780 break; 1781 default: 1782 return (ENOENT); 1783 } 1784 return (0); 1785 } 1786 1787 static struct mtx * 1788 hdac_get_mtx(device_t dev, device_t child) 
1789 { 1790 struct hdac_softc *sc = device_get_softc(dev); 1791 1792 return (sc->lock); 1793 } 1794 1795 static uint32_t 1796 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1797 { 1798 1799 return (hdac_send_command(device_get_softc(dev), 1800 (intptr_t)device_get_ivars(child), verb)); 1801 } 1802 1803 static int 1804 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1805 { 1806 int i, ss; 1807 1808 ss = -1; 1809 /* Allocate ISS/OSS first. */ 1810 if (dir == 0) { 1811 for (i = 0; i < sc->num_iss; i++) { 1812 if (sc->streams[i].stream == stream) { 1813 ss = i; 1814 break; 1815 } 1816 } 1817 } else { 1818 for (i = 0; i < sc->num_oss; i++) { 1819 if (sc->streams[i + sc->num_iss].stream == stream) { 1820 ss = i + sc->num_iss; 1821 break; 1822 } 1823 } 1824 } 1825 /* Fallback to BSS. */ 1826 if (ss == -1) { 1827 for (i = 0; i < sc->num_bss; i++) { 1828 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1829 == stream) { 1830 ss = i + sc->num_iss + sc->num_oss; 1831 break; 1832 } 1833 } 1834 } 1835 return (ss); 1836 } 1837 1838 static int 1839 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1840 uint32_t **dmapos) 1841 { 1842 struct hdac_softc *sc = device_get_softc(dev); 1843 nid_t cad = (uintptr_t)device_get_ivars(child); 1844 int stream, ss, bw, maxbw, prevbw; 1845 1846 /* Look for empty stream. */ 1847 ss = hdac_find_stream(sc, dir, 0); 1848 1849 /* Return if found nothing. */ 1850 if (ss < 0) 1851 return (0); 1852 1853 /* Check bus bandwidth. */ 1854 bw = hdac_bdata_rate(format, dir); 1855 if (dir == 1) { 1856 bw *= 1 << (sc->num_sdo - stripe); 1857 prevbw = sc->sdo_bw_used; 1858 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1859 } else { 1860 prevbw = sc->codecs[cad].sdi_bw_used; 1861 maxbw = 48000 * 464; 1862 } 1863 HDA_BOOTHVERBOSE( 1864 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1865 (bw + prevbw) / 1000, maxbw / 1000, 1866 bw + prevbw > maxbw ? " -- OVERFLOW!" 
: ""); 1867 ); 1868 if (bw + prevbw > maxbw) 1869 return (0); 1870 if (dir == 1) 1871 sc->sdo_bw_used += bw; 1872 else 1873 sc->codecs[cad].sdi_bw_used += bw; 1874 1875 /* Allocate stream number */ 1876 if (ss >= sc->num_iss + sc->num_oss) 1877 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1878 else if (ss >= sc->num_iss) 1879 stream = ss - sc->num_iss + 1; 1880 else 1881 stream = ss + 1; 1882 1883 sc->streams[ss].dev = child; 1884 sc->streams[ss].dir = dir; 1885 sc->streams[ss].stream = stream; 1886 sc->streams[ss].bw = bw; 1887 sc->streams[ss].format = format; 1888 sc->streams[ss].stripe = stripe; 1889 if (dmapos != NULL) { 1890 if (sc->pos_dma.dma_vaddr != NULL) 1891 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1892 else 1893 *dmapos = NULL; 1894 } 1895 return (stream); 1896 } 1897 1898 static void 1899 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1900 { 1901 struct hdac_softc *sc = device_get_softc(dev); 1902 nid_t cad = (uintptr_t)device_get_ivars(child); 1903 int ss; 1904 1905 ss = hdac_find_stream(sc, dir, stream); 1906 KASSERT(ss >= 0, 1907 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1908 if (dir == 1) 1909 sc->sdo_bw_used -= sc->streams[ss].bw; 1910 else 1911 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1912 sc->streams[ss].stream = 0; 1913 sc->streams[ss].dev = NULL; 1914 } 1915 1916 static int 1917 hdac_stream_start(device_t dev, device_t child, 1918 int dir, int stream, bus_addr_t buf, int blksz, int blkcnt) 1919 { 1920 struct hdac_softc *sc = device_get_softc(dev); 1921 struct hdac_bdle *bdle; 1922 uint64_t addr; 1923 int i, ss, off; 1924 uint32_t ctl; 1925 1926 ss = hdac_find_stream(sc, dir, stream); 1927 KASSERT(ss >= 0, 1928 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 1929 1930 addr = (uint64_t)buf; 1931 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 1932 for (i = 0; i < blkcnt; i++, bdle++) { 1933 bdle->addrl = htole32((uint32_t)addr); 1934 bdle->addrh = htole32((uint32_t)(addr >> 32)); 1935 bdle->len = htole32(blksz); 1936 bdle->ioc = htole32(1); 1937 addr += blksz; 1938 } 1939 1940 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 1941 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 1942 1943 off = ss << 5; 1944 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 1945 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 1946 addr = sc->streams[ss].bdl.dma_paddr; 1947 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 1948 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 1949 1950 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 1951 if (dir) 1952 ctl |= HDAC_SDCTL2_DIR; 1953 else 1954 ctl &= ~HDAC_SDCTL2_DIR; 1955 ctl &= ~HDAC_SDCTL2_STRM_MASK; 1956 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 1957 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 1958 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 1959 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 1960 1961 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 1962 1963 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1964 ctl |= 1 << ss; 1965 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1966 1967 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 1968 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 1969 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 1970 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 1971 HDAC_SDCTL_RUN; 1972 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 1973 1974 sc->streams[ss].blksz = blksz; 1975 sc->streams[ss].running = 1; 1976 hdac_poll_reinit(sc); 1977 return (0); 1978 } 
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}
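/*
 * Illustrative sketch only: a codec function driver that wants unsolicited
 * responses (e.g. jack presence events) registers a tag so the controller
 * keeps servicing the RIRB for it, including in polled mode.  The
 * HDAC_UNSOL_ALLOC() wrapper name comes from hdac_if.m (not shown here) and
 * is an assumption; the tag value is chosen by the caller and simply echoed
 * back by hdac_unsol_alloc() above.
 */
#if 0	/* example only */
static int
example_register_unsol(device_t child, int tag)
{

	return (HDAC_UNSOL_ALLOC(device_get_parent(child), child, tag));
}
#endif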
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location_str, hdac_child_location_str),
	DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

static devclass_t hdac_devclass;

DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);