1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 SND_DECLARE_FILE("$FreeBSD$"); 55 56 #define hdac_lock(sc) snd_mtxlock((sc)->lock) 57 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) 58 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) 59 60 #define HDAC_QUIRK_64BIT (1 << 0) 61 #define HDAC_QUIRK_DMAPOS (1 << 1) 62 #define HDAC_QUIRK_MSI (1 << 2) 63 64 static const struct { 65 const char *key; 66 uint32_t value; 67 } hdac_quirks_tab[] = { 68 { "64bit", HDAC_QUIRK_64BIT }, 69 { "dmapos", HDAC_QUIRK_DMAPOS }, 70 { "msi", HDAC_QUIRK_MSI }, 71 }; 72 73 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 74 75 static const struct { 76 uint32_t model; 77 const char *desc; 78 char quirks_on; 79 char quirks_off; 80 } hdac_devices[] = { 81 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 82 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 83 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 84 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 85 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 87 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 88 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 89 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 90 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 91 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 92 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 93 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 94 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 95 { HDA_INTEL_LPT1, 
"Intel Lynx Point", 0, 0 }, 96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 115 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 116 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 117 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 118 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 119 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 120 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 121 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 122 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 123 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 124 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 125 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 126 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 127 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 128 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 129 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 130 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 131 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 132 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 133 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 134 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 135 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 136 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 137 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 138 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 139 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 140 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 141 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 142 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 143 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 144 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 145 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 146 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 147 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 148 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 149 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 150 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 151 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 152 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 153 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 154 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 155 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 156 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 157 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 158 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 159 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 160 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 161 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI 
}, 162 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 163 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 164 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 165 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 166 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 167 { HDA_ATI_R600, "ATI R600", 0, 0 }, 168 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 169 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 170 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 171 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 172 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 173 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 174 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 175 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 176 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 177 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 178 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 179 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 180 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 181 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 182 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 183 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 184 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 185 { HDA_AMD_X370, "AMD X370", 0, 0 }, 186 { HDA_AMD_X570, "AMD X570", 0, 0 }, 187 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 188 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 189 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 190 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 191 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 192 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 193 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 194 /* Unknown */ 195 { HDA_INTEL_ALL, "Intel", 0, 0 }, 196 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 197 { HDA_ATI_ALL, "ATI", 0, 0 }, 198 { HDA_AMD_ALL, "AMD", 0, 0 }, 199 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 200 { HDA_VIA_ALL, "VIA", 0, 0 }, 201 { HDA_SIS_ALL, "SiS", 0, 0 }, 202 { HDA_ULI_ALL, "ULI", 0, 0 }, 203 }; 204 205 static const struct { 206 uint16_t vendor; 207 uint8_t reg; 208 uint8_t mask; 209 uint8_t enable; 210 } hdac_pcie_snoop[] = { 211 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 212 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 213 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 214 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 215 }; 216 217 /**************************************************************************** 218 * Function prototypes 219 ****************************************************************************/ 220 static void hdac_intr_handler(void *); 221 static int hdac_reset(struct hdac_softc *, bool); 222 static int hdac_get_capabilities(struct hdac_softc *); 223 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 224 static int hdac_dma_alloc(struct hdac_softc *, 225 struct hdac_dma *, bus_size_t); 226 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 227 static int hdac_mem_alloc(struct hdac_softc *); 228 static void hdac_mem_free(struct hdac_softc *); 229 static int hdac_irq_alloc(struct hdac_softc *); 230 static void hdac_irq_free(struct hdac_softc *); 231 static void hdac_corb_init(struct hdac_softc *); 232 static void hdac_rirb_init(struct hdac_softc *); 233 static void hdac_corb_start(struct hdac_softc *); 234 static void hdac_rirb_start(struct hdac_softc *); 235 236 static void hdac_attach2(void *); 237 238 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 239 240 static int hdac_probe(device_t); 241 static int hdac_attach(device_t); 242 static int hdac_detach(device_t); 243 static int hdac_suspend(device_t); 244 static int hdac_resume(device_t); 245 246 static int hdac_rirb_flush(struct hdac_softc *sc); 247 static int hdac_unsolq_flush(struct hdac_softc *sc); 248 249 /* This function surely going to make its way into upper level someday. 
*/ 250 static void 251 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 252 { 253 const char *res = NULL; 254 int i = 0, j, k, len, inv; 255 256 if (resource_string_value(device_get_name(sc->dev), 257 device_get_unit(sc->dev), "config", &res) != 0) 258 return; 259 if (!(res != NULL && strlen(res) > 0)) 260 return; 261 HDA_BOOTVERBOSE( 262 device_printf(sc->dev, "Config options:"); 263 ); 264 for (;;) { 265 while (res[i] != '\0' && 266 (res[i] == ',' || isspace(res[i]) != 0)) 267 i++; 268 if (res[i] == '\0') { 269 HDA_BOOTVERBOSE( 270 printf("\n"); 271 ); 272 return; 273 } 274 j = i; 275 while (res[j] != '\0' && 276 !(res[j] == ',' || isspace(res[j]) != 0)) 277 j++; 278 len = j - i; 279 if (len > 2 && strncmp(res + i, "no", 2) == 0) 280 inv = 2; 281 else 282 inv = 0; 283 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 284 if (strncmp(res + i + inv, 285 hdac_quirks_tab[k].key, len - inv) != 0) 286 continue; 287 if (len - inv != strlen(hdac_quirks_tab[k].key)) 288 continue; 289 HDA_BOOTVERBOSE( 290 printf(" %s%s", (inv != 0) ? "no" : "", 291 hdac_quirks_tab[k].key); 292 ); 293 if (inv == 0) { 294 *on |= hdac_quirks_tab[k].value; 295 *off &= ~hdac_quirks_tab[k].value; 296 } else if (inv != 0) { 297 *off |= hdac_quirks_tab[k].value; 298 *on &= ~hdac_quirks_tab[k].value; 299 } 300 break; 301 } 302 i = j; 303 } 304 } 305 306 static void 307 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 308 { 309 device_t dev; 310 uint8_t rirbsts; 311 int i; 312 313 /* Was this a controller interrupt? */ 314 if (intsts & HDAC_INTSTS_CIS) { 315 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 316 /* Get as many responses that we can */ 317 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 318 HDAC_WRITE_1(&sc->mem, 319 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 320 hdac_rirb_flush(sc); 321 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 322 } 323 if (sc->unsolq_rp != sc->unsolq_wp) 324 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 325 } 326 327 if (intsts & HDAC_INTSTS_SIS_MASK) { 328 for (i = 0; i < sc->num_ss; i++) { 329 if ((intsts & (1 << i)) == 0) 330 continue; 331 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 332 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 333 if ((dev = sc->streams[i].dev) != NULL) { 334 HDAC_STREAM_INTR(dev, 335 sc->streams[i].dir, sc->streams[i].stream); 336 } 337 } 338 } 339 } 340 341 /**************************************************************************** 342 * void hdac_intr_handler(void *) 343 * 344 * Interrupt handler. Processes interrupts received from the hdac. 345 ****************************************************************************/ 346 static void 347 hdac_intr_handler(void *context) 348 { 349 struct hdac_softc *sc; 350 uint32_t intsts; 351 352 sc = (struct hdac_softc *)context; 353 354 /* 355 * Loop until HDAC_INTSTS_GIS gets clear. 356 * It is plausible that hardware interrupts a host only when GIS goes 357 * from zero to one. GIS is formed by OR-ing multiple hardware 358 * statuses, so it's possible that a previously cleared status gets set 359 * again while another status has not been cleared yet. Thus, there 360 * will be no new interrupt as GIS always stayed set. If we don't 361 * re-examine GIS then we can leave it set and never get an interrupt 362 * again. 
363 */ 364 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 365 while ((intsts & HDAC_INTSTS_GIS) != 0) { 366 hdac_lock(sc); 367 hdac_one_intr(sc, intsts); 368 hdac_unlock(sc); 369 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 370 } 371 } 372 373 static void 374 hdac_poll_callback(void *arg) 375 { 376 struct hdac_softc *sc = arg; 377 378 if (sc == NULL) 379 return; 380 381 hdac_lock(sc); 382 if (sc->polling == 0) { 383 hdac_unlock(sc); 384 return; 385 } 386 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 387 hdac_unlock(sc); 388 389 hdac_intr_handler(sc); 390 } 391 392 /**************************************************************************** 393 * int hdac_reset(hdac_softc *, bool) 394 * 395 * Reset the hdac to a quiescent and known state. 396 ****************************************************************************/ 397 static int 398 hdac_reset(struct hdac_softc *sc, bool wakeup) 399 { 400 uint32_t gctl; 401 int count, i; 402 403 /* 404 * Stop all Streams DMA engine 405 */ 406 for (i = 0; i < sc->num_iss; i++) 407 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 408 for (i = 0; i < sc->num_oss; i++) 409 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 410 for (i = 0; i < sc->num_bss; i++) 411 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 412 413 /* 414 * Stop Control DMA engines. 415 */ 416 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 417 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 418 419 /* 420 * Reset DMA position buffer. 421 */ 422 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 423 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 424 425 /* 426 * Reset the controller. The reset must remain asserted for 427 * a minimum of 100us. 428 */ 429 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 430 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 431 count = 10000; 432 do { 433 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 434 if (!(gctl & HDAC_GCTL_CRST)) 435 break; 436 DELAY(10); 437 } while (--count); 438 if (gctl & HDAC_GCTL_CRST) { 439 device_printf(sc->dev, "Unable to put hdac in reset\n"); 440 return (ENXIO); 441 } 442 443 /* If wakeup is not requested - leave the controller in reset state. */ 444 if (!wakeup) 445 return (0); 446 447 DELAY(100); 448 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 449 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 450 count = 10000; 451 do { 452 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 453 if (gctl & HDAC_GCTL_CRST) 454 break; 455 DELAY(10); 456 } while (--count); 457 if (!(gctl & HDAC_GCTL_CRST)) { 458 device_printf(sc->dev, "Device stuck in reset\n"); 459 return (ENXIO); 460 } 461 462 /* 463 * Wait for codecs to finish their own reset sequence. The delay here 464 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 
 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established. We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a DMA region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
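 *
 * The requested size is rounded up to HDA_DMA_ALIGNMENT, and the region is
 * allocated as a single physically contiguous segment (nsegments = 1),
 * since the controller is programmed with one base address per buffer
 * (CORB, RIRB, DMA position buffer and BDLs).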
 ****************************************************************************/
static int
hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
{
	bus_size_t roundsz;
	int result;

	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
	bzero(dma, sizeof(*dma));

	/*
	 * Create a DMA tag
	 */
	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    roundsz,				/* maxsize */
	    1,					/* nsegments */
	    roundsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &dma->dma_tag);			/* dmat */
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	/*
	 * Allocate DMA memory
	 */
	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
		BUS_DMA_COHERENT),
	    &dma->dma_map);
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	dma->dma_size = roundsz;

	/*
	 * Map the memory
	 */
	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
	if (result != 0 || dma->dma_paddr == 0) {
		if (result == 0)
			result = ENOMEM;
		device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
	);

	return (0);

hdac_dma_alloc_fail:
	hdac_dma_free(sc, dma);

	return (result);
}

/****************************************************************************
 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
 *
 * Free a struct hdac_dma that has been previously allocated via the
 * hdac_dma_alloc function.
 ****************************************************************************/
static void
hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
{
	if (dma->dma_paddr != 0) {
		/* Flush caches */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		dma->dma_paddr = 0;
	}
	if (dma->dma_vaddr != NULL) {
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_vaddr = NULL;
	}
	if (dma->dma_tag != NULL) {
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
	dma->dma_size = 0;
}

/****************************************************************************
 * int hdac_mem_alloc(struct hdac_softc *)
 *
 * Allocate all the bus resources necessary to speak with the physical
 * controller.
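 *
 * The register space is a single memory BAR (PCIR_BAR(0)); the bus tag and
 * handle obtained below are what the HDAC_READ_* / HDAC_WRITE_* accessors
 * use for all register access in this file.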
676 ****************************************************************************/ 677 static int 678 hdac_mem_alloc(struct hdac_softc *sc) 679 { 680 struct hdac_mem *mem; 681 682 mem = &sc->mem; 683 mem->mem_rid = PCIR_BAR(0); 684 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 685 &mem->mem_rid, RF_ACTIVE); 686 if (mem->mem_res == NULL) { 687 device_printf(sc->dev, 688 "%s: Unable to allocate memory resource\n", __func__); 689 return (ENOMEM); 690 } 691 mem->mem_tag = rman_get_bustag(mem->mem_res); 692 mem->mem_handle = rman_get_bushandle(mem->mem_res); 693 694 return (0); 695 } 696 697 /**************************************************************************** 698 * void hdac_mem_free(struct hdac_softc *) 699 * 700 * Free up resources previously allocated by hdac_mem_alloc. 701 ****************************************************************************/ 702 static void 703 hdac_mem_free(struct hdac_softc *sc) 704 { 705 struct hdac_mem *mem; 706 707 mem = &sc->mem; 708 if (mem->mem_res != NULL) 709 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 710 mem->mem_res); 711 mem->mem_res = NULL; 712 } 713 714 /**************************************************************************** 715 * int hdac_irq_alloc(struct hdac_softc *) 716 * 717 * Allocate and setup the resources necessary for interrupt handling. 718 ****************************************************************************/ 719 static int 720 hdac_irq_alloc(struct hdac_softc *sc) 721 { 722 struct hdac_irq *irq; 723 int result; 724 725 irq = &sc->irq; 726 irq->irq_rid = 0x0; 727 728 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 729 (result = pci_msi_count(sc->dev)) == 1 && 730 pci_alloc_msi(sc->dev, &result) == 0) 731 irq->irq_rid = 0x1; 732 733 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 734 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 735 if (irq->irq_res == NULL) { 736 device_printf(sc->dev, "%s: Unable to allocate irq\n", 737 __func__); 738 goto hdac_irq_alloc_fail; 739 } 740 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 741 NULL, hdac_intr_handler, sc, &irq->irq_handle); 742 if (result != 0) { 743 device_printf(sc->dev, 744 "%s: Unable to setup interrupt handler (%d)\n", 745 __func__, result); 746 goto hdac_irq_alloc_fail; 747 } 748 749 return (0); 750 751 hdac_irq_alloc_fail: 752 hdac_irq_free(sc); 753 754 return (ENXIO); 755 } 756 757 /**************************************************************************** 758 * void hdac_irq_free(struct hdac_softc *) 759 * 760 * Free up resources previously allocated by hdac_irq_alloc. 761 ****************************************************************************/ 762 static void 763 hdac_irq_free(struct hdac_softc *sc) 764 { 765 struct hdac_irq *irq; 766 767 irq = &sc->irq; 768 if (irq->irq_res != NULL && irq->irq_handle != NULL) 769 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 770 if (irq->irq_res != NULL) 771 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 772 irq->irq_res); 773 if (irq->irq_rid == 0x1) 774 pci_release_msi(sc->dev); 775 irq->irq_handle = NULL; 776 irq->irq_res = NULL; 777 irq->irq_rid = 0x0; 778 } 779 780 /**************************************************************************** 781 * void hdac_corb_init(struct hdac_softc *) 782 * 783 * Initialize the corb registers for operations but do not start it up yet. 784 * The CORB engine must not be running when this function is called. 
785 ****************************************************************************/ 786 static void 787 hdac_corb_init(struct hdac_softc *sc) 788 { 789 uint8_t corbsize; 790 uint64_t corbpaddr; 791 792 /* Setup the CORB size. */ 793 switch (sc->corb_size) { 794 case 256: 795 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 796 break; 797 case 16: 798 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 799 break; 800 case 2: 801 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 802 break; 803 default: 804 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 805 } 806 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 807 808 /* Setup the CORB Address in the hdac */ 809 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 810 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 811 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 812 813 /* Set the WP and RP */ 814 sc->corb_wp = 0; 815 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 816 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 817 /* 818 * The HDA specification indicates that the CORBRPRST bit will always 819 * read as zero. Unfortunately, it seems that at least the 82801G 820 * doesn't reset the bit to zero, which stalls the corb engine. 821 * manually reset the bit to zero before continuing. 822 */ 823 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 824 825 /* Enable CORB error reporting */ 826 #if 0 827 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 828 #endif 829 } 830 831 /**************************************************************************** 832 * void hdac_rirb_init(struct hdac_softc *) 833 * 834 * Initialize the rirb registers for operations but do not start it up yet. 835 * The RIRB engine must not be running when this function is called. 836 ****************************************************************************/ 837 static void 838 hdac_rirb_init(struct hdac_softc *sc) 839 { 840 uint8_t rirbsize; 841 uint64_t rirbpaddr; 842 843 /* Setup the RIRB size. */ 844 switch (sc->rirb_size) { 845 case 256: 846 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 847 break; 848 case 16: 849 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 850 break; 851 case 2: 852 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 853 break; 854 default: 855 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 856 } 857 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 858 859 /* Setup the RIRB Address in the hdac */ 860 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 861 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 862 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 863 864 /* Setup the WP and RP */ 865 sc->rirb_rp = 0; 866 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 867 868 /* Setup the interrupt threshold */ 869 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 870 871 /* Enable Overrun and response received reporting */ 872 #if 0 873 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 874 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 875 #else 876 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 877 #endif 878 879 /* 880 * Make sure that the Host CPU cache doesn't contain any dirty 881 * cache lines that falls in the rirb. If I understood correctly, it 882 * should be sufficient to do this only once as the rirb is purely 883 * read-only from now on. 
884 */ 885 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 886 BUS_DMASYNC_PREREAD); 887 } 888 889 /**************************************************************************** 890 * void hdac_corb_start(hdac_softc *) 891 * 892 * Startup the corb DMA engine 893 ****************************************************************************/ 894 static void 895 hdac_corb_start(struct hdac_softc *sc) 896 { 897 uint32_t corbctl; 898 899 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 900 corbctl |= HDAC_CORBCTL_CORBRUN; 901 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 902 } 903 904 /**************************************************************************** 905 * void hdac_rirb_start(hdac_softc *) 906 * 907 * Startup the rirb DMA engine 908 ****************************************************************************/ 909 static void 910 hdac_rirb_start(struct hdac_softc *sc) 911 { 912 uint32_t rirbctl; 913 914 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 915 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 916 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 917 } 918 919 static int 920 hdac_rirb_flush(struct hdac_softc *sc) 921 { 922 struct hdac_rirb *rirb_base, *rirb; 923 nid_t cad; 924 uint32_t resp, resp_ex; 925 uint8_t rirbwp; 926 int ret; 927 928 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 929 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 930 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 931 BUS_DMASYNC_POSTREAD); 932 933 ret = 0; 934 while (sc->rirb_rp != rirbwp) { 935 sc->rirb_rp++; 936 sc->rirb_rp %= sc->rirb_size; 937 rirb = &rirb_base[sc->rirb_rp]; 938 resp = le32toh(rirb->response); 939 resp_ex = le32toh(rirb->response_ex); 940 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 941 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 942 sc->unsolq[sc->unsolq_wp++] = resp; 943 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 944 sc->unsolq[sc->unsolq_wp++] = cad; 945 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 946 } else if (sc->codecs[cad].pending <= 0) { 947 device_printf(sc->dev, "Unexpected unsolicited " 948 "response from address %d: %08x\n", cad, resp); 949 } else { 950 sc->codecs[cad].response = resp; 951 sc->codecs[cad].pending--; 952 } 953 ret++; 954 } 955 956 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 957 BUS_DMASYNC_PREREAD); 958 return (ret); 959 } 960 961 static int 962 hdac_unsolq_flush(struct hdac_softc *sc) 963 { 964 device_t child; 965 nid_t cad; 966 uint32_t resp; 967 int ret = 0; 968 969 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 970 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 971 while (sc->unsolq_rp != sc->unsolq_wp) { 972 resp = sc->unsolq[sc->unsolq_rp++]; 973 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 974 cad = sc->unsolq[sc->unsolq_rp++]; 975 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 976 if ((child = sc->codecs[cad].dev) != NULL) 977 HDAC_UNSOL_INTR(child, resp); 978 ret++; 979 } 980 sc->unsolq_st = HDAC_UNSOLQ_READY; 981 } 982 983 return (ret); 984 } 985 986 /**************************************************************************** 987 * uint32_t hdac_send_command 988 * 989 * Wrapper function that sends only one command to a given codec 990 ****************************************************************************/ 991 static uint32_t 992 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 993 { 994 int timeout; 995 uint32_t *corb; 996 997 hdac_lockassert(sc); 998 verb &= ~HDA_CMD_CAD_MASK; 999 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1000 sc->codecs[cad].response = HDA_INVALID; 1001 1002 sc->codecs[cad].pending++; 1003 sc->corb_wp++; 1004 
sc->corb_wp %= sc->corb_size; 1005 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1006 bus_dmamap_sync(sc->corb_dma.dma_tag, 1007 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1008 corb[sc->corb_wp] = htole32(verb); 1009 bus_dmamap_sync(sc->corb_dma.dma_tag, 1010 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1011 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1012 1013 timeout = 10000; 1014 do { 1015 if (hdac_rirb_flush(sc) == 0) 1016 DELAY(10); 1017 } while (sc->codecs[cad].pending != 0 && --timeout); 1018 1019 if (sc->codecs[cad].pending != 0) { 1020 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1021 verb, cad); 1022 sc->codecs[cad].pending = 0; 1023 } 1024 1025 if (sc->unsolq_rp != sc->unsolq_wp) 1026 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1027 return (sc->codecs[cad].response); 1028 } 1029 1030 /**************************************************************************** 1031 * Device Methods 1032 ****************************************************************************/ 1033 1034 /**************************************************************************** 1035 * int hdac_probe(device_t) 1036 * 1037 * Probe for the presence of an hdac. If none is found, check for a generic 1038 * match using the subclass of the device. 1039 ****************************************************************************/ 1040 static int 1041 hdac_probe(device_t dev) 1042 { 1043 int i, result; 1044 uint32_t model; 1045 uint16_t class, subclass; 1046 char desc[64]; 1047 1048 model = (uint32_t)pci_get_device(dev) << 16; 1049 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1050 class = pci_get_class(dev); 1051 subclass = pci_get_subclass(dev); 1052 1053 bzero(desc, sizeof(desc)); 1054 result = ENXIO; 1055 for (i = 0; i < nitems(hdac_devices); i++) { 1056 if (hdac_devices[i].model == model) { 1057 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1058 result = BUS_PROBE_DEFAULT; 1059 break; 1060 } 1061 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1062 class == PCIC_MULTIMEDIA && 1063 subclass == PCIS_MULTIMEDIA_HDA) { 1064 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1065 hdac_devices[i].desc, pci_get_device(dev)); 1066 result = BUS_PROBE_GENERIC; 1067 break; 1068 } 1069 } 1070 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1071 subclass == PCIS_MULTIMEDIA_HDA) { 1072 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1073 result = BUS_PROBE_GENERIC; 1074 } 1075 if (result != ENXIO) { 1076 strlcat(desc, " HDA Controller", sizeof(desc)); 1077 device_set_desc_copy(dev, desc); 1078 } 1079 1080 return (result); 1081 } 1082 1083 static void 1084 hdac_unsolq_task(void *context, int pending) 1085 { 1086 struct hdac_softc *sc; 1087 1088 sc = (struct hdac_softc *)context; 1089 1090 hdac_lock(sc); 1091 hdac_unsolq_flush(sc); 1092 hdac_unlock(sc); 1093 } 1094 1095 /**************************************************************************** 1096 * int hdac_attach(device_t) 1097 * 1098 * Attach the device into the kernel. Interrupts usually won't be enabled 1099 * when this function is called. Setup everything that doesn't require 1100 * interrupts and defer probing of codecs until interrupts are enabled. 
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
		/*
		 * Try to enable PCIe snoop to avoid messing around with
		 * uncacheable DMA attribute. Since PCIe snoop register
		 * config is pretty much vendor specific, there are no
		 * general solutions on how to enable it, forcing us (even
		 * Microsoft) to enable uncacheable or write combined DMA
		 * by default.
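		 *
		 * Where snooping can be enabled, hdac_pcie_snoop[] above
		 * describes, per vendor, which config register and bits to
		 * set (e.g. register 0x42 on ATI/AMD bridges); if that
		 * fails, we fall back to uncacheable DMA on x86.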
1202 * 1203 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1204 */ 1205 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1206 if (hdac_pcie_snoop[i].vendor != vendor) 1207 continue; 1208 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1209 if (hdac_pcie_snoop[i].reg == 0x00) 1210 break; 1211 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1212 if ((v & hdac_pcie_snoop[i].enable) == 1213 hdac_pcie_snoop[i].enable) 1214 break; 1215 v &= hdac_pcie_snoop[i].mask; 1216 v |= hdac_pcie_snoop[i].enable; 1217 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1218 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1219 if ((v & hdac_pcie_snoop[i].enable) != 1220 hdac_pcie_snoop[i].enable) { 1221 HDA_BOOTVERBOSE( 1222 device_printf(dev, 1223 "WARNING: Failed to enable PCIe " 1224 "snoop!\n"); 1225 ); 1226 #if defined(__i386__) || defined(__amd64__) 1227 sc->flags |= HDAC_F_DMA_NOCACHE; 1228 #endif 1229 } 1230 break; 1231 } 1232 #if defined(__i386__) || defined(__amd64__) 1233 } 1234 #endif 1235 1236 HDA_BOOTHVERBOSE( 1237 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1238 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1239 "Uncacheable" : "PCIe snoop", vendor); 1240 ); 1241 1242 /* Allocate resources */ 1243 result = hdac_mem_alloc(sc); 1244 if (result != 0) 1245 goto hdac_attach_fail; 1246 result = hdac_irq_alloc(sc); 1247 if (result != 0) 1248 goto hdac_attach_fail; 1249 1250 /* Get Capabilities */ 1251 result = hdac_get_capabilities(sc); 1252 if (result != 0) 1253 goto hdac_attach_fail; 1254 1255 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1256 result = hdac_dma_alloc(sc, &sc->corb_dma, 1257 sc->corb_size * sizeof(uint32_t)); 1258 if (result != 0) 1259 goto hdac_attach_fail; 1260 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1261 sc->rirb_size * sizeof(struct hdac_rirb)); 1262 if (result != 0) 1263 goto hdac_attach_fail; 1264 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1265 M_HDAC, M_ZERO | M_WAITOK); 1266 for (i = 0; i < sc->num_ss; i++) { 1267 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1268 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1269 if (result != 0) 1270 goto hdac_attach_fail; 1271 } 1272 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1273 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1274 HDA_BOOTVERBOSE( 1275 device_printf(dev, "Failed to " 1276 "allocate DMA pos buffer " 1277 "(non-fatal)\n"); 1278 ); 1279 } else { 1280 uint64_t addr = sc->pos_dma.dma_paddr; 1281 1282 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1283 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1284 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1285 HDAC_DPLBASE_DPLBASE_DMAPBE); 1286 } 1287 } 1288 1289 result = bus_dma_tag_create( 1290 bus_get_dma_tag(sc->dev), /* parent */ 1291 HDA_DMA_ALIGNMENT, /* alignment */ 1292 0, /* boundary */ 1293 (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : 1294 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1295 BUS_SPACE_MAXADDR, /* highaddr */ 1296 NULL, /* filtfunc */ 1297 NULL, /* fistfuncarg */ 1298 HDA_BUFSZ_MAX, /* maxsize */ 1299 1, /* nsegments */ 1300 HDA_BUFSZ_MAX, /* maxsegsz */ 1301 0, /* flags */ 1302 NULL, /* lockfunc */ 1303 NULL, /* lockfuncarg */ 1304 &sc->chan_dmat); /* dmat */ 1305 if (result != 0) { 1306 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1307 __func__, result); 1308 goto hdac_attach_fail; 1309 } 1310 1311 /* Quiesce everything */ 1312 HDA_BOOTHVERBOSE( 1313 device_printf(dev, "Reset controller...\n"); 1314 ); 1315 hdac_reset(sc, true); 1316 1317 /* Initialize the CORB and RIRB */ 1318 hdac_corb_init(sc); 1319 hdac_rirb_init(sc); 1320 1321 /* Defer remaining of initialization until interrupts are enabled */ 1322 sc->intrhook.ich_func = hdac_attach2; 1323 sc->intrhook.ich_arg = (void *)sc; 1324 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1325 sc->intrhook.ich_func = NULL; 1326 hdac_attach2((void *)sc); 1327 } 1328 1329 return (0); 1330 1331 hdac_attach_fail: 1332 hdac_irq_free(sc); 1333 if (sc->streams != NULL) 1334 for (i = 0; i < sc->num_ss; i++) 1335 hdac_dma_free(sc, &sc->streams[i].bdl); 1336 free(sc->streams, M_HDAC); 1337 hdac_dma_free(sc, &sc->rirb_dma); 1338 hdac_dma_free(sc, &sc->corb_dma); 1339 hdac_mem_free(sc); 1340 snd_mtxfree(sc->lock); 1341 1342 return (ENXIO); 1343 } 1344 1345 static int 1346 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1347 { 1348 struct hdac_softc *sc; 1349 device_t *devlist; 1350 device_t dev; 1351 int devcount, i, err, val; 1352 1353 dev = oidp->oid_arg1; 1354 sc = device_get_softc(dev); 1355 if (sc == NULL) 1356 return (EINVAL); 1357 val = 0; 1358 err = sysctl_handle_int(oidp, &val, 0, req); 1359 if (err != 0 || req->newptr == NULL || val == 0) 1360 return (err); 1361 1362 /* XXX: Temporary. For debugging. 
*/ 1363 if (val == 100) { 1364 hdac_suspend(dev); 1365 return (0); 1366 } else if (val == 101) { 1367 hdac_resume(dev); 1368 return (0); 1369 } 1370 1371 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) 1372 return (err); 1373 hdac_lock(sc); 1374 for (i = 0; i < devcount; i++) 1375 HDAC_PINDUMP(devlist[i]); 1376 hdac_unlock(sc); 1377 free(devlist, M_TEMP); 1378 return (0); 1379 } 1380 1381 static int 1382 hdac_mdata_rate(uint16_t fmt) 1383 { 1384 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1385 int rate, bits; 1386 1387 if (fmt & (1 << 14)) 1388 rate = 44100; 1389 else 1390 rate = 48000; 1391 rate *= ((fmt >> 11) & 0x07) + 1; 1392 rate /= ((fmt >> 8) & 0x07) + 1; 1393 bits = mbits[(fmt >> 4) & 0x03]; 1394 bits *= (fmt & 0x0f) + 1; 1395 return (rate * bits); 1396 } 1397 1398 static int 1399 hdac_bdata_rate(uint16_t fmt, int output) 1400 { 1401 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1402 int rate, bits; 1403 1404 rate = 48000; 1405 rate *= ((fmt >> 11) & 0x07) + 1; 1406 bits = bbits[(fmt >> 4) & 0x03]; 1407 bits *= (fmt & 0x0f) + 1; 1408 if (!output) 1409 bits = ((bits + 7) & ~0x07) + 10; 1410 return (rate * bits); 1411 } 1412 1413 static void 1414 hdac_poll_reinit(struct hdac_softc *sc) 1415 { 1416 int i, pollticks, min = 1000000; 1417 struct hdac_stream *s; 1418 1419 if (sc->polling == 0) 1420 return; 1421 if (sc->unsol_registered > 0) 1422 min = hz / 2; 1423 for (i = 0; i < sc->num_ss; i++) { 1424 s = &sc->streams[i]; 1425 if (s->running == 0) 1426 continue; 1427 pollticks = ((uint64_t)hz * s->blksz) / 1428 (hdac_mdata_rate(s->format) / 8); 1429 pollticks >>= 1; 1430 if (pollticks > hz) 1431 pollticks = hz; 1432 if (pollticks < 1) 1433 pollticks = 1; 1434 if (min > pollticks) 1435 min = pollticks; 1436 } 1437 sc->poll_ival = min; 1438 if (min == 1000000) 1439 callout_stop(&sc->poll_callout); 1440 else 1441 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1442 } 1443 1444 static int 1445 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1446 { 1447 struct hdac_softc *sc; 1448 device_t dev; 1449 uint32_t ctl; 1450 int err, val; 1451 1452 dev = oidp->oid_arg1; 1453 sc = device_get_softc(dev); 1454 if (sc == NULL) 1455 return (EINVAL); 1456 hdac_lock(sc); 1457 val = sc->polling; 1458 hdac_unlock(sc); 1459 err = sysctl_handle_int(oidp, &val, 0, req); 1460 1461 if (err != 0 || req->newptr == NULL) 1462 return (err); 1463 if (val < 0 || val > 1) 1464 return (EINVAL); 1465 1466 hdac_lock(sc); 1467 if (val != sc->polling) { 1468 if (val == 0) { 1469 callout_stop(&sc->poll_callout); 1470 hdac_unlock(sc); 1471 callout_drain(&sc->poll_callout); 1472 hdac_lock(sc); 1473 sc->polling = 0; 1474 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1475 ctl |= HDAC_INTCTL_GIE; 1476 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1477 } else { 1478 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1479 ctl &= ~HDAC_INTCTL_GIE; 1480 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1481 sc->polling = 1; 1482 hdac_poll_reinit(sc); 1483 } 1484 } 1485 hdac_unlock(sc); 1486 1487 return (err); 1488 } 1489 1490 static void 1491 hdac_attach2(void *arg) 1492 { 1493 struct hdac_softc *sc; 1494 device_t child; 1495 uint32_t vendorid, revisionid; 1496 int i; 1497 uint16_t statests; 1498 1499 sc = (struct hdac_softc *)arg; 1500 1501 hdac_lock(sc); 1502 1503 /* Remove ourselves from the config hooks */ 1504 if (sc->intrhook.ich_func != NULL) { 1505 config_intrhook_disestablish(&sc->intrhook); 1506 sc->intrhook.ich_func = NULL; 1507 } 1508 1509 HDA_BOOTHVERBOSE( 1510 device_printf(sc->dev, 
"Starting CORB Engine...\n"); 1511 ); 1512 hdac_corb_start(sc); 1513 HDA_BOOTHVERBOSE( 1514 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1515 ); 1516 hdac_rirb_start(sc); 1517 HDA_BOOTHVERBOSE( 1518 device_printf(sc->dev, 1519 "Enabling controller interrupt...\n"); 1520 ); 1521 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1522 HDAC_GCTL_UNSOL); 1523 if (sc->polling == 0) { 1524 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1525 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1526 } 1527 DELAY(1000); 1528 1529 HDA_BOOTHVERBOSE( 1530 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1531 ); 1532 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1533 hdac_unlock(sc); 1534 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1535 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1536 HDA_BOOTHVERBOSE( 1537 device_printf(sc->dev, 1538 "Found CODEC at address %d\n", i); 1539 ); 1540 hdac_lock(sc); 1541 vendorid = hdac_send_command(sc, i, 1542 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1543 revisionid = hdac_send_command(sc, i, 1544 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1545 hdac_unlock(sc); 1546 if (vendorid == HDA_INVALID && 1547 revisionid == HDA_INVALID) { 1548 device_printf(sc->dev, 1549 "CODEC at address %d not responding!\n", i); 1550 continue; 1551 } 1552 sc->codecs[i].vendor_id = 1553 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1554 sc->codecs[i].device_id = 1555 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1556 sc->codecs[i].revision_id = 1557 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1558 sc->codecs[i].stepping_id = 1559 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1560 child = device_add_child(sc->dev, "hdacc", -1); 1561 if (child == NULL) { 1562 device_printf(sc->dev, 1563 "Failed to add CODEC device\n"); 1564 continue; 1565 } 1566 device_set_ivars(child, (void *)(intptr_t)i); 1567 sc->codecs[i].dev = child; 1568 } 1569 } 1570 bus_generic_attach(sc->dev); 1571 1572 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1573 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1574 "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1575 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1576 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1577 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1578 "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1579 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1580 } 1581 1582 /**************************************************************************** 1583 * int hdac_suspend(device_t) 1584 * 1585 * Suspend and power down HDA bus and codecs. 1586 ****************************************************************************/ 1587 static int 1588 hdac_suspend(device_t dev) 1589 { 1590 struct hdac_softc *sc = device_get_softc(dev); 1591 1592 HDA_BOOTHVERBOSE( 1593 device_printf(dev, "Suspend...\n"); 1594 ); 1595 bus_generic_suspend(dev); 1596 1597 hdac_lock(sc); 1598 HDA_BOOTHVERBOSE( 1599 device_printf(dev, "Reset controller...\n"); 1600 ); 1601 callout_stop(&sc->poll_callout); 1602 hdac_reset(sc, false); 1603 hdac_unlock(sc); 1604 callout_drain(&sc->poll_callout); 1605 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1606 HDA_BOOTHVERBOSE( 1607 device_printf(dev, "Suspend done\n"); 1608 ); 1609 return (0); 1610 } 1611 1612 /**************************************************************************** 1613 * int hdac_resume(device_t) 1614 * 1615 * Powerup and restore HDA bus and codecs state. 
1616 ****************************************************************************/ 1617 static int 1618 hdac_resume(device_t dev) 1619 { 1620 struct hdac_softc *sc = device_get_softc(dev); 1621 int error; 1622 1623 HDA_BOOTHVERBOSE( 1624 device_printf(dev, "Resume...\n"); 1625 ); 1626 hdac_lock(sc); 1627 1628 /* Quiesce everything */ 1629 HDA_BOOTHVERBOSE( 1630 device_printf(dev, "Reset controller...\n"); 1631 ); 1632 hdac_reset(sc, true); 1633 1634 /* Initialize the CORB and RIRB */ 1635 hdac_corb_init(sc); 1636 hdac_rirb_init(sc); 1637 1638 HDA_BOOTHVERBOSE( 1639 device_printf(dev, "Starting CORB Engine...\n"); 1640 ); 1641 hdac_corb_start(sc); 1642 HDA_BOOTHVERBOSE( 1643 device_printf(dev, "Starting RIRB Engine...\n"); 1644 ); 1645 hdac_rirb_start(sc); 1646 HDA_BOOTHVERBOSE( 1647 device_printf(dev, "Enabling controller interrupt...\n"); 1648 ); 1649 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1650 HDAC_GCTL_UNSOL); 1651 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1652 DELAY(1000); 1653 hdac_poll_reinit(sc); 1654 hdac_unlock(sc); 1655 1656 error = bus_generic_resume(dev); 1657 HDA_BOOTHVERBOSE( 1658 device_printf(dev, "Resume done\n"); 1659 ); 1660 return (error); 1661 } 1662 1663 /**************************************************************************** 1664 * int hdac_detach(device_t) 1665 * 1666 * Detach and free up resources utilized by the hdac device. 1667 ****************************************************************************/ 1668 static int 1669 hdac_detach(device_t dev) 1670 { 1671 struct hdac_softc *sc = device_get_softc(dev); 1672 device_t *devlist; 1673 int cad, i, devcount, error; 1674 1675 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1676 return (error); 1677 for (i = 0; i < devcount; i++) { 1678 cad = (intptr_t)device_get_ivars(devlist[i]); 1679 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1680 free(devlist, M_TEMP); 1681 return (error); 1682 } 1683 sc->codecs[cad].dev = NULL; 1684 } 1685 free(devlist, M_TEMP); 1686 1687 hdac_lock(sc); 1688 hdac_reset(sc, false); 1689 hdac_unlock(sc); 1690 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1691 hdac_irq_free(sc); 1692 1693 for (i = 0; i < sc->num_ss; i++) 1694 hdac_dma_free(sc, &sc->streams[i].bdl); 1695 free(sc->streams, M_HDAC); 1696 hdac_dma_free(sc, &sc->pos_dma); 1697 hdac_dma_free(sc, &sc->rirb_dma); 1698 hdac_dma_free(sc, &sc->corb_dma); 1699 if (sc->chan_dmat != NULL) { 1700 bus_dma_tag_destroy(sc->chan_dmat); 1701 sc->chan_dmat = NULL; 1702 } 1703 hdac_mem_free(sc); 1704 snd_mtxfree(sc->lock); 1705 return (0); 1706 } 1707 1708 static bus_dma_tag_t 1709 hdac_get_dma_tag(device_t dev, device_t child) 1710 { 1711 struct hdac_softc *sc = device_get_softc(dev); 1712 1713 return (sc->chan_dmat); 1714 } 1715 1716 static int 1717 hdac_print_child(device_t dev, device_t child) 1718 { 1719 int retval; 1720 1721 retval = bus_print_child_header(dev, child); 1722 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1723 retval += bus_print_child_footer(dev, child); 1724 1725 return (retval); 1726 } 1727 1728 static int 1729 hdac_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) 1730 { 1731 1732 snprintf(buf, buflen, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1733 return (0); 1734 } 1735 1736 static int 1737 hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, 1738 size_t buflen) 1739 { 1740 struct hdac_softc *sc = device_get_softc(dev); 1741 nid_t cad = 
(uintptr_t)device_get_ivars(child); 1742 1743 snprintf(buf, buflen, 1744 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1745 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1746 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1747 return (0); 1748 } 1749 1750 static int 1751 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1752 { 1753 struct hdac_softc *sc = device_get_softc(dev); 1754 nid_t cad = (uintptr_t)device_get_ivars(child); 1755 1756 switch (which) { 1757 case HDA_IVAR_CODEC_ID: 1758 *result = cad; 1759 break; 1760 case HDA_IVAR_VENDOR_ID: 1761 *result = sc->codecs[cad].vendor_id; 1762 break; 1763 case HDA_IVAR_DEVICE_ID: 1764 *result = sc->codecs[cad].device_id; 1765 break; 1766 case HDA_IVAR_REVISION_ID: 1767 *result = sc->codecs[cad].revision_id; 1768 break; 1769 case HDA_IVAR_STEPPING_ID: 1770 *result = sc->codecs[cad].stepping_id; 1771 break; 1772 case HDA_IVAR_SUBVENDOR_ID: 1773 *result = pci_get_subvendor(dev); 1774 break; 1775 case HDA_IVAR_SUBDEVICE_ID: 1776 *result = pci_get_subdevice(dev); 1777 break; 1778 case HDA_IVAR_DMA_NOCACHE: 1779 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1780 break; 1781 case HDA_IVAR_STRIPES_MASK: 1782 *result = (1 << (1 << sc->num_sdo)) - 1; 1783 break; 1784 default: 1785 return (ENOENT); 1786 } 1787 return (0); 1788 } 1789 1790 static struct mtx * 1791 hdac_get_mtx(device_t dev, device_t child) 1792 { 1793 struct hdac_softc *sc = device_get_softc(dev); 1794 1795 return (sc->lock); 1796 } 1797 1798 static uint32_t 1799 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1800 { 1801 1802 return (hdac_send_command(device_get_softc(dev), 1803 (intptr_t)device_get_ivars(child), verb)); 1804 } 1805 1806 static int 1807 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1808 { 1809 int i, ss; 1810 1811 ss = -1; 1812 /* Allocate ISS/OSS first. */ 1813 if (dir == 0) { 1814 for (i = 0; i < sc->num_iss; i++) { 1815 if (sc->streams[i].stream == stream) { 1816 ss = i; 1817 break; 1818 } 1819 } 1820 } else { 1821 for (i = 0; i < sc->num_oss; i++) { 1822 if (sc->streams[i + sc->num_iss].stream == stream) { 1823 ss = i + sc->num_iss; 1824 break; 1825 } 1826 } 1827 } 1828 /* Fallback to BSS. */ 1829 if (ss == -1) { 1830 for (i = 0; i < sc->num_bss; i++) { 1831 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1832 == stream) { 1833 ss = i + sc->num_iss + sc->num_oss; 1834 break; 1835 } 1836 } 1837 } 1838 return (ss); 1839 } 1840 1841 static int 1842 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1843 uint32_t **dmapos) 1844 { 1845 struct hdac_softc *sc = device_get_softc(dev); 1846 nid_t cad = (uintptr_t)device_get_ivars(child); 1847 int stream, ss, bw, maxbw, prevbw; 1848 1849 /* Look for empty stream. */ 1850 ss = hdac_find_stream(sc, dir, 0); 1851 1852 /* Return if found nothing. */ 1853 if (ss < 0) 1854 return (0); 1855 1856 /* Check bus bandwidth. */ 1857 bw = hdac_bdata_rate(format, dir); 1858 if (dir == 1) { 1859 bw *= 1 << (sc->num_sdo - stripe); 1860 prevbw = sc->sdo_bw_used; 1861 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1862 } else { 1863 prevbw = sc->codecs[cad].sdi_bw_used; 1864 maxbw = 48000 * 464; 1865 } 1866 HDA_BOOTHVERBOSE( 1867 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1868 (bw + prevbw) / 1000, maxbw / 1000, 1869 bw + prevbw > maxbw ? " -- OVERFLOW!" 
static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if found nothing. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/*
	 * Allocate stream number: ISS/OSS take tags counting up from 1,
	 * while BSS take tags counting down from 15, keeping tag 0
	 * reserved to mean "unused".
	 */
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int ss;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
	if (dir == 1)
		sc->sdo_bw_used -= sc->streams[ss].bw;
	else
		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
	sc->streams[ss].stream = 0;
	sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}
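/*
 * Note (illustrative, not part of the original driver): in
 * hdac_stream_start() above each stream descriptor owns a 0x20-byte
 * register window, hence off = ss << 5.  Every BDL entry covers blksz
 * bytes and has IOC set, so the controller interrupts after each
 * completed block.  Assuming, say, a 65536-byte buffer split into two
 * blocks, the stream would be programmed with SDCBL = 65536 and
 * SDLVI = 1 (last valid index).
 */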
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	/* Enter stream reset and wait for SRST to read back as set. */
	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	/* Leave stream reset and wait for SRST to clear again. */
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	/*
	 * The tag itself is managed by the codec driver; only count the
	 * registered handlers so polling can be reconfigured as needed.
	 */
	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}
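/*
 * Note (illustrative, not part of the original driver): besides the
 * standard newbus methods, the table below exports the hdac_if methods
 * that child codec drivers use to reach the controller; sending a verb
 * to a codec device dev would look roughly like:
 *
 *	response = HDAC_CODEC_COMMAND(device_get_parent(dev), dev, verb);
 *
 * which lands in hdac_codec_command() above.
 */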
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location_str, hdac_child_location_str),
	DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

static devclass_t hdac_devclass;

DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);
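/*
 * Note (illustrative, not part of the original driver): the controller is
 * normally enabled either by building the kernel with "device snd_hda" or
 * by loading the module at boot via /boot/loader.conf:
 *
 *	snd_hda_load="YES"
 */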