1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 SND_DECLARE_FILE("$FreeBSD$"); 55 56 #define hdac_lock(sc) snd_mtxlock((sc)->lock) 57 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) 58 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) 59 60 #define HDAC_QUIRK_64BIT (1 << 0) 61 #define HDAC_QUIRK_DMAPOS (1 << 1) 62 #define HDAC_QUIRK_MSI (1 << 2) 63 64 static const struct { 65 const char *key; 66 uint32_t value; 67 } hdac_quirks_tab[] = { 68 { "64bit", HDAC_QUIRK_64BIT }, 69 { "dmapos", HDAC_QUIRK_DMAPOS }, 70 { "msi", HDAC_QUIRK_MSI }, 71 }; 72 73 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 74 75 static const struct { 76 uint32_t model; 77 const char *desc; 78 char quirks_on; 79 char quirks_off; 80 } hdac_devices[] = { 81 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 82 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 83 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 84 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 85 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 87 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 88 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 89 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 90 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 91 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 92 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 93 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 94 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 95 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, 96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 115 { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 }, 116 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 117 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 118 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 119 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 120 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 121 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 122 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 123 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 124 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 125 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 126 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 127 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 128 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 129 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 130 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 131 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 132 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 133 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 134 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 135 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 136 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 137 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 138 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 139 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 140 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 141 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 142 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 143 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 144 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 145 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 146 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 147 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 148 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 149 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 150 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 151 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 152 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 153 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 154 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 155 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 156 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 157 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 158 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 159 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 160 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 161 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 162 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 163 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 164 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 165 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 166 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 167 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 168 { HDA_ATI_RS880, "ATI RS880", 0, 0 }, 169 { HDA_ATI_R600, "ATI R600", 0, 0 }, 170 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 171 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 172 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 173 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 174 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 175 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 176 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 177 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 178 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 179 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 180 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 181 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 182 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 183 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 184 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 185 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 186 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 187 { HDA_ATI_KABINI, "ATI Kabini", 0, 0 }, 188 { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 }, 189 { HDA_AMD_X370, "AMD X370", 0, 0 }, 190 { HDA_AMD_X570, "AMD X570", 0, 0 }, 191 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 192 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 193 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 194 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 195 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 196 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 197 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 198 /* Unknown */ 199 { HDA_INTEL_ALL, "Intel", 0, 0 }, 200 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 201 { HDA_ATI_ALL, "ATI", 0, 0 }, 202 { HDA_AMD_ALL, "AMD", 0, 0 }, 203 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 204 { HDA_VIA_ALL, "VIA", 0, 0 }, 205 { HDA_SIS_ALL, "SiS", 0, 0 }, 206 { HDA_ULI_ALL, "ULI", 0, 0 }, 207 }; 208 209 static const struct { 210 uint16_t vendor; 211 uint8_t reg; 212 uint8_t mask; 213 uint8_t enable; 214 } hdac_pcie_snoop[] = { 215 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 216 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 217 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 218 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 219 }; 220 221 /**************************************************************************** 222 * Function prototypes 223 ****************************************************************************/ 224 static void hdac_intr_handler(void *); 225 static int hdac_reset(struct hdac_softc *, bool); 226 static int hdac_get_capabilities(struct hdac_softc *); 227 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 228 static int hdac_dma_alloc(struct hdac_softc *, 229 struct hdac_dma *, bus_size_t); 230 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 231 static int hdac_mem_alloc(struct hdac_softc *); 232 static void hdac_mem_free(struct hdac_softc *); 233 static int hdac_irq_alloc(struct hdac_softc *); 234 static void hdac_irq_free(struct hdac_softc *); 235 static void hdac_corb_init(struct hdac_softc *); 236 static void hdac_rirb_init(struct hdac_softc *); 237 static void hdac_corb_start(struct hdac_softc *); 238 static void hdac_rirb_start(struct hdac_softc *); 239 240 static void hdac_attach2(void *); 241 242 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 243 244 static int hdac_probe(device_t); 245 static int hdac_attach(device_t); 246 static int hdac_detach(device_t); 247 static int hdac_suspend(device_t); 248 static int hdac_resume(device_t); 249 250 static int hdac_rirb_flush(struct hdac_softc *sc); 251 static int hdac_unsolq_flush(struct hdac_softc *sc); 252 253 /* This function surely going to make its way into upper level someday. */ 254 static void 255 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 256 { 257 const char *res = NULL; 258 int i = 0, j, k, len, inv; 259 260 if (resource_string_value(device_get_name(sc->dev), 261 device_get_unit(sc->dev), "config", &res) != 0) 262 return; 263 if (!(res != NULL && strlen(res) > 0)) 264 return; 265 HDA_BOOTVERBOSE( 266 device_printf(sc->dev, "Config options:"); 267 ); 268 for (;;) { 269 while (res[i] != '\0' && 270 (res[i] == ',' || isspace(res[i]) != 0)) 271 i++; 272 if (res[i] == '\0') { 273 HDA_BOOTVERBOSE( 274 printf("\n"); 275 ); 276 return; 277 } 278 j = i; 279 while (res[j] != '\0' && 280 !(res[j] == ',' || isspace(res[j]) != 0)) 281 j++; 282 len = j - i; 283 if (len > 2 && strncmp(res + i, "no", 2) == 0) 284 inv = 2; 285 else 286 inv = 0; 287 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 288 if (strncmp(res + i + inv, 289 hdac_quirks_tab[k].key, len - inv) != 0) 290 continue; 291 if (len - inv != strlen(hdac_quirks_tab[k].key)) 292 continue; 293 HDA_BOOTVERBOSE( 294 printf(" %s%s", (inv != 0) ? "no" : "", 295 hdac_quirks_tab[k].key); 296 ); 297 if (inv == 0) { 298 *on |= hdac_quirks_tab[k].value; 299 *off &= ~hdac_quirks_tab[k].value; 300 } else if (inv != 0) { 301 *off |= hdac_quirks_tab[k].value; 302 *on &= ~hdac_quirks_tab[k].value; 303 } 304 break; 305 } 306 i = j; 307 } 308 } 309 310 static void 311 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 312 { 313 device_t dev; 314 uint8_t rirbsts; 315 int i; 316 317 /* Was this a controller interrupt? */ 318 if (intsts & HDAC_INTSTS_CIS) { 319 /* 320 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then 321 * we will need to check and clear HDAC_STATESTS. 322 * That event is used to report codec status changes such as 323 * a reset or a wake-up event. 324 */ 325 /* 326 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we 327 * will need to check and clear HDAC_CORBSTS_CMEI in 328 * HDAC_CORBSTS. 329 * That event is used to report CORB memory errors. 330 */ 331 /* 332 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we 333 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in 334 * HDAC_RIRBSTS. 335 * That event is used to report response FIFO overruns. 336 */ 337 338 /* Get as many responses that we can */ 339 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 340 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 341 HDAC_WRITE_1(&sc->mem, 342 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 343 hdac_rirb_flush(sc); 344 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 345 } 346 if (sc->unsolq_rp != sc->unsolq_wp) 347 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 348 } 349 350 if (intsts & HDAC_INTSTS_SIS_MASK) { 351 for (i = 0; i < sc->num_ss; i++) { 352 if ((intsts & (1 << i)) == 0) 353 continue; 354 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 355 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 356 if ((dev = sc->streams[i].dev) != NULL) { 357 HDAC_STREAM_INTR(dev, 358 sc->streams[i].dir, sc->streams[i].stream); 359 } 360 } 361 } 362 } 363 364 /**************************************************************************** 365 * void hdac_intr_handler(void *) 366 * 367 * Interrupt handler. Processes interrupts received from the hdac. 368 ****************************************************************************/ 369 static void 370 hdac_intr_handler(void *context) 371 { 372 struct hdac_softc *sc; 373 uint32_t intsts; 374 375 sc = (struct hdac_softc *)context; 376 377 /* 378 * Loop until HDAC_INTSTS_GIS gets clear. 379 * It is plausible that hardware interrupts a host only when GIS goes 380 * from zero to one. GIS is formed by OR-ing multiple hardware 381 * statuses, so it's possible that a previously cleared status gets set 382 * again while another status has not been cleared yet. Thus, there 383 * will be no new interrupt as GIS always stayed set. If we don't 384 * re-examine GIS then we can leave it set and never get an interrupt 385 * again. 386 */ 387 hdac_lock(sc); 388 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 389 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) { 390 hdac_one_intr(sc, intsts); 391 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 392 } 393 hdac_unlock(sc); 394 } 395 396 static void 397 hdac_poll_callback(void *arg) 398 { 399 struct hdac_softc *sc = arg; 400 401 if (sc == NULL) 402 return; 403 404 hdac_lock(sc); 405 if (sc->polling == 0) { 406 hdac_unlock(sc); 407 return; 408 } 409 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 410 hdac_unlock(sc); 411 412 hdac_intr_handler(sc); 413 } 414 415 /**************************************************************************** 416 * int hdac_reset(hdac_softc *, bool) 417 * 418 * Reset the hdac to a quiescent and known state. 419 ****************************************************************************/ 420 static int 421 hdac_reset(struct hdac_softc *sc, bool wakeup) 422 { 423 uint32_t gctl; 424 int count, i; 425 426 /* 427 * Stop all Streams DMA engine 428 */ 429 for (i = 0; i < sc->num_iss; i++) 430 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 431 for (i = 0; i < sc->num_oss; i++) 432 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 433 for (i = 0; i < sc->num_bss; i++) 434 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 435 436 /* 437 * Stop Control DMA engines. 438 */ 439 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 440 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 441 442 /* 443 * Reset DMA position buffer. 444 */ 445 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 446 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 447 448 /* 449 * Reset the controller. The reset must remain asserted for 450 * a minimum of 100us. 451 */ 452 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 453 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 454 count = 10000; 455 do { 456 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 457 if (!(gctl & HDAC_GCTL_CRST)) 458 break; 459 DELAY(10); 460 } while (--count); 461 if (gctl & HDAC_GCTL_CRST) { 462 device_printf(sc->dev, "Unable to put hdac in reset\n"); 463 return (ENXIO); 464 } 465 466 /* If wakeup is not requested - leave the controller in reset state. */ 467 if (!wakeup) 468 return (0); 469 470 DELAY(100); 471 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 472 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 473 count = 10000; 474 do { 475 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 476 if (gctl & HDAC_GCTL_CRST) 477 break; 478 DELAY(10); 479 } while (--count); 480 if (!(gctl & HDAC_GCTL_CRST)) { 481 device_printf(sc->dev, "Device stuck in reset\n"); 482 return (ENXIO); 483 } 484 485 /* 486 * Wait for codecs to finish their own reset sequence. The delay here 487 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 488 */ 489 DELAY(1000); 490 491 return (0); 492 } 493 494 /**************************************************************************** 495 * int hdac_get_capabilities(struct hdac_softc *); 496 * 497 * Retreive the general capabilities of the hdac; 498 * Number of Input Streams 499 * Number of Output Streams 500 * Number of bidirectional Streams 501 * 64bit ready 502 * CORB and RIRB sizes 503 ****************************************************************************/ 504 static int 505 hdac_get_capabilities(struct hdac_softc *sc) 506 { 507 uint16_t gcap; 508 uint8_t corbsize, rirbsize; 509 510 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 511 sc->num_iss = HDAC_GCAP_ISS(gcap); 512 sc->num_oss = HDAC_GCAP_OSS(gcap); 513 sc->num_bss = HDAC_GCAP_BSS(gcap); 514 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 515 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 516 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 517 if (sc->quirks_on & HDAC_QUIRK_64BIT) 518 sc->support_64bit = 1; 519 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 520 sc->support_64bit = 0; 521 522 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 523 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 524 HDAC_CORBSIZE_CORBSZCAP_256) 525 sc->corb_size = 256; 526 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 527 HDAC_CORBSIZE_CORBSZCAP_16) 528 sc->corb_size = 16; 529 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 530 HDAC_CORBSIZE_CORBSZCAP_2) 531 sc->corb_size = 2; 532 else { 533 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 534 __func__, corbsize); 535 return (ENXIO); 536 } 537 538 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 539 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 540 HDAC_RIRBSIZE_RIRBSZCAP_256) 541 sc->rirb_size = 256; 542 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 543 HDAC_RIRBSIZE_RIRBSZCAP_16) 544 sc->rirb_size = 16; 545 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 546 HDAC_RIRBSIZE_RIRBSZCAP_2) 547 sc->rirb_size = 2; 548 else { 549 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 550 __func__, rirbsize); 551 return (ENXIO); 552 } 553 554 HDA_BOOTVERBOSE( 555 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 556 "NSDO %d%s, CORB %d, RIRB %d\n", 557 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 558 sc->support_64bit ? ", 64bit" : "", 559 sc->corb_size, sc->rirb_size); 560 ); 561 562 return (0); 563 } 564 565 /**************************************************************************** 566 * void hdac_dma_cb 567 * 568 * This function is called by bus_dmamap_load when the mapping has been 569 * established. We just record the physical address of the mapping into 570 * the struct hdac_dma passed in. 571 ****************************************************************************/ 572 static void 573 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 574 { 575 struct hdac_dma *dma; 576 577 if (error == 0) { 578 dma = (struct hdac_dma *)callback_arg; 579 dma->dma_paddr = segs[0].ds_addr; 580 } 581 } 582 583 /**************************************************************************** 584 * int hdac_dma_alloc 585 * 586 * This function allocate and setup a dma region (struct hdac_dma). 587 * It must be freed by a corresponding hdac_dma_free. 588 ****************************************************************************/ 589 static int 590 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 591 { 592 bus_size_t roundsz; 593 int result; 594 595 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 596 bzero(dma, sizeof(*dma)); 597 598 /* 599 * Create a DMA tag 600 */ 601 result = bus_dma_tag_create( 602 bus_get_dma_tag(sc->dev), /* parent */ 603 HDA_DMA_ALIGNMENT, /* alignment */ 604 0, /* boundary */ 605 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 606 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 607 BUS_SPACE_MAXADDR, /* highaddr */ 608 NULL, /* filtfunc */ 609 NULL, /* fistfuncarg */ 610 roundsz, /* maxsize */ 611 1, /* nsegments */ 612 roundsz, /* maxsegsz */ 613 0, /* flags */ 614 NULL, /* lockfunc */ 615 NULL, /* lockfuncarg */ 616 &dma->dma_tag); /* dmat */ 617 if (result != 0) { 618 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 619 __func__, result); 620 goto hdac_dma_alloc_fail; 621 } 622 623 /* 624 * Allocate DMA memory 625 */ 626 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 627 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 628 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 629 BUS_DMA_COHERENT), 630 &dma->dma_map); 631 if (result != 0) { 632 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 633 __func__, result); 634 goto hdac_dma_alloc_fail; 635 } 636 637 dma->dma_size = roundsz; 638 639 /* 640 * Map the memory 641 */ 642 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 643 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 644 if (result != 0 || dma->dma_paddr == 0) { 645 if (result == 0) 646 result = ENOMEM; 647 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 648 __func__, result); 649 goto hdac_dma_alloc_fail; 650 } 651 652 HDA_BOOTHVERBOSE( 653 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 654 __func__, (uintmax_t)size, (uintmax_t)roundsz); 655 ); 656 657 return (0); 658 659 hdac_dma_alloc_fail: 660 hdac_dma_free(sc, dma); 661 662 return (result); 663 } 664 665 /**************************************************************************** 666 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 667 * 668 * Free a struct hdac_dma that has been previously allocated via the 669 * hdac_dma_alloc function. 670 ****************************************************************************/ 671 static void 672 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 673 { 674 if (dma->dma_paddr != 0) { 675 /* Flush caches */ 676 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 677 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 678 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 679 dma->dma_paddr = 0; 680 } 681 if (dma->dma_vaddr != NULL) { 682 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 683 dma->dma_vaddr = NULL; 684 } 685 if (dma->dma_tag != NULL) { 686 bus_dma_tag_destroy(dma->dma_tag); 687 dma->dma_tag = NULL; 688 } 689 dma->dma_size = 0; 690 } 691 692 /**************************************************************************** 693 * int hdac_mem_alloc(struct hdac_softc *) 694 * 695 * Allocate all the bus resources necessary to speak with the physical 696 * controller. 697 ****************************************************************************/ 698 static int 699 hdac_mem_alloc(struct hdac_softc *sc) 700 { 701 struct hdac_mem *mem; 702 703 mem = &sc->mem; 704 mem->mem_rid = PCIR_BAR(0); 705 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 706 &mem->mem_rid, RF_ACTIVE); 707 if (mem->mem_res == NULL) { 708 device_printf(sc->dev, 709 "%s: Unable to allocate memory resource\n", __func__); 710 return (ENOMEM); 711 } 712 mem->mem_tag = rman_get_bustag(mem->mem_res); 713 mem->mem_handle = rman_get_bushandle(mem->mem_res); 714 715 return (0); 716 } 717 718 /**************************************************************************** 719 * void hdac_mem_free(struct hdac_softc *) 720 * 721 * Free up resources previously allocated by hdac_mem_alloc. 722 ****************************************************************************/ 723 static void 724 hdac_mem_free(struct hdac_softc *sc) 725 { 726 struct hdac_mem *mem; 727 728 mem = &sc->mem; 729 if (mem->mem_res != NULL) 730 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 731 mem->mem_res); 732 mem->mem_res = NULL; 733 } 734 735 /**************************************************************************** 736 * int hdac_irq_alloc(struct hdac_softc *) 737 * 738 * Allocate and setup the resources necessary for interrupt handling. 739 ****************************************************************************/ 740 static int 741 hdac_irq_alloc(struct hdac_softc *sc) 742 { 743 struct hdac_irq *irq; 744 int result; 745 746 irq = &sc->irq; 747 irq->irq_rid = 0x0; 748 749 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 750 (result = pci_msi_count(sc->dev)) == 1 && 751 pci_alloc_msi(sc->dev, &result) == 0) 752 irq->irq_rid = 0x1; 753 754 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 755 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 756 if (irq->irq_res == NULL) { 757 device_printf(sc->dev, "%s: Unable to allocate irq\n", 758 __func__); 759 goto hdac_irq_alloc_fail; 760 } 761 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 762 NULL, hdac_intr_handler, sc, &irq->irq_handle); 763 if (result != 0) { 764 device_printf(sc->dev, 765 "%s: Unable to setup interrupt handler (%d)\n", 766 __func__, result); 767 goto hdac_irq_alloc_fail; 768 } 769 770 return (0); 771 772 hdac_irq_alloc_fail: 773 hdac_irq_free(sc); 774 775 return (ENXIO); 776 } 777 778 /**************************************************************************** 779 * void hdac_irq_free(struct hdac_softc *) 780 * 781 * Free up resources previously allocated by hdac_irq_alloc. 782 ****************************************************************************/ 783 static void 784 hdac_irq_free(struct hdac_softc *sc) 785 { 786 struct hdac_irq *irq; 787 788 irq = &sc->irq; 789 if (irq->irq_res != NULL && irq->irq_handle != NULL) 790 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 791 if (irq->irq_res != NULL) 792 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 793 irq->irq_res); 794 if (irq->irq_rid == 0x1) 795 pci_release_msi(sc->dev); 796 irq->irq_handle = NULL; 797 irq->irq_res = NULL; 798 irq->irq_rid = 0x0; 799 } 800 801 /**************************************************************************** 802 * void hdac_corb_init(struct hdac_softc *) 803 * 804 * Initialize the corb registers for operations but do not start it up yet. 805 * The CORB engine must not be running when this function is called. 806 ****************************************************************************/ 807 static void 808 hdac_corb_init(struct hdac_softc *sc) 809 { 810 uint8_t corbsize; 811 uint64_t corbpaddr; 812 813 /* Setup the CORB size. */ 814 switch (sc->corb_size) { 815 case 256: 816 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 817 break; 818 case 16: 819 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 820 break; 821 case 2: 822 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 823 break; 824 default: 825 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 826 } 827 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 828 829 /* Setup the CORB Address in the hdac */ 830 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 831 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 832 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 833 834 /* Set the WP and RP */ 835 sc->corb_wp = 0; 836 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 837 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 838 /* 839 * The HDA specification indicates that the CORBRPRST bit will always 840 * read as zero. Unfortunately, it seems that at least the 82801G 841 * doesn't reset the bit to zero, which stalls the corb engine. 842 * manually reset the bit to zero before continuing. 843 */ 844 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 845 846 /* Enable CORB error reporting */ 847 #if 0 848 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 849 #endif 850 } 851 852 /**************************************************************************** 853 * void hdac_rirb_init(struct hdac_softc *) 854 * 855 * Initialize the rirb registers for operations but do not start it up yet. 856 * The RIRB engine must not be running when this function is called. 857 ****************************************************************************/ 858 static void 859 hdac_rirb_init(struct hdac_softc *sc) 860 { 861 uint8_t rirbsize; 862 uint64_t rirbpaddr; 863 864 /* Setup the RIRB size. */ 865 switch (sc->rirb_size) { 866 case 256: 867 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 868 break; 869 case 16: 870 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 871 break; 872 case 2: 873 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 874 break; 875 default: 876 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 877 } 878 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 879 880 /* Setup the RIRB Address in the hdac */ 881 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 882 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 883 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 884 885 /* Setup the WP and RP */ 886 sc->rirb_rp = 0; 887 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 888 889 /* Setup the interrupt threshold */ 890 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 891 892 /* Enable Overrun and response received reporting */ 893 #if 0 894 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 895 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 896 #else 897 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 898 #endif 899 900 /* 901 * Make sure that the Host CPU cache doesn't contain any dirty 902 * cache lines that falls in the rirb. If I understood correctly, it 903 * should be sufficient to do this only once as the rirb is purely 904 * read-only from now on. 905 */ 906 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 907 BUS_DMASYNC_PREREAD); 908 } 909 910 /**************************************************************************** 911 * void hdac_corb_start(hdac_softc *) 912 * 913 * Startup the corb DMA engine 914 ****************************************************************************/ 915 static void 916 hdac_corb_start(struct hdac_softc *sc) 917 { 918 uint32_t corbctl; 919 920 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 921 corbctl |= HDAC_CORBCTL_CORBRUN; 922 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 923 } 924 925 /**************************************************************************** 926 * void hdac_rirb_start(hdac_softc *) 927 * 928 * Startup the rirb DMA engine 929 ****************************************************************************/ 930 static void 931 hdac_rirb_start(struct hdac_softc *sc) 932 { 933 uint32_t rirbctl; 934 935 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 936 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 937 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 938 } 939 940 static int 941 hdac_rirb_flush(struct hdac_softc *sc) 942 { 943 struct hdac_rirb *rirb_base, *rirb; 944 nid_t cad; 945 uint32_t resp, resp_ex; 946 uint8_t rirbwp; 947 int ret; 948 949 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 950 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 951 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 952 BUS_DMASYNC_POSTREAD); 953 954 ret = 0; 955 while (sc->rirb_rp != rirbwp) { 956 sc->rirb_rp++; 957 sc->rirb_rp %= sc->rirb_size; 958 rirb = &rirb_base[sc->rirb_rp]; 959 resp = le32toh(rirb->response); 960 resp_ex = le32toh(rirb->response_ex); 961 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 962 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 963 sc->unsolq[sc->unsolq_wp++] = resp; 964 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 965 sc->unsolq[sc->unsolq_wp++] = cad; 966 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 967 } else if (sc->codecs[cad].pending <= 0) { 968 device_printf(sc->dev, "Unexpected unsolicited " 969 "response from address %d: %08x\n", cad, resp); 970 } else { 971 sc->codecs[cad].response = resp; 972 sc->codecs[cad].pending--; 973 } 974 ret++; 975 } 976 977 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 978 BUS_DMASYNC_PREREAD); 979 return (ret); 980 } 981 982 static int 983 hdac_unsolq_flush(struct hdac_softc *sc) 984 { 985 device_t child; 986 nid_t cad; 987 uint32_t resp; 988 int ret = 0; 989 990 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 991 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 992 while (sc->unsolq_rp != sc->unsolq_wp) { 993 resp = sc->unsolq[sc->unsolq_rp++]; 994 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 995 cad = sc->unsolq[sc->unsolq_rp++]; 996 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 997 if ((child = sc->codecs[cad].dev) != NULL && 998 device_is_attached(child)) 999 HDAC_UNSOL_INTR(child, resp); 1000 ret++; 1001 } 1002 sc->unsolq_st = HDAC_UNSOLQ_READY; 1003 } 1004 1005 return (ret); 1006 } 1007 1008 /**************************************************************************** 1009 * uint32_t hdac_send_command 1010 * 1011 * Wrapper function that sends only one command to a given codec 1012 ****************************************************************************/ 1013 static uint32_t 1014 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 1015 { 1016 int timeout; 1017 uint32_t *corb; 1018 1019 hdac_lockassert(sc); 1020 verb &= ~HDA_CMD_CAD_MASK; 1021 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1022 sc->codecs[cad].response = HDA_INVALID; 1023 1024 sc->codecs[cad].pending++; 1025 sc->corb_wp++; 1026 sc->corb_wp %= sc->corb_size; 1027 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1028 bus_dmamap_sync(sc->corb_dma.dma_tag, 1029 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1030 corb[sc->corb_wp] = htole32(verb); 1031 bus_dmamap_sync(sc->corb_dma.dma_tag, 1032 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1033 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1034 1035 timeout = 10000; 1036 do { 1037 if (hdac_rirb_flush(sc) == 0) 1038 DELAY(10); 1039 } while (sc->codecs[cad].pending != 0 && --timeout); 1040 1041 if (sc->codecs[cad].pending != 0) { 1042 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1043 verb, cad); 1044 sc->codecs[cad].pending = 0; 1045 } 1046 1047 if (sc->unsolq_rp != sc->unsolq_wp) 1048 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1049 return (sc->codecs[cad].response); 1050 } 1051 1052 /**************************************************************************** 1053 * Device Methods 1054 ****************************************************************************/ 1055 1056 /**************************************************************************** 1057 * int hdac_probe(device_t) 1058 * 1059 * Probe for the presence of an hdac. If none is found, check for a generic 1060 * match using the subclass of the device. 1061 ****************************************************************************/ 1062 static int 1063 hdac_probe(device_t dev) 1064 { 1065 int i, result; 1066 uint32_t model; 1067 uint16_t class, subclass; 1068 char desc[64]; 1069 1070 model = (uint32_t)pci_get_device(dev) << 16; 1071 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1072 class = pci_get_class(dev); 1073 subclass = pci_get_subclass(dev); 1074 1075 bzero(desc, sizeof(desc)); 1076 result = ENXIO; 1077 for (i = 0; i < nitems(hdac_devices); i++) { 1078 if (hdac_devices[i].model == model) { 1079 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1080 result = BUS_PROBE_DEFAULT; 1081 break; 1082 } 1083 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1084 class == PCIC_MULTIMEDIA && 1085 subclass == PCIS_MULTIMEDIA_HDA) { 1086 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1087 hdac_devices[i].desc, pci_get_device(dev)); 1088 result = BUS_PROBE_GENERIC; 1089 break; 1090 } 1091 } 1092 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1093 subclass == PCIS_MULTIMEDIA_HDA) { 1094 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1095 result = BUS_PROBE_GENERIC; 1096 } 1097 if (result != ENXIO) { 1098 strlcat(desc, " HDA Controller", sizeof(desc)); 1099 device_set_desc_copy(dev, desc); 1100 } 1101 1102 return (result); 1103 } 1104 1105 static void 1106 hdac_unsolq_task(void *context, int pending) 1107 { 1108 struct hdac_softc *sc; 1109 1110 sc = (struct hdac_softc *)context; 1111 1112 hdac_lock(sc); 1113 hdac_unsolq_flush(sc); 1114 hdac_unlock(sc); 1115 } 1116 1117 /**************************************************************************** 1118 * int hdac_attach(device_t) 1119 * 1120 * Attach the device into the kernel. Interrupts usually won't be enabled 1121 * when this function is called. Setup everything that doesn't require 1122 * interrupts and defer probing of codecs until interrupts are enabled. 1123 ****************************************************************************/ 1124 static int 1125 hdac_attach(device_t dev) 1126 { 1127 struct hdac_softc *sc; 1128 int result; 1129 int i, devid = -1; 1130 uint32_t model; 1131 uint16_t class, subclass; 1132 uint16_t vendor; 1133 uint8_t v; 1134 1135 sc = device_get_softc(dev); 1136 HDA_BOOTVERBOSE( 1137 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", 1138 pci_get_subvendor(dev), pci_get_subdevice(dev)); 1139 device_printf(dev, "HDA Driver Revision: %s\n", 1140 HDA_DRV_TEST_REV); 1141 ); 1142 1143 model = (uint32_t)pci_get_device(dev) << 16; 1144 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1145 class = pci_get_class(dev); 1146 subclass = pci_get_subclass(dev); 1147 1148 for (i = 0; i < nitems(hdac_devices); i++) { 1149 if (hdac_devices[i].model == model) { 1150 devid = i; 1151 break; 1152 } 1153 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1154 class == PCIC_MULTIMEDIA && 1155 subclass == PCIS_MULTIMEDIA_HDA) { 1156 devid = i; 1157 break; 1158 } 1159 } 1160 1161 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); 1162 sc->dev = dev; 1163 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); 1164 callout_init(&sc->poll_callout, 1); 1165 for (i = 0; i < HDAC_CODEC_MAX; i++) 1166 sc->codecs[i].dev = NULL; 1167 if (devid >= 0) { 1168 sc->quirks_on = hdac_devices[devid].quirks_on; 1169 sc->quirks_off = hdac_devices[devid].quirks_off; 1170 } else { 1171 sc->quirks_on = 0; 1172 sc->quirks_off = 0; 1173 } 1174 if (resource_int_value(device_get_name(dev), 1175 device_get_unit(dev), "msi", &i) == 0) { 1176 if (i == 0) 1177 sc->quirks_off |= HDAC_QUIRK_MSI; 1178 else { 1179 sc->quirks_on |= HDAC_QUIRK_MSI; 1180 sc->quirks_off |= ~HDAC_QUIRK_MSI; 1181 } 1182 } 1183 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); 1184 HDA_BOOTVERBOSE( 1185 device_printf(sc->dev, 1186 "Config options: on=0x%08x off=0x%08x\n", 1187 sc->quirks_on, sc->quirks_off); 1188 ); 1189 sc->poll_ival = hz; 1190 if (resource_int_value(device_get_name(dev), 1191 device_get_unit(dev), "polling", &i) == 0 && i != 0) 1192 sc->polling = 1; 1193 else 1194 sc->polling = 0; 1195 1196 pci_enable_busmaster(dev); 1197 1198 vendor = pci_get_vendor(dev); 1199 if (vendor == INTEL_VENDORID) { 1200 /* TCSEL -> TC0 */ 1201 v = pci_read_config(dev, 0x44, 1); 1202 pci_write_config(dev, 0x44, v & 0xf8, 1); 1203 HDA_BOOTHVERBOSE( 1204 device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v, 1205 pci_read_config(dev, 0x44, 1)); 1206 ); 1207 } 1208 1209 #if defined(__i386__) || defined(__amd64__) 1210 sc->flags |= HDAC_F_DMA_NOCACHE; 1211 1212 if (resource_int_value(device_get_name(dev), 1213 device_get_unit(dev), "snoop", &i) == 0 && i != 0) { 1214 #else 1215 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1216 #endif 1217 /* 1218 * Try to enable PCIe snoop to avoid messing around with 1219 * uncacheable DMA attribute. Since PCIe snoop register 1220 * config is pretty much vendor specific, there are no 1221 * general solutions on how to enable it, forcing us (even 1222 * Microsoft) to enable uncacheable or write combined DMA 1223 * by default. 1224 * 1225 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1226 */ 1227 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1228 if (hdac_pcie_snoop[i].vendor != vendor) 1229 continue; 1230 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1231 if (hdac_pcie_snoop[i].reg == 0x00) 1232 break; 1233 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1234 if ((v & hdac_pcie_snoop[i].enable) == 1235 hdac_pcie_snoop[i].enable) 1236 break; 1237 v &= hdac_pcie_snoop[i].mask; 1238 v |= hdac_pcie_snoop[i].enable; 1239 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1240 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1241 if ((v & hdac_pcie_snoop[i].enable) != 1242 hdac_pcie_snoop[i].enable) { 1243 HDA_BOOTVERBOSE( 1244 device_printf(dev, 1245 "WARNING: Failed to enable PCIe " 1246 "snoop!\n"); 1247 ); 1248 #if defined(__i386__) || defined(__amd64__) 1249 sc->flags |= HDAC_F_DMA_NOCACHE; 1250 #endif 1251 } 1252 break; 1253 } 1254 #if defined(__i386__) || defined(__amd64__) 1255 } 1256 #endif 1257 1258 HDA_BOOTHVERBOSE( 1259 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1260 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1261 "Uncacheable" : "PCIe snoop", vendor); 1262 ); 1263 1264 /* Allocate resources */ 1265 result = hdac_mem_alloc(sc); 1266 if (result != 0) 1267 goto hdac_attach_fail; 1268 result = hdac_irq_alloc(sc); 1269 if (result != 0) 1270 goto hdac_attach_fail; 1271 1272 /* Get Capabilities */ 1273 result = hdac_get_capabilities(sc); 1274 if (result != 0) 1275 goto hdac_attach_fail; 1276 1277 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1278 result = hdac_dma_alloc(sc, &sc->corb_dma, 1279 sc->corb_size * sizeof(uint32_t)); 1280 if (result != 0) 1281 goto hdac_attach_fail; 1282 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1283 sc->rirb_size * sizeof(struct hdac_rirb)); 1284 if (result != 0) 1285 goto hdac_attach_fail; 1286 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1287 M_HDAC, M_ZERO | M_WAITOK); 1288 for (i = 0; i < sc->num_ss; i++) { 1289 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1290 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1291 if (result != 0) 1292 goto hdac_attach_fail; 1293 } 1294 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1295 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1296 HDA_BOOTVERBOSE( 1297 device_printf(dev, "Failed to " 1298 "allocate DMA pos buffer " 1299 "(non-fatal)\n"); 1300 ); 1301 } else { 1302 uint64_t addr = sc->pos_dma.dma_paddr; 1303 1304 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1305 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1306 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1307 HDAC_DPLBASE_DPLBASE_DMAPBE); 1308 } 1309 } 1310 1311 result = bus_dma_tag_create( 1312 bus_get_dma_tag(sc->dev), /* parent */ 1313 HDA_DMA_ALIGNMENT, /* alignment */ 1314 0, /* boundary */ 1315 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 1316 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1317 BUS_SPACE_MAXADDR, /* highaddr */ 1318 NULL, /* filtfunc */ 1319 NULL, /* fistfuncarg */ 1320 HDA_BUFSZ_MAX, /* maxsize */ 1321 1, /* nsegments */ 1322 HDA_BUFSZ_MAX, /* maxsegsz */ 1323 0, /* flags */ 1324 NULL, /* lockfunc */ 1325 NULL, /* lockfuncarg */ 1326 &sc->chan_dmat); /* dmat */ 1327 if (result != 0) { 1328 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1329 __func__, result); 1330 goto hdac_attach_fail; 1331 } 1332 1333 /* Quiesce everything */ 1334 HDA_BOOTHVERBOSE( 1335 device_printf(dev, "Reset controller...\n"); 1336 ); 1337 hdac_reset(sc, true); 1338 1339 /* Initialize the CORB and RIRB */ 1340 hdac_corb_init(sc); 1341 hdac_rirb_init(sc); 1342 1343 /* Defer remaining of initialization until interrupts are enabled */ 1344 sc->intrhook.ich_func = hdac_attach2; 1345 sc->intrhook.ich_arg = (void *)sc; 1346 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1347 sc->intrhook.ich_func = NULL; 1348 hdac_attach2((void *)sc); 1349 } 1350 1351 return (0); 1352 1353 hdac_attach_fail: 1354 hdac_irq_free(sc); 1355 if (sc->streams != NULL) 1356 for (i = 0; i < sc->num_ss; i++) 1357 hdac_dma_free(sc, &sc->streams[i].bdl); 1358 free(sc->streams, M_HDAC); 1359 hdac_dma_free(sc, &sc->rirb_dma); 1360 hdac_dma_free(sc, &sc->corb_dma); 1361 hdac_mem_free(sc); 1362 snd_mtxfree(sc->lock); 1363 1364 return (ENXIO); 1365 } 1366 1367 static int 1368 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1369 { 1370 struct hdac_softc *sc; 1371 device_t *devlist; 1372 device_t dev; 1373 int devcount, i, err, val; 1374 1375 dev = oidp->oid_arg1; 1376 sc = device_get_softc(dev); 1377 if (sc == NULL) 1378 return (EINVAL); 1379 val = 0; 1380 err = sysctl_handle_int(oidp, &val, 0, req); 1381 if (err != 0 || req->newptr == NULL || val == 0) 1382 return (err); 1383 1384 /* XXX: Temporary. For debugging. */ 1385 if (val == 100) { 1386 hdac_suspend(dev); 1387 return (0); 1388 } else if (val == 101) { 1389 hdac_resume(dev); 1390 return (0); 1391 } 1392 1393 bus_topo_lock(); 1394 1395 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) { 1396 bus_topo_unlock(); 1397 return (err); 1398 } 1399 1400 hdac_lock(sc); 1401 for (i = 0; i < devcount; i++) 1402 HDAC_PINDUMP(devlist[i]); 1403 hdac_unlock(sc); 1404 1405 bus_topo_unlock(); 1406 1407 free(devlist, M_TEMP); 1408 return (0); 1409 } 1410 1411 static int 1412 hdac_mdata_rate(uint16_t fmt) 1413 { 1414 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1415 int rate, bits; 1416 1417 if (fmt & (1 << 14)) 1418 rate = 44100; 1419 else 1420 rate = 48000; 1421 rate *= ((fmt >> 11) & 0x07) + 1; 1422 rate /= ((fmt >> 8) & 0x07) + 1; 1423 bits = mbits[(fmt >> 4) & 0x03]; 1424 bits *= (fmt & 0x0f) + 1; 1425 return (rate * bits); 1426 } 1427 1428 static int 1429 hdac_bdata_rate(uint16_t fmt, int output) 1430 { 1431 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1432 int rate, bits; 1433 1434 rate = 48000; 1435 rate *= ((fmt >> 11) & 0x07) + 1; 1436 bits = bbits[(fmt >> 4) & 0x03]; 1437 bits *= (fmt & 0x0f) + 1; 1438 if (!output) 1439 bits = ((bits + 7) & ~0x07) + 10; 1440 return (rate * bits); 1441 } 1442 1443 static void 1444 hdac_poll_reinit(struct hdac_softc *sc) 1445 { 1446 int i, pollticks, min = 1000000; 1447 struct hdac_stream *s; 1448 1449 if (sc->polling == 0) 1450 return; 1451 if (sc->unsol_registered > 0) 1452 min = hz / 2; 1453 for (i = 0; i < sc->num_ss; i++) { 1454 s = &sc->streams[i]; 1455 if (s->running == 0) 1456 continue; 1457 pollticks = ((uint64_t)hz * s->blksz) / 1458 (hdac_mdata_rate(s->format) / 8); 1459 pollticks >>= 1; 1460 if (pollticks > hz) 1461 pollticks = hz; 1462 if (pollticks < 1) 1463 pollticks = 1; 1464 if (min > pollticks) 1465 min = pollticks; 1466 } 1467 sc->poll_ival = min; 1468 if (min == 1000000) 1469 callout_stop(&sc->poll_callout); 1470 else 1471 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1472 } 1473 1474 static int 1475 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1476 { 1477 struct hdac_softc *sc; 1478 device_t dev; 1479 uint32_t ctl; 1480 int err, val; 1481 1482 dev = oidp->oid_arg1; 1483 sc = device_get_softc(dev); 1484 if (sc == NULL) 1485 return (EINVAL); 1486 hdac_lock(sc); 1487 val = sc->polling; 1488 hdac_unlock(sc); 1489 err = sysctl_handle_int(oidp, &val, 0, req); 1490 1491 if (err != 0 || req->newptr == NULL) 1492 return (err); 1493 if (val < 0 || val > 1) 1494 return (EINVAL); 1495 1496 hdac_lock(sc); 1497 if (val != sc->polling) { 1498 if (val == 0) { 1499 callout_stop(&sc->poll_callout); 1500 hdac_unlock(sc); 1501 callout_drain(&sc->poll_callout); 1502 hdac_lock(sc); 1503 sc->polling = 0; 1504 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1505 ctl |= HDAC_INTCTL_GIE; 1506 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1507 } else { 1508 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1509 ctl &= ~HDAC_INTCTL_GIE; 1510 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1511 sc->polling = 1; 1512 hdac_poll_reinit(sc); 1513 } 1514 } 1515 hdac_unlock(sc); 1516 1517 return (err); 1518 } 1519 1520 static void 1521 hdac_attach2(void *arg) 1522 { 1523 struct hdac_softc *sc; 1524 device_t child; 1525 uint32_t vendorid, revisionid; 1526 int i; 1527 uint16_t statests; 1528 1529 sc = (struct hdac_softc *)arg; 1530 1531 hdac_lock(sc); 1532 1533 /* Remove ourselves from the config hooks */ 1534 if (sc->intrhook.ich_func != NULL) { 1535 config_intrhook_disestablish(&sc->intrhook); 1536 sc->intrhook.ich_func = NULL; 1537 } 1538 1539 HDA_BOOTHVERBOSE( 1540 device_printf(sc->dev, "Starting CORB Engine...\n"); 1541 ); 1542 hdac_corb_start(sc); 1543 HDA_BOOTHVERBOSE( 1544 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1545 ); 1546 hdac_rirb_start(sc); 1547 1548 /* 1549 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1550 * (status change) interrupts. The documentation says that we 1551 * should not make any assumptions about the state of this register 1552 * and set it explicitly. 1553 * NB: this needs to be done before the interrupt is enabled as 1554 * the handler does not expect this interrupt source. 1555 */ 1556 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1557 1558 /* 1559 * Read and clear post-reset SDI wake status. 1560 * Each set bit corresponds to a codec that came out of reset. 1561 */ 1562 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1563 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1564 1565 HDA_BOOTHVERBOSE( 1566 device_printf(sc->dev, 1567 "Enabling controller interrupt...\n"); 1568 ); 1569 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1570 HDAC_GCTL_UNSOL); 1571 if (sc->polling == 0) { 1572 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1573 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1574 } 1575 DELAY(1000); 1576 1577 HDA_BOOTHVERBOSE( 1578 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1579 ); 1580 hdac_unlock(sc); 1581 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1582 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1583 HDA_BOOTHVERBOSE( 1584 device_printf(sc->dev, 1585 "Found CODEC at address %d\n", i); 1586 ); 1587 hdac_lock(sc); 1588 vendorid = hdac_send_command(sc, i, 1589 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1590 revisionid = hdac_send_command(sc, i, 1591 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1592 hdac_unlock(sc); 1593 if (vendorid == HDA_INVALID && 1594 revisionid == HDA_INVALID) { 1595 device_printf(sc->dev, 1596 "CODEC at address %d not responding!\n", i); 1597 continue; 1598 } 1599 sc->codecs[i].vendor_id = 1600 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1601 sc->codecs[i].device_id = 1602 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1603 sc->codecs[i].revision_id = 1604 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1605 sc->codecs[i].stepping_id = 1606 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1607 child = device_add_child(sc->dev, "hdacc", -1); 1608 if (child == NULL) { 1609 device_printf(sc->dev, 1610 "Failed to add CODEC device\n"); 1611 continue; 1612 } 1613 device_set_ivars(child, (void *)(intptr_t)i); 1614 sc->codecs[i].dev = child; 1615 } 1616 } 1617 bus_generic_attach(sc->dev); 1618 1619 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1620 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1621 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1622 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1623 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1624 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1625 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1626 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1627 } 1628 1629 /**************************************************************************** 1630 * int hdac_suspend(device_t) 1631 * 1632 * Suspend and power down HDA bus and codecs. 1633 ****************************************************************************/ 1634 static int 1635 hdac_suspend(device_t dev) 1636 { 1637 struct hdac_softc *sc = device_get_softc(dev); 1638 1639 HDA_BOOTHVERBOSE( 1640 device_printf(dev, "Suspend...\n"); 1641 ); 1642 bus_generic_suspend(dev); 1643 1644 hdac_lock(sc); 1645 HDA_BOOTHVERBOSE( 1646 device_printf(dev, "Reset controller...\n"); 1647 ); 1648 callout_stop(&sc->poll_callout); 1649 hdac_reset(sc, false); 1650 hdac_unlock(sc); 1651 callout_drain(&sc->poll_callout); 1652 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1653 HDA_BOOTHVERBOSE( 1654 device_printf(dev, "Suspend done\n"); 1655 ); 1656 return (0); 1657 } 1658 1659 /**************************************************************************** 1660 * int hdac_resume(device_t) 1661 * 1662 * Powerup and restore HDA bus and codecs state. 1663 ****************************************************************************/ 1664 static int 1665 hdac_resume(device_t dev) 1666 { 1667 struct hdac_softc *sc = device_get_softc(dev); 1668 int error; 1669 1670 HDA_BOOTHVERBOSE( 1671 device_printf(dev, "Resume...\n"); 1672 ); 1673 hdac_lock(sc); 1674 1675 /* Quiesce everything */ 1676 HDA_BOOTHVERBOSE( 1677 device_printf(dev, "Reset controller...\n"); 1678 ); 1679 hdac_reset(sc, true); 1680 1681 /* Initialize the CORB and RIRB */ 1682 hdac_corb_init(sc); 1683 hdac_rirb_init(sc); 1684 1685 HDA_BOOTHVERBOSE( 1686 device_printf(dev, "Starting CORB Engine...\n"); 1687 ); 1688 hdac_corb_start(sc); 1689 HDA_BOOTHVERBOSE( 1690 device_printf(dev, "Starting RIRB Engine...\n"); 1691 ); 1692 hdac_rirb_start(sc); 1693 1694 /* 1695 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1696 * (status change) events. The documentation says that we should 1697 * not make any assumptions about the state of this register and 1698 * set it explicitly. 1699 * Also, clear HDAC_STATESTS. 1700 * NB: this needs to be done before the interrupt is enabled as 1701 * the handler does not expect this interrupt source. 1702 */ 1703 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1704 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1705 1706 HDA_BOOTHVERBOSE( 1707 device_printf(dev, "Enabling controller interrupt...\n"); 1708 ); 1709 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1710 HDAC_GCTL_UNSOL); 1711 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1712 DELAY(1000); 1713 hdac_poll_reinit(sc); 1714 hdac_unlock(sc); 1715 1716 error = bus_generic_resume(dev); 1717 HDA_BOOTHVERBOSE( 1718 device_printf(dev, "Resume done\n"); 1719 ); 1720 return (error); 1721 } 1722 1723 /**************************************************************************** 1724 * int hdac_detach(device_t) 1725 * 1726 * Detach and free up resources utilized by the hdac device. 1727 ****************************************************************************/ 1728 static int 1729 hdac_detach(device_t dev) 1730 { 1731 struct hdac_softc *sc = device_get_softc(dev); 1732 device_t *devlist; 1733 int cad, i, devcount, error; 1734 1735 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1736 return (error); 1737 for (i = 0; i < devcount; i++) { 1738 cad = (intptr_t)device_get_ivars(devlist[i]); 1739 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1740 free(devlist, M_TEMP); 1741 return (error); 1742 } 1743 sc->codecs[cad].dev = NULL; 1744 } 1745 free(devlist, M_TEMP); 1746 1747 hdac_lock(sc); 1748 hdac_reset(sc, false); 1749 hdac_unlock(sc); 1750 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1751 hdac_irq_free(sc); 1752 1753 for (i = 0; i < sc->num_ss; i++) 1754 hdac_dma_free(sc, &sc->streams[i].bdl); 1755 free(sc->streams, M_HDAC); 1756 hdac_dma_free(sc, &sc->pos_dma); 1757 hdac_dma_free(sc, &sc->rirb_dma); 1758 hdac_dma_free(sc, &sc->corb_dma); 1759 if (sc->chan_dmat != NULL) { 1760 bus_dma_tag_destroy(sc->chan_dmat); 1761 sc->chan_dmat = NULL; 1762 } 1763 hdac_mem_free(sc); 1764 snd_mtxfree(sc->lock); 1765 return (0); 1766 } 1767 1768 static bus_dma_tag_t 1769 hdac_get_dma_tag(device_t dev, device_t child) 1770 { 1771 struct hdac_softc *sc = device_get_softc(dev); 1772 1773 return (sc->chan_dmat); 1774 } 1775 1776 static int 1777 hdac_print_child(device_t dev, device_t child) 1778 { 1779 int retval; 1780 1781 retval = bus_print_child_header(dev, child); 1782 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1783 retval += bus_print_child_footer(dev, child); 1784 1785 return (retval); 1786 } 1787 1788 static int 1789 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1790 { 1791 1792 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1793 return (0); 1794 } 1795 1796 static int 1797 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1798 { 1799 struct hdac_softc *sc = device_get_softc(dev); 1800 nid_t cad = (uintptr_t)device_get_ivars(child); 1801 1802 sbuf_printf(sb, 1803 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1804 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1805 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1806 return (0); 1807 } 1808 1809 static int 1810 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1811 { 1812 struct hdac_softc *sc = device_get_softc(dev); 1813 nid_t cad = (uintptr_t)device_get_ivars(child); 1814 1815 switch (which) { 1816 case HDA_IVAR_CODEC_ID: 1817 *result = cad; 1818 break; 1819 case HDA_IVAR_VENDOR_ID: 1820 *result = sc->codecs[cad].vendor_id; 1821 break; 1822 case HDA_IVAR_DEVICE_ID: 1823 *result = sc->codecs[cad].device_id; 1824 break; 1825 case HDA_IVAR_REVISION_ID: 1826 *result = sc->codecs[cad].revision_id; 1827 break; 1828 case HDA_IVAR_STEPPING_ID: 1829 *result = sc->codecs[cad].stepping_id; 1830 break; 1831 case HDA_IVAR_SUBVENDOR_ID: 1832 *result = pci_get_subvendor(dev); 1833 break; 1834 case HDA_IVAR_SUBDEVICE_ID: 1835 *result = pci_get_subdevice(dev); 1836 break; 1837 case HDA_IVAR_DMA_NOCACHE: 1838 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1839 break; 1840 case HDA_IVAR_STRIPES_MASK: 1841 *result = (1 << (1 << sc->num_sdo)) - 1; 1842 break; 1843 default: 1844 return (ENOENT); 1845 } 1846 return (0); 1847 } 1848 1849 static struct mtx * 1850 hdac_get_mtx(device_t dev, device_t child) 1851 { 1852 struct hdac_softc *sc = device_get_softc(dev); 1853 1854 return (sc->lock); 1855 } 1856 1857 static uint32_t 1858 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1859 { 1860 1861 return (hdac_send_command(device_get_softc(dev), 1862 (intptr_t)device_get_ivars(child), verb)); 1863 } 1864 1865 static int 1866 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1867 { 1868 int i, ss; 1869 1870 ss = -1; 1871 /* Allocate ISS/OSS first. */ 1872 if (dir == 0) { 1873 for (i = 0; i < sc->num_iss; i++) { 1874 if (sc->streams[i].stream == stream) { 1875 ss = i; 1876 break; 1877 } 1878 } 1879 } else { 1880 for (i = 0; i < sc->num_oss; i++) { 1881 if (sc->streams[i + sc->num_iss].stream == stream) { 1882 ss = i + sc->num_iss; 1883 break; 1884 } 1885 } 1886 } 1887 /* Fallback to BSS. */ 1888 if (ss == -1) { 1889 for (i = 0; i < sc->num_bss; i++) { 1890 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1891 == stream) { 1892 ss = i + sc->num_iss + sc->num_oss; 1893 break; 1894 } 1895 } 1896 } 1897 return (ss); 1898 } 1899 1900 static int 1901 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1902 uint32_t **dmapos) 1903 { 1904 struct hdac_softc *sc = device_get_softc(dev); 1905 nid_t cad = (uintptr_t)device_get_ivars(child); 1906 int stream, ss, bw, maxbw, prevbw; 1907 1908 /* Look for empty stream. */ 1909 ss = hdac_find_stream(sc, dir, 0); 1910 1911 /* Return if found nothing. */ 1912 if (ss < 0) 1913 return (0); 1914 1915 /* Check bus bandwidth. */ 1916 bw = hdac_bdata_rate(format, dir); 1917 if (dir == 1) { 1918 bw *= 1 << (sc->num_sdo - stripe); 1919 prevbw = sc->sdo_bw_used; 1920 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1921 } else { 1922 prevbw = sc->codecs[cad].sdi_bw_used; 1923 maxbw = 48000 * 464; 1924 } 1925 HDA_BOOTHVERBOSE( 1926 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1927 (bw + prevbw) / 1000, maxbw / 1000, 1928 bw + prevbw > maxbw ? " -- OVERFLOW!" : ""); 1929 ); 1930 if (bw + prevbw > maxbw) 1931 return (0); 1932 if (dir == 1) 1933 sc->sdo_bw_used += bw; 1934 else 1935 sc->codecs[cad].sdi_bw_used += bw; 1936 1937 /* Allocate stream number */ 1938 if (ss >= sc->num_iss + sc->num_oss) 1939 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1940 else if (ss >= sc->num_iss) 1941 stream = ss - sc->num_iss + 1; 1942 else 1943 stream = ss + 1; 1944 1945 sc->streams[ss].dev = child; 1946 sc->streams[ss].dir = dir; 1947 sc->streams[ss].stream = stream; 1948 sc->streams[ss].bw = bw; 1949 sc->streams[ss].format = format; 1950 sc->streams[ss].stripe = stripe; 1951 if (dmapos != NULL) { 1952 if (sc->pos_dma.dma_vaddr != NULL) 1953 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1954 else 1955 *dmapos = NULL; 1956 } 1957 return (stream); 1958 } 1959 1960 static void 1961 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1962 { 1963 struct hdac_softc *sc = device_get_softc(dev); 1964 nid_t cad = (uintptr_t)device_get_ivars(child); 1965 int ss; 1966 1967 ss = hdac_find_stream(sc, dir, stream); 1968 KASSERT(ss >= 0, 1969 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1970 if (dir == 1) 1971 sc->sdo_bw_used -= sc->streams[ss].bw; 1972 else 1973 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1974 sc->streams[ss].stream = 0; 1975 sc->streams[ss].dev = NULL; 1976 } 1977 1978 static int 1979 hdac_stream_start(device_t dev, device_t child, int dir, int stream, 1980 bus_addr_t buf, int blksz, int blkcnt) 1981 { 1982 struct hdac_softc *sc = device_get_softc(dev); 1983 struct hdac_bdle *bdle; 1984 uint64_t addr; 1985 int i, ss, off; 1986 uint32_t ctl; 1987 1988 ss = hdac_find_stream(sc, dir, stream); 1989 KASSERT(ss >= 0, 1990 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 1991 1992 addr = (uint64_t)buf; 1993 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 1994 for (i = 0; i < blkcnt; i++, bdle++) { 1995 bdle->addrl = htole32((uint32_t)addr); 1996 bdle->addrh = htole32((uint32_t)(addr >> 32)); 1997 bdle->len = htole32(blksz); 1998 bdle->ioc = htole32(1); 1999 addr += blksz; 2000 } 2001 2002 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2003 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 2004 2005 off = ss << 5; 2006 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 2007 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 2008 addr = sc->streams[ss].bdl.dma_paddr; 2009 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 2010 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 2011 2012 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 2013 if (dir) 2014 ctl |= HDAC_SDCTL2_DIR; 2015 else 2016 ctl &= ~HDAC_SDCTL2_DIR; 2017 ctl &= ~HDAC_SDCTL2_STRM_MASK; 2018 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 2019 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 2020 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 2021 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 2022 2023 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 2024 2025 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2026 ctl |= 1 << ss; 2027 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2028 2029 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 2030 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 2031 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2032 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2033 HDAC_SDCTL_RUN; 2034 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2035 2036 sc->streams[ss].blksz = blksz; 2037 sc->streams[ss].running = 1; 2038 hdac_poll_reinit(sc); 2039 return (0); 2040 } 2041 2042 static void 2043 hdac_stream_stop(device_t dev, device_t child, int dir, int stream) 2044 { 2045 struct hdac_softc *sc = device_get_softc(dev); 2046 int ss, off; 2047 uint32_t ctl; 2048 2049 ss = hdac_find_stream(sc, dir, stream); 2050 KASSERT(ss >= 0, 2051 ("Stop for not allocated stream (%d/%d)\n", dir, stream)); 2052 2053 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2054 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE); 2055 2056 off = ss << 5; 2057 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2058 ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2059 HDAC_SDCTL_RUN); 2060 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2061 2062 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2063 ctl &= ~(1 << ss); 2064 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2065 2066 sc->streams[ss].running = 0; 2067 hdac_poll_reinit(sc); 2068 } 2069 2070 static void 2071 hdac_stream_reset(device_t dev, device_t child, int dir, int stream) 2072 { 2073 struct hdac_softc *sc = device_get_softc(dev); 2074 int timeout = 1000; 2075 int to = timeout; 2076 int ss, off; 2077 uint32_t ctl; 2078 2079 ss = hdac_find_stream(sc, dir, stream); 2080 KASSERT(ss >= 0, 2081 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2082 2083 off = ss << 5; 2084 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2085 ctl |= HDAC_SDCTL_SRST; 2086 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2087 do { 2088 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2089 if (ctl & HDAC_SDCTL_SRST) 2090 break; 2091 DELAY(10); 2092 } while (--to); 2093 if (!(ctl & HDAC_SDCTL_SRST)) 2094 device_printf(dev, "Reset setting timeout\n"); 2095 ctl &= ~HDAC_SDCTL_SRST; 2096 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2097 to = timeout; 2098 do { 2099 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2100 if (!(ctl & HDAC_SDCTL_SRST)) 2101 break; 2102 DELAY(10); 2103 } while (--to); 2104 if (ctl & HDAC_SDCTL_SRST) 2105 device_printf(dev, "Reset timeout!\n"); 2106 } 2107 2108 static uint32_t 2109 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream) 2110 { 2111 struct hdac_softc *sc = device_get_softc(dev); 2112 int ss, off; 2113 2114 ss = hdac_find_stream(sc, dir, stream); 2115 KASSERT(ss >= 0, 2116 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2117 2118 off = ss << 5; 2119 return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB)); 2120 } 2121 2122 static int 2123 hdac_unsol_alloc(device_t dev, device_t child, int tag) 2124 { 2125 struct hdac_softc *sc = device_get_softc(dev); 2126 2127 sc->unsol_registered++; 2128 hdac_poll_reinit(sc); 2129 return (tag); 2130 } 2131 2132 static void 2133 hdac_unsol_free(device_t dev, device_t child, int tag) 2134 { 2135 struct hdac_softc *sc = device_get_softc(dev); 2136 2137 sc->unsol_registered--; 2138 hdac_poll_reinit(sc); 2139 } 2140 2141 static device_method_t hdac_methods[] = { 2142 /* device interface */ 2143 DEVMETHOD(device_probe, hdac_probe), 2144 DEVMETHOD(device_attach, hdac_attach), 2145 DEVMETHOD(device_detach, hdac_detach), 2146 DEVMETHOD(device_suspend, hdac_suspend), 2147 DEVMETHOD(device_resume, hdac_resume), 2148 /* Bus interface */ 2149 DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag), 2150 DEVMETHOD(bus_print_child, hdac_print_child), 2151 DEVMETHOD(bus_child_location, hdac_child_location), 2152 DEVMETHOD(bus_child_pnpinfo, hdac_child_pnpinfo_method), 2153 DEVMETHOD(bus_read_ivar, hdac_read_ivar), 2154 DEVMETHOD(hdac_get_mtx, hdac_get_mtx), 2155 DEVMETHOD(hdac_codec_command, hdac_codec_command), 2156 DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc), 2157 DEVMETHOD(hdac_stream_free, hdac_stream_free), 2158 DEVMETHOD(hdac_stream_start, hdac_stream_start), 2159 DEVMETHOD(hdac_stream_stop, hdac_stream_stop), 2160 DEVMETHOD(hdac_stream_reset, hdac_stream_reset), 2161 DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr), 2162 DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc), 2163 DEVMETHOD(hdac_unsol_free, hdac_unsol_free), 2164 DEVMETHOD_END 2165 }; 2166 2167 static driver_t hdac_driver = { 2168 "hdac", 2169 hdac_methods, 2170 sizeof(struct hdac_softc), 2171 }; 2172 2173 static devclass_t hdac_devclass; 2174 2175 DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL); 2176