1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 SND_DECLARE_FILE("$FreeBSD$"); 55 56 #define hdac_lock(sc) snd_mtxlock((sc)->lock) 57 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) 58 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) 59 60 #define HDAC_QUIRK_64BIT (1 << 0) 61 #define HDAC_QUIRK_DMAPOS (1 << 1) 62 #define HDAC_QUIRK_MSI (1 << 2) 63 64 static const struct { 65 const char *key; 66 uint32_t value; 67 } hdac_quirks_tab[] = { 68 { "64bit", HDAC_QUIRK_64BIT }, 69 { "dmapos", HDAC_QUIRK_DMAPOS }, 70 { "msi", HDAC_QUIRK_MSI }, 71 }; 72 73 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 74 75 static const struct { 76 uint32_t model; 77 const char *desc; 78 char quirks_on; 79 char quirks_off; 80 } hdac_devices[] = { 81 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 82 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 83 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 84 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 85 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 87 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 88 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 89 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 90 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 91 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 92 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 93 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 94 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 95 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, 96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 115 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 116 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 117 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 118 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 119 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 120 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 121 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 122 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 123 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 124 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 125 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 126 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 127 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 128 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 129 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 130 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 131 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 132 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 133 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 134 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 135 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 136 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 137 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 138 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 139 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 140 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 141 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 142 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 143 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 144 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 145 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 146 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 147 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 148 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 149 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 150 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 151 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 152 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 153 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 154 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 155 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 156 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 157 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 158 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 159 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 160 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 161 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 162 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 163 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 164 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 165 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 166 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 167 { HDA_ATI_R600, "ATI R600", 0, 0 }, 168 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 169 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 170 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 171 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 172 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 173 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 174 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 175 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 176 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 177 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 178 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 179 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 180 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 181 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 182 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 183 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 184 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 185 { HDA_AMD_X370, "AMD X370", 0, 0 }, 186 { HDA_AMD_X570, "AMD X570", 0, 0 }, 187 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 188 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 189 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 190 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 191 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 192 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 193 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 194 /* Unknown */ 195 { HDA_INTEL_ALL, "Intel", 0, 0 }, 196 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 197 { HDA_ATI_ALL, "ATI", 0, 0 }, 198 { HDA_AMD_ALL, "AMD", 0, 0 }, 199 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 200 { HDA_VIA_ALL, "VIA", 0, 0 }, 201 { HDA_SIS_ALL, "SiS", 0, 0 }, 202 { HDA_ULI_ALL, "ULI", 0, 0 }, 203 }; 204 205 static const struct { 206 uint16_t vendor; 207 uint8_t reg; 208 uint8_t mask; 209 uint8_t enable; 210 } hdac_pcie_snoop[] = { 211 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 212 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 213 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 214 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 215 }; 216 217 /**************************************************************************** 218 * Function prototypes 219 ****************************************************************************/ 220 static void hdac_intr_handler(void *); 221 static int hdac_reset(struct hdac_softc *, bool); 222 static int hdac_get_capabilities(struct hdac_softc *); 223 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 224 static int hdac_dma_alloc(struct hdac_softc *, 225 struct hdac_dma *, bus_size_t); 226 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 227 static int hdac_mem_alloc(struct hdac_softc *); 228 static void hdac_mem_free(struct hdac_softc *); 229 static int hdac_irq_alloc(struct hdac_softc *); 230 static void hdac_irq_free(struct hdac_softc *); 231 static void hdac_corb_init(struct hdac_softc *); 232 static void hdac_rirb_init(struct hdac_softc *); 233 static void hdac_corb_start(struct hdac_softc *); 234 static void hdac_rirb_start(struct hdac_softc *); 235 236 static void hdac_attach2(void *); 237 238 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 239 240 static int hdac_probe(device_t); 241 static int hdac_attach(device_t); 242 static int hdac_detach(device_t); 243 static int hdac_suspend(device_t); 244 static int hdac_resume(device_t); 245 246 static int hdac_rirb_flush(struct hdac_softc *sc); 247 static int hdac_unsolq_flush(struct hdac_softc *sc); 248 249 /* This function surely going to make its way into upper level someday. */ 250 static void 251 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 252 { 253 const char *res = NULL; 254 int i = 0, j, k, len, inv; 255 256 if (resource_string_value(device_get_name(sc->dev), 257 device_get_unit(sc->dev), "config", &res) != 0) 258 return; 259 if (!(res != NULL && strlen(res) > 0)) 260 return; 261 HDA_BOOTVERBOSE( 262 device_printf(sc->dev, "Config options:"); 263 ); 264 for (;;) { 265 while (res[i] != '\0' && 266 (res[i] == ',' || isspace(res[i]) != 0)) 267 i++; 268 if (res[i] == '\0') { 269 HDA_BOOTVERBOSE( 270 printf("\n"); 271 ); 272 return; 273 } 274 j = i; 275 while (res[j] != '\0' && 276 !(res[j] == ',' || isspace(res[j]) != 0)) 277 j++; 278 len = j - i; 279 if (len > 2 && strncmp(res + i, "no", 2) == 0) 280 inv = 2; 281 else 282 inv = 0; 283 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 284 if (strncmp(res + i + inv, 285 hdac_quirks_tab[k].key, len - inv) != 0) 286 continue; 287 if (len - inv != strlen(hdac_quirks_tab[k].key)) 288 continue; 289 HDA_BOOTVERBOSE( 290 printf(" %s%s", (inv != 0) ? "no" : "", 291 hdac_quirks_tab[k].key); 292 ); 293 if (inv == 0) { 294 *on |= hdac_quirks_tab[k].value; 295 *off &= ~hdac_quirks_tab[k].value; 296 } else if (inv != 0) { 297 *off |= hdac_quirks_tab[k].value; 298 *on &= ~hdac_quirks_tab[k].value; 299 } 300 break; 301 } 302 i = j; 303 } 304 } 305 306 static void 307 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 308 { 309 device_t dev; 310 uint8_t rirbsts; 311 int i; 312 313 /* Was this a controller interrupt? */ 314 if (intsts & HDAC_INTSTS_CIS) { 315 /* 316 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then 317 * we will need to check and clear HDAC_STATESTS. 318 * That event is used to report codec status changes such as 319 * a reset or a wake-up event. 320 */ 321 /* 322 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we 323 * will need to check and clear HDAC_CORBSTS_CMEI in 324 * HDAC_CORBSTS. 325 * That event is used to report CORB memory errors. 326 */ 327 /* 328 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we 329 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in 330 * HDAC_RIRBSTS. 331 * That event is used to report response FIFO overruns. 332 */ 333 334 /* Get as many responses that we can */ 335 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 336 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 337 HDAC_WRITE_1(&sc->mem, 338 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 339 hdac_rirb_flush(sc); 340 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 341 } 342 if (sc->unsolq_rp != sc->unsolq_wp) 343 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 344 } 345 346 if (intsts & HDAC_INTSTS_SIS_MASK) { 347 for (i = 0; i < sc->num_ss; i++) { 348 if ((intsts & (1 << i)) == 0) 349 continue; 350 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 351 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 352 if ((dev = sc->streams[i].dev) != NULL) { 353 HDAC_STREAM_INTR(dev, 354 sc->streams[i].dir, sc->streams[i].stream); 355 } 356 } 357 } 358 } 359 360 /**************************************************************************** 361 * void hdac_intr_handler(void *) 362 * 363 * Interrupt handler. Processes interrupts received from the hdac. 364 ****************************************************************************/ 365 static void 366 hdac_intr_handler(void *context) 367 { 368 struct hdac_softc *sc; 369 uint32_t intsts; 370 371 sc = (struct hdac_softc *)context; 372 373 /* 374 * Loop until HDAC_INTSTS_GIS gets clear. 375 * It is plausible that hardware interrupts a host only when GIS goes 376 * from zero to one. GIS is formed by OR-ing multiple hardware 377 * statuses, so it's possible that a previously cleared status gets set 378 * again while another status has not been cleared yet. Thus, there 379 * will be no new interrupt as GIS always stayed set. If we don't 380 * re-examine GIS then we can leave it set and never get an interrupt 381 * again. 382 */ 383 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 384 while ((intsts & HDAC_INTSTS_GIS) != 0) { 385 hdac_lock(sc); 386 hdac_one_intr(sc, intsts); 387 hdac_unlock(sc); 388 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 389 } 390 } 391 392 static void 393 hdac_poll_callback(void *arg) 394 { 395 struct hdac_softc *sc = arg; 396 397 if (sc == NULL) 398 return; 399 400 hdac_lock(sc); 401 if (sc->polling == 0) { 402 hdac_unlock(sc); 403 return; 404 } 405 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 406 hdac_unlock(sc); 407 408 hdac_intr_handler(sc); 409 } 410 411 /**************************************************************************** 412 * int hdac_reset(hdac_softc *, bool) 413 * 414 * Reset the hdac to a quiescent and known state. 415 ****************************************************************************/ 416 static int 417 hdac_reset(struct hdac_softc *sc, bool wakeup) 418 { 419 uint32_t gctl; 420 int count, i; 421 422 /* 423 * Stop all Streams DMA engine 424 */ 425 for (i = 0; i < sc->num_iss; i++) 426 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 427 for (i = 0; i < sc->num_oss; i++) 428 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 429 for (i = 0; i < sc->num_bss; i++) 430 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 431 432 /* 433 * Stop Control DMA engines. 434 */ 435 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 436 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 437 438 /* 439 * Reset DMA position buffer. 440 */ 441 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 442 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 443 444 /* 445 * Reset the controller. The reset must remain asserted for 446 * a minimum of 100us. 447 */ 448 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 449 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 450 count = 10000; 451 do { 452 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 453 if (!(gctl & HDAC_GCTL_CRST)) 454 break; 455 DELAY(10); 456 } while (--count); 457 if (gctl & HDAC_GCTL_CRST) { 458 device_printf(sc->dev, "Unable to put hdac in reset\n"); 459 return (ENXIO); 460 } 461 462 /* If wakeup is not requested - leave the controller in reset state. */ 463 if (!wakeup) 464 return (0); 465 466 DELAY(100); 467 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 468 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 469 count = 10000; 470 do { 471 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 472 if (gctl & HDAC_GCTL_CRST) 473 break; 474 DELAY(10); 475 } while (--count); 476 if (!(gctl & HDAC_GCTL_CRST)) { 477 device_printf(sc->dev, "Device stuck in reset\n"); 478 return (ENXIO); 479 } 480 481 /* 482 * Wait for codecs to finish their own reset sequence. The delay here 483 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 484 */ 485 DELAY(1000); 486 487 return (0); 488 } 489 490 /**************************************************************************** 491 * int hdac_get_capabilities(struct hdac_softc *); 492 * 493 * Retreive the general capabilities of the hdac; 494 * Number of Input Streams 495 * Number of Output Streams 496 * Number of bidirectional Streams 497 * 64bit ready 498 * CORB and RIRB sizes 499 ****************************************************************************/ 500 static int 501 hdac_get_capabilities(struct hdac_softc *sc) 502 { 503 uint16_t gcap; 504 uint8_t corbsize, rirbsize; 505 506 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 507 sc->num_iss = HDAC_GCAP_ISS(gcap); 508 sc->num_oss = HDAC_GCAP_OSS(gcap); 509 sc->num_bss = HDAC_GCAP_BSS(gcap); 510 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 511 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 512 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 513 if (sc->quirks_on & HDAC_QUIRK_64BIT) 514 sc->support_64bit = 1; 515 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 516 sc->support_64bit = 0; 517 518 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 519 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 520 HDAC_CORBSIZE_CORBSZCAP_256) 521 sc->corb_size = 256; 522 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 523 HDAC_CORBSIZE_CORBSZCAP_16) 524 sc->corb_size = 16; 525 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 526 HDAC_CORBSIZE_CORBSZCAP_2) 527 sc->corb_size = 2; 528 else { 529 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 530 __func__, corbsize); 531 return (ENXIO); 532 } 533 534 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 535 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 536 HDAC_RIRBSIZE_RIRBSZCAP_256) 537 sc->rirb_size = 256; 538 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 539 HDAC_RIRBSIZE_RIRBSZCAP_16) 540 sc->rirb_size = 16; 541 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 542 HDAC_RIRBSIZE_RIRBSZCAP_2) 543 sc->rirb_size = 2; 544 else { 545 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 546 __func__, rirbsize); 547 return (ENXIO); 548 } 549 550 HDA_BOOTVERBOSE( 551 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 552 "NSDO %d%s, CORB %d, RIRB %d\n", 553 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 554 sc->support_64bit ? ", 64bit" : "", 555 sc->corb_size, sc->rirb_size); 556 ); 557 558 return (0); 559 } 560 561 /**************************************************************************** 562 * void hdac_dma_cb 563 * 564 * This function is called by bus_dmamap_load when the mapping has been 565 * established. We just record the physical address of the mapping into 566 * the struct hdac_dma passed in. 567 ****************************************************************************/ 568 static void 569 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 570 { 571 struct hdac_dma *dma; 572 573 if (error == 0) { 574 dma = (struct hdac_dma *)callback_arg; 575 dma->dma_paddr = segs[0].ds_addr; 576 } 577 } 578 579 /**************************************************************************** 580 * int hdac_dma_alloc 581 * 582 * This function allocate and setup a dma region (struct hdac_dma). 583 * It must be freed by a corresponding hdac_dma_free. 584 ****************************************************************************/ 585 static int 586 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 587 { 588 bus_size_t roundsz; 589 int result; 590 591 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 592 bzero(dma, sizeof(*dma)); 593 594 /* 595 * Create a DMA tag 596 */ 597 result = bus_dma_tag_create( 598 bus_get_dma_tag(sc->dev), /* parent */ 599 HDA_DMA_ALIGNMENT, /* alignment */ 600 0, /* boundary */ 601 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 602 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 603 BUS_SPACE_MAXADDR, /* highaddr */ 604 NULL, /* filtfunc */ 605 NULL, /* fistfuncarg */ 606 roundsz, /* maxsize */ 607 1, /* nsegments */ 608 roundsz, /* maxsegsz */ 609 0, /* flags */ 610 NULL, /* lockfunc */ 611 NULL, /* lockfuncarg */ 612 &dma->dma_tag); /* dmat */ 613 if (result != 0) { 614 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 615 __func__, result); 616 goto hdac_dma_alloc_fail; 617 } 618 619 /* 620 * Allocate DMA memory 621 */ 622 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 623 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 624 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 625 BUS_DMA_COHERENT), 626 &dma->dma_map); 627 if (result != 0) { 628 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 629 __func__, result); 630 goto hdac_dma_alloc_fail; 631 } 632 633 dma->dma_size = roundsz; 634 635 /* 636 * Map the memory 637 */ 638 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 639 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 640 if (result != 0 || dma->dma_paddr == 0) { 641 if (result == 0) 642 result = ENOMEM; 643 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 644 __func__, result); 645 goto hdac_dma_alloc_fail; 646 } 647 648 HDA_BOOTHVERBOSE( 649 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 650 __func__, (uintmax_t)size, (uintmax_t)roundsz); 651 ); 652 653 return (0); 654 655 hdac_dma_alloc_fail: 656 hdac_dma_free(sc, dma); 657 658 return (result); 659 } 660 661 /**************************************************************************** 662 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 663 * 664 * Free a struct hdac_dma that has been previously allocated via the 665 * hdac_dma_alloc function. 666 ****************************************************************************/ 667 static void 668 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 669 { 670 if (dma->dma_paddr != 0) { 671 /* Flush caches */ 672 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 674 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 675 dma->dma_paddr = 0; 676 } 677 if (dma->dma_vaddr != NULL) { 678 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 679 dma->dma_vaddr = NULL; 680 } 681 if (dma->dma_tag != NULL) { 682 bus_dma_tag_destroy(dma->dma_tag); 683 dma->dma_tag = NULL; 684 } 685 dma->dma_size = 0; 686 } 687 688 /**************************************************************************** 689 * int hdac_mem_alloc(struct hdac_softc *) 690 * 691 * Allocate all the bus resources necessary to speak with the physical 692 * controller. 693 ****************************************************************************/ 694 static int 695 hdac_mem_alloc(struct hdac_softc *sc) 696 { 697 struct hdac_mem *mem; 698 699 mem = &sc->mem; 700 mem->mem_rid = PCIR_BAR(0); 701 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 702 &mem->mem_rid, RF_ACTIVE); 703 if (mem->mem_res == NULL) { 704 device_printf(sc->dev, 705 "%s: Unable to allocate memory resource\n", __func__); 706 return (ENOMEM); 707 } 708 mem->mem_tag = rman_get_bustag(mem->mem_res); 709 mem->mem_handle = rman_get_bushandle(mem->mem_res); 710 711 return (0); 712 } 713 714 /**************************************************************************** 715 * void hdac_mem_free(struct hdac_softc *) 716 * 717 * Free up resources previously allocated by hdac_mem_alloc. 718 ****************************************************************************/ 719 static void 720 hdac_mem_free(struct hdac_softc *sc) 721 { 722 struct hdac_mem *mem; 723 724 mem = &sc->mem; 725 if (mem->mem_res != NULL) 726 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 727 mem->mem_res); 728 mem->mem_res = NULL; 729 } 730 731 /**************************************************************************** 732 * int hdac_irq_alloc(struct hdac_softc *) 733 * 734 * Allocate and setup the resources necessary for interrupt handling. 735 ****************************************************************************/ 736 static int 737 hdac_irq_alloc(struct hdac_softc *sc) 738 { 739 struct hdac_irq *irq; 740 int result; 741 742 irq = &sc->irq; 743 irq->irq_rid = 0x0; 744 745 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 746 (result = pci_msi_count(sc->dev)) == 1 && 747 pci_alloc_msi(sc->dev, &result) == 0) 748 irq->irq_rid = 0x1; 749 750 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 751 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 752 if (irq->irq_res == NULL) { 753 device_printf(sc->dev, "%s: Unable to allocate irq\n", 754 __func__); 755 goto hdac_irq_alloc_fail; 756 } 757 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 758 NULL, hdac_intr_handler, sc, &irq->irq_handle); 759 if (result != 0) { 760 device_printf(sc->dev, 761 "%s: Unable to setup interrupt handler (%d)\n", 762 __func__, result); 763 goto hdac_irq_alloc_fail; 764 } 765 766 return (0); 767 768 hdac_irq_alloc_fail: 769 hdac_irq_free(sc); 770 771 return (ENXIO); 772 } 773 774 /**************************************************************************** 775 * void hdac_irq_free(struct hdac_softc *) 776 * 777 * Free up resources previously allocated by hdac_irq_alloc. 778 ****************************************************************************/ 779 static void 780 hdac_irq_free(struct hdac_softc *sc) 781 { 782 struct hdac_irq *irq; 783 784 irq = &sc->irq; 785 if (irq->irq_res != NULL && irq->irq_handle != NULL) 786 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 787 if (irq->irq_res != NULL) 788 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 789 irq->irq_res); 790 if (irq->irq_rid == 0x1) 791 pci_release_msi(sc->dev); 792 irq->irq_handle = NULL; 793 irq->irq_res = NULL; 794 irq->irq_rid = 0x0; 795 } 796 797 /**************************************************************************** 798 * void hdac_corb_init(struct hdac_softc *) 799 * 800 * Initialize the corb registers for operations but do not start it up yet. 801 * The CORB engine must not be running when this function is called. 802 ****************************************************************************/ 803 static void 804 hdac_corb_init(struct hdac_softc *sc) 805 { 806 uint8_t corbsize; 807 uint64_t corbpaddr; 808 809 /* Setup the CORB size. */ 810 switch (sc->corb_size) { 811 case 256: 812 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 813 break; 814 case 16: 815 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 816 break; 817 case 2: 818 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 819 break; 820 default: 821 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 822 } 823 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 824 825 /* Setup the CORB Address in the hdac */ 826 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 827 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 828 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 829 830 /* Set the WP and RP */ 831 sc->corb_wp = 0; 832 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 833 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 834 /* 835 * The HDA specification indicates that the CORBRPRST bit will always 836 * read as zero. Unfortunately, it seems that at least the 82801G 837 * doesn't reset the bit to zero, which stalls the corb engine. 838 * manually reset the bit to zero before continuing. 839 */ 840 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 841 842 /* Enable CORB error reporting */ 843 #if 0 844 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 845 #endif 846 } 847 848 /**************************************************************************** 849 * void hdac_rirb_init(struct hdac_softc *) 850 * 851 * Initialize the rirb registers for operations but do not start it up yet. 852 * The RIRB engine must not be running when this function is called. 853 ****************************************************************************/ 854 static void 855 hdac_rirb_init(struct hdac_softc *sc) 856 { 857 uint8_t rirbsize; 858 uint64_t rirbpaddr; 859 860 /* Setup the RIRB size. */ 861 switch (sc->rirb_size) { 862 case 256: 863 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 864 break; 865 case 16: 866 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 867 break; 868 case 2: 869 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 870 break; 871 default: 872 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 873 } 874 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 875 876 /* Setup the RIRB Address in the hdac */ 877 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 878 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 879 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 880 881 /* Setup the WP and RP */ 882 sc->rirb_rp = 0; 883 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 884 885 /* Setup the interrupt threshold */ 886 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 887 888 /* Enable Overrun and response received reporting */ 889 #if 0 890 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 891 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 892 #else 893 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 894 #endif 895 896 /* 897 * Make sure that the Host CPU cache doesn't contain any dirty 898 * cache lines that falls in the rirb. If I understood correctly, it 899 * should be sufficient to do this only once as the rirb is purely 900 * read-only from now on. 901 */ 902 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 903 BUS_DMASYNC_PREREAD); 904 } 905 906 /**************************************************************************** 907 * void hdac_corb_start(hdac_softc *) 908 * 909 * Startup the corb DMA engine 910 ****************************************************************************/ 911 static void 912 hdac_corb_start(struct hdac_softc *sc) 913 { 914 uint32_t corbctl; 915 916 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 917 corbctl |= HDAC_CORBCTL_CORBRUN; 918 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 919 } 920 921 /**************************************************************************** 922 * void hdac_rirb_start(hdac_softc *) 923 * 924 * Startup the rirb DMA engine 925 ****************************************************************************/ 926 static void 927 hdac_rirb_start(struct hdac_softc *sc) 928 { 929 uint32_t rirbctl; 930 931 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 932 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 933 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 934 } 935 936 static int 937 hdac_rirb_flush(struct hdac_softc *sc) 938 { 939 struct hdac_rirb *rirb_base, *rirb; 940 nid_t cad; 941 uint32_t resp, resp_ex; 942 uint8_t rirbwp; 943 int ret; 944 945 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 946 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 947 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 948 BUS_DMASYNC_POSTREAD); 949 950 ret = 0; 951 while (sc->rirb_rp != rirbwp) { 952 sc->rirb_rp++; 953 sc->rirb_rp %= sc->rirb_size; 954 rirb = &rirb_base[sc->rirb_rp]; 955 resp = le32toh(rirb->response); 956 resp_ex = le32toh(rirb->response_ex); 957 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 958 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 959 sc->unsolq[sc->unsolq_wp++] = resp; 960 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 961 sc->unsolq[sc->unsolq_wp++] = cad; 962 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 963 } else if (sc->codecs[cad].pending <= 0) { 964 device_printf(sc->dev, "Unexpected unsolicited " 965 "response from address %d: %08x\n", cad, resp); 966 } else { 967 sc->codecs[cad].response = resp; 968 sc->codecs[cad].pending--; 969 } 970 ret++; 971 } 972 973 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 974 BUS_DMASYNC_PREREAD); 975 return (ret); 976 } 977 978 static int 979 hdac_unsolq_flush(struct hdac_softc *sc) 980 { 981 device_t child; 982 nid_t cad; 983 uint32_t resp; 984 int ret = 0; 985 986 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 987 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 988 while (sc->unsolq_rp != sc->unsolq_wp) { 989 resp = sc->unsolq[sc->unsolq_rp++]; 990 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 991 cad = sc->unsolq[sc->unsolq_rp++]; 992 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 993 if ((child = sc->codecs[cad].dev) != NULL) 994 HDAC_UNSOL_INTR(child, resp); 995 ret++; 996 } 997 sc->unsolq_st = HDAC_UNSOLQ_READY; 998 } 999 1000 return (ret); 1001 } 1002 1003 /**************************************************************************** 1004 * uint32_t hdac_send_command 1005 * 1006 * Wrapper function that sends only one command to a given codec 1007 ****************************************************************************/ 1008 static uint32_t 1009 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 1010 { 1011 int timeout; 1012 uint32_t *corb; 1013 1014 hdac_lockassert(sc); 1015 verb &= ~HDA_CMD_CAD_MASK; 1016 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1017 sc->codecs[cad].response = HDA_INVALID; 1018 1019 sc->codecs[cad].pending++; 1020 sc->corb_wp++; 1021 sc->corb_wp %= sc->corb_size; 1022 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1023 bus_dmamap_sync(sc->corb_dma.dma_tag, 1024 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1025 corb[sc->corb_wp] = htole32(verb); 1026 bus_dmamap_sync(sc->corb_dma.dma_tag, 1027 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1028 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1029 1030 timeout = 10000; 1031 do { 1032 if (hdac_rirb_flush(sc) == 0) 1033 DELAY(10); 1034 } while (sc->codecs[cad].pending != 0 && --timeout); 1035 1036 if (sc->codecs[cad].pending != 0) { 1037 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1038 verb, cad); 1039 sc->codecs[cad].pending = 0; 1040 } 1041 1042 if (sc->unsolq_rp != sc->unsolq_wp) 1043 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1044 return (sc->codecs[cad].response); 1045 } 1046 1047 /**************************************************************************** 1048 * Device Methods 1049 ****************************************************************************/ 1050 1051 /**************************************************************************** 1052 * int hdac_probe(device_t) 1053 * 1054 * Probe for the presence of an hdac. If none is found, check for a generic 1055 * match using the subclass of the device. 1056 ****************************************************************************/ 1057 static int 1058 hdac_probe(device_t dev) 1059 { 1060 int i, result; 1061 uint32_t model; 1062 uint16_t class, subclass; 1063 char desc[64]; 1064 1065 model = (uint32_t)pci_get_device(dev) << 16; 1066 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1067 class = pci_get_class(dev); 1068 subclass = pci_get_subclass(dev); 1069 1070 bzero(desc, sizeof(desc)); 1071 result = ENXIO; 1072 for (i = 0; i < nitems(hdac_devices); i++) { 1073 if (hdac_devices[i].model == model) { 1074 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1075 result = BUS_PROBE_DEFAULT; 1076 break; 1077 } 1078 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1079 class == PCIC_MULTIMEDIA && 1080 subclass == PCIS_MULTIMEDIA_HDA) { 1081 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1082 hdac_devices[i].desc, pci_get_device(dev)); 1083 result = BUS_PROBE_GENERIC; 1084 break; 1085 } 1086 } 1087 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1088 subclass == PCIS_MULTIMEDIA_HDA) { 1089 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1090 result = BUS_PROBE_GENERIC; 1091 } 1092 if (result != ENXIO) { 1093 strlcat(desc, " HDA Controller", sizeof(desc)); 1094 device_set_desc_copy(dev, desc); 1095 } 1096 1097 return (result); 1098 } 1099 1100 static void 1101 hdac_unsolq_task(void *context, int pending) 1102 { 1103 struct hdac_softc *sc; 1104 1105 sc = (struct hdac_softc *)context; 1106 1107 hdac_lock(sc); 1108 hdac_unsolq_flush(sc); 1109 hdac_unlock(sc); 1110 } 1111 1112 /**************************************************************************** 1113 * int hdac_attach(device_t) 1114 * 1115 * Attach the device into the kernel. Interrupts usually won't be enabled 1116 * when this function is called. Setup everything that doesn't require 1117 * interrupts and defer probing of codecs until interrupts are enabled. 1118 ****************************************************************************/ 1119 static int 1120 hdac_attach(device_t dev) 1121 { 1122 struct hdac_softc *sc; 1123 int result; 1124 int i, devid = -1; 1125 uint32_t model; 1126 uint16_t class, subclass; 1127 uint16_t vendor; 1128 uint8_t v; 1129 1130 sc = device_get_softc(dev); 1131 HDA_BOOTVERBOSE( 1132 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", 1133 pci_get_subvendor(dev), pci_get_subdevice(dev)); 1134 device_printf(dev, "HDA Driver Revision: %s\n", 1135 HDA_DRV_TEST_REV); 1136 ); 1137 1138 model = (uint32_t)pci_get_device(dev) << 16; 1139 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1140 class = pci_get_class(dev); 1141 subclass = pci_get_subclass(dev); 1142 1143 for (i = 0; i < nitems(hdac_devices); i++) { 1144 if (hdac_devices[i].model == model) { 1145 devid = i; 1146 break; 1147 } 1148 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1149 class == PCIC_MULTIMEDIA && 1150 subclass == PCIS_MULTIMEDIA_HDA) { 1151 devid = i; 1152 break; 1153 } 1154 } 1155 1156 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); 1157 sc->dev = dev; 1158 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); 1159 callout_init(&sc->poll_callout, 1); 1160 for (i = 0; i < HDAC_CODEC_MAX; i++) 1161 sc->codecs[i].dev = NULL; 1162 if (devid >= 0) { 1163 sc->quirks_on = hdac_devices[devid].quirks_on; 1164 sc->quirks_off = hdac_devices[devid].quirks_off; 1165 } else { 1166 sc->quirks_on = 0; 1167 sc->quirks_off = 0; 1168 } 1169 if (resource_int_value(device_get_name(dev), 1170 device_get_unit(dev), "msi", &i) == 0) { 1171 if (i == 0) 1172 sc->quirks_off |= HDAC_QUIRK_MSI; 1173 else { 1174 sc->quirks_on |= HDAC_QUIRK_MSI; 1175 sc->quirks_off |= ~HDAC_QUIRK_MSI; 1176 } 1177 } 1178 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); 1179 HDA_BOOTVERBOSE( 1180 device_printf(sc->dev, 1181 "Config options: on=0x%08x off=0x%08x\n", 1182 sc->quirks_on, sc->quirks_off); 1183 ); 1184 sc->poll_ival = hz; 1185 if (resource_int_value(device_get_name(dev), 1186 device_get_unit(dev), "polling", &i) == 0 && i != 0) 1187 sc->polling = 1; 1188 else 1189 sc->polling = 0; 1190 1191 pci_enable_busmaster(dev); 1192 1193 vendor = pci_get_vendor(dev); 1194 if (vendor == INTEL_VENDORID) { 1195 /* TCSEL -> TC0 */ 1196 v = pci_read_config(dev, 0x44, 1); 1197 pci_write_config(dev, 0x44, v & 0xf8, 1); 1198 HDA_BOOTHVERBOSE( 1199 device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v, 1200 pci_read_config(dev, 0x44, 1)); 1201 ); 1202 } 1203 1204 #if defined(__i386__) || defined(__amd64__) 1205 sc->flags |= HDAC_F_DMA_NOCACHE; 1206 1207 if (resource_int_value(device_get_name(dev), 1208 device_get_unit(dev), "snoop", &i) == 0 && i != 0) { 1209 #else 1210 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1211 #endif 1212 /* 1213 * Try to enable PCIe snoop to avoid messing around with 1214 * uncacheable DMA attribute. Since PCIe snoop register 1215 * config is pretty much vendor specific, there are no 1216 * general solutions on how to enable it, forcing us (even 1217 * Microsoft) to enable uncacheable or write combined DMA 1218 * by default. 1219 * 1220 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1221 */ 1222 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1223 if (hdac_pcie_snoop[i].vendor != vendor) 1224 continue; 1225 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1226 if (hdac_pcie_snoop[i].reg == 0x00) 1227 break; 1228 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1229 if ((v & hdac_pcie_snoop[i].enable) == 1230 hdac_pcie_snoop[i].enable) 1231 break; 1232 v &= hdac_pcie_snoop[i].mask; 1233 v |= hdac_pcie_snoop[i].enable; 1234 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1235 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1236 if ((v & hdac_pcie_snoop[i].enable) != 1237 hdac_pcie_snoop[i].enable) { 1238 HDA_BOOTVERBOSE( 1239 device_printf(dev, 1240 "WARNING: Failed to enable PCIe " 1241 "snoop!\n"); 1242 ); 1243 #if defined(__i386__) || defined(__amd64__) 1244 sc->flags |= HDAC_F_DMA_NOCACHE; 1245 #endif 1246 } 1247 break; 1248 } 1249 #if defined(__i386__) || defined(__amd64__) 1250 } 1251 #endif 1252 1253 HDA_BOOTHVERBOSE( 1254 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1255 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1256 "Uncacheable" : "PCIe snoop", vendor); 1257 ); 1258 1259 /* Allocate resources */ 1260 result = hdac_mem_alloc(sc); 1261 if (result != 0) 1262 goto hdac_attach_fail; 1263 result = hdac_irq_alloc(sc); 1264 if (result != 0) 1265 goto hdac_attach_fail; 1266 1267 /* Get Capabilities */ 1268 result = hdac_get_capabilities(sc); 1269 if (result != 0) 1270 goto hdac_attach_fail; 1271 1272 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1273 result = hdac_dma_alloc(sc, &sc->corb_dma, 1274 sc->corb_size * sizeof(uint32_t)); 1275 if (result != 0) 1276 goto hdac_attach_fail; 1277 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1278 sc->rirb_size * sizeof(struct hdac_rirb)); 1279 if (result != 0) 1280 goto hdac_attach_fail; 1281 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1282 M_HDAC, M_ZERO | M_WAITOK); 1283 for (i = 0; i < sc->num_ss; i++) { 1284 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1285 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1286 if (result != 0) 1287 goto hdac_attach_fail; 1288 } 1289 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1290 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1291 HDA_BOOTVERBOSE( 1292 device_printf(dev, "Failed to " 1293 "allocate DMA pos buffer " 1294 "(non-fatal)\n"); 1295 ); 1296 } else { 1297 uint64_t addr = sc->pos_dma.dma_paddr; 1298 1299 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1300 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1301 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1302 HDAC_DPLBASE_DPLBASE_DMAPBE); 1303 } 1304 } 1305 1306 result = bus_dma_tag_create( 1307 bus_get_dma_tag(sc->dev), /* parent */ 1308 HDA_DMA_ALIGNMENT, /* alignment */ 1309 0, /* boundary */ 1310 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 1311 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1312 BUS_SPACE_MAXADDR, /* highaddr */ 1313 NULL, /* filtfunc */ 1314 NULL, /* fistfuncarg */ 1315 HDA_BUFSZ_MAX, /* maxsize */ 1316 1, /* nsegments */ 1317 HDA_BUFSZ_MAX, /* maxsegsz */ 1318 0, /* flags */ 1319 NULL, /* lockfunc */ 1320 NULL, /* lockfuncarg */ 1321 &sc->chan_dmat); /* dmat */ 1322 if (result != 0) { 1323 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1324 __func__, result); 1325 goto hdac_attach_fail; 1326 } 1327 1328 /* Quiesce everything */ 1329 HDA_BOOTHVERBOSE( 1330 device_printf(dev, "Reset controller...\n"); 1331 ); 1332 hdac_reset(sc, true); 1333 1334 /* Initialize the CORB and RIRB */ 1335 hdac_corb_init(sc); 1336 hdac_rirb_init(sc); 1337 1338 /* Defer remaining of initialization until interrupts are enabled */ 1339 sc->intrhook.ich_func = hdac_attach2; 1340 sc->intrhook.ich_arg = (void *)sc; 1341 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1342 sc->intrhook.ich_func = NULL; 1343 hdac_attach2((void *)sc); 1344 } 1345 1346 return (0); 1347 1348 hdac_attach_fail: 1349 hdac_irq_free(sc); 1350 if (sc->streams != NULL) 1351 for (i = 0; i < sc->num_ss; i++) 1352 hdac_dma_free(sc, &sc->streams[i].bdl); 1353 free(sc->streams, M_HDAC); 1354 hdac_dma_free(sc, &sc->rirb_dma); 1355 hdac_dma_free(sc, &sc->corb_dma); 1356 hdac_mem_free(sc); 1357 snd_mtxfree(sc->lock); 1358 1359 return (ENXIO); 1360 } 1361 1362 static int 1363 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1364 { 1365 struct hdac_softc *sc; 1366 device_t *devlist; 1367 device_t dev; 1368 int devcount, i, err, val; 1369 1370 dev = oidp->oid_arg1; 1371 sc = device_get_softc(dev); 1372 if (sc == NULL) 1373 return (EINVAL); 1374 val = 0; 1375 err = sysctl_handle_int(oidp, &val, 0, req); 1376 if (err != 0 || req->newptr == NULL || val == 0) 1377 return (err); 1378 1379 /* XXX: Temporary. For debugging. */ 1380 if (val == 100) { 1381 hdac_suspend(dev); 1382 return (0); 1383 } else if (val == 101) { 1384 hdac_resume(dev); 1385 return (0); 1386 } 1387 1388 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) 1389 return (err); 1390 hdac_lock(sc); 1391 for (i = 0; i < devcount; i++) 1392 HDAC_PINDUMP(devlist[i]); 1393 hdac_unlock(sc); 1394 free(devlist, M_TEMP); 1395 return (0); 1396 } 1397 1398 static int 1399 hdac_mdata_rate(uint16_t fmt) 1400 { 1401 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1402 int rate, bits; 1403 1404 if (fmt & (1 << 14)) 1405 rate = 44100; 1406 else 1407 rate = 48000; 1408 rate *= ((fmt >> 11) & 0x07) + 1; 1409 rate /= ((fmt >> 8) & 0x07) + 1; 1410 bits = mbits[(fmt >> 4) & 0x03]; 1411 bits *= (fmt & 0x0f) + 1; 1412 return (rate * bits); 1413 } 1414 1415 static int 1416 hdac_bdata_rate(uint16_t fmt, int output) 1417 { 1418 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1419 int rate, bits; 1420 1421 rate = 48000; 1422 rate *= ((fmt >> 11) & 0x07) + 1; 1423 bits = bbits[(fmt >> 4) & 0x03]; 1424 bits *= (fmt & 0x0f) + 1; 1425 if (!output) 1426 bits = ((bits + 7) & ~0x07) + 10; 1427 return (rate * bits); 1428 } 1429 1430 static void 1431 hdac_poll_reinit(struct hdac_softc *sc) 1432 { 1433 int i, pollticks, min = 1000000; 1434 struct hdac_stream *s; 1435 1436 if (sc->polling == 0) 1437 return; 1438 if (sc->unsol_registered > 0) 1439 min = hz / 2; 1440 for (i = 0; i < sc->num_ss; i++) { 1441 s = &sc->streams[i]; 1442 if (s->running == 0) 1443 continue; 1444 pollticks = ((uint64_t)hz * s->blksz) / 1445 (hdac_mdata_rate(s->format) / 8); 1446 pollticks >>= 1; 1447 if (pollticks > hz) 1448 pollticks = hz; 1449 if (pollticks < 1) 1450 pollticks = 1; 1451 if (min > pollticks) 1452 min = pollticks; 1453 } 1454 sc->poll_ival = min; 1455 if (min == 1000000) 1456 callout_stop(&sc->poll_callout); 1457 else 1458 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1459 } 1460 1461 static int 1462 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1463 { 1464 struct hdac_softc *sc; 1465 device_t dev; 1466 uint32_t ctl; 1467 int err, val; 1468 1469 dev = oidp->oid_arg1; 1470 sc = device_get_softc(dev); 1471 if (sc == NULL) 1472 return (EINVAL); 1473 hdac_lock(sc); 1474 val = sc->polling; 1475 hdac_unlock(sc); 1476 err = sysctl_handle_int(oidp, &val, 0, req); 1477 1478 if (err != 0 || req->newptr == NULL) 1479 return (err); 1480 if (val < 0 || val > 1) 1481 return (EINVAL); 1482 1483 hdac_lock(sc); 1484 if (val != sc->polling) { 1485 if (val == 0) { 1486 callout_stop(&sc->poll_callout); 1487 hdac_unlock(sc); 1488 callout_drain(&sc->poll_callout); 1489 hdac_lock(sc); 1490 sc->polling = 0; 1491 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1492 ctl |= HDAC_INTCTL_GIE; 1493 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1494 } else { 1495 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1496 ctl &= ~HDAC_INTCTL_GIE; 1497 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1498 sc->polling = 1; 1499 hdac_poll_reinit(sc); 1500 } 1501 } 1502 hdac_unlock(sc); 1503 1504 return (err); 1505 } 1506 1507 static void 1508 hdac_attach2(void *arg) 1509 { 1510 struct hdac_softc *sc; 1511 device_t child; 1512 uint32_t vendorid, revisionid; 1513 int i; 1514 uint16_t statests; 1515 1516 sc = (struct hdac_softc *)arg; 1517 1518 hdac_lock(sc); 1519 1520 /* Remove ourselves from the config hooks */ 1521 if (sc->intrhook.ich_func != NULL) { 1522 config_intrhook_disestablish(&sc->intrhook); 1523 sc->intrhook.ich_func = NULL; 1524 } 1525 1526 HDA_BOOTHVERBOSE( 1527 device_printf(sc->dev, "Starting CORB Engine...\n"); 1528 ); 1529 hdac_corb_start(sc); 1530 HDA_BOOTHVERBOSE( 1531 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1532 ); 1533 hdac_rirb_start(sc); 1534 1535 /* 1536 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1537 * (status change) interrupts. The documentation says that we 1538 * should not make any assumptions about the state of this register 1539 * and set it explicitly. 1540 * NB: this needs to be done before the interrupt is enabled as 1541 * the handler does not expect this interrupt source. 1542 */ 1543 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1544 1545 /* 1546 * Read and clear post-reset SDI wake status. 1547 * Each set bit corresponds to a codec that came out of reset. 1548 */ 1549 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1550 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1551 1552 HDA_BOOTHVERBOSE( 1553 device_printf(sc->dev, 1554 "Enabling controller interrupt...\n"); 1555 ); 1556 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1557 HDAC_GCTL_UNSOL); 1558 if (sc->polling == 0) { 1559 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1560 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1561 } 1562 DELAY(1000); 1563 1564 HDA_BOOTHVERBOSE( 1565 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1566 ); 1567 hdac_unlock(sc); 1568 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1569 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1570 HDA_BOOTHVERBOSE( 1571 device_printf(sc->dev, 1572 "Found CODEC at address %d\n", i); 1573 ); 1574 hdac_lock(sc); 1575 vendorid = hdac_send_command(sc, i, 1576 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1577 revisionid = hdac_send_command(sc, i, 1578 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1579 hdac_unlock(sc); 1580 if (vendorid == HDA_INVALID && 1581 revisionid == HDA_INVALID) { 1582 device_printf(sc->dev, 1583 "CODEC at address %d not responding!\n", i); 1584 continue; 1585 } 1586 sc->codecs[i].vendor_id = 1587 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1588 sc->codecs[i].device_id = 1589 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1590 sc->codecs[i].revision_id = 1591 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1592 sc->codecs[i].stepping_id = 1593 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1594 child = device_add_child(sc->dev, "hdacc", -1); 1595 if (child == NULL) { 1596 device_printf(sc->dev, 1597 "Failed to add CODEC device\n"); 1598 continue; 1599 } 1600 device_set_ivars(child, (void *)(intptr_t)i); 1601 sc->codecs[i].dev = child; 1602 } 1603 } 1604 bus_generic_attach(sc->dev); 1605 1606 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1607 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1608 "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1609 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1610 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1611 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1612 "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1613 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1614 } 1615 1616 /**************************************************************************** 1617 * int hdac_suspend(device_t) 1618 * 1619 * Suspend and power down HDA bus and codecs. 1620 ****************************************************************************/ 1621 static int 1622 hdac_suspend(device_t dev) 1623 { 1624 struct hdac_softc *sc = device_get_softc(dev); 1625 1626 HDA_BOOTHVERBOSE( 1627 device_printf(dev, "Suspend...\n"); 1628 ); 1629 bus_generic_suspend(dev); 1630 1631 hdac_lock(sc); 1632 HDA_BOOTHVERBOSE( 1633 device_printf(dev, "Reset controller...\n"); 1634 ); 1635 callout_stop(&sc->poll_callout); 1636 hdac_reset(sc, false); 1637 hdac_unlock(sc); 1638 callout_drain(&sc->poll_callout); 1639 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1640 HDA_BOOTHVERBOSE( 1641 device_printf(dev, "Suspend done\n"); 1642 ); 1643 return (0); 1644 } 1645 1646 /**************************************************************************** 1647 * int hdac_resume(device_t) 1648 * 1649 * Powerup and restore HDA bus and codecs state. 1650 ****************************************************************************/ 1651 static int 1652 hdac_resume(device_t dev) 1653 { 1654 struct hdac_softc *sc = device_get_softc(dev); 1655 int error; 1656 1657 HDA_BOOTHVERBOSE( 1658 device_printf(dev, "Resume...\n"); 1659 ); 1660 hdac_lock(sc); 1661 1662 /* Quiesce everything */ 1663 HDA_BOOTHVERBOSE( 1664 device_printf(dev, "Reset controller...\n"); 1665 ); 1666 hdac_reset(sc, true); 1667 1668 /* Initialize the CORB and RIRB */ 1669 hdac_corb_init(sc); 1670 hdac_rirb_init(sc); 1671 1672 HDA_BOOTHVERBOSE( 1673 device_printf(dev, "Starting CORB Engine...\n"); 1674 ); 1675 hdac_corb_start(sc); 1676 HDA_BOOTHVERBOSE( 1677 device_printf(dev, "Starting RIRB Engine...\n"); 1678 ); 1679 hdac_rirb_start(sc); 1680 1681 /* 1682 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1683 * (status change) events. The documentation says that we should 1684 * not make any assumptions about the state of this register and 1685 * set it explicitly. 1686 * Also, clear HDAC_STATESTS. 1687 * NB: this needs to be done before the interrupt is enabled as 1688 * the handler does not expect this interrupt source. 1689 */ 1690 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1691 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1692 1693 HDA_BOOTHVERBOSE( 1694 device_printf(dev, "Enabling controller interrupt...\n"); 1695 ); 1696 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1697 HDAC_GCTL_UNSOL); 1698 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1699 DELAY(1000); 1700 hdac_poll_reinit(sc); 1701 hdac_unlock(sc); 1702 1703 error = bus_generic_resume(dev); 1704 HDA_BOOTHVERBOSE( 1705 device_printf(dev, "Resume done\n"); 1706 ); 1707 return (error); 1708 } 1709 1710 /**************************************************************************** 1711 * int hdac_detach(device_t) 1712 * 1713 * Detach and free up resources utilized by the hdac device. 1714 ****************************************************************************/ 1715 static int 1716 hdac_detach(device_t dev) 1717 { 1718 struct hdac_softc *sc = device_get_softc(dev); 1719 device_t *devlist; 1720 int cad, i, devcount, error; 1721 1722 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1723 return (error); 1724 for (i = 0; i < devcount; i++) { 1725 cad = (intptr_t)device_get_ivars(devlist[i]); 1726 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1727 free(devlist, M_TEMP); 1728 return (error); 1729 } 1730 sc->codecs[cad].dev = NULL; 1731 } 1732 free(devlist, M_TEMP); 1733 1734 hdac_lock(sc); 1735 hdac_reset(sc, false); 1736 hdac_unlock(sc); 1737 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1738 hdac_irq_free(sc); 1739 1740 for (i = 0; i < sc->num_ss; i++) 1741 hdac_dma_free(sc, &sc->streams[i].bdl); 1742 free(sc->streams, M_HDAC); 1743 hdac_dma_free(sc, &sc->pos_dma); 1744 hdac_dma_free(sc, &sc->rirb_dma); 1745 hdac_dma_free(sc, &sc->corb_dma); 1746 if (sc->chan_dmat != NULL) { 1747 bus_dma_tag_destroy(sc->chan_dmat); 1748 sc->chan_dmat = NULL; 1749 } 1750 hdac_mem_free(sc); 1751 snd_mtxfree(sc->lock); 1752 return (0); 1753 } 1754 1755 static bus_dma_tag_t 1756 hdac_get_dma_tag(device_t dev, device_t child) 1757 { 1758 struct hdac_softc *sc = device_get_softc(dev); 1759 1760 return (sc->chan_dmat); 1761 } 1762 1763 static int 1764 hdac_print_child(device_t dev, device_t child) 1765 { 1766 int retval; 1767 1768 retval = bus_print_child_header(dev, child); 1769 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1770 retval += bus_print_child_footer(dev, child); 1771 1772 return (retval); 1773 } 1774 1775 static int 1776 hdac_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) 1777 { 1778 1779 snprintf(buf, buflen, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1780 return (0); 1781 } 1782 1783 static int 1784 hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, 1785 size_t buflen) 1786 { 1787 struct hdac_softc *sc = device_get_softc(dev); 1788 nid_t cad = (uintptr_t)device_get_ivars(child); 1789 1790 snprintf(buf, buflen, 1791 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1792 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1793 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1794 return (0); 1795 } 1796 1797 static int 1798 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1799 { 1800 struct hdac_softc *sc = device_get_softc(dev); 1801 nid_t cad = (uintptr_t)device_get_ivars(child); 1802 1803 switch (which) { 1804 case HDA_IVAR_CODEC_ID: 1805 *result = cad; 1806 break; 1807 case HDA_IVAR_VENDOR_ID: 1808 *result = sc->codecs[cad].vendor_id; 1809 break; 1810 case HDA_IVAR_DEVICE_ID: 1811 *result = sc->codecs[cad].device_id; 1812 break; 1813 case HDA_IVAR_REVISION_ID: 1814 *result = sc->codecs[cad].revision_id; 1815 break; 1816 case HDA_IVAR_STEPPING_ID: 1817 *result = sc->codecs[cad].stepping_id; 1818 break; 1819 case HDA_IVAR_SUBVENDOR_ID: 1820 *result = pci_get_subvendor(dev); 1821 break; 1822 case HDA_IVAR_SUBDEVICE_ID: 1823 *result = pci_get_subdevice(dev); 1824 break; 1825 case HDA_IVAR_DMA_NOCACHE: 1826 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1827 break; 1828 case HDA_IVAR_STRIPES_MASK: 1829 *result = (1 << (1 << sc->num_sdo)) - 1; 1830 break; 1831 default: 1832 return (ENOENT); 1833 } 1834 return (0); 1835 } 1836 1837 static struct mtx * 1838 hdac_get_mtx(device_t dev, device_t child) 1839 { 1840 struct hdac_softc *sc = device_get_softc(dev); 1841 1842 return (sc->lock); 1843 } 1844 1845 static uint32_t 1846 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1847 { 1848 1849 return (hdac_send_command(device_get_softc(dev), 1850 (intptr_t)device_get_ivars(child), verb)); 1851 } 1852 1853 static int 1854 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1855 { 1856 int i, ss; 1857 1858 ss = -1; 1859 /* Allocate ISS/OSS first. */ 1860 if (dir == 0) { 1861 for (i = 0; i < sc->num_iss; i++) { 1862 if (sc->streams[i].stream == stream) { 1863 ss = i; 1864 break; 1865 } 1866 } 1867 } else { 1868 for (i = 0; i < sc->num_oss; i++) { 1869 if (sc->streams[i + sc->num_iss].stream == stream) { 1870 ss = i + sc->num_iss; 1871 break; 1872 } 1873 } 1874 } 1875 /* Fallback to BSS. */ 1876 if (ss == -1) { 1877 for (i = 0; i < sc->num_bss; i++) { 1878 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1879 == stream) { 1880 ss = i + sc->num_iss + sc->num_oss; 1881 break; 1882 } 1883 } 1884 } 1885 return (ss); 1886 } 1887 1888 static int 1889 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1890 uint32_t **dmapos) 1891 { 1892 struct hdac_softc *sc = device_get_softc(dev); 1893 nid_t cad = (uintptr_t)device_get_ivars(child); 1894 int stream, ss, bw, maxbw, prevbw; 1895 1896 /* Look for empty stream. */ 1897 ss = hdac_find_stream(sc, dir, 0); 1898 1899 /* Return if found nothing. */ 1900 if (ss < 0) 1901 return (0); 1902 1903 /* Check bus bandwidth. */ 1904 bw = hdac_bdata_rate(format, dir); 1905 if (dir == 1) { 1906 bw *= 1 << (sc->num_sdo - stripe); 1907 prevbw = sc->sdo_bw_used; 1908 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1909 } else { 1910 prevbw = sc->codecs[cad].sdi_bw_used; 1911 maxbw = 48000 * 464; 1912 } 1913 HDA_BOOTHVERBOSE( 1914 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1915 (bw + prevbw) / 1000, maxbw / 1000, 1916 bw + prevbw > maxbw ? " -- OVERFLOW!" : ""); 1917 ); 1918 if (bw + prevbw > maxbw) 1919 return (0); 1920 if (dir == 1) 1921 sc->sdo_bw_used += bw; 1922 else 1923 sc->codecs[cad].sdi_bw_used += bw; 1924 1925 /* Allocate stream number */ 1926 if (ss >= sc->num_iss + sc->num_oss) 1927 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1928 else if (ss >= sc->num_iss) 1929 stream = ss - sc->num_iss + 1; 1930 else 1931 stream = ss + 1; 1932 1933 sc->streams[ss].dev = child; 1934 sc->streams[ss].dir = dir; 1935 sc->streams[ss].stream = stream; 1936 sc->streams[ss].bw = bw; 1937 sc->streams[ss].format = format; 1938 sc->streams[ss].stripe = stripe; 1939 if (dmapos != NULL) { 1940 if (sc->pos_dma.dma_vaddr != NULL) 1941 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1942 else 1943 *dmapos = NULL; 1944 } 1945 return (stream); 1946 } 1947 1948 static void 1949 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1950 { 1951 struct hdac_softc *sc = device_get_softc(dev); 1952 nid_t cad = (uintptr_t)device_get_ivars(child); 1953 int ss; 1954 1955 ss = hdac_find_stream(sc, dir, stream); 1956 KASSERT(ss >= 0, 1957 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1958 if (dir == 1) 1959 sc->sdo_bw_used -= sc->streams[ss].bw; 1960 else 1961 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1962 sc->streams[ss].stream = 0; 1963 sc->streams[ss].dev = NULL; 1964 } 1965 1966 static int 1967 hdac_stream_start(device_t dev, device_t child, int dir, int stream, 1968 bus_addr_t buf, int blksz, int blkcnt) 1969 { 1970 struct hdac_softc *sc = device_get_softc(dev); 1971 struct hdac_bdle *bdle; 1972 uint64_t addr; 1973 int i, ss, off; 1974 uint32_t ctl; 1975 1976 ss = hdac_find_stream(sc, dir, stream); 1977 KASSERT(ss >= 0, 1978 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 1979 1980 addr = (uint64_t)buf; 1981 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 1982 for (i = 0; i < blkcnt; i++, bdle++) { 1983 bdle->addrl = htole32((uint32_t)addr); 1984 bdle->addrh = htole32((uint32_t)(addr >> 32)); 1985 bdle->len = htole32(blksz); 1986 bdle->ioc = htole32(1); 1987 addr += blksz; 1988 } 1989 1990 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 1991 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 1992 1993 off = ss << 5; 1994 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 1995 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 1996 addr = sc->streams[ss].bdl.dma_paddr; 1997 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 1998 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 1999 2000 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 2001 if (dir) 2002 ctl |= HDAC_SDCTL2_DIR; 2003 else 2004 ctl &= ~HDAC_SDCTL2_DIR; 2005 ctl &= ~HDAC_SDCTL2_STRM_MASK; 2006 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 2007 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 2008 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 2009 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 2010 2011 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 2012 2013 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2014 ctl |= 1 << ss; 2015 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2016 2017 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 2018 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 2019 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2020 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2021 HDAC_SDCTL_RUN; 2022 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2023 2024 sc->streams[ss].blksz = blksz; 2025 sc->streams[ss].running = 1; 2026 hdac_poll_reinit(sc); 2027 return (0); 2028 } 2029 2030 static void 2031 hdac_stream_stop(device_t dev, device_t child, int dir, int stream) 2032 { 2033 struct hdac_softc *sc = device_get_softc(dev); 2034 int ss, off; 2035 uint32_t ctl; 2036 2037 ss = hdac_find_stream(sc, dir, stream); 2038 KASSERT(ss >= 0, 2039 ("Stop for not allocated stream (%d/%d)\n", dir, stream)); 2040 2041 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2042 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE); 2043 2044 off = ss << 5; 2045 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2046 ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2047 HDAC_SDCTL_RUN); 2048 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2049 2050 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2051 ctl &= ~(1 << ss); 2052 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2053 2054 sc->streams[ss].running = 0; 2055 hdac_poll_reinit(sc); 2056 } 2057 2058 static void 2059 hdac_stream_reset(device_t dev, device_t child, int dir, int stream) 2060 { 2061 struct hdac_softc *sc = device_get_softc(dev); 2062 int timeout = 1000; 2063 int to = timeout; 2064 int ss, off; 2065 uint32_t ctl; 2066 2067 ss = hdac_find_stream(sc, dir, stream); 2068 KASSERT(ss >= 0, 2069 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2070 2071 off = ss << 5; 2072 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2073 ctl |= HDAC_SDCTL_SRST; 2074 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2075 do { 2076 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2077 if (ctl & HDAC_SDCTL_SRST) 2078 break; 2079 DELAY(10); 2080 } while (--to); 2081 if (!(ctl & HDAC_SDCTL_SRST)) 2082 device_printf(dev, "Reset setting timeout\n"); 2083 ctl &= ~HDAC_SDCTL_SRST; 2084 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2085 to = timeout; 2086 do { 2087 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2088 if (!(ctl & HDAC_SDCTL_SRST)) 2089 break; 2090 DELAY(10); 2091 } while (--to); 2092 if (ctl & HDAC_SDCTL_SRST) 2093 device_printf(dev, "Reset timeout!\n"); 2094 } 2095 2096 static uint32_t 2097 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream) 2098 { 2099 struct hdac_softc *sc = device_get_softc(dev); 2100 int ss, off; 2101 2102 ss = hdac_find_stream(sc, dir, stream); 2103 KASSERT(ss >= 0, 2104 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2105 2106 off = ss << 5; 2107 return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB)); 2108 } 2109 2110 static int 2111 hdac_unsol_alloc(device_t dev, device_t child, int tag) 2112 { 2113 struct hdac_softc *sc = device_get_softc(dev); 2114 2115 sc->unsol_registered++; 2116 hdac_poll_reinit(sc); 2117 return (tag); 2118 } 2119 2120 static void 2121 hdac_unsol_free(device_t dev, device_t child, int tag) 2122 { 2123 struct hdac_softc *sc = device_get_softc(dev); 2124 2125 sc->unsol_registered--; 2126 hdac_poll_reinit(sc); 2127 } 2128 2129 static device_method_t hdac_methods[] = { 2130 /* device interface */ 2131 DEVMETHOD(device_probe, hdac_probe), 2132 DEVMETHOD(device_attach, hdac_attach), 2133 DEVMETHOD(device_detach, hdac_detach), 2134 DEVMETHOD(device_suspend, hdac_suspend), 2135 DEVMETHOD(device_resume, hdac_resume), 2136 /* Bus interface */ 2137 DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag), 2138 DEVMETHOD(bus_print_child, hdac_print_child), 2139 DEVMETHOD(bus_child_location_str, hdac_child_location_str), 2140 DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method), 2141 DEVMETHOD(bus_read_ivar, hdac_read_ivar), 2142 DEVMETHOD(hdac_get_mtx, hdac_get_mtx), 2143 DEVMETHOD(hdac_codec_command, hdac_codec_command), 2144 DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc), 2145 DEVMETHOD(hdac_stream_free, hdac_stream_free), 2146 DEVMETHOD(hdac_stream_start, hdac_stream_start), 2147 DEVMETHOD(hdac_stream_stop, hdac_stream_stop), 2148 DEVMETHOD(hdac_stream_reset, hdac_stream_reset), 2149 DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr), 2150 DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc), 2151 DEVMETHOD(hdac_unsol_free, hdac_unsol_free), 2152 DEVMETHOD_END 2153 }; 2154 2155 static driver_t hdac_driver = { 2156 "hdac", 2157 hdac_methods, 2158 sizeof(struct hdac_softc), 2159 }; 2160 2161 static devclass_t hdac_devclass; 2162 2163 DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL); 2164