/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel High Definition Audio (Controller) driver for FreeBSD.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include <dev/sound/pcm/sound.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <dev/sound/pci/hda/hdac_private.h>
#include <dev/sound/pci/hda/hdac_reg.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>

#define HDA_DRV_TEST_REV	"20120126_0002"

SND_DECLARE_FILE("$FreeBSD$");

#define hdac_lock(sc)		snd_mtxlock((sc)->lock)
#define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
#define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)

#define HDAC_QUIRK_64BIT	(1 << 0)
#define HDAC_QUIRK_DMAPOS	(1 << 1)
#define HDAC_QUIRK_MSI		(1 << 2)

static const struct {
	const char *key;
	uint32_t value;
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");

static const struct {
	uint32_t model;
	const char *desc;
	char quirks_on;
	char quirks_off;
} hdac_devices[] = {
	{ HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 },
	{ HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
	{ HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
	{ HDA_INTEL_BAY, "Intel BayTrail", 0, 0 },
	{ HDA_INTEL_HSW1, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_HSW2, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_HSW3, "Intel Haswell", 0, 0 },
	{ HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 },
	{ HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 },
	{ HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 },
	{ HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 },
	{ HDA_INTEL_PATSBURG, "Intel Patsburg", 0, 0 },
	{ HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 },
	{ HDA_INTEL_BR, "Intel Braswell", 0, 0 },
"Intel Lynx Point", 0, 0 }, 96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 115 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 116 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 117 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 118 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 119 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 120 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 121 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 122 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 123 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 124 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 125 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 126 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 127 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 128 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 129 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 130 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 131 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 132 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 133 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 134 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 135 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 136 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 137 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 138 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 139 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 140 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 141 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 142 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 143 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 144 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 145 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 146 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 147 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 148 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 149 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 150 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 151 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 152 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 153 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 154 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 155 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 156 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 157 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 158 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 159 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 160 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 161 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI 
	{ HDA_ATI_SB450, "ATI SB450", 0, 0 },
	{ HDA_ATI_SB600, "ATI SB600", 0, 0 },
	{ HDA_ATI_RS600, "ATI RS600", 0, 0 },
	{ HDA_ATI_RS690, "ATI RS690", 0, 0 },
	{ HDA_ATI_RS780, "ATI RS780", 0, 0 },
	{ HDA_ATI_R600, "ATI R600", 0, 0 },
	{ HDA_ATI_RV610, "ATI RV610", 0, 0 },
	{ HDA_ATI_RV620, "ATI RV620", 0, 0 },
	{ HDA_ATI_RV630, "ATI RV630", 0, 0 },
	{ HDA_ATI_RV635, "ATI RV635", 0, 0 },
	{ HDA_ATI_RV710, "ATI RV710", 0, 0 },
	{ HDA_ATI_RV730, "ATI RV730", 0, 0 },
	{ HDA_ATI_RV740, "ATI RV740", 0, 0 },
	{ HDA_ATI_RV770, "ATI RV770", 0, 0 },
	{ HDA_ATI_RV810, "ATI RV810", 0, 0 },
	{ HDA_ATI_RV830, "ATI RV830", 0, 0 },
	{ HDA_ATI_RV840, "ATI RV840", 0, 0 },
	{ HDA_ATI_RV870, "ATI RV870", 0, 0 },
	{ HDA_ATI_RV910, "ATI RV910", 0, 0 },
	{ HDA_ATI_RV930, "ATI RV930", 0, 0 },
	{ HDA_ATI_RV940, "ATI RV940", 0, 0 },
	{ HDA_ATI_RV970, "ATI RV970", 0, 0 },
	{ HDA_ATI_R1000, "ATI R1000", 0, 0 },
	{ HDA_AMD_X370, "AMD X370", 0, 0 },
	{ HDA_AMD_X570, "AMD X570", 0, 0 },
	{ HDA_AMD_STONEY, "AMD Stoney", 0, 0 },
	{ HDA_AMD_RAVEN, "AMD Raven", 0, 0 },
	{ HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 },
	{ HDA_RDC_M3010, "RDC M3010", 0, 0 },
	{ HDA_VIA_VT82XX, "VIA VT8251/8237A", 0, 0 },
	{ HDA_SIS_966, "SiS 966/968", 0, 0 },
	{ HDA_ULI_M5461, "ULI M5461", 0, 0 },
	/* Unknown */
	{ HDA_INTEL_ALL, "Intel", 0, 0 },
	{ HDA_NVIDIA_ALL, "NVIDIA", 0, 0 },
	{ HDA_ATI_ALL, "ATI", 0, 0 },
	{ HDA_AMD_ALL, "AMD", 0, 0 },
	{ HDA_CREATIVE_ALL, "Creative", 0, 0 },
	{ HDA_VIA_ALL, "VIA", 0, 0 },
	{ HDA_SIS_ALL, "SiS", 0, 0 },
	{ HDA_ULI_ALL, "ULI", 0, 0 },
};

static const struct {
	uint16_t vendor;
	uint8_t reg;
	uint8_t mask;
	uint8_t enable;
} hdac_pcie_snoop[] = {
	{ INTEL_VENDORID, 0x00, 0x00, 0x00 },
	{ ATI_VENDORID, 0x42, 0xf8, 0x02 },
	{ AMD_VENDORID, 0x42, 0xf8, 0x02 },
	{ NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
};

/****************************************************************************
 * Function prototypes
 ****************************************************************************/
static void	hdac_intr_handler(void *);
static int	hdac_reset(struct hdac_softc *, bool);
static int	hdac_get_capabilities(struct hdac_softc *);
static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
static int	hdac_dma_alloc(struct hdac_softc *,
		    struct hdac_dma *, bus_size_t);
static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
static int	hdac_mem_alloc(struct hdac_softc *);
static void	hdac_mem_free(struct hdac_softc *);
static int	hdac_irq_alloc(struct hdac_softc *);
static void	hdac_irq_free(struct hdac_softc *);
static void	hdac_corb_init(struct hdac_softc *);
static void	hdac_rirb_init(struct hdac_softc *);
static void	hdac_corb_start(struct hdac_softc *);
static void	hdac_rirb_start(struct hdac_softc *);

static void	hdac_attach2(void *);

static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);

static int	hdac_probe(device_t);
static int	hdac_attach(device_t);
static int	hdac_detach(device_t);
static int	hdac_suspend(device_t);
static int	hdac_resume(device_t);

static int	hdac_rirb_flush(struct hdac_softc *sc);
static int	hdac_unsolq_flush(struct hdac_softc *sc);

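/*
 * The quirk keywords from hdac_quirks_tab above can be toggled at boot time
 * through the "config" hint parsed below, e.g. in /boot/device.hints:
 *
 *	hint.hdac.0.config="msi,nodmapos"
 *
 * Tokens are separated by commas or whitespace; a "no" prefix clears the
 * corresponding quirk instead of setting it.
 */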
/* This function is surely going to make its way into an upper level someday. */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}

static void
hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
{
	device_t dev;
	uint8_t rirbsts;
	int i;

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		/*
		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN,
		 * then we will need to check and clear HDAC_STATESTS.
		 * That event is used to report codec status changes such as
		 * a reset or a wake-up event.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
		 * will need to check and clear HDAC_CORBSTS_CMEI in
		 * HDAC_CORBSTS.
		 * That event is used to report CORB memory errors.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then
		 * we will need to check and clear HDAC_RIRBSTS_RIRBOIS in
		 * HDAC_RIRBSTS.
		 * That event is used to report response FIFO overruns.
		 */

		/* Get as many responses as we can. */
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}
}

/****************************************************************************
 * void hdac_intr_handler(void *)
 *
 * Interrupt handler.  Processes interrupts received from the hdac.
 ****************************************************************************/
static void
hdac_intr_handler(void *context)
{
	struct hdac_softc *sc;
	uint32_t intsts;

	sc = (struct hdac_softc *)context;

	/*
	 * Loop until HDAC_INTSTS_GIS becomes clear.
	 * It is plausible that the hardware interrupts the host only when
	 * GIS goes from zero to one.  GIS is formed by OR-ing multiple
	 * hardware statuses, so it is possible that a previously cleared
	 * status gets set again while another status has not been cleared
	 * yet.  In that case there will be no new interrupt, because GIS
	 * stayed set the whole time.  If we don't re-examine GIS we can
	 * leave it set and never get an interrupt again.
	 */
	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	while ((intsts & HDAC_INTSTS_GIS) != 0) {
		hdac_lock(sc);
		hdac_one_intr(sc, intsts);
		hdac_unlock(sc);
		intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	}
}

static void
hdac_poll_callback(void *arg)
{
	struct hdac_softc *sc = arg;

	if (sc == NULL)
		return;

	hdac_lock(sc);
	if (sc->polling == 0) {
		hdac_unlock(sc);
		return;
	}
	callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
	hdac_unlock(sc);

	hdac_intr_handler(sc);
}

/****************************************************************************
 * int hdac_reset(hdac_softc *, bool)
 *
 * Reset the hdac to a quiescent and known state.
 ****************************************************************************/
static int
hdac_reset(struct hdac_softc *sc, bool wakeup)
{
	uint32_t gctl;
	int count, i;

	/*
	 * Stop all stream DMA engines.
	 */
	for (i = 0; i < sc->num_iss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_oss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_bss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);

	/*
	 * Stop the control DMA engines.
	 */
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);

	/*
	 * Reset the DMA position buffer.
	 */
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);

	/*
	 * Reset the controller.  The reset must remain asserted for
	 * a minimum of 100us.
	 */
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (!(gctl & HDAC_GCTL_CRST))
			break;
		DELAY(10);
	} while (--count);
	if (gctl & HDAC_GCTL_CRST) {
		device_printf(sc->dev, "Unable to put hdac in reset\n");
		return (ENXIO);
	}

	/* If wakeup is not requested, leave the controller in reset state. */
	if (!wakeup)
		return (0);

	DELAY(100);
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (gctl & HDAC_GCTL_CRST)
			break;
		DELAY(10);
	} while (--count);
	if (!(gctl & HDAC_GCTL_CRST)) {
		device_printf(sc->dev, "Device stuck in reset\n");
		return (ENXIO);
	}

	/*
	 * Wait for codecs to finish their own reset sequence.  The delay
	 * here must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
	 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established.  We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a dma region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
 ****************************************************************************/
static int
hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
{
	bus_size_t roundsz;
	int result;

	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
	bzero(dma, sizeof(*dma));

	/*
	 * Create a DMA tag
	 */
	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    roundsz,				/* maxsize */
	    1,					/* nsegments */
	    roundsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &dma->dma_tag);			/* dmat */
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	/*
	 * Allocate DMA memory
	 */
	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
	     BUS_DMA_COHERENT),
	    &dma->dma_map);
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	dma->dma_size = roundsz;

	/*
	 * Map the memory
	 */
	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
	if (result != 0 || dma->dma_paddr == 0) {
		if (result == 0)
			result = ENOMEM;
		device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
	);

	return (0);

hdac_dma_alloc_fail:
	hdac_dma_free(sc, dma);

	return (result);
}

/****************************************************************************
 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
 *
 * Free a struct hdac_dma that has been previously allocated via the
 * hdac_dma_alloc function.
 ****************************************************************************/
static void
hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
{
	if (dma->dma_paddr != 0) {
		/* Flush caches */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		dma->dma_paddr = 0;
	}
	if (dma->dma_vaddr != NULL) {
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_vaddr = NULL;
	}
	if (dma->dma_tag != NULL) {
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
	dma->dma_size = 0;
}

/****************************************************************************
 * int hdac_mem_alloc(struct hdac_softc *)
 *
 * Allocate all the bus resources necessary to speak with the physical
 * controller.
 ****************************************************************************/
static int
hdac_mem_alloc(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	mem->mem_rid = PCIR_BAR(0);
	mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &mem->mem_rid, RF_ACTIVE);
	if (mem->mem_res == NULL) {
		device_printf(sc->dev,
		    "%s: Unable to allocate memory resource\n", __func__);
		return (ENOMEM);
	}
	mem->mem_tag = rman_get_bustag(mem->mem_res);
	mem->mem_handle = rman_get_bushandle(mem->mem_res);

	return (0);
}

/****************************************************************************
 * void hdac_mem_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_mem_alloc.
 ****************************************************************************/
static void
hdac_mem_free(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	if (mem->mem_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
		    mem->mem_res);
	mem->mem_res = NULL;
}

/****************************************************************************
 * int hdac_irq_alloc(struct hdac_softc *)
 *
 * Allocate and setup the resources necessary for interrupt handling.
 ****************************************************************************/
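/*
 * Resource ID 0 selects the legacy INTx interrupt.  If the controller is
 * not quirked against MSI and reports exactly one MSI message, that message
 * is allocated instead and resource ID 1 (the first MSI vector on FreeBSD)
 * is used.
 */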
static int
hdac_irq_alloc(struct hdac_softc *sc)
{
	struct hdac_irq *irq;
	int result;

	irq = &sc->irq;
	irq->irq_rid = 0x0;

	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
	    (result = pci_msi_count(sc->dev)) == 1 &&
	    pci_alloc_msi(sc->dev, &result) == 0)
		irq->irq_rid = 0x1;

	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (irq->irq_res == NULL) {
		device_printf(sc->dev, "%s: Unable to allocate irq\n",
		    __func__);
		goto hdac_irq_alloc_fail;
	}
	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
	if (result != 0) {
		device_printf(sc->dev,
		    "%s: Unable to setup interrupt handler (%d)\n",
		    __func__, result);
		goto hdac_irq_alloc_fail;
	}

	return (0);

hdac_irq_alloc_fail:
	hdac_irq_free(sc);

	return (ENXIO);
}

/****************************************************************************
 * void hdac_irq_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_irq_alloc.
 ****************************************************************************/
static void
hdac_irq_free(struct hdac_softc *sc)
{
	struct hdac_irq *irq;

	irq = &sc->irq;
	if (irq->irq_res != NULL && irq->irq_handle != NULL)
		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
	if (irq->irq_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
		    irq->irq_res);
	if (irq->irq_rid == 0x1)
		pci_release_msi(sc->dev);
	irq->irq_handle = NULL;
	irq->irq_res = NULL;
	irq->irq_rid = 0x0;
}

/****************************************************************************
 * void hdac_corb_init(struct hdac_softc *)
 *
 * Initialize the corb registers for operations but do not start it up yet.
 * The CORB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_corb_init(struct hdac_softc *sc)
{
	uint8_t corbsize;
	uint64_t corbpaddr;

	/* Setup the CORB size. */
	switch (sc->corb_size) {
	case 256:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
		break;
	case 16:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
		break;
	case 2:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
		break;
	default:
		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);

	/* Setup the CORB Address in the hdac */
	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));

	/* Set the WP and RP */
	sc->corb_wp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
	/*
	 * The HDA specification indicates that the CORBRPRST bit will always
	 * read as zero.  Unfortunately, it seems that at least the 82801G
	 * doesn't reset the bit to zero, which stalls the corb engine.
	 * Manually reset the bit to zero before continuing.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);

	/* Enable CORB error reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
#endif
}

/****************************************************************************
 * void hdac_rirb_init(struct hdac_softc *)
 *
 * Initialize the rirb registers for operations but do not start it up yet.
 * The RIRB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_rirb_init(struct hdac_softc *sc)
{
	uint8_t rirbsize;
	uint64_t rirbpaddr;

	/* Setup the RIRB size. */
	switch (sc->rirb_size) {
	case 256:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		break;
	case 16:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
		break;
	case 2:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
		break;
	default:
		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);

	/* Setup the RIRB Address in the hdac */
	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));

	/* Setup the WP and RP */
	sc->rirb_rp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);

	/* Setup the interrupt threshold */
	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);

	/* Enable Overrun and response received reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
#else
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
#endif

	/*
	 * Make sure that the Host CPU cache doesn't contain any dirty
	 * cache lines that fall in the RIRB.  If I understood correctly, it
	 * should be sufficient to do this only once as the RIRB is purely
	 * read-only from now on.
	 */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
}

/****************************************************************************
 * void hdac_corb_start(hdac_softc *)
 *
 * Startup the corb DMA engine
 ****************************************************************************/
static void
hdac_corb_start(struct hdac_softc *sc)
{
	uint32_t corbctl;

	corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
	corbctl |= HDAC_CORBCTL_CORBRUN;
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
}

/****************************************************************************
 * void hdac_rirb_start(hdac_softc *)
 *
 * Startup the rirb DMA engine
 ****************************************************************************/
static void
hdac_rirb_start(struct hdac_softc *sc)
{
	uint32_t rirbctl;

	rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
	rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
}

static int
hdac_rirb_flush(struct hdac_softc *sc)
{
	struct hdac_rirb *rirb_base, *rirb;
	nid_t cad;
	uint32_t resp, resp_ex;
	uint8_t rirbwp;
	int ret;

	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	ret = 0;
	while (sc->rirb_rp != rirbwp) {
		sc->rirb_rp++;
		sc->rirb_rp %= sc->rirb_size;
		rirb = &rirb_base[sc->rirb_rp];
		resp = le32toh(rirb->response);
		resp_ex = le32toh(rirb->response_ex);
		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
			sc->unsolq[sc->unsolq_wp++] = resp;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
			sc->unsolq[sc->unsolq_wp++] = cad;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
		} else if (sc->codecs[cad].pending <= 0) {
			device_printf(sc->dev, "Unexpected unsolicited "
			    "response from address %d: %08x\n", cad, resp);
		} else {
			sc->codecs[cad].response = resp;
			sc->codecs[cad].pending--;
		}
		ret++;
	}

	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
	return (ret);
}

static int
hdac_unsolq_flush(struct hdac_softc *sc)
{
	device_t child;
	nid_t cad;
	uint32_t resp;
	int ret = 0;

	if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
		sc->unsolq_st = HDAC_UNSOLQ_BUSY;
		while (sc->unsolq_rp != sc->unsolq_wp) {
			resp = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			cad = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			if ((child = sc->codecs[cad].dev) != NULL)
				HDAC_UNSOL_INTR(child, resp);
			ret++;
		}
		sc->unsolq_st = HDAC_UNSOLQ_READY;
	}

	return (ret);
}

/****************************************************************************
 * uint32_t hdac_send_command
 *
 * Wrapper function that sends only one command to a given codec
 ****************************************************************************/
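/*
 * A CORB entry is a single 32-bit verb: the codec address (CAD) lives in
 * bits 31:28 (HDA_CMD_CAD_SHIFT), the node ID in bits 27:20, and the verb
 * plus payload in bits 19:0.  For example, GET_PARAMETER(VENDOR_ID) sent
 * to the root node of codec 0 encodes as 0x000f0000.
 */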
static uint32_t
hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
{
	int timeout;
	uint32_t *corb;

	hdac_lockassert(sc);
	verb &= ~HDA_CMD_CAD_MASK;
	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
	sc->codecs[cad].response = HDA_INVALID;

	sc->codecs[cad].pending++;
	sc->corb_wp++;
	sc->corb_wp %= sc->corb_size;
	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
	corb[sc->corb_wp] = htole32(verb);
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);

	timeout = 10000;
	do {
		if (hdac_rirb_flush(sc) == 0)
			DELAY(10);
	} while (sc->codecs[cad].pending != 0 && --timeout);

	if (sc->codecs[cad].pending != 0) {
		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
		    verb, cad);
		sc->codecs[cad].pending = 0;
	}

	if (sc->unsolq_rp != sc->unsolq_wp)
		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	return (sc->codecs[cad].response);
}

/****************************************************************************
 * Device Methods
 ****************************************************************************/

/****************************************************************************
 * int hdac_probe(device_t)
 *
 * Probe for the presence of an hdac.  If none is found, check for a generic
 * match using the subclass of the device.
 ****************************************************************************/
static int
hdac_probe(device_t dev)
{
	int i, result;
	uint32_t model;
	uint16_t class, subclass;
	char desc[64];

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	bzero(desc, sizeof(desc));
	result = ENXIO;
	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
			result = BUS_PROBE_DEFAULT;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			snprintf(desc, sizeof(desc), "%s (0x%04x)",
			    hdac_devices[i].desc, pci_get_device(dev));
			result = BUS_PROBE_GENERIC;
			break;
		}
	}
	if (result == ENXIO && class == PCIC_MULTIMEDIA &&
	    subclass == PCIS_MULTIMEDIA_HDA) {
		snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
		result = BUS_PROBE_GENERIC;
	}
	if (result != ENXIO) {
		strlcat(desc, " HDA Controller", sizeof(desc));
		device_set_desc_copy(dev, desc);
	}

	return (result);
}

static void
hdac_unsolq_task(void *context, int pending)
{
	struct hdac_softc *sc;

	sc = (struct hdac_softc *)context;

	hdac_lock(sc);
	hdac_unsolq_flush(sc);
	hdac_unlock(sc);
}

/****************************************************************************
 * int hdac_attach(device_t)
 *
 * Attach the device into the kernel.  Interrupts usually won't be enabled
 * when this function is called.  Setup everything that doesn't require
 * interrupts and defer probing of codecs until interrupts are enabled.
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
	/*
	 * Try to enable PCIe snoop to avoid messing around with
	 * uncacheable DMA attribute.  Since PCIe snoop register
	 * config is pretty much vendor specific, there are no
	 * general solutions on how to enable it, forcing us (even
	 * Microsoft) to enable uncacheable or write combined DMA
	 * by default.
	 *
	 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
	 */
	for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
		if (hdac_pcie_snoop[i].vendor != vendor)
			continue;
		sc->flags &= ~HDAC_F_DMA_NOCACHE;
		if (hdac_pcie_snoop[i].reg == 0x00)
			break;
		v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
		if ((v & hdac_pcie_snoop[i].enable) ==
		    hdac_pcie_snoop[i].enable)
			break;
		v &= hdac_pcie_snoop[i].mask;
		v |= hdac_pcie_snoop[i].enable;
		pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
		v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
		if ((v & hdac_pcie_snoop[i].enable) !=
		    hdac_pcie_snoop[i].enable) {
			HDA_BOOTVERBOSE(
				device_printf(dev,
				    "WARNING: Failed to enable PCIe "
				    "snoop!\n");
			);
#if defined(__i386__) || defined(__amd64__)
			sc->flags |= HDAC_F_DMA_NOCACHE;
#endif
		}
		break;
	}
#if defined(__i386__) || defined(__amd64__)
	}
#endif

	HDA_BOOTHVERBOSE(
		device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
		    (sc->flags & HDAC_F_DMA_NOCACHE) ?
		    "Uncacheable" : "PCIe snoop", vendor);
	);

	/* Allocate resources */
	result = hdac_mem_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;
	result = hdac_irq_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Get Capabilities */
	result = hdac_get_capabilities(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Allocate CORB, RIRB, POS and BDLs dma memory */
	result = hdac_dma_alloc(sc, &sc->corb_dma,
	    sc->corb_size * sizeof(uint32_t));
	if (result != 0)
		goto hdac_attach_fail;
	result = hdac_dma_alloc(sc, &sc->rirb_dma,
	    sc->rirb_size * sizeof(struct hdac_rirb));
	if (result != 0)
		goto hdac_attach_fail;
	sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
	    M_HDAC, M_ZERO | M_WAITOK);
	for (i = 0; i < sc->num_ss; i++) {
		result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
		    sizeof(struct hdac_bdle) * HDA_BDL_MAX);
		if (result != 0)
			goto hdac_attach_fail;
	}
	if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
		if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
			HDA_BOOTVERBOSE(
				device_printf(dev, "Failed to "
				    "allocate DMA pos buffer "
				    "(non-fatal)\n");
			);
		} else {
			uint64_t addr = sc->pos_dma.dma_paddr;

			HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
			HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
			    (addr & HDAC_DPLBASE_DPLBASE_MASK) |
			    HDAC_DPLBASE_DPLBASE_DMAPBE);
		}
	}

	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    HDA_BUFSZ_MAX,			/* maxsize */
	    1,					/* nsegments */
	    HDA_BUFSZ_MAX,			/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &sc->chan_dmat);			/* dmat */
	if (result != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_attach_fail;
	}

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	/* Defer the remainder of initialization until interrupts are enabled. */
	sc->intrhook.ich_func = hdac_attach2;
	sc->intrhook.ich_arg = (void *)sc;
	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
		sc->intrhook.ich_func = NULL;
		hdac_attach2((void *)sc);
	}

	return (0);

hdac_attach_fail:
	hdac_irq_free(sc);
	if (sc->streams != NULL)
		for (i = 0; i < sc->num_ss; i++)
			hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	hdac_mem_free(sc);
	snd_mtxfree(sc->lock);

	return (ENXIO);
}

static int
sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t *devlist;
	device_t dev;
	int devcount, i, err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	val = 0;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL || val == 0)
		return (err);

	/* XXX: Temporary.  For debugging. */
	if (val == 100) {
		hdac_suspend(dev);
		return (0);
	} else if (val == 101) {
		hdac_resume(dev);
		return (0);
	}

	if ((err = device_get_children(dev, &devlist, &devcount)) != 0)
		return (err);
	hdac_lock(sc);
	for (i = 0; i < devcount; i++)
		HDAC_PINDUMP(devlist[i]);
	hdac_unlock(sc);
	free(devlist, M_TEMP);
	return (0);
}

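/*
 * The 16-bit stream format word used below follows the HDA stream format
 * encoding: bit 14 selects the 44.1 kHz base rate (otherwise 48 kHz),
 * bits 13:11 and 10:8 are the rate multiplier and divisor, bits 6:4 the
 * bits-per-sample code and bits 3:0 the channel count minus one.
 * hdac_mdata_rate() estimates memory-side bandwidth in bits per second
 * using container sizes, while hdac_bdata_rate() estimates HDA link
 * bandwidth, rounding input samples up to whole bytes plus per-sample
 * overhead.
 */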
static int
hdac_mdata_rate(uint16_t fmt)
{
	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
	int rate, bits;

	if (fmt & (1 << 14))
		rate = 44100;
	else
		rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	rate /= ((fmt >> 8) & 0x07) + 1;
	bits = mbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	return (rate * bits);
}

static int
hdac_bdata_rate(uint16_t fmt, int output)
{
	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
	int rate, bits;

	rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	bits = bbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	if (!output)
		bits = ((bits + 7) & ~0x07) + 10;
	return (rate * bits);
}

static void
hdac_poll_reinit(struct hdac_softc *sc)
{
	int i, pollticks, min = 1000000;
	struct hdac_stream *s;

	if (sc->polling == 0)
		return;
	if (sc->unsol_registered > 0)
		min = hz / 2;
	for (i = 0; i < sc->num_ss; i++) {
		s = &sc->streams[i];
		if (s->running == 0)
			continue;
		pollticks = ((uint64_t)hz * s->blksz) /
		    (hdac_mdata_rate(s->format) / 8);
		pollticks >>= 1;
		if (pollticks > hz)
			pollticks = hz;
		if (pollticks < 1)
			pollticks = 1;
		if (min > pollticks)
			min = pollticks;
	}
	sc->poll_ival = min;
	if (min == 1000000)
		callout_stop(&sc->poll_callout);
	else
		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
}

static int
sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t ctl;
	int err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	hdac_lock(sc);
	val = sc->polling;
	hdac_unlock(sc);
	err = sysctl_handle_int(oidp, &val, 0, req);

	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < 0 || val > 1)
		return (EINVAL);

	hdac_lock(sc);
	if (val != sc->polling) {
		if (val == 0) {
			callout_stop(&sc->poll_callout);
			hdac_unlock(sc);
			callout_drain(&sc->poll_callout);
			hdac_lock(sc);
			sc->polling = 0;
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl |= HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
		} else {
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl &= ~HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
			sc->polling = 1;
			hdac_poll_reinit(sc);
		}
	}
	hdac_unlock(sc);

	return (err);
}

static void
hdac_attach2(void *arg)
{
	struct hdac_softc *sc;
	device_t child;
	uint32_t vendorid, revisionid;
	int i;
	uint16_t statests;

	sc = (struct hdac_softc *)arg;

	hdac_lock(sc);

	/* Remove ourselves from the config hooks */
	if (sc->intrhook.ich_func != NULL) {
		config_intrhook_disestablish(&sc->intrhook);
		sc->intrhook.ich_func = NULL;
	}

"Starting CORB Engine...\n"); 1530 ); 1531 hdac_corb_start(sc); 1532 HDA_BOOTHVERBOSE( 1533 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1534 ); 1535 hdac_rirb_start(sc); 1536 1537 /* 1538 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1539 * (status change) interrupts. The documentation says that we 1540 * should not make any assumptions about the state of this register 1541 * and set it explicitly. 1542 * NB: this needs to be done before the interrupt is enabled as 1543 * the handler does not expect this interrupt source. 1544 */ 1545 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1546 1547 /* 1548 * Read and clear post-reset SDI wake status. 1549 * Each set bit corresponds to a codec that came out of reset. 1550 */ 1551 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1552 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1553 1554 HDA_BOOTHVERBOSE( 1555 device_printf(sc->dev, 1556 "Enabling controller interrupt...\n"); 1557 ); 1558 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1559 HDAC_GCTL_UNSOL); 1560 if (sc->polling == 0) { 1561 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1562 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1563 } 1564 DELAY(1000); 1565 1566 HDA_BOOTHVERBOSE( 1567 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1568 ); 1569 hdac_unlock(sc); 1570 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1571 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1572 HDA_BOOTHVERBOSE( 1573 device_printf(sc->dev, 1574 "Found CODEC at address %d\n", i); 1575 ); 1576 hdac_lock(sc); 1577 vendorid = hdac_send_command(sc, i, 1578 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1579 revisionid = hdac_send_command(sc, i, 1580 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1581 hdac_unlock(sc); 1582 if (vendorid == HDA_INVALID && 1583 revisionid == HDA_INVALID) { 1584 device_printf(sc->dev, 1585 "CODEC at address %d not responding!\n", i); 1586 continue; 1587 } 1588 sc->codecs[i].vendor_id = 1589 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1590 sc->codecs[i].device_id = 1591 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1592 sc->codecs[i].revision_id = 1593 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1594 sc->codecs[i].stepping_id = 1595 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1596 child = device_add_child(sc->dev, "hdacc", -1); 1597 if (child == NULL) { 1598 device_printf(sc->dev, 1599 "Failed to add CODEC device\n"); 1600 continue; 1601 } 1602 device_set_ivars(child, (void *)(intptr_t)i); 1603 sc->codecs[i].dev = child; 1604 } 1605 } 1606 bus_generic_attach(sc->dev); 1607 1608 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1609 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1610 "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1611 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1612 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1613 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1614 "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, 1615 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1616 } 1617 1618 /**************************************************************************** 1619 * int hdac_suspend(device_t) 1620 * 1621 * Suspend and power down HDA bus and codecs. 
/****************************************************************************
 * int hdac_suspend(device_t)
 *
 * Suspend and power down HDA bus and codecs.
 ****************************************************************************/
static int
hdac_suspend(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend...\n");
	);
	bus_generic_suspend(dev);

	hdac_lock(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	callout_stop(&sc->poll_callout);
	hdac_reset(sc, false);
	hdac_unlock(sc);
	callout_drain(&sc->poll_callout);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend done\n");
	);
	return (0);
}

/****************************************************************************
 * int hdac_resume(device_t)
 *
 * Power up and restore HDA bus and codec state.
 ****************************************************************************/
static int
hdac_resume(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int error;

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume...\n");
	);
	hdac_lock(sc);

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) events.  The documentation says that we should
	 * not make any assumptions about the state of this register and
	 * set it explicitly.
	 * Also, clear HDAC_STATESTS.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	DELAY(1000);
	hdac_poll_reinit(sc);
	hdac_unlock(sc);

	error = bus_generic_resume(dev);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume done\n");
	);
	return (error);
}

/****************************************************************************
 * int hdac_detach(device_t)
 *
 * Detach and free up resources utilized by the hdac device.
 ****************************************************************************/
static int
hdac_detach(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	device_t *devlist;
	int cad, i, devcount, error;

	if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
		return (error);
	for (i = 0; i < devcount; i++) {
		cad = (intptr_t)device_get_ivars(devlist[i]);
		if ((error = device_delete_child(dev, devlist[i])) != 0) {
			free(devlist, M_TEMP);
			return (error);
		}
		sc->codecs[cad].dev = NULL;
	}
	free(devlist, M_TEMP);

	hdac_lock(sc);
	hdac_reset(sc, false);
	hdac_unlock(sc);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	hdac_irq_free(sc);

	for (i = 0; i < sc->num_ss; i++)
		hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->pos_dma);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	if (sc->chan_dmat != NULL) {
		bus_dma_tag_destroy(sc->chan_dmat);
		sc->chan_dmat = NULL;
	}
	hdac_mem_free(sc);
	snd_mtxfree(sc->lock);
	return (0);
}

static bus_dma_tag_t
hdac_get_dma_tag(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->chan_dmat);
}

static int
hdac_print_child(device_t dev, device_t child)
{
	int retval;

	retval = bus_print_child_header(dev, child);
	retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

static int
hdac_child_location_str(device_t dev, device_t child, char *buf, size_t buflen)
{

	snprintf(buf, buflen, "cad=%d", (int)(intptr_t)device_get_ivars(child));
	return (0);
}

static int
hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);

	snprintf(buf, buflen,
	    "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
	    sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
	    sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
	return (0);
}

static int
hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);

	switch (which) {
	case HDA_IVAR_CODEC_ID:
		*result = cad;
		break;
	case HDA_IVAR_VENDOR_ID:
		*result = sc->codecs[cad].vendor_id;
		break;
	case HDA_IVAR_DEVICE_ID:
		*result = sc->codecs[cad].device_id;
		break;
	case HDA_IVAR_REVISION_ID:
		*result = sc->codecs[cad].revision_id;
		break;
	case HDA_IVAR_STEPPING_ID:
		*result = sc->codecs[cad].stepping_id;
		break;
	case HDA_IVAR_SUBVENDOR_ID:
		*result = pci_get_subvendor(dev);
		break;
	case HDA_IVAR_SUBDEVICE_ID:
		*result = pci_get_subdevice(dev);
		break;
	case HDA_IVAR_DMA_NOCACHE:
		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
		break;
	case HDA_IVAR_STRIPES_MASK:
		*result = (1 << (1 << sc->num_sdo)) - 1;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}

static struct mtx *
hdac_get_mtx(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->lock);
}

static uint32_t
hdac_codec_command(device_t dev, device_t child, uint32_t verb)
{

	return (hdac_send_command(device_get_softc(dev),
	    (intptr_t)device_get_ivars(child), verb));
}

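/*
 * Streams are tracked by controller stream descriptor index: input stream
 * descriptors come first, then output, then bidirectional.  The stream tag
 * numbers programmed into the hardware are a separate, per-direction
 * namespace: hdac_stream_alloc() hands out tags counting up from 1 for
 * input and output descriptors and counting down from 15 for bidirectional
 * descriptors.
 */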
static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
	int i, ss;

	ss = -1;
	/* Allocate ISS/OSS first. */
	if (dir == 0) {
		for (i = 0; i < sc->num_iss; i++) {
			if (sc->streams[i].stream == stream) {
				ss = i;
				break;
			}
		}
	} else {
		for (i = 0; i < sc->num_oss; i++) {
			if (sc->streams[i + sc->num_iss].stream == stream) {
				ss = i + sc->num_iss;
				break;
			}
		}
	}
	/* Fallback to BSS. */
	if (ss == -1) {
		for (i = 0; i < sc->num_bss; i++) {
			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
			    == stream) {
				ss = i + sc->num_iss + sc->num_oss;
				break;
			}
		}
	}
	return (ss);
}
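
/*
 * Allocate a free stream descriptor and a bus stream number for a child
 * codec channel.  Link bandwidth is accounted per SDO link for playback and
 * per codec SDI link for recording; the request fails (returns 0) if no
 * descriptor is free or if it would oversubscribe the link.  Input and
 * output descriptors get stream numbers counted up from 1 within their
 * direction, while bidirectional descriptors are numbered down from 15.
 * On success the assigned stream number is returned and, if requested,
 * *dmapos is pointed at this descriptor's entry in the DMA position buffer.
 */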
static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if found nothing. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/* Allocate stream number */
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int ss;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
	if (dir == 1)
		sc->sdo_bw_used -= sc->streams[ss].bw;
	else
		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
	sc->streams[ss].stream = 0;
	sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}
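
/*
 * Stop a running stream: clear the RUN bit and the per-stream error/IOC
 * interrupt enables in its descriptor and mask its bit in INTCTL.  The
 * descriptor and its bandwidth remain allocated until hdac_stream_free().
 */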
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}
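
/*
 * Method table: the standard device and bus interfaces plus the
 * controller-specific kobj methods (hdac_get_mtx, hdac_codec_command,
 * hdac_stream_*, hdac_unsol_*) through which the child codec drivers
 * reach the controller.
 */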
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location_str, hdac_child_location_str),
	DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

static devclass_t hdac_devclass;

DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);
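
/*
 * Illustrative sketch only (not part of the driver): a child codec driver
 * would reach the stream methods above through the generated hdac interface
 * macros, roughly as follows.  The HDAC_STREAM_* macro names, the variable
 * names and the call sites are assumptions made for illustration, not code
 * taken from the in-tree children.
 *
 *	uint32_t *dmapos;
 *	int tag;
 *
 *	tag = HDAC_STREAM_ALLOC(device_get_parent(dev), dev,
 *	    1, fmt, 0, &dmapos);	(dir 1 = playback, fmt = stream format)
 *	if (tag != 0) {
 *		HDAC_STREAM_START(device_get_parent(dev), dev, 1, tag,
 *		    buf_paddr, blksz, blkcnt);
 *		...
 *		HDAC_STREAM_STOP(device_get_parent(dev), dev, 1, tag);
 *		HDAC_STREAM_FREE(device_get_parent(dev), dev, 1, tag);
 *	}
 */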