/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel High Definition Audio (Controller) driver for FreeBSD.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include <dev/sound/pcm/sound.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <dev/sound/pci/hda/hdac_private.h>
#include <dev/sound/pci/hda/hdac_reg.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>

#define HDA_DRV_TEST_REV	"20120126_0002"

SND_DECLARE_FILE("$FreeBSD$");

#define hdac_lock(sc)		snd_mtxlock((sc)->lock)
#define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
#define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)

#define HDAC_QUIRK_64BIT	(1 << 0)
#define HDAC_QUIRK_DMAPOS	(1 << 1)
#define HDAC_QUIRK_MSI		(1 << 2)

static const struct {
	const char *key;
	uint32_t value;
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");

static const struct {
	uint32_t model;
	const char *desc;
	char quirks_on;
	char quirks_off;
} hdac_devices[] = {
	{ HDA_INTEL_OAK,	"Intel Oaktrail",	0, 0 },
	{ HDA_INTEL_CMLKLP,	"Intel Comet Lake-LP",	0, 0 },
	{ HDA_INTEL_CMLKH,	"Intel Comet Lake-H",	0, 0 },
	{ HDA_INTEL_BAY,	"Intel BayTrail",	0, 0 },
	{ HDA_INTEL_HSW1,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW2,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW3,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_BDW1,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_BDW2,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_BXTNT,	"Intel Broxton-T",	0, 0 },
	{ HDA_INTEL_CPT,	"Intel Cougar Point",	0, 0 },
	{ HDA_INTEL_PATSBURG,	"Intel Patsburg",	0, 0 },
	{ HDA_INTEL_PPT1,	"Intel Panther Point",	0, 0 },
	{ HDA_INTEL_BR,		"Intel Braswell",	0, 0 },
	{ HDA_INTEL_LPT1,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_LPT2,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_WCPT,	"Intel Wildcat Point",	0, 0 },
	{ HDA_INTEL_WELLS1,	"Intel Wellsburg",	0, 0 },
	{ HDA_INTEL_WELLS2,	"Intel Wellsburg",	0, 0 },
	{ HDA_INTEL_LPTLP1,	"Intel Lynx Point-LP",	0, 0 },
	{ HDA_INTEL_LPTLP2,	"Intel Lynx Point-LP",	0, 0 },
	{ HDA_INTEL_SRPTLP,	"Intel Sunrise Point-LP",	0, 0 },
	{ HDA_INTEL_KBLKLP,	"Intel Kaby Lake-LP",	0, 0 },
	{ HDA_INTEL_SRPT,	"Intel Sunrise Point",	0, 0 },
	{ HDA_INTEL_KBLK,	"Intel Kaby Lake",	0, 0 },
	{ HDA_INTEL_KBLKH,	"Intel Kaby Lake-H",	0, 0 },
	{ HDA_INTEL_CFLK,	"Intel Coffee Lake",	0, 0 },
	{ HDA_INTEL_CMLKS,	"Intel Comet Lake-S",	0, 0 },
	{ HDA_INTEL_CNLK,	"Intel Cannon Lake",	0, 0 },
	{ HDA_INTEL_ICLK,	"Intel Ice Lake",	0, 0 },
	{ HDA_INTEL_CMLKLP,	"Intel Comet Lake-LP",	0, 0 },
	{ HDA_INTEL_CMLKH,	"Intel Comet Lake-H",	0, 0 },
	{ HDA_INTEL_TGLK,	"Intel Tiger Lake",	0, 0 },
	{ HDA_INTEL_GMLK,	"Intel Gemini Lake",	0, 0 },
	{ HDA_INTEL_ALLK,	"Intel Alder Lake",	0, 0 },
	{ HDA_INTEL_82801F,	"Intel 82801F",		0, 0 },
	{ HDA_INTEL_63XXESB,	"Intel 631x/632xESB",	0, 0 },
	{ HDA_INTEL_82801G,	"Intel 82801G",		0, 0 },
	{ HDA_INTEL_82801H,	"Intel 82801H",		0, 0 },
	{ HDA_INTEL_82801I,	"Intel 82801I",		0, 0 },
	{ HDA_INTEL_JLK,	"Intel Jasper Lake",	0, 0 },
	{ HDA_INTEL_82801JI,	"Intel 82801JI",	0, 0 },
	{ HDA_INTEL_82801JD,	"Intel 82801JD",	0, 0 },
	{ HDA_INTEL_PCH,	"Intel Ibex Peak",	0, 0 },
	{ HDA_INTEL_PCH2,	"Intel Ibex Peak",	0, 0 },
	{ HDA_INTEL_ELLK,	"Intel Elkhart Lake",	0, 0 },
	{ HDA_INTEL_JLK2,	"Intel Jasper Lake",	0, 0 },
	{ HDA_INTEL_BXTNP,	"Intel Broxton-P",	0, 0 },
	{ HDA_INTEL_SCH,	"Intel SCH",		0, 0 },
	{ HDA_NVIDIA_MCP51,	"NVIDIA MCP51",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP55,	"NVIDIA MCP55",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP61_1,	"NVIDIA MCP61",		0, 0 },
	{ HDA_NVIDIA_MCP61_2,	"NVIDIA MCP61",		0, 0 },
	{ HDA_NVIDIA_MCP65_1,	"NVIDIA MCP65",		0, 0 },
	{ HDA_NVIDIA_MCP65_2,	"NVIDIA MCP65",		0, 0 },
	{ HDA_NVIDIA_MCP67_1,	"NVIDIA MCP67",		0, 0 },
	{ HDA_NVIDIA_MCP67_2,	"NVIDIA MCP67",		0, 0 },
	{ HDA_NVIDIA_MCP73_1,	"NVIDIA MCP73",		0, 0 },
	{ HDA_NVIDIA_MCP73_2,	"NVIDIA MCP73",		0, 0 },
	{ HDA_NVIDIA_MCP78_1,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_2,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_3,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_4,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP79_1,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_2,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_3,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_4,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP89_1,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_2,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_3,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_4,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_0BE2,	"NVIDIA (0x0be2)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE3,	"NVIDIA (0x0be3)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE4,	"NVIDIA (0x0be4)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT100,	"NVIDIA GT100",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT104,	"NVIDIA GT104",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT106,	"NVIDIA GT106",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT108,	"NVIDIA GT108",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT116,	"NVIDIA GT116",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF119,	"NVIDIA GF119",		0, 0 },
	{ HDA_NVIDIA_GF110_1,	"NVIDIA GF110",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF110_2,	"NVIDIA GF110",		0, HDAC_QUIRK_MSI },
	{ HDA_ATI_SB450,	"ATI SB450",		0, 0 },
	{ HDA_ATI_SB600,	"ATI SB600",		0, 0 },
	{ HDA_ATI_RS600,	"ATI RS600",		0, 0 },
	{ HDA_ATI_RS690,	"ATI RS690",		0, 0 },
	{ HDA_ATI_RS780,	"ATI RS780",		0, 0 },
	{ HDA_ATI_R600,		"ATI R600",		0, 0 },
	{ HDA_ATI_RV610,	"ATI RV610",		0, 0 },
	{ HDA_ATI_RV620,	"ATI RV620",		0, 0 },
	{ HDA_ATI_RV630,	"ATI RV630",		0, 0 },
	{ HDA_ATI_RV635,	"ATI RV635",		0, 0 },
	{ HDA_ATI_RV710,	"ATI RV710",		0, 0 },
	{ HDA_ATI_RV730,	"ATI RV730",		0, 0 },
	{ HDA_ATI_RV740,	"ATI RV740",		0, 0 },
	{ HDA_ATI_RV770,	"ATI RV770",		0, 0 },
	{ HDA_ATI_RV810,	"ATI RV810",		0, 0 },
	{ HDA_ATI_RV830,	"ATI RV830",		0, 0 },
	{ HDA_ATI_RV840,	"ATI RV840",		0, 0 },
	{ HDA_ATI_RV870,	"ATI RV870",		0, 0 },
	{ HDA_ATI_RV910,	"ATI RV910",		0, 0 },
	{ HDA_ATI_RV930,	"ATI RV930",		0, 0 },
	{ HDA_ATI_RV940,	"ATI RV940",		0, 0 },
	{ HDA_ATI_RV970,	"ATI RV970",		0, 0 },
	{ HDA_ATI_R1000,	"ATI R1000",		0, 0 },
	{ HDA_AMD_X370,		"AMD X370",		0, 0 },
	{ HDA_AMD_X570,		"AMD X570",		0, 0 },
	{ HDA_AMD_STONEY,	"AMD Stoney",		0, 0 },
	{ HDA_AMD_RAVEN,	"AMD Raven",		0, 0 },
	{ HDA_AMD_HUDSON2,	"AMD Hudson-2",		0, 0 },
	{ HDA_RDC_M3010,	"RDC M3010",		0, 0 },
	{ HDA_VIA_VT82XX,	"VIA VT8251/8237A",	0, 0 },
	{ HDA_SIS_966,		"SiS 966/968",		0, 0 },
	{ HDA_ULI_M5461,	"ULI M5461",		0, 0 },
	/* Unknown */
	{ HDA_INTEL_ALL,	"Intel",		0, 0 },
	{ HDA_NVIDIA_ALL,	"NVIDIA",		0, 0 },
	{ HDA_ATI_ALL,		"ATI",			0, 0 },
	{ HDA_AMD_ALL,		"AMD",			0, 0 },
	{ HDA_CREATIVE_ALL,	"Creative",		0, 0 },
	{ HDA_VIA_ALL,		"VIA",			0, 0 },
	{ HDA_SIS_ALL,		"SiS",			0, 0 },
	{ HDA_ULI_ALL,		"ULI",			0, 0 },
};

static const struct {
	uint16_t vendor;
	uint8_t reg;
	uint8_t mask;
	uint8_t enable;
} hdac_pcie_snoop[] = {
	{ INTEL_VENDORID,	0x00, 0x00, 0x00 },
	{ ATI_VENDORID,		0x42, 0xf8, 0x02 },
	{ AMD_VENDORID,		0x42, 0xf8, 0x02 },
	{ NVIDIA_VENDORID,	0x4e, 0xf0, 0x0f },
};

/****************************************************************************
 * Function prototypes
 ****************************************************************************/
static void	hdac_intr_handler(void *);
static int	hdac_reset(struct hdac_softc *, bool);
static int	hdac_get_capabilities(struct hdac_softc *);
static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
static int	hdac_dma_alloc(struct hdac_softc *,
		    struct hdac_dma *, bus_size_t);
static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
static int	hdac_mem_alloc(struct hdac_softc *);
static void	hdac_mem_free(struct hdac_softc *);
static int	hdac_irq_alloc(struct hdac_softc *);
static void	hdac_irq_free(struct hdac_softc *);
static void	hdac_corb_init(struct hdac_softc *);
static void	hdac_rirb_init(struct hdac_softc *);
static void	hdac_corb_start(struct hdac_softc *);
static void	hdac_rirb_start(struct hdac_softc *);

static void	hdac_attach2(void *);

static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);

static int	hdac_probe(device_t);
static int	hdac_attach(device_t);
static int	hdac_detach(device_t);
static int	hdac_suspend(device_t);
static int	hdac_resume(device_t);

static int	hdac_rirb_flush(struct hdac_softc *sc);
static int	hdac_unsolq_flush(struct hdac_softc *sc);

/* This function is surely going to make its way into the upper level someday. */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}

static void
hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
{
	device_t dev;
	uint8_t rirbsts;
	int i;

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		/*
		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
		 * we will need to check and clear HDAC_STATESTS.
		 * That event is used to report codec status changes such as
		 * a reset or a wake-up event.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
		 * will need to check and clear HDAC_CORBSTS_CMEI in
		 * HDAC_CORBSTS.
		 * That event is used to report CORB memory errors.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
		 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
		 * HDAC_RIRBSTS.
		 * That event is used to report response FIFO overruns.
		 */

		/* Get as many responses as we can. */
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}
}

/****************************************************************************
 * void hdac_intr_handler(void *)
 *
 * Interrupt handler.  Processes interrupts received from the hdac.
 ****************************************************************************/
static void
hdac_intr_handler(void *context)
{
	struct hdac_softc *sc;
	uint32_t intsts;

	sc = (struct hdac_softc *)context;

	/*
	 * Loop until HDAC_INTSTS_GIS gets clear.
	 * It is plausible that the hardware interrupts the host only when GIS
	 * goes from zero to one.  GIS is formed by OR-ing multiple hardware
	 * statuses, so it's possible that a previously cleared status gets set
	 * again while another status has not been cleared yet.  Thus, there
	 * will be no new interrupt as GIS always stayed set.  If we don't
	 * re-examine GIS then we can leave it set and never get an interrupt
	 * again.
	 */
	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	while ((intsts & HDAC_INTSTS_GIS) != 0) {
		hdac_lock(sc);
		hdac_one_intr(sc, intsts);
		hdac_unlock(sc);
		intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	}
}

static void
hdac_poll_callback(void *arg)
{
	struct hdac_softc *sc = arg;

	if (sc == NULL)
		return;

	hdac_lock(sc);
	if (sc->polling == 0) {
		hdac_unlock(sc);
		return;
	}
	callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
	hdac_unlock(sc);

	hdac_intr_handler(sc);
}

/****************************************************************************
 * int hdac_reset(hdac_softc *, bool)
 *
 * Reset the hdac to a quiescent and known state.
 ****************************************************************************/
static int
hdac_reset(struct hdac_softc *sc, bool wakeup)
{
	uint32_t gctl;
	int count, i;

	/*
	 * Stop all Streams DMA engines.
	 */
	for (i = 0; i < sc->num_iss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_oss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_bss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);

	/*
	 * Stop Control DMA engines.
	 */
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);

	/*
	 * Reset DMA position buffer.
	 */
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);

	/*
	 * Reset the controller.  The reset must remain asserted for
	 * a minimum of 100us.
	 */
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (!(gctl & HDAC_GCTL_CRST))
			break;
		DELAY(10);
	} while (--count);
	if (gctl & HDAC_GCTL_CRST) {
		device_printf(sc->dev, "Unable to put hdac in reset\n");
		return (ENXIO);
	}

	/* If wakeup is not requested, leave the controller in reset state. */
	if (!wakeup)
		return (0);

	DELAY(100);
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (gctl & HDAC_GCTL_CRST)
			break;
		DELAY(10);
	} while (--count);
	if (!(gctl & HDAC_GCTL_CRST)) {
		device_printf(sc->dev, "Device stuck in reset\n");
		return (ENXIO);
	}

	/*
	 * Wait for codecs to finish their own reset sequence.  The delay here
	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
	 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established.  We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a DMA region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
 ****************************************************************************/
static int
hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
{
	bus_size_t roundsz;
	int result;

	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
	bzero(dma, sizeof(*dma));

	/*
	 * Create a DMA tag
	 */
	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    roundsz,				/* maxsize */
	    1,					/* nsegments */
	    roundsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &dma->dma_tag);			/* dmat */
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	/*
	 * Allocate DMA memory
	 */
	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
	     BUS_DMA_COHERENT),
	    &dma->dma_map);
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	dma->dma_size = roundsz;

	/*
	 * Map the memory
	 */
	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
	if (result != 0 || dma->dma_paddr == 0) {
		if (result == 0)
			result = ENOMEM;
		device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
	);

	return (0);

hdac_dma_alloc_fail:
	hdac_dma_free(sc, dma);

	return (result);
}

/****************************************************************************
 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
 *
 * Free a struct hdac_dma that has been previously allocated via the
 * hdac_dma_alloc function.
 ****************************************************************************/
static void
hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
{
	if (dma->dma_paddr != 0) {
		/* Flush caches */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		dma->dma_paddr = 0;
	}
	if (dma->dma_vaddr != NULL) {
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_vaddr = NULL;
	}
	if (dma->dma_tag != NULL) {
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
	dma->dma_size = 0;
}

/****************************************************************************
 * int hdac_mem_alloc(struct hdac_softc *)
 *
 * Allocate all the bus resources necessary to speak with the physical
 * controller.
 ****************************************************************************/
static int
hdac_mem_alloc(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	mem->mem_rid = PCIR_BAR(0);
	mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &mem->mem_rid, RF_ACTIVE);
	if (mem->mem_res == NULL) {
		device_printf(sc->dev,
		    "%s: Unable to allocate memory resource\n", __func__);
		return (ENOMEM);
	}
	mem->mem_tag = rman_get_bustag(mem->mem_res);
	mem->mem_handle = rman_get_bushandle(mem->mem_res);

	return (0);
}

/****************************************************************************
 * void hdac_mem_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_mem_alloc.
 ****************************************************************************/
static void
hdac_mem_free(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	if (mem->mem_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
		    mem->mem_res);
	mem->mem_res = NULL;
}

/****************************************************************************
 * int hdac_irq_alloc(struct hdac_softc *)
 *
 * Allocate and setup the resources necessary for interrupt handling.
 ****************************************************************************/
static int
hdac_irq_alloc(struct hdac_softc *sc)
{
	struct hdac_irq *irq;
	int result;

	irq = &sc->irq;
	irq->irq_rid = 0x0;

	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
	    (result = pci_msi_count(sc->dev)) == 1 &&
	    pci_alloc_msi(sc->dev, &result) == 0)
		irq->irq_rid = 0x1;

	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (irq->irq_res == NULL) {
		device_printf(sc->dev, "%s: Unable to allocate irq\n",
		    __func__);
		goto hdac_irq_alloc_fail;
	}
	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
	if (result != 0) {
		device_printf(sc->dev,
		    "%s: Unable to setup interrupt handler (%d)\n",
		    __func__, result);
		goto hdac_irq_alloc_fail;
	}

	return (0);

hdac_irq_alloc_fail:
	hdac_irq_free(sc);

	return (ENXIO);
}

/****************************************************************************
 * void hdac_irq_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_irq_alloc.
 ****************************************************************************/
static void
hdac_irq_free(struct hdac_softc *sc)
{
	struct hdac_irq *irq;

	irq = &sc->irq;
	if (irq->irq_res != NULL && irq->irq_handle != NULL)
		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
	if (irq->irq_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
		    irq->irq_res);
	if (irq->irq_rid == 0x1)
		pci_release_msi(sc->dev);
	irq->irq_handle = NULL;
	irq->irq_res = NULL;
	irq->irq_rid = 0x0;
}

/****************************************************************************
 * void hdac_corb_init(struct hdac_softc *)
 *
 * Initialize the corb registers for operations but do not start it up yet.
 * The CORB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_corb_init(struct hdac_softc *sc)
{
	uint8_t corbsize;
	uint64_t corbpaddr;

	/* Setup the CORB size. */
	switch (sc->corb_size) {
	case 256:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
		break;
	case 16:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
		break;
	case 2:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
		break;
	default:
		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);

	/* Setup the CORB Address in the hdac */
	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));

	/* Set the WP and RP */
	sc->corb_wp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
	/*
	 * The HDA specification indicates that the CORBRPRST bit will always
	 * read as zero.  Unfortunately, it seems that at least the 82801G
	 * doesn't reset the bit to zero, which stalls the corb engine.
	 * Manually reset the bit to zero before continuing.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);

	/* Enable CORB error reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
#endif
}

/****************************************************************************
 * void hdac_rirb_init(struct hdac_softc *)
 *
 * Initialize the rirb registers for operations but do not start it up yet.
 * The RIRB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_rirb_init(struct hdac_softc *sc)
{
	uint8_t rirbsize;
	uint64_t rirbpaddr;

	/* Setup the RIRB size. */
	switch (sc->rirb_size) {
	case 256:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		break;
	case 16:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
		break;
	case 2:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
		break;
	default:
		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);

	/* Setup the RIRB Address in the hdac */
	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));

	/* Setup the WP and RP */
	sc->rirb_rp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);

	/* Setup the interrupt threshold */
	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);

	/* Enable Overrun and response received reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
#else
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
#endif

	/*
	 * Make sure that the Host CPU cache doesn't contain any dirty
	 * cache lines that fall in the rirb.  If I understood correctly, it
	 * should be sufficient to do this only once as the rirb is purely
	 * read-only from now on.
	 */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
}

/****************************************************************************
 * void hdac_corb_start(hdac_softc *)
 *
 * Start up the CORB DMA engine.
 ****************************************************************************/
static void
hdac_corb_start(struct hdac_softc *sc)
{
	uint32_t corbctl;

	corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
	corbctl |= HDAC_CORBCTL_CORBRUN;
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
}

/****************************************************************************
 * void hdac_rirb_start(hdac_softc *)
 *
 * Start up the RIRB DMA engine.
 ****************************************************************************/
static void
hdac_rirb_start(struct hdac_softc *sc)
{
	uint32_t rirbctl;

	rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
	rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
}

static int
hdac_rirb_flush(struct hdac_softc *sc)
{
	struct hdac_rirb *rirb_base, *rirb;
	nid_t cad;
	uint32_t resp, resp_ex;
	uint8_t rirbwp;
	int ret;

	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	ret = 0;
	while (sc->rirb_rp != rirbwp) {
		sc->rirb_rp++;
		sc->rirb_rp %= sc->rirb_size;
		rirb = &rirb_base[sc->rirb_rp];
		resp = le32toh(rirb->response);
		resp_ex = le32toh(rirb->response_ex);
		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
			sc->unsolq[sc->unsolq_wp++] = resp;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
			sc->unsolq[sc->unsolq_wp++] = cad;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
		} else if (sc->codecs[cad].pending <= 0) {
			device_printf(sc->dev, "Unexpected unsolicited "
			    "response from address %d: %08x\n", cad, resp);
		} else {
			sc->codecs[cad].response = resp;
			sc->codecs[cad].pending--;
		}
		ret++;
	}

	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
	return (ret);
}

static int
hdac_unsolq_flush(struct hdac_softc *sc)
{
	device_t child;
	nid_t cad;
	uint32_t resp;
	int ret = 0;

	if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
		sc->unsolq_st = HDAC_UNSOLQ_BUSY;
		while (sc->unsolq_rp != sc->unsolq_wp) {
			resp = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			cad = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			if ((child = sc->codecs[cad].dev) != NULL &&
			    device_is_attached(child))
				HDAC_UNSOL_INTR(child, resp);
			ret++;
		}
		sc->unsolq_st = HDAC_UNSOLQ_READY;
	}

	return (ret);
}

/****************************************************************************
 * uint32_t hdac_send_command
 *
 * Wrapper function that sends only one command to a given codec.
 ****************************************************************************/
static uint32_t
hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
{
	int timeout;
	uint32_t *corb;

	hdac_lockassert(sc);
	verb &= ~HDA_CMD_CAD_MASK;
	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
	sc->codecs[cad].response = HDA_INVALID;

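	/*
	 * Queue the verb in the next free CORB slot and hand it to the
	 * controller by advancing CORBWP, then poll the RIRB until the
	 * response arrives or the request times out.
	 */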
	sc->codecs[cad].pending++;
	sc->corb_wp++;
	sc->corb_wp %= sc->corb_size;
	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
	corb[sc->corb_wp] = htole32(verb);
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);

	timeout = 10000;
	do {
		if (hdac_rirb_flush(sc) == 0)
			DELAY(10);
	} while (sc->codecs[cad].pending != 0 && --timeout);

	if (sc->codecs[cad].pending != 0) {
		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
		    verb, cad);
		sc->codecs[cad].pending = 0;
	}

	if (sc->unsolq_rp != sc->unsolq_wp)
		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	return (sc->codecs[cad].response);
}

/****************************************************************************
 * Device Methods
 ****************************************************************************/

/****************************************************************************
 * int hdac_probe(device_t)
 *
 * Probe for the presence of an hdac.  If none is found, check for a generic
 * match using the subclass of the device.
 ****************************************************************************/
static int
hdac_probe(device_t dev)
{
	int i, result;
	uint32_t model;
	uint16_t class, subclass;
	char desc[64];

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	bzero(desc, sizeof(desc));
	result = ENXIO;
	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
			result = BUS_PROBE_DEFAULT;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			snprintf(desc, sizeof(desc), "%s (0x%04x)",
			    hdac_devices[i].desc, pci_get_device(dev));
			result = BUS_PROBE_GENERIC;
			break;
		}
	}
	if (result == ENXIO && class == PCIC_MULTIMEDIA &&
	    subclass == PCIS_MULTIMEDIA_HDA) {
		snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
		result = BUS_PROBE_GENERIC;
	}
	if (result != ENXIO) {
		strlcat(desc, " HDA Controller", sizeof(desc));
		device_set_desc_copy(dev, desc);
	}

	return (result);
}

static void
hdac_unsolq_task(void *context, int pending)
{
	struct hdac_softc *sc;

	sc = (struct hdac_softc *)context;

	hdac_lock(sc);
	hdac_unsolq_flush(sc);
	hdac_unlock(sc);
}

/****************************************************************************
 * int hdac_attach(device_t)
 *
 * Attach the device into the kernel.  Interrupts usually won't be enabled
 * when this function is called.  Set up everything that doesn't require
 * interrupts and defer probing of codecs until interrupts are enabled.
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
		/*
		 * Try to enable PCIe snoop to avoid messing around with
		 * uncacheable DMA attribute.  Since PCIe snoop register
		 * config is pretty much vendor specific, there are no
		 * general solutions on how to enable it, forcing us (even
		 * Microsoft) to enable uncacheable or write combined DMA
		 * by default.
		 *
		 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
		 */
		for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
			if (hdac_pcie_snoop[i].vendor != vendor)
				continue;
			sc->flags &= ~HDAC_F_DMA_NOCACHE;
			if (hdac_pcie_snoop[i].reg == 0x00)
				break;
			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
			if ((v & hdac_pcie_snoop[i].enable) ==
			    hdac_pcie_snoop[i].enable)
				break;
			v &= hdac_pcie_snoop[i].mask;
			v |= hdac_pcie_snoop[i].enable;
			pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
			if ((v & hdac_pcie_snoop[i].enable) !=
			    hdac_pcie_snoop[i].enable) {
				HDA_BOOTVERBOSE(
					device_printf(dev,
					    "WARNING: Failed to enable PCIe "
					    "snoop!\n");
				);
#if defined(__i386__) || defined(__amd64__)
				sc->flags |= HDAC_F_DMA_NOCACHE;
#endif
			}
			break;
		}
#if defined(__i386__) || defined(__amd64__)
	}
#endif

	HDA_BOOTHVERBOSE(
		device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
		    (sc->flags & HDAC_F_DMA_NOCACHE) ?
		    "Uncacheable" : "PCIe snoop", vendor);
	);

	/* Allocate resources */
	result = hdac_mem_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;
	result = hdac_irq_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Get Capabilities */
	result = hdac_get_capabilities(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Allocate CORB, RIRB, POS and BDLs dma memory */
	result = hdac_dma_alloc(sc, &sc->corb_dma,
	    sc->corb_size * sizeof(uint32_t));
	if (result != 0)
		goto hdac_attach_fail;
	result = hdac_dma_alloc(sc, &sc->rirb_dma,
	    sc->rirb_size * sizeof(struct hdac_rirb));
	if (result != 0)
		goto hdac_attach_fail;
	sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
	    M_HDAC, M_ZERO | M_WAITOK);
	for (i = 0; i < sc->num_ss; i++) {
		result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
		    sizeof(struct hdac_bdle) * HDA_BDL_MAX);
		if (result != 0)
			goto hdac_attach_fail;
	}
	if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
		if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
			HDA_BOOTVERBOSE(
				device_printf(dev, "Failed to "
				    "allocate DMA pos buffer "
				    "(non-fatal)\n");
			);
		} else {
			uint64_t addr = sc->pos_dma.dma_paddr;

			HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
			HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
			    (addr & HDAC_DPLBASE_DPLBASE_MASK) |
			    HDAC_DPLBASE_DPLBASE_DMAPBE);
		}
	}

	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    HDA_BUFSZ_MAX,			/* maxsize */
	    1,					/* nsegments */
	    HDA_BUFSZ_MAX,			/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &sc->chan_dmat);			/* dmat */
	if (result != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_attach_fail;
	}

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	/* Defer the rest of initialization until interrupts are enabled. */
	sc->intrhook.ich_func = hdac_attach2;
	sc->intrhook.ich_arg = (void *)sc;
	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
		sc->intrhook.ich_func = NULL;
		hdac_attach2((void *)sc);
	}

	return (0);

hdac_attach_fail:
	hdac_irq_free(sc);
	if (sc->streams != NULL)
		for (i = 0; i < sc->num_ss; i++)
			hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	hdac_mem_free(sc);
	snd_mtxfree(sc->lock);

	return (ENXIO);
}

static int
sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t *devlist;
	device_t dev;
	int devcount, i, err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	val = 0;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL || val == 0)
		return (err);

	/* XXX: Temporary.  For debugging. */
	if (val == 100) {
		hdac_suspend(dev);
		return (0);
	} else if (val == 101) {
		hdac_resume(dev);
		return (0);
	}

	if ((err = device_get_children(dev, &devlist, &devcount)) != 0)
		return (err);
	hdac_lock(sc);
	for (i = 0; i < devcount; i++)
		HDAC_PINDUMP(devlist[i]);
	hdac_unlock(sc);
	free(devlist, M_TEMP);
	return (0);
}

static int
hdac_mdata_rate(uint16_t fmt)
{
	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
	int rate, bits;

	if (fmt & (1 << 14))
		rate = 44100;
	else
		rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	rate /= ((fmt >> 8) & 0x07) + 1;
	bits = mbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	return (rate * bits);
}

static int
hdac_bdata_rate(uint16_t fmt, int output)
{
	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
	int rate, bits;

	rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	bits = bbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	if (!output)
		bits = ((bits + 7) & ~0x07) + 10;
	return (rate * bits);
}

static void
hdac_poll_reinit(struct hdac_softc *sc)
{
	int i, pollticks, min = 1000000;
	struct hdac_stream *s;

	if (sc->polling == 0)
		return;
	if (sc->unsol_registered > 0)
		min = hz / 2;
	for (i = 0; i < sc->num_ss; i++) {
		s = &sc->streams[i];
		if (s->running == 0)
			continue;
		pollticks = ((uint64_t)hz * s->blksz) /
		    (hdac_mdata_rate(s->format) / 8);
		pollticks >>= 1;
		if (pollticks > hz)
			pollticks = hz;
		if (pollticks < 1)
			pollticks = 1;
		if (min > pollticks)
			min = pollticks;
	}
	sc->poll_ival = min;
	if (min == 1000000)
		callout_stop(&sc->poll_callout);
	else
		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
}

static int
sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t ctl;
	int err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	hdac_lock(sc);
	val = sc->polling;
	hdac_unlock(sc);
	err = sysctl_handle_int(oidp, &val, 0, req);

	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < 0 || val > 1)
		return (EINVAL);

	hdac_lock(sc);
	if (val != sc->polling) {
		if (val == 0) {
			callout_stop(&sc->poll_callout);
			hdac_unlock(sc);
			callout_drain(&sc->poll_callout);
			hdac_lock(sc);
			sc->polling = 0;
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl |= HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
		} else {
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl &= ~HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
			sc->polling = 1;
			hdac_poll_reinit(sc);
		}
	}
	hdac_unlock(sc);

	return (err);
}

static void
hdac_attach2(void *arg)
{
	struct hdac_softc *sc;
	device_t child;
	uint32_t vendorid, revisionid;
	int i;
	uint16_t statests;

	sc = (struct hdac_softc *)arg;

	hdac_lock(sc);

	/* Remove ourselves from the config hooks */
	if (sc->intrhook.ich_func != NULL) {
		config_intrhook_disestablish(&sc->intrhook);
		sc->intrhook.ich_func = NULL;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) interrupts.  The documentation says that we
	 * should not make any assumptions about the state of this register
	 * and set it explicitly.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);

	/*
	 * Read and clear post-reset SDI wake status.
	 * Each set bit corresponds to a codec that came out of reset.
	 */
	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev,
		    "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	if (sc->polling == 0) {
		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	}
	DELAY(1000);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Scanning HDA codecs ...\n");
	);
	hdac_unlock(sc);
	for (i = 0; i < HDAC_CODEC_MAX; i++) {
		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
			HDA_BOOTHVERBOSE(
				device_printf(sc->dev,
				    "Found CODEC at address %d\n", i);
			);
			hdac_lock(sc);
			vendorid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
			revisionid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
			hdac_unlock(sc);
			if (vendorid == HDA_INVALID &&
			    revisionid == HDA_INVALID) {
				device_printf(sc->dev,
				    "CODEC at address %d not responding!\n", i);
				continue;
			}
			sc->codecs[i].vendor_id =
			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
			sc->codecs[i].device_id =
			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
			sc->codecs[i].revision_id =
			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
			sc->codecs[i].stepping_id =
			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
			child = device_add_child(sc->dev, "hdacc", -1);
			if (child == NULL) {
				device_printf(sc->dev,
				    "Failed to add CODEC device\n");
				continue;
			}
			device_set_ivars(child, (void *)(intptr_t)i);
			sc->codecs[i].dev = child;
		}
	}
	bus_generic_attach(sc->dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
}

/****************************************************************************
 * int hdac_suspend(device_t)
 *
 * Suspend and power down HDA bus and codecs.
1622 ****************************************************************************/ 1623 static int 1624 hdac_suspend(device_t dev) 1625 { 1626 struct hdac_softc *sc = device_get_softc(dev); 1627 1628 HDA_BOOTHVERBOSE( 1629 device_printf(dev, "Suspend...\n"); 1630 ); 1631 bus_generic_suspend(dev); 1632 1633 hdac_lock(sc); 1634 HDA_BOOTHVERBOSE( 1635 device_printf(dev, "Reset controller...\n"); 1636 ); 1637 callout_stop(&sc->poll_callout); 1638 hdac_reset(sc, false); 1639 hdac_unlock(sc); 1640 callout_drain(&sc->poll_callout); 1641 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1642 HDA_BOOTHVERBOSE( 1643 device_printf(dev, "Suspend done\n"); 1644 ); 1645 return (0); 1646 } 1647 1648 /**************************************************************************** 1649 * int hdac_resume(device_t) 1650 * 1651 * Powerup and restore HDA bus and codecs state. 1652 ****************************************************************************/ 1653 static int 1654 hdac_resume(device_t dev) 1655 { 1656 struct hdac_softc *sc = device_get_softc(dev); 1657 int error; 1658 1659 HDA_BOOTHVERBOSE( 1660 device_printf(dev, "Resume...\n"); 1661 ); 1662 hdac_lock(sc); 1663 1664 /* Quiesce everything */ 1665 HDA_BOOTHVERBOSE( 1666 device_printf(dev, "Reset controller...\n"); 1667 ); 1668 hdac_reset(sc, true); 1669 1670 /* Initialize the CORB and RIRB */ 1671 hdac_corb_init(sc); 1672 hdac_rirb_init(sc); 1673 1674 HDA_BOOTHVERBOSE( 1675 device_printf(dev, "Starting CORB Engine...\n"); 1676 ); 1677 hdac_corb_start(sc); 1678 HDA_BOOTHVERBOSE( 1679 device_printf(dev, "Starting RIRB Engine...\n"); 1680 ); 1681 hdac_rirb_start(sc); 1682 1683 /* 1684 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1685 * (status change) events. The documentation says that we should 1686 * not make any assumptions about the state of this register and 1687 * set it explicitly. 1688 * Also, clear HDAC_STATESTS. 1689 * NB: this needs to be done before the interrupt is enabled as 1690 * the handler does not expect this interrupt source. 1691 */ 1692 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1693 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1694 1695 HDA_BOOTHVERBOSE( 1696 device_printf(dev, "Enabling controller interrupt...\n"); 1697 ); 1698 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1699 HDAC_GCTL_UNSOL); 1700 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1701 DELAY(1000); 1702 hdac_poll_reinit(sc); 1703 hdac_unlock(sc); 1704 1705 error = bus_generic_resume(dev); 1706 HDA_BOOTHVERBOSE( 1707 device_printf(dev, "Resume done\n"); 1708 ); 1709 return (error); 1710 } 1711 1712 /**************************************************************************** 1713 * int hdac_detach(device_t) 1714 * 1715 * Detach and free up resources utilized by the hdac device. 
1716 ****************************************************************************/ 1717 static int 1718 hdac_detach(device_t dev) 1719 { 1720 struct hdac_softc *sc = device_get_softc(dev); 1721 device_t *devlist; 1722 int cad, i, devcount, error; 1723 1724 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1725 return (error); 1726 for (i = 0; i < devcount; i++) { 1727 cad = (intptr_t)device_get_ivars(devlist[i]); 1728 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1729 free(devlist, M_TEMP); 1730 return (error); 1731 } 1732 sc->codecs[cad].dev = NULL; 1733 } 1734 free(devlist, M_TEMP); 1735 1736 hdac_lock(sc); 1737 hdac_reset(sc, false); 1738 hdac_unlock(sc); 1739 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1740 hdac_irq_free(sc); 1741 1742 for (i = 0; i < sc->num_ss; i++) 1743 hdac_dma_free(sc, &sc->streams[i].bdl); 1744 free(sc->streams, M_HDAC); 1745 hdac_dma_free(sc, &sc->pos_dma); 1746 hdac_dma_free(sc, &sc->rirb_dma); 1747 hdac_dma_free(sc, &sc->corb_dma); 1748 if (sc->chan_dmat != NULL) { 1749 bus_dma_tag_destroy(sc->chan_dmat); 1750 sc->chan_dmat = NULL; 1751 } 1752 hdac_mem_free(sc); 1753 snd_mtxfree(sc->lock); 1754 return (0); 1755 } 1756 1757 static bus_dma_tag_t 1758 hdac_get_dma_tag(device_t dev, device_t child) 1759 { 1760 struct hdac_softc *sc = device_get_softc(dev); 1761 1762 return (sc->chan_dmat); 1763 } 1764 1765 static int 1766 hdac_print_child(device_t dev, device_t child) 1767 { 1768 int retval; 1769 1770 retval = bus_print_child_header(dev, child); 1771 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1772 retval += bus_print_child_footer(dev, child); 1773 1774 return (retval); 1775 } 1776 1777 static int 1778 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1779 { 1780 1781 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1782 return (0); 1783 } 1784 1785 static int 1786 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1787 { 1788 struct hdac_softc *sc = device_get_softc(dev); 1789 nid_t cad = (uintptr_t)device_get_ivars(child); 1790 1791 sbuf_printf(sb, 1792 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1793 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1794 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1795 return (0); 1796 } 1797 1798 static int 1799 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1800 { 1801 struct hdac_softc *sc = device_get_softc(dev); 1802 nid_t cad = (uintptr_t)device_get_ivars(child); 1803 1804 switch (which) { 1805 case HDA_IVAR_CODEC_ID: 1806 *result = cad; 1807 break; 1808 case HDA_IVAR_VENDOR_ID: 1809 *result = sc->codecs[cad].vendor_id; 1810 break; 1811 case HDA_IVAR_DEVICE_ID: 1812 *result = sc->codecs[cad].device_id; 1813 break; 1814 case HDA_IVAR_REVISION_ID: 1815 *result = sc->codecs[cad].revision_id; 1816 break; 1817 case HDA_IVAR_STEPPING_ID: 1818 *result = sc->codecs[cad].stepping_id; 1819 break; 1820 case HDA_IVAR_SUBVENDOR_ID: 1821 *result = pci_get_subvendor(dev); 1822 break; 1823 case HDA_IVAR_SUBDEVICE_ID: 1824 *result = pci_get_subdevice(dev); 1825 break; 1826 case HDA_IVAR_DMA_NOCACHE: 1827 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1828 break; 1829 case HDA_IVAR_STRIPES_MASK: 1830 *result = (1 << (1 << sc->num_sdo)) - 1; 1831 break; 1832 default: 1833 return (ENOENT); 1834 } 1835 return (0); 1836 } 1837 1838 static struct mtx * 1839 hdac_get_mtx(device_t dev, device_t child) 1840 { 1841 struct hdac_softc *sc = device_get_softc(dev); 

        return (sc->lock);
}

static uint32_t
hdac_codec_command(device_t dev, device_t child, uint32_t verb)
{

        return (hdac_send_command(device_get_softc(dev),
            (intptr_t)device_get_ivars(child), verb));
}

static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
        int i, ss;

        ss = -1;
        /* Search ISS/OSS first. */
        if (dir == 0) {
                for (i = 0; i < sc->num_iss; i++) {
                        if (sc->streams[i].stream == stream) {
                                ss = i;
                                break;
                        }
                }
        } else {
                for (i = 0; i < sc->num_oss; i++) {
                        if (sc->streams[i + sc->num_iss].stream == stream) {
                                ss = i + sc->num_iss;
                                break;
                        }
                }
        }
        /* Fall back to BSS. */
        if (ss == -1) {
                for (i = 0; i < sc->num_bss; i++) {
                        if (sc->streams[i + sc->num_iss + sc->num_oss].stream
                            == stream) {
                                ss = i + sc->num_iss + sc->num_oss;
                                break;
                        }
                }
        }
        return (ss);
}

static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
        struct hdac_softc *sc = device_get_softc(dev);
        nid_t cad = (uintptr_t)device_get_ivars(child);
        int stream, ss, bw, maxbw, prevbw;

        /* Look for an empty stream. */
        ss = hdac_find_stream(sc, dir, 0);

        /* Return if nothing was found. */
        if (ss < 0)
                return (0);

        /* Check bus bandwidth. */
        bw = hdac_bdata_rate(format, dir);
        if (dir == 1) {
                bw *= 1 << (sc->num_sdo - stripe);
                prevbw = sc->sdo_bw_used;
                maxbw = 48000 * 960 * (1 << sc->num_sdo);
        } else {
                prevbw = sc->codecs[cad].sdi_bw_used;
                maxbw = 48000 * 464;
        }
        HDA_BOOTHVERBOSE(
                device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
                    (bw + prevbw) / 1000, maxbw / 1000,
                    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
        );
        if (bw + prevbw > maxbw)
                return (0);
        if (dir == 1)
                sc->sdo_bw_used += bw;
        else
                sc->codecs[cad].sdi_bw_used += bw;

        /* Allocate stream number: ISS/OSS are numbered up from 1, BSS down from 15. */
        if (ss >= sc->num_iss + sc->num_oss)
                stream = 15 - (ss - sc->num_iss - sc->num_oss);
        else if (ss >= sc->num_iss)
                stream = ss - sc->num_iss + 1;
        else
                stream = ss + 1;

        sc->streams[ss].dev = child;
        sc->streams[ss].dir = dir;
        sc->streams[ss].stream = stream;
        sc->streams[ss].bw = bw;
        sc->streams[ss].format = format;
        sc->streams[ss].stripe = stripe;
        if (dmapos != NULL) {
                if (sc->pos_dma.dma_vaddr != NULL)
                        *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
                else
                        *dmapos = NULL;
        }
        return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
        struct hdac_softc *sc = device_get_softc(dev);
        nid_t cad = (uintptr_t)device_get_ivars(child);
        int ss;

        ss = hdac_find_stream(sc, dir, stream);
        KASSERT(ss >= 0,
            ("Free for not allocated stream (%d/%d)\n", dir, stream));
        if (dir == 1)
                sc->sdo_bw_used -= sc->streams[ss].bw;
        else
                sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
        sc->streams[ss].stream = 0;
        sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
        struct hdac_softc *sc = device_get_softc(dev);
        struct hdac_bdle *bdle;
        uint64_t addr;
        int i, ss, off;
        uint32_t ctl;

        ss = hdac_find_stream(sc, dir, stream);
        KASSERT(ss >= 0,
            ("Start for not allocated stream (%d/%d)\n", dir, stream));

        addr = (uint64_t)buf;
        bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
        for (i = 0; i < blkcnt; i++, bdle++) {
                bdle->addrl = htole32((uint32_t)addr);
                bdle->addrh = htole32((uint32_t)(addr >> 32));
                bdle->len = htole32(blksz);
                bdle->ioc = htole32(1);
                addr += blksz;
        }

        bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
            sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

        /* Each stream descriptor occupies 0x20 bytes of register space. */
        off = ss << 5;
        HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
        HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
        addr = sc->streams[ss].bdl.dma_paddr;
        HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
        HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

        ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
        if (dir)
                ctl |= HDAC_SDCTL2_DIR;
        else
                ctl &= ~HDAC_SDCTL2_DIR;
        ctl &= ~HDAC_SDCTL2_STRM_MASK;
        ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
        ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
        ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
        HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

        HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

        ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
        ctl |= 1 << ss;
        HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

        HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
            HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
        ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
        ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
            HDAC_SDCTL_RUN;
        HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

        sc->streams[ss].blksz = blksz;
        sc->streams[ss].running = 1;
        hdac_poll_reinit(sc);
        return (0);
}
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
        struct hdac_softc *sc = device_get_softc(dev);
        int ss, off;
        uint32_t ctl;

        ss = hdac_find_stream(sc, dir, stream);
        KASSERT(ss >= 0,
            ("Stop for not allocated stream (%d/%d)\n", dir, stream));

        bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
            sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

        off = ss << 5;
        ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
        ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
            HDAC_SDCTL_RUN);
        HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

        ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
        ctl &= ~(1 << ss);
        HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

        sc->streams[ss].running = 0;
        hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
        struct hdac_softc *sc = device_get_softc(dev);
        int timeout = 1000;
        int to = timeout;
        int ss, off;
        uint32_t ctl;

        ss = hdac_find_stream(sc, dir, stream);
        KASSERT(ss >= 0,
            ("Reset for not allocated stream (%d/%d)\n", dir, stream));

        off = ss << 5;
        ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
        ctl |= HDAC_SDCTL_SRST;
        HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
        do {
                ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
                if (ctl & HDAC_SDCTL_SRST)
                        break;
                DELAY(10);
        } while (--to);
        if (!(ctl & HDAC_SDCTL_SRST))
                device_printf(dev, "Reset setting timeout\n");
        ctl &= ~HDAC_SDCTL_SRST;
        HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
        to = timeout;
        do {
                ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
                if (!(ctl & HDAC_SDCTL_SRST))
                        break;
                DELAY(10);
        } while (--to);
        if (ctl & HDAC_SDCTL_SRST)
                device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
        struct hdac_softc *sc = device_get_softc(dev);
        int ss, off;

        ss = hdac_find_stream(sc, dir, stream);
        KASSERT(ss >= 0,
            ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

        off = ss << 5;
        return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
        struct hdac_softc *sc = device_get_softc(dev);

        /* Only count registered handlers; the tag is passed back unchanged. */
        sc->unsol_registered++;
        hdac_poll_reinit(sc);
        return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
        struct hdac_softc *sc = device_get_softc(dev);

        sc->unsol_registered--;
        hdac_poll_reinit(sc);
}

static device_method_t hdac_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         hdac_probe),
        DEVMETHOD(device_attach,        hdac_attach),
        DEVMETHOD(device_detach,        hdac_detach),
        DEVMETHOD(device_suspend,       hdac_suspend),
        DEVMETHOD(device_resume,        hdac_resume),
        /* Bus interface */
        DEVMETHOD(bus_get_dma_tag,      hdac_get_dma_tag),
        DEVMETHOD(bus_print_child,      hdac_print_child),
        DEVMETHOD(bus_child_location,   hdac_child_location),
        DEVMETHOD(bus_child_pnpinfo,    hdac_child_pnpinfo_method),
        DEVMETHOD(bus_read_ivar,        hdac_read_ivar),
        DEVMETHOD(hdac_get_mtx,         hdac_get_mtx),
        DEVMETHOD(hdac_codec_command,   hdac_codec_command),
        DEVMETHOD(hdac_stream_alloc,    hdac_stream_alloc),
        DEVMETHOD(hdac_stream_free,     hdac_stream_free),
        DEVMETHOD(hdac_stream_start,    hdac_stream_start),
        DEVMETHOD(hdac_stream_stop,     hdac_stream_stop),
        DEVMETHOD(hdac_stream_reset,    hdac_stream_reset),
        DEVMETHOD(hdac_stream_getptr,   hdac_stream_getptr),
        DEVMETHOD(hdac_unsol_alloc,     hdac_unsol_alloc),
        DEVMETHOD(hdac_unsol_free,      hdac_unsol_free),
        DEVMETHOD_END
};

static driver_t hdac_driver = {
        "hdac",
        hdac_methods,
        sizeof(struct hdac_softc),
};

static devclass_t hdac_devclass;

DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);