1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 SND_DECLARE_FILE("$FreeBSD$"); 55 56 #define hdac_lock(sc) snd_mtxlock((sc)->lock) 57 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) 58 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) 59 60 #define HDAC_QUIRK_64BIT (1 << 0) 61 #define HDAC_QUIRK_DMAPOS (1 << 1) 62 #define HDAC_QUIRK_MSI (1 << 2) 63 64 static const struct { 65 const char *key; 66 uint32_t value; 67 } hdac_quirks_tab[] = { 68 { "64bit", HDAC_QUIRK_64BIT }, 69 { "dmapos", HDAC_QUIRK_DMAPOS }, 70 { "msi", HDAC_QUIRK_MSI }, 71 }; 72 73 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 74 75 static const struct { 76 uint32_t model; 77 const char *desc; 78 char quirks_on; 79 char quirks_off; 80 } hdac_devices[] = { 81 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 82 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 83 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 84 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 85 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 87 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 88 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 89 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 90 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 91 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 92 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 93 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 94 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 95 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, 96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 115 { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 }, 116 { HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 }, 117 { HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 }, 118 { HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 }, 119 { HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 }, 120 { HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 }, 121 { HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 }, 122 { HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 }, 123 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 124 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 125 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 126 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 127 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 128 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 129 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 130 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 131 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 132 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 133 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 134 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 135 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 136 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 137 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 138 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 139 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 140 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 141 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 142 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 143 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 144 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 145 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 146 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 147 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 148 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 149 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 150 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 151 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 152 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 153 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 154 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 155 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 156 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 157 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 158 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 159 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 160 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 161 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 162 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 163 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 164 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 165 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 166 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 167 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 168 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 169 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 170 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 171 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 172 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 173 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 174 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 175 { HDA_ATI_RS880, "ATI RS880", 0, 0 }, 176 { HDA_ATI_R600, "ATI R600", 0, 0 }, 177 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 178 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 179 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 180 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 181 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 182 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 183 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 184 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 185 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 186 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 187 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 188 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 189 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 190 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 191 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 192 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 193 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 194 { HDA_ATI_KABINI, "ATI Kabini", 0, 0 }, 195 { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 }, 196 { HDA_AMD_X370, "AMD X370", 0, 0 }, 197 { HDA_AMD_X570, "AMD X570", 0, 0 }, 198 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 199 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 200 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 201 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 202 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 203 { HDA_VMWARE, "VMware", 0, 0 }, 204 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 205 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 206 /* Unknown */ 207 { HDA_INTEL_ALL, "Intel", 0, 0 }, 208 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 209 { HDA_ATI_ALL, "ATI", 0, 0 }, 210 { HDA_AMD_ALL, "AMD", 0, 0 }, 211 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 212 { HDA_VIA_ALL, "VIA", 0, 0 }, 213 { HDA_VMWARE_ALL, "VMware", 0, 0 }, 214 { HDA_SIS_ALL, "SiS", 0, 0 }, 215 { HDA_ULI_ALL, "ULI", 0, 0 }, 216 }; 217 218 static const struct { 219 uint16_t vendor; 220 uint8_t reg; 221 uint8_t mask; 222 uint8_t enable; 223 } hdac_pcie_snoop[] = { 224 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 225 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 226 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 227 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 228 }; 229 230 /**************************************************************************** 231 * Function prototypes 232 ****************************************************************************/ 233 static void hdac_intr_handler(void *); 234 static int hdac_reset(struct hdac_softc *, bool); 235 static int hdac_get_capabilities(struct hdac_softc *); 236 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 237 static int hdac_dma_alloc(struct hdac_softc *, 238 struct hdac_dma *, bus_size_t); 239 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 240 static int hdac_mem_alloc(struct hdac_softc *); 241 static void hdac_mem_free(struct hdac_softc *); 242 static int hdac_irq_alloc(struct hdac_softc *); 243 static void hdac_irq_free(struct hdac_softc *); 244 static void hdac_corb_init(struct hdac_softc *); 245 static void hdac_rirb_init(struct hdac_softc *); 246 static void hdac_corb_start(struct hdac_softc *); 247 static void hdac_rirb_start(struct hdac_softc *); 248 249 static void hdac_attach2(void *); 250 251 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 252 253 static int hdac_probe(device_t); 254 static int hdac_attach(device_t); 255 static int hdac_detach(device_t); 256 static int hdac_suspend(device_t); 257 static int hdac_resume(device_t); 258 259 static int hdac_rirb_flush(struct hdac_softc *sc); 260 static int hdac_unsolq_flush(struct hdac_softc *sc); 261 262 /* This function surely going to make its way into upper level someday. */ 263 static void 264 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 265 { 266 const char *res = NULL; 267 int i = 0, j, k, len, inv; 268 269 if (resource_string_value(device_get_name(sc->dev), 270 device_get_unit(sc->dev), "config", &res) != 0) 271 return; 272 if (!(res != NULL && strlen(res) > 0)) 273 return; 274 HDA_BOOTVERBOSE( 275 device_printf(sc->dev, "Config options:"); 276 ); 277 for (;;) { 278 while (res[i] != '\0' && 279 (res[i] == ',' || isspace(res[i]) != 0)) 280 i++; 281 if (res[i] == '\0') { 282 HDA_BOOTVERBOSE( 283 printf("\n"); 284 ); 285 return; 286 } 287 j = i; 288 while (res[j] != '\0' && 289 !(res[j] == ',' || isspace(res[j]) != 0)) 290 j++; 291 len = j - i; 292 if (len > 2 && strncmp(res + i, "no", 2) == 0) 293 inv = 2; 294 else 295 inv = 0; 296 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 297 if (strncmp(res + i + inv, 298 hdac_quirks_tab[k].key, len - inv) != 0) 299 continue; 300 if (len - inv != strlen(hdac_quirks_tab[k].key)) 301 continue; 302 HDA_BOOTVERBOSE( 303 printf(" %s%s", (inv != 0) ? "no" : "", 304 hdac_quirks_tab[k].key); 305 ); 306 if (inv == 0) { 307 *on |= hdac_quirks_tab[k].value; 308 *off &= ~hdac_quirks_tab[k].value; 309 } else if (inv != 0) { 310 *off |= hdac_quirks_tab[k].value; 311 *on &= ~hdac_quirks_tab[k].value; 312 } 313 break; 314 } 315 i = j; 316 } 317 } 318 319 static void 320 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 321 { 322 device_t dev; 323 uint8_t rirbsts; 324 int i; 325 326 /* Was this a controller interrupt? */ 327 if (intsts & HDAC_INTSTS_CIS) { 328 /* 329 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then 330 * we will need to check and clear HDAC_STATESTS. 331 * That event is used to report codec status changes such as 332 * a reset or a wake-up event. 333 */ 334 /* 335 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we 336 * will need to check and clear HDAC_CORBSTS_CMEI in 337 * HDAC_CORBSTS. 338 * That event is used to report CORB memory errors. 339 */ 340 /* 341 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we 342 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in 343 * HDAC_RIRBSTS. 344 * That event is used to report response FIFO overruns. 345 */ 346 347 /* Get as many responses that we can */ 348 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 349 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 350 HDAC_WRITE_1(&sc->mem, 351 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 352 hdac_rirb_flush(sc); 353 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 354 } 355 if (sc->unsolq_rp != sc->unsolq_wp) 356 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 357 } 358 359 if (intsts & HDAC_INTSTS_SIS_MASK) { 360 for (i = 0; i < sc->num_ss; i++) { 361 if ((intsts & (1 << i)) == 0) 362 continue; 363 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 364 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 365 if ((dev = sc->streams[i].dev) != NULL) { 366 HDAC_STREAM_INTR(dev, 367 sc->streams[i].dir, sc->streams[i].stream); 368 } 369 } 370 } 371 } 372 373 /**************************************************************************** 374 * void hdac_intr_handler(void *) 375 * 376 * Interrupt handler. Processes interrupts received from the hdac. 377 ****************************************************************************/ 378 static void 379 hdac_intr_handler(void *context) 380 { 381 struct hdac_softc *sc; 382 uint32_t intsts; 383 384 sc = (struct hdac_softc *)context; 385 386 /* 387 * Loop until HDAC_INTSTS_GIS gets clear. 388 * It is plausible that hardware interrupts a host only when GIS goes 389 * from zero to one. GIS is formed by OR-ing multiple hardware 390 * statuses, so it's possible that a previously cleared status gets set 391 * again while another status has not been cleared yet. Thus, there 392 * will be no new interrupt as GIS always stayed set. If we don't 393 * re-examine GIS then we can leave it set and never get an interrupt 394 * again. 395 */ 396 hdac_lock(sc); 397 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 398 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) { 399 hdac_one_intr(sc, intsts); 400 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 401 } 402 hdac_unlock(sc); 403 } 404 405 static void 406 hdac_poll_callback(void *arg) 407 { 408 struct hdac_softc *sc = arg; 409 410 if (sc == NULL) 411 return; 412 413 hdac_lock(sc); 414 if (sc->polling == 0) { 415 hdac_unlock(sc); 416 return; 417 } 418 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 419 hdac_unlock(sc); 420 421 hdac_intr_handler(sc); 422 } 423 424 /**************************************************************************** 425 * int hdac_reset(hdac_softc *, bool) 426 * 427 * Reset the hdac to a quiescent and known state. 428 ****************************************************************************/ 429 static int 430 hdac_reset(struct hdac_softc *sc, bool wakeup) 431 { 432 uint32_t gctl; 433 int count, i; 434 435 /* 436 * Stop all Streams DMA engine 437 */ 438 for (i = 0; i < sc->num_iss; i++) 439 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 440 for (i = 0; i < sc->num_oss; i++) 441 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 442 for (i = 0; i < sc->num_bss; i++) 443 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 444 445 /* 446 * Stop Control DMA engines. 447 */ 448 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 449 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 450 451 /* 452 * Reset DMA position buffer. 453 */ 454 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 455 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 456 457 /* 458 * Reset the controller. The reset must remain asserted for 459 * a minimum of 100us. 460 */ 461 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 462 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 463 count = 10000; 464 do { 465 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 466 if (!(gctl & HDAC_GCTL_CRST)) 467 break; 468 DELAY(10); 469 } while (--count); 470 if (gctl & HDAC_GCTL_CRST) { 471 device_printf(sc->dev, "Unable to put hdac in reset\n"); 472 return (ENXIO); 473 } 474 475 /* If wakeup is not requested - leave the controller in reset state. */ 476 if (!wakeup) 477 return (0); 478 479 DELAY(100); 480 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 481 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 482 count = 10000; 483 do { 484 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 485 if (gctl & HDAC_GCTL_CRST) 486 break; 487 DELAY(10); 488 } while (--count); 489 if (!(gctl & HDAC_GCTL_CRST)) { 490 device_printf(sc->dev, "Device stuck in reset\n"); 491 return (ENXIO); 492 } 493 494 /* 495 * Wait for codecs to finish their own reset sequence. The delay here 496 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 497 */ 498 DELAY(1000); 499 500 return (0); 501 } 502 503 /**************************************************************************** 504 * int hdac_get_capabilities(struct hdac_softc *); 505 * 506 * Retreive the general capabilities of the hdac; 507 * Number of Input Streams 508 * Number of Output Streams 509 * Number of bidirectional Streams 510 * 64bit ready 511 * CORB and RIRB sizes 512 ****************************************************************************/ 513 static int 514 hdac_get_capabilities(struct hdac_softc *sc) 515 { 516 uint16_t gcap; 517 uint8_t corbsize, rirbsize; 518 519 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 520 sc->num_iss = HDAC_GCAP_ISS(gcap); 521 sc->num_oss = HDAC_GCAP_OSS(gcap); 522 sc->num_bss = HDAC_GCAP_BSS(gcap); 523 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 524 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 525 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 526 if (sc->quirks_on & HDAC_QUIRK_64BIT) 527 sc->support_64bit = 1; 528 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 529 sc->support_64bit = 0; 530 531 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 532 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 533 HDAC_CORBSIZE_CORBSZCAP_256) 534 sc->corb_size = 256; 535 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 536 HDAC_CORBSIZE_CORBSZCAP_16) 537 sc->corb_size = 16; 538 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 539 HDAC_CORBSIZE_CORBSZCAP_2) 540 sc->corb_size = 2; 541 else { 542 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 543 __func__, corbsize); 544 return (ENXIO); 545 } 546 547 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 548 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 549 HDAC_RIRBSIZE_RIRBSZCAP_256) 550 sc->rirb_size = 256; 551 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 552 HDAC_RIRBSIZE_RIRBSZCAP_16) 553 sc->rirb_size = 16; 554 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 555 HDAC_RIRBSIZE_RIRBSZCAP_2) 556 sc->rirb_size = 2; 557 else { 558 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 559 __func__, rirbsize); 560 return (ENXIO); 561 } 562 563 HDA_BOOTVERBOSE( 564 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 565 "NSDO %d%s, CORB %d, RIRB %d\n", 566 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 567 sc->support_64bit ? ", 64bit" : "", 568 sc->corb_size, sc->rirb_size); 569 ); 570 571 return (0); 572 } 573 574 /**************************************************************************** 575 * void hdac_dma_cb 576 * 577 * This function is called by bus_dmamap_load when the mapping has been 578 * established. We just record the physical address of the mapping into 579 * the struct hdac_dma passed in. 580 ****************************************************************************/ 581 static void 582 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 583 { 584 struct hdac_dma *dma; 585 586 if (error == 0) { 587 dma = (struct hdac_dma *)callback_arg; 588 dma->dma_paddr = segs[0].ds_addr; 589 } 590 } 591 592 /**************************************************************************** 593 * int hdac_dma_alloc 594 * 595 * This function allocate and setup a dma region (struct hdac_dma). 596 * It must be freed by a corresponding hdac_dma_free. 597 ****************************************************************************/ 598 static int 599 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 600 { 601 bus_size_t roundsz; 602 int result; 603 604 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 605 bzero(dma, sizeof(*dma)); 606 607 /* 608 * Create a DMA tag 609 */ 610 result = bus_dma_tag_create( 611 bus_get_dma_tag(sc->dev), /* parent */ 612 HDA_DMA_ALIGNMENT, /* alignment */ 613 0, /* boundary */ 614 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 615 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 616 BUS_SPACE_MAXADDR, /* highaddr */ 617 NULL, /* filtfunc */ 618 NULL, /* fistfuncarg */ 619 roundsz, /* maxsize */ 620 1, /* nsegments */ 621 roundsz, /* maxsegsz */ 622 0, /* flags */ 623 NULL, /* lockfunc */ 624 NULL, /* lockfuncarg */ 625 &dma->dma_tag); /* dmat */ 626 if (result != 0) { 627 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 628 __func__, result); 629 goto hdac_dma_alloc_fail; 630 } 631 632 /* 633 * Allocate DMA memory 634 */ 635 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 636 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 637 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 638 BUS_DMA_COHERENT), 639 &dma->dma_map); 640 if (result != 0) { 641 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 642 __func__, result); 643 goto hdac_dma_alloc_fail; 644 } 645 646 dma->dma_size = roundsz; 647 648 /* 649 * Map the memory 650 */ 651 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 652 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 653 if (result != 0 || dma->dma_paddr == 0) { 654 if (result == 0) 655 result = ENOMEM; 656 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 657 __func__, result); 658 goto hdac_dma_alloc_fail; 659 } 660 661 HDA_BOOTHVERBOSE( 662 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 663 __func__, (uintmax_t)size, (uintmax_t)roundsz); 664 ); 665 666 return (0); 667 668 hdac_dma_alloc_fail: 669 hdac_dma_free(sc, dma); 670 671 return (result); 672 } 673 674 /**************************************************************************** 675 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 676 * 677 * Free a struct hdac_dma that has been previously allocated via the 678 * hdac_dma_alloc function. 679 ****************************************************************************/ 680 static void 681 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 682 { 683 if (dma->dma_paddr != 0) { 684 /* Flush caches */ 685 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 686 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 687 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 688 dma->dma_paddr = 0; 689 } 690 if (dma->dma_vaddr != NULL) { 691 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 692 dma->dma_vaddr = NULL; 693 } 694 if (dma->dma_tag != NULL) { 695 bus_dma_tag_destroy(dma->dma_tag); 696 dma->dma_tag = NULL; 697 } 698 dma->dma_size = 0; 699 } 700 701 /**************************************************************************** 702 * int hdac_mem_alloc(struct hdac_softc *) 703 * 704 * Allocate all the bus resources necessary to speak with the physical 705 * controller. 706 ****************************************************************************/ 707 static int 708 hdac_mem_alloc(struct hdac_softc *sc) 709 { 710 struct hdac_mem *mem; 711 712 mem = &sc->mem; 713 mem->mem_rid = PCIR_BAR(0); 714 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 715 &mem->mem_rid, RF_ACTIVE); 716 if (mem->mem_res == NULL) { 717 device_printf(sc->dev, 718 "%s: Unable to allocate memory resource\n", __func__); 719 return (ENOMEM); 720 } 721 mem->mem_tag = rman_get_bustag(mem->mem_res); 722 mem->mem_handle = rman_get_bushandle(mem->mem_res); 723 724 return (0); 725 } 726 727 /**************************************************************************** 728 * void hdac_mem_free(struct hdac_softc *) 729 * 730 * Free up resources previously allocated by hdac_mem_alloc. 731 ****************************************************************************/ 732 static void 733 hdac_mem_free(struct hdac_softc *sc) 734 { 735 struct hdac_mem *mem; 736 737 mem = &sc->mem; 738 if (mem->mem_res != NULL) 739 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 740 mem->mem_res); 741 mem->mem_res = NULL; 742 } 743 744 /**************************************************************************** 745 * int hdac_irq_alloc(struct hdac_softc *) 746 * 747 * Allocate and setup the resources necessary for interrupt handling. 748 ****************************************************************************/ 749 static int 750 hdac_irq_alloc(struct hdac_softc *sc) 751 { 752 struct hdac_irq *irq; 753 int result; 754 755 irq = &sc->irq; 756 irq->irq_rid = 0x0; 757 758 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 759 (result = pci_msi_count(sc->dev)) == 1 && 760 pci_alloc_msi(sc->dev, &result) == 0) 761 irq->irq_rid = 0x1; 762 763 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 764 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 765 if (irq->irq_res == NULL) { 766 device_printf(sc->dev, "%s: Unable to allocate irq\n", 767 __func__); 768 goto hdac_irq_alloc_fail; 769 } 770 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 771 NULL, hdac_intr_handler, sc, &irq->irq_handle); 772 if (result != 0) { 773 device_printf(sc->dev, 774 "%s: Unable to setup interrupt handler (%d)\n", 775 __func__, result); 776 goto hdac_irq_alloc_fail; 777 } 778 779 return (0); 780 781 hdac_irq_alloc_fail: 782 hdac_irq_free(sc); 783 784 return (ENXIO); 785 } 786 787 /**************************************************************************** 788 * void hdac_irq_free(struct hdac_softc *) 789 * 790 * Free up resources previously allocated by hdac_irq_alloc. 791 ****************************************************************************/ 792 static void 793 hdac_irq_free(struct hdac_softc *sc) 794 { 795 struct hdac_irq *irq; 796 797 irq = &sc->irq; 798 if (irq->irq_res != NULL && irq->irq_handle != NULL) 799 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 800 if (irq->irq_res != NULL) 801 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 802 irq->irq_res); 803 if (irq->irq_rid == 0x1) 804 pci_release_msi(sc->dev); 805 irq->irq_handle = NULL; 806 irq->irq_res = NULL; 807 irq->irq_rid = 0x0; 808 } 809 810 /**************************************************************************** 811 * void hdac_corb_init(struct hdac_softc *) 812 * 813 * Initialize the corb registers for operations but do not start it up yet. 814 * The CORB engine must not be running when this function is called. 815 ****************************************************************************/ 816 static void 817 hdac_corb_init(struct hdac_softc *sc) 818 { 819 uint8_t corbsize; 820 uint64_t corbpaddr; 821 822 /* Setup the CORB size. */ 823 switch (sc->corb_size) { 824 case 256: 825 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 826 break; 827 case 16: 828 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 829 break; 830 case 2: 831 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 832 break; 833 default: 834 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 835 } 836 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 837 838 /* Setup the CORB Address in the hdac */ 839 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 840 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 841 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 842 843 /* Set the WP and RP */ 844 sc->corb_wp = 0; 845 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 846 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 847 /* 848 * The HDA specification indicates that the CORBRPRST bit will always 849 * read as zero. Unfortunately, it seems that at least the 82801G 850 * doesn't reset the bit to zero, which stalls the corb engine. 851 * manually reset the bit to zero before continuing. 852 */ 853 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 854 855 /* Enable CORB error reporting */ 856 #if 0 857 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 858 #endif 859 } 860 861 /**************************************************************************** 862 * void hdac_rirb_init(struct hdac_softc *) 863 * 864 * Initialize the rirb registers for operations but do not start it up yet. 865 * The RIRB engine must not be running when this function is called. 866 ****************************************************************************/ 867 static void 868 hdac_rirb_init(struct hdac_softc *sc) 869 { 870 uint8_t rirbsize; 871 uint64_t rirbpaddr; 872 873 /* Setup the RIRB size. */ 874 switch (sc->rirb_size) { 875 case 256: 876 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 877 break; 878 case 16: 879 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 880 break; 881 case 2: 882 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 883 break; 884 default: 885 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 886 } 887 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 888 889 /* Setup the RIRB Address in the hdac */ 890 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 891 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 892 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 893 894 /* Setup the WP and RP */ 895 sc->rirb_rp = 0; 896 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 897 898 /* Setup the interrupt threshold */ 899 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 900 901 /* Enable Overrun and response received reporting */ 902 #if 0 903 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 904 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 905 #else 906 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 907 #endif 908 909 /* 910 * Make sure that the Host CPU cache doesn't contain any dirty 911 * cache lines that falls in the rirb. If I understood correctly, it 912 * should be sufficient to do this only once as the rirb is purely 913 * read-only from now on. 914 */ 915 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 916 BUS_DMASYNC_PREREAD); 917 } 918 919 /**************************************************************************** 920 * void hdac_corb_start(hdac_softc *) 921 * 922 * Startup the corb DMA engine 923 ****************************************************************************/ 924 static void 925 hdac_corb_start(struct hdac_softc *sc) 926 { 927 uint32_t corbctl; 928 929 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 930 corbctl |= HDAC_CORBCTL_CORBRUN; 931 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 932 } 933 934 /**************************************************************************** 935 * void hdac_rirb_start(hdac_softc *) 936 * 937 * Startup the rirb DMA engine 938 ****************************************************************************/ 939 static void 940 hdac_rirb_start(struct hdac_softc *sc) 941 { 942 uint32_t rirbctl; 943 944 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 945 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 946 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 947 } 948 949 static int 950 hdac_rirb_flush(struct hdac_softc *sc) 951 { 952 struct hdac_rirb *rirb_base, *rirb; 953 nid_t cad; 954 uint32_t resp, resp_ex; 955 uint8_t rirbwp; 956 int ret; 957 958 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 959 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 960 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 961 BUS_DMASYNC_POSTREAD); 962 963 ret = 0; 964 while (sc->rirb_rp != rirbwp) { 965 sc->rirb_rp++; 966 sc->rirb_rp %= sc->rirb_size; 967 rirb = &rirb_base[sc->rirb_rp]; 968 resp = le32toh(rirb->response); 969 resp_ex = le32toh(rirb->response_ex); 970 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 971 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 972 sc->unsolq[sc->unsolq_wp++] = resp; 973 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 974 sc->unsolq[sc->unsolq_wp++] = cad; 975 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 976 } else if (sc->codecs[cad].pending <= 0) { 977 device_printf(sc->dev, "Unexpected unsolicited " 978 "response from address %d: %08x\n", cad, resp); 979 } else { 980 sc->codecs[cad].response = resp; 981 sc->codecs[cad].pending--; 982 } 983 ret++; 984 } 985 986 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 987 BUS_DMASYNC_PREREAD); 988 return (ret); 989 } 990 991 static int 992 hdac_unsolq_flush(struct hdac_softc *sc) 993 { 994 device_t child; 995 nid_t cad; 996 uint32_t resp; 997 int ret = 0; 998 999 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 1000 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 1001 while (sc->unsolq_rp != sc->unsolq_wp) { 1002 resp = sc->unsolq[sc->unsolq_rp++]; 1003 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1004 cad = sc->unsolq[sc->unsolq_rp++]; 1005 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1006 if ((child = sc->codecs[cad].dev) != NULL && 1007 device_is_attached(child)) 1008 HDAC_UNSOL_INTR(child, resp); 1009 ret++; 1010 } 1011 sc->unsolq_st = HDAC_UNSOLQ_READY; 1012 } 1013 1014 return (ret); 1015 } 1016 1017 /**************************************************************************** 1018 * uint32_t hdac_send_command 1019 * 1020 * Wrapper function that sends only one command to a given codec 1021 ****************************************************************************/ 1022 static uint32_t 1023 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 1024 { 1025 int timeout; 1026 uint32_t *corb; 1027 1028 hdac_lockassert(sc); 1029 verb &= ~HDA_CMD_CAD_MASK; 1030 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1031 sc->codecs[cad].response = HDA_INVALID; 1032 1033 sc->codecs[cad].pending++; 1034 sc->corb_wp++; 1035 sc->corb_wp %= sc->corb_size; 1036 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1037 bus_dmamap_sync(sc->corb_dma.dma_tag, 1038 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1039 corb[sc->corb_wp] = htole32(verb); 1040 bus_dmamap_sync(sc->corb_dma.dma_tag, 1041 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1042 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1043 1044 timeout = 10000; 1045 do { 1046 if (hdac_rirb_flush(sc) == 0) 1047 DELAY(10); 1048 } while (sc->codecs[cad].pending != 0 && --timeout); 1049 1050 if (sc->codecs[cad].pending != 0) { 1051 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1052 verb, cad); 1053 sc->codecs[cad].pending = 0; 1054 } 1055 1056 if (sc->unsolq_rp != sc->unsolq_wp) 1057 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1058 return (sc->codecs[cad].response); 1059 } 1060 1061 /**************************************************************************** 1062 * Device Methods 1063 ****************************************************************************/ 1064 1065 /**************************************************************************** 1066 * int hdac_probe(device_t) 1067 * 1068 * Probe for the presence of an hdac. If none is found, check for a generic 1069 * match using the subclass of the device. 1070 ****************************************************************************/ 1071 static int 1072 hdac_probe(device_t dev) 1073 { 1074 int i, result; 1075 uint32_t model; 1076 uint16_t class, subclass; 1077 char desc[64]; 1078 1079 model = (uint32_t)pci_get_device(dev) << 16; 1080 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1081 class = pci_get_class(dev); 1082 subclass = pci_get_subclass(dev); 1083 1084 bzero(desc, sizeof(desc)); 1085 result = ENXIO; 1086 for (i = 0; i < nitems(hdac_devices); i++) { 1087 if (hdac_devices[i].model == model) { 1088 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1089 result = BUS_PROBE_DEFAULT; 1090 break; 1091 } 1092 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1093 class == PCIC_MULTIMEDIA && 1094 subclass == PCIS_MULTIMEDIA_HDA) { 1095 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1096 hdac_devices[i].desc, pci_get_device(dev)); 1097 result = BUS_PROBE_GENERIC; 1098 break; 1099 } 1100 } 1101 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1102 subclass == PCIS_MULTIMEDIA_HDA) { 1103 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1104 result = BUS_PROBE_GENERIC; 1105 } 1106 if (result != ENXIO) { 1107 strlcat(desc, " HDA Controller", sizeof(desc)); 1108 device_set_desc_copy(dev, desc); 1109 } 1110 1111 return (result); 1112 } 1113 1114 static void 1115 hdac_unsolq_task(void *context, int pending) 1116 { 1117 struct hdac_softc *sc; 1118 1119 sc = (struct hdac_softc *)context; 1120 1121 hdac_lock(sc); 1122 hdac_unsolq_flush(sc); 1123 hdac_unlock(sc); 1124 } 1125 1126 /**************************************************************************** 1127 * int hdac_attach(device_t) 1128 * 1129 * Attach the device into the kernel. Interrupts usually won't be enabled 1130 * when this function is called. Setup everything that doesn't require 1131 * interrupts and defer probing of codecs until interrupts are enabled. 1132 ****************************************************************************/ 1133 static int 1134 hdac_attach(device_t dev) 1135 { 1136 struct hdac_softc *sc; 1137 int result; 1138 int i, devid = -1; 1139 uint32_t model; 1140 uint16_t class, subclass; 1141 uint16_t vendor; 1142 uint8_t v; 1143 1144 sc = device_get_softc(dev); 1145 HDA_BOOTVERBOSE( 1146 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", 1147 pci_get_subvendor(dev), pci_get_subdevice(dev)); 1148 device_printf(dev, "HDA Driver Revision: %s\n", 1149 HDA_DRV_TEST_REV); 1150 ); 1151 1152 model = (uint32_t)pci_get_device(dev) << 16; 1153 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1154 class = pci_get_class(dev); 1155 subclass = pci_get_subclass(dev); 1156 1157 for (i = 0; i < nitems(hdac_devices); i++) { 1158 if (hdac_devices[i].model == model) { 1159 devid = i; 1160 break; 1161 } 1162 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1163 class == PCIC_MULTIMEDIA && 1164 subclass == PCIS_MULTIMEDIA_HDA) { 1165 devid = i; 1166 break; 1167 } 1168 } 1169 1170 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); 1171 sc->dev = dev; 1172 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); 1173 callout_init(&sc->poll_callout, 1); 1174 for (i = 0; i < HDAC_CODEC_MAX; i++) 1175 sc->codecs[i].dev = NULL; 1176 if (devid >= 0) { 1177 sc->quirks_on = hdac_devices[devid].quirks_on; 1178 sc->quirks_off = hdac_devices[devid].quirks_off; 1179 } else { 1180 sc->quirks_on = 0; 1181 sc->quirks_off = 0; 1182 } 1183 if (resource_int_value(device_get_name(dev), 1184 device_get_unit(dev), "msi", &i) == 0) { 1185 if (i == 0) 1186 sc->quirks_off |= HDAC_QUIRK_MSI; 1187 else { 1188 sc->quirks_on |= HDAC_QUIRK_MSI; 1189 sc->quirks_off |= ~HDAC_QUIRK_MSI; 1190 } 1191 } 1192 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); 1193 HDA_BOOTVERBOSE( 1194 device_printf(sc->dev, 1195 "Config options: on=0x%08x off=0x%08x\n", 1196 sc->quirks_on, sc->quirks_off); 1197 ); 1198 sc->poll_ival = hz; 1199 if (resource_int_value(device_get_name(dev), 1200 device_get_unit(dev), "polling", &i) == 0 && i != 0) 1201 sc->polling = 1; 1202 else 1203 sc->polling = 0; 1204 1205 pci_enable_busmaster(dev); 1206 1207 vendor = pci_get_vendor(dev); 1208 if (vendor == INTEL_VENDORID) { 1209 /* TCSEL -> TC0 */ 1210 v = pci_read_config(dev, 0x44, 1); 1211 pci_write_config(dev, 0x44, v & 0xf8, 1); 1212 HDA_BOOTHVERBOSE( 1213 device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v, 1214 pci_read_config(dev, 0x44, 1)); 1215 ); 1216 } 1217 1218 #if defined(__i386__) || defined(__amd64__) 1219 sc->flags |= HDAC_F_DMA_NOCACHE; 1220 1221 if (resource_int_value(device_get_name(dev), 1222 device_get_unit(dev), "snoop", &i) == 0 && i != 0) { 1223 #else 1224 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1225 #endif 1226 /* 1227 * Try to enable PCIe snoop to avoid messing around with 1228 * uncacheable DMA attribute. Since PCIe snoop register 1229 * config is pretty much vendor specific, there are no 1230 * general solutions on how to enable it, forcing us (even 1231 * Microsoft) to enable uncacheable or write combined DMA 1232 * by default. 1233 * 1234 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1235 */ 1236 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1237 if (hdac_pcie_snoop[i].vendor != vendor) 1238 continue; 1239 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1240 if (hdac_pcie_snoop[i].reg == 0x00) 1241 break; 1242 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1243 if ((v & hdac_pcie_snoop[i].enable) == 1244 hdac_pcie_snoop[i].enable) 1245 break; 1246 v &= hdac_pcie_snoop[i].mask; 1247 v |= hdac_pcie_snoop[i].enable; 1248 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1249 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1250 if ((v & hdac_pcie_snoop[i].enable) != 1251 hdac_pcie_snoop[i].enable) { 1252 HDA_BOOTVERBOSE( 1253 device_printf(dev, 1254 "WARNING: Failed to enable PCIe " 1255 "snoop!\n"); 1256 ); 1257 #if defined(__i386__) || defined(__amd64__) 1258 sc->flags |= HDAC_F_DMA_NOCACHE; 1259 #endif 1260 } 1261 break; 1262 } 1263 #if defined(__i386__) || defined(__amd64__) 1264 } 1265 #endif 1266 1267 HDA_BOOTHVERBOSE( 1268 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1269 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1270 "Uncacheable" : "PCIe snoop", vendor); 1271 ); 1272 1273 /* Allocate resources */ 1274 result = hdac_mem_alloc(sc); 1275 if (result != 0) 1276 goto hdac_attach_fail; 1277 result = hdac_irq_alloc(sc); 1278 if (result != 0) 1279 goto hdac_attach_fail; 1280 1281 /* Get Capabilities */ 1282 result = hdac_get_capabilities(sc); 1283 if (result != 0) 1284 goto hdac_attach_fail; 1285 1286 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1287 result = hdac_dma_alloc(sc, &sc->corb_dma, 1288 sc->corb_size * sizeof(uint32_t)); 1289 if (result != 0) 1290 goto hdac_attach_fail; 1291 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1292 sc->rirb_size * sizeof(struct hdac_rirb)); 1293 if (result != 0) 1294 goto hdac_attach_fail; 1295 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1296 M_HDAC, M_ZERO | M_WAITOK); 1297 for (i = 0; i < sc->num_ss; i++) { 1298 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1299 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1300 if (result != 0) 1301 goto hdac_attach_fail; 1302 } 1303 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1304 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1305 HDA_BOOTVERBOSE( 1306 device_printf(dev, "Failed to " 1307 "allocate DMA pos buffer " 1308 "(non-fatal)\n"); 1309 ); 1310 } else { 1311 uint64_t addr = sc->pos_dma.dma_paddr; 1312 1313 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1314 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1315 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1316 HDAC_DPLBASE_DPLBASE_DMAPBE); 1317 } 1318 } 1319 1320 result = bus_dma_tag_create( 1321 bus_get_dma_tag(sc->dev), /* parent */ 1322 HDA_DMA_ALIGNMENT, /* alignment */ 1323 0, /* boundary */ 1324 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 1325 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1326 BUS_SPACE_MAXADDR, /* highaddr */ 1327 NULL, /* filtfunc */ 1328 NULL, /* fistfuncarg */ 1329 HDA_BUFSZ_MAX, /* maxsize */ 1330 1, /* nsegments */ 1331 HDA_BUFSZ_MAX, /* maxsegsz */ 1332 0, /* flags */ 1333 NULL, /* lockfunc */ 1334 NULL, /* lockfuncarg */ 1335 &sc->chan_dmat); /* dmat */ 1336 if (result != 0) { 1337 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1338 __func__, result); 1339 goto hdac_attach_fail; 1340 } 1341 1342 /* Quiesce everything */ 1343 HDA_BOOTHVERBOSE( 1344 device_printf(dev, "Reset controller...\n"); 1345 ); 1346 hdac_reset(sc, true); 1347 1348 /* Initialize the CORB and RIRB */ 1349 hdac_corb_init(sc); 1350 hdac_rirb_init(sc); 1351 1352 /* Defer remaining of initialization until interrupts are enabled */ 1353 sc->intrhook.ich_func = hdac_attach2; 1354 sc->intrhook.ich_arg = (void *)sc; 1355 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1356 sc->intrhook.ich_func = NULL; 1357 hdac_attach2((void *)sc); 1358 } 1359 1360 return (0); 1361 1362 hdac_attach_fail: 1363 hdac_irq_free(sc); 1364 if (sc->streams != NULL) 1365 for (i = 0; i < sc->num_ss; i++) 1366 hdac_dma_free(sc, &sc->streams[i].bdl); 1367 free(sc->streams, M_HDAC); 1368 hdac_dma_free(sc, &sc->rirb_dma); 1369 hdac_dma_free(sc, &sc->corb_dma); 1370 hdac_mem_free(sc); 1371 snd_mtxfree(sc->lock); 1372 1373 return (ENXIO); 1374 } 1375 1376 static int 1377 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1378 { 1379 struct hdac_softc *sc; 1380 device_t *devlist; 1381 device_t dev; 1382 int devcount, i, err, val; 1383 1384 dev = oidp->oid_arg1; 1385 sc = device_get_softc(dev); 1386 if (sc == NULL) 1387 return (EINVAL); 1388 val = 0; 1389 err = sysctl_handle_int(oidp, &val, 0, req); 1390 if (err != 0 || req->newptr == NULL || val == 0) 1391 return (err); 1392 1393 /* XXX: Temporary. For debugging. */ 1394 if (val == 100) { 1395 hdac_suspend(dev); 1396 return (0); 1397 } else if (val == 101) { 1398 hdac_resume(dev); 1399 return (0); 1400 } 1401 1402 bus_topo_lock(); 1403 1404 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) { 1405 bus_topo_unlock(); 1406 return (err); 1407 } 1408 1409 hdac_lock(sc); 1410 for (i = 0; i < devcount; i++) 1411 HDAC_PINDUMP(devlist[i]); 1412 hdac_unlock(sc); 1413 1414 bus_topo_unlock(); 1415 1416 free(devlist, M_TEMP); 1417 return (0); 1418 } 1419 1420 static int 1421 hdac_mdata_rate(uint16_t fmt) 1422 { 1423 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1424 int rate, bits; 1425 1426 if (fmt & (1 << 14)) 1427 rate = 44100; 1428 else 1429 rate = 48000; 1430 rate *= ((fmt >> 11) & 0x07) + 1; 1431 rate /= ((fmt >> 8) & 0x07) + 1; 1432 bits = mbits[(fmt >> 4) & 0x03]; 1433 bits *= (fmt & 0x0f) + 1; 1434 return (rate * bits); 1435 } 1436 1437 static int 1438 hdac_bdata_rate(uint16_t fmt, int output) 1439 { 1440 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1441 int rate, bits; 1442 1443 rate = 48000; 1444 rate *= ((fmt >> 11) & 0x07) + 1; 1445 bits = bbits[(fmt >> 4) & 0x03]; 1446 bits *= (fmt & 0x0f) + 1; 1447 if (!output) 1448 bits = ((bits + 7) & ~0x07) + 10; 1449 return (rate * bits); 1450 } 1451 1452 static void 1453 hdac_poll_reinit(struct hdac_softc *sc) 1454 { 1455 int i, pollticks, min = 1000000; 1456 struct hdac_stream *s; 1457 1458 if (sc->polling == 0) 1459 return; 1460 if (sc->unsol_registered > 0) 1461 min = hz / 2; 1462 for (i = 0; i < sc->num_ss; i++) { 1463 s = &sc->streams[i]; 1464 if (s->running == 0) 1465 continue; 1466 pollticks = ((uint64_t)hz * s->blksz) / 1467 (hdac_mdata_rate(s->format) / 8); 1468 pollticks >>= 1; 1469 if (pollticks > hz) 1470 pollticks = hz; 1471 if (pollticks < 1) 1472 pollticks = 1; 1473 if (min > pollticks) 1474 min = pollticks; 1475 } 1476 sc->poll_ival = min; 1477 if (min == 1000000) 1478 callout_stop(&sc->poll_callout); 1479 else 1480 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1481 } 1482 1483 static int 1484 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1485 { 1486 struct hdac_softc *sc; 1487 device_t dev; 1488 uint32_t ctl; 1489 int err, val; 1490 1491 dev = oidp->oid_arg1; 1492 sc = device_get_softc(dev); 1493 if (sc == NULL) 1494 return (EINVAL); 1495 hdac_lock(sc); 1496 val = sc->polling; 1497 hdac_unlock(sc); 1498 err = sysctl_handle_int(oidp, &val, 0, req); 1499 1500 if (err != 0 || req->newptr == NULL) 1501 return (err); 1502 if (val < 0 || val > 1) 1503 return (EINVAL); 1504 1505 hdac_lock(sc); 1506 if (val != sc->polling) { 1507 if (val == 0) { 1508 callout_stop(&sc->poll_callout); 1509 hdac_unlock(sc); 1510 callout_drain(&sc->poll_callout); 1511 hdac_lock(sc); 1512 sc->polling = 0; 1513 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1514 ctl |= HDAC_INTCTL_GIE; 1515 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1516 } else { 1517 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1518 ctl &= ~HDAC_INTCTL_GIE; 1519 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1520 sc->polling = 1; 1521 hdac_poll_reinit(sc); 1522 } 1523 } 1524 hdac_unlock(sc); 1525 1526 return (err); 1527 } 1528 1529 static void 1530 hdac_attach2(void *arg) 1531 { 1532 struct hdac_softc *sc; 1533 device_t child; 1534 uint32_t vendorid, revisionid; 1535 int i; 1536 uint16_t statests; 1537 1538 sc = (struct hdac_softc *)arg; 1539 1540 hdac_lock(sc); 1541 1542 /* Remove ourselves from the config hooks */ 1543 if (sc->intrhook.ich_func != NULL) { 1544 config_intrhook_disestablish(&sc->intrhook); 1545 sc->intrhook.ich_func = NULL; 1546 } 1547 1548 HDA_BOOTHVERBOSE( 1549 device_printf(sc->dev, "Starting CORB Engine...\n"); 1550 ); 1551 hdac_corb_start(sc); 1552 HDA_BOOTHVERBOSE( 1553 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1554 ); 1555 hdac_rirb_start(sc); 1556 1557 /* 1558 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1559 * (status change) interrupts. The documentation says that we 1560 * should not make any assumptions about the state of this register 1561 * and set it explicitly. 1562 * NB: this needs to be done before the interrupt is enabled as 1563 * the handler does not expect this interrupt source. 1564 */ 1565 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1566 1567 /* 1568 * Read and clear post-reset SDI wake status. 1569 * Each set bit corresponds to a codec that came out of reset. 1570 */ 1571 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1572 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1573 1574 HDA_BOOTHVERBOSE( 1575 device_printf(sc->dev, 1576 "Enabling controller interrupt...\n"); 1577 ); 1578 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1579 HDAC_GCTL_UNSOL); 1580 if (sc->polling == 0) { 1581 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1582 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1583 } 1584 DELAY(1000); 1585 1586 HDA_BOOTHVERBOSE( 1587 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1588 ); 1589 hdac_unlock(sc); 1590 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1591 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1592 HDA_BOOTHVERBOSE( 1593 device_printf(sc->dev, 1594 "Found CODEC at address %d\n", i); 1595 ); 1596 hdac_lock(sc); 1597 vendorid = hdac_send_command(sc, i, 1598 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1599 revisionid = hdac_send_command(sc, i, 1600 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1601 hdac_unlock(sc); 1602 if (vendorid == HDA_INVALID && 1603 revisionid == HDA_INVALID) { 1604 device_printf(sc->dev, 1605 "CODEC at address %d not responding!\n", i); 1606 continue; 1607 } 1608 sc->codecs[i].vendor_id = 1609 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1610 sc->codecs[i].device_id = 1611 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1612 sc->codecs[i].revision_id = 1613 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1614 sc->codecs[i].stepping_id = 1615 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1616 child = device_add_child(sc->dev, "hdacc", -1); 1617 if (child == NULL) { 1618 device_printf(sc->dev, 1619 "Failed to add CODEC device\n"); 1620 continue; 1621 } 1622 device_set_ivars(child, (void *)(intptr_t)i); 1623 sc->codecs[i].dev = child; 1624 } 1625 } 1626 bus_generic_attach(sc->dev); 1627 1628 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1629 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1630 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1631 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1632 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1633 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1634 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1635 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1636 } 1637 1638 /**************************************************************************** 1639 * int hdac_suspend(device_t) 1640 * 1641 * Suspend and power down HDA bus and codecs. 1642 ****************************************************************************/ 1643 static int 1644 hdac_suspend(device_t dev) 1645 { 1646 struct hdac_softc *sc = device_get_softc(dev); 1647 1648 HDA_BOOTHVERBOSE( 1649 device_printf(dev, "Suspend...\n"); 1650 ); 1651 bus_generic_suspend(dev); 1652 1653 hdac_lock(sc); 1654 HDA_BOOTHVERBOSE( 1655 device_printf(dev, "Reset controller...\n"); 1656 ); 1657 callout_stop(&sc->poll_callout); 1658 hdac_reset(sc, false); 1659 hdac_unlock(sc); 1660 callout_drain(&sc->poll_callout); 1661 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1662 HDA_BOOTHVERBOSE( 1663 device_printf(dev, "Suspend done\n"); 1664 ); 1665 return (0); 1666 } 1667 1668 /**************************************************************************** 1669 * int hdac_resume(device_t) 1670 * 1671 * Powerup and restore HDA bus and codecs state. 1672 ****************************************************************************/ 1673 static int 1674 hdac_resume(device_t dev) 1675 { 1676 struct hdac_softc *sc = device_get_softc(dev); 1677 int error; 1678 1679 HDA_BOOTHVERBOSE( 1680 device_printf(dev, "Resume...\n"); 1681 ); 1682 hdac_lock(sc); 1683 1684 /* Quiesce everything */ 1685 HDA_BOOTHVERBOSE( 1686 device_printf(dev, "Reset controller...\n"); 1687 ); 1688 hdac_reset(sc, true); 1689 1690 /* Initialize the CORB and RIRB */ 1691 hdac_corb_init(sc); 1692 hdac_rirb_init(sc); 1693 1694 HDA_BOOTHVERBOSE( 1695 device_printf(dev, "Starting CORB Engine...\n"); 1696 ); 1697 hdac_corb_start(sc); 1698 HDA_BOOTHVERBOSE( 1699 device_printf(dev, "Starting RIRB Engine...\n"); 1700 ); 1701 hdac_rirb_start(sc); 1702 1703 /* 1704 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1705 * (status change) events. The documentation says that we should 1706 * not make any assumptions about the state of this register and 1707 * set it explicitly. 1708 * Also, clear HDAC_STATESTS. 1709 * NB: this needs to be done before the interrupt is enabled as 1710 * the handler does not expect this interrupt source. 1711 */ 1712 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1713 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1714 1715 HDA_BOOTHVERBOSE( 1716 device_printf(dev, "Enabling controller interrupt...\n"); 1717 ); 1718 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1719 HDAC_GCTL_UNSOL); 1720 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1721 DELAY(1000); 1722 hdac_poll_reinit(sc); 1723 hdac_unlock(sc); 1724 1725 error = bus_generic_resume(dev); 1726 HDA_BOOTHVERBOSE( 1727 device_printf(dev, "Resume done\n"); 1728 ); 1729 return (error); 1730 } 1731 1732 /**************************************************************************** 1733 * int hdac_detach(device_t) 1734 * 1735 * Detach and free up resources utilized by the hdac device. 1736 ****************************************************************************/ 1737 static int 1738 hdac_detach(device_t dev) 1739 { 1740 struct hdac_softc *sc = device_get_softc(dev); 1741 device_t *devlist; 1742 int cad, i, devcount, error; 1743 1744 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1745 return (error); 1746 for (i = 0; i < devcount; i++) { 1747 cad = (intptr_t)device_get_ivars(devlist[i]); 1748 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1749 free(devlist, M_TEMP); 1750 return (error); 1751 } 1752 sc->codecs[cad].dev = NULL; 1753 } 1754 free(devlist, M_TEMP); 1755 1756 hdac_lock(sc); 1757 hdac_reset(sc, false); 1758 hdac_unlock(sc); 1759 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1760 hdac_irq_free(sc); 1761 1762 for (i = 0; i < sc->num_ss; i++) 1763 hdac_dma_free(sc, &sc->streams[i].bdl); 1764 free(sc->streams, M_HDAC); 1765 hdac_dma_free(sc, &sc->pos_dma); 1766 hdac_dma_free(sc, &sc->rirb_dma); 1767 hdac_dma_free(sc, &sc->corb_dma); 1768 if (sc->chan_dmat != NULL) { 1769 bus_dma_tag_destroy(sc->chan_dmat); 1770 sc->chan_dmat = NULL; 1771 } 1772 hdac_mem_free(sc); 1773 snd_mtxfree(sc->lock); 1774 return (0); 1775 } 1776 1777 static bus_dma_tag_t 1778 hdac_get_dma_tag(device_t dev, device_t child) 1779 { 1780 struct hdac_softc *sc = device_get_softc(dev); 1781 1782 return (sc->chan_dmat); 1783 } 1784 1785 static int 1786 hdac_print_child(device_t dev, device_t child) 1787 { 1788 int retval; 1789 1790 retval = bus_print_child_header(dev, child); 1791 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1792 retval += bus_print_child_footer(dev, child); 1793 1794 return (retval); 1795 } 1796 1797 static int 1798 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1799 { 1800 1801 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1802 return (0); 1803 } 1804 1805 static int 1806 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1807 { 1808 struct hdac_softc *sc = device_get_softc(dev); 1809 nid_t cad = (uintptr_t)device_get_ivars(child); 1810 1811 sbuf_printf(sb, 1812 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1813 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1814 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1815 return (0); 1816 } 1817 1818 static int 1819 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1820 { 1821 struct hdac_softc *sc = device_get_softc(dev); 1822 nid_t cad = (uintptr_t)device_get_ivars(child); 1823 1824 switch (which) { 1825 case HDA_IVAR_CODEC_ID: 1826 *result = cad; 1827 break; 1828 case HDA_IVAR_VENDOR_ID: 1829 *result = sc->codecs[cad].vendor_id; 1830 break; 1831 case HDA_IVAR_DEVICE_ID: 1832 *result = sc->codecs[cad].device_id; 1833 break; 1834 case HDA_IVAR_REVISION_ID: 1835 *result = sc->codecs[cad].revision_id; 1836 break; 1837 case HDA_IVAR_STEPPING_ID: 1838 *result = sc->codecs[cad].stepping_id; 1839 break; 1840 case HDA_IVAR_SUBVENDOR_ID: 1841 *result = pci_get_subvendor(dev); 1842 break; 1843 case HDA_IVAR_SUBDEVICE_ID: 1844 *result = pci_get_subdevice(dev); 1845 break; 1846 case HDA_IVAR_DMA_NOCACHE: 1847 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1848 break; 1849 case HDA_IVAR_STRIPES_MASK: 1850 *result = (1 << (1 << sc->num_sdo)) - 1; 1851 break; 1852 default: 1853 return (ENOENT); 1854 } 1855 return (0); 1856 } 1857 1858 static struct mtx * 1859 hdac_get_mtx(device_t dev, device_t child) 1860 { 1861 struct hdac_softc *sc = device_get_softc(dev); 1862 1863 return (sc->lock); 1864 } 1865 1866 static uint32_t 1867 hdac_codec_command(device_t dev, device_t child, uint32_t verb) 1868 { 1869 1870 return (hdac_send_command(device_get_softc(dev), 1871 (intptr_t)device_get_ivars(child), verb)); 1872 } 1873 1874 static int 1875 hdac_find_stream(struct hdac_softc *sc, int dir, int stream) 1876 { 1877 int i, ss; 1878 1879 ss = -1; 1880 /* Allocate ISS/OSS first. */ 1881 if (dir == 0) { 1882 for (i = 0; i < sc->num_iss; i++) { 1883 if (sc->streams[i].stream == stream) { 1884 ss = i; 1885 break; 1886 } 1887 } 1888 } else { 1889 for (i = 0; i < sc->num_oss; i++) { 1890 if (sc->streams[i + sc->num_iss].stream == stream) { 1891 ss = i + sc->num_iss; 1892 break; 1893 } 1894 } 1895 } 1896 /* Fallback to BSS. */ 1897 if (ss == -1) { 1898 for (i = 0; i < sc->num_bss; i++) { 1899 if (sc->streams[i + sc->num_iss + sc->num_oss].stream 1900 == stream) { 1901 ss = i + sc->num_iss + sc->num_oss; 1902 break; 1903 } 1904 } 1905 } 1906 return (ss); 1907 } 1908 1909 static int 1910 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, 1911 uint32_t **dmapos) 1912 { 1913 struct hdac_softc *sc = device_get_softc(dev); 1914 nid_t cad = (uintptr_t)device_get_ivars(child); 1915 int stream, ss, bw, maxbw, prevbw; 1916 1917 /* Look for empty stream. */ 1918 ss = hdac_find_stream(sc, dir, 0); 1919 1920 /* Return if found nothing. */ 1921 if (ss < 0) 1922 return (0); 1923 1924 /* Check bus bandwidth. */ 1925 bw = hdac_bdata_rate(format, dir); 1926 if (dir == 1) { 1927 bw *= 1 << (sc->num_sdo - stripe); 1928 prevbw = sc->sdo_bw_used; 1929 maxbw = 48000 * 960 * (1 << sc->num_sdo); 1930 } else { 1931 prevbw = sc->codecs[cad].sdi_bw_used; 1932 maxbw = 48000 * 464; 1933 } 1934 HDA_BOOTHVERBOSE( 1935 device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", 1936 (bw + prevbw) / 1000, maxbw / 1000, 1937 bw + prevbw > maxbw ? " -- OVERFLOW!" : ""); 1938 ); 1939 if (bw + prevbw > maxbw) 1940 return (0); 1941 if (dir == 1) 1942 sc->sdo_bw_used += bw; 1943 else 1944 sc->codecs[cad].sdi_bw_used += bw; 1945 1946 /* Allocate stream number */ 1947 if (ss >= sc->num_iss + sc->num_oss) 1948 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1949 else if (ss >= sc->num_iss) 1950 stream = ss - sc->num_iss + 1; 1951 else 1952 stream = ss + 1; 1953 1954 sc->streams[ss].dev = child; 1955 sc->streams[ss].dir = dir; 1956 sc->streams[ss].stream = stream; 1957 sc->streams[ss].bw = bw; 1958 sc->streams[ss].format = format; 1959 sc->streams[ss].stripe = stripe; 1960 if (dmapos != NULL) { 1961 if (sc->pos_dma.dma_vaddr != NULL) 1962 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1963 else 1964 *dmapos = NULL; 1965 } 1966 return (stream); 1967 } 1968 1969 static void 1970 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1971 { 1972 struct hdac_softc *sc = device_get_softc(dev); 1973 nid_t cad = (uintptr_t)device_get_ivars(child); 1974 int ss; 1975 1976 ss = hdac_find_stream(sc, dir, stream); 1977 KASSERT(ss >= 0, 1978 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1979 if (dir == 1) 1980 sc->sdo_bw_used -= sc->streams[ss].bw; 1981 else 1982 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1983 sc->streams[ss].stream = 0; 1984 sc->streams[ss].dev = NULL; 1985 } 1986 1987 static int 1988 hdac_stream_start(device_t dev, device_t child, int dir, int stream, 1989 bus_addr_t buf, int blksz, int blkcnt) 1990 { 1991 struct hdac_softc *sc = device_get_softc(dev); 1992 struct hdac_bdle *bdle; 1993 uint64_t addr; 1994 int i, ss, off; 1995 uint32_t ctl; 1996 1997 ss = hdac_find_stream(sc, dir, stream); 1998 KASSERT(ss >= 0, 1999 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 2000 2001 addr = (uint64_t)buf; 2002 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 2003 for (i = 0; i < blkcnt; i++, bdle++) { 2004 bdle->addrl = htole32((uint32_t)addr); 2005 bdle->addrh = htole32((uint32_t)(addr >> 32)); 2006 bdle->len = htole32(blksz); 2007 bdle->ioc = htole32(1); 2008 addr += blksz; 2009 } 2010 2011 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2012 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 2013 2014 off = ss << 5; 2015 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 2016 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 2017 addr = sc->streams[ss].bdl.dma_paddr; 2018 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 2019 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 2020 2021 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 2022 if (dir) 2023 ctl |= HDAC_SDCTL2_DIR; 2024 else 2025 ctl &= ~HDAC_SDCTL2_DIR; 2026 ctl &= ~HDAC_SDCTL2_STRM_MASK; 2027 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 2028 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 2029 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 2030 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 2031 2032 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 2033 2034 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2035 ctl |= 1 << ss; 2036 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2037 2038 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 2039 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 2040 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2041 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2042 HDAC_SDCTL_RUN; 2043 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2044 2045 sc->streams[ss].blksz = blksz; 2046 sc->streams[ss].running = 1; 2047 hdac_poll_reinit(sc); 2048 return (0); 2049 } 2050 2051 static void 2052 hdac_stream_stop(device_t dev, device_t child, int dir, int stream) 2053 { 2054 struct hdac_softc *sc = device_get_softc(dev); 2055 int ss, off; 2056 uint32_t ctl; 2057 2058 ss = hdac_find_stream(sc, dir, stream); 2059 KASSERT(ss >= 0, 2060 ("Stop for not allocated stream (%d/%d)\n", dir, stream)); 2061 2062 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2063 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE); 2064 2065 off = ss << 5; 2066 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2067 ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2068 HDAC_SDCTL_RUN); 2069 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2070 2071 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2072 ctl &= ~(1 << ss); 2073 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2074 2075 sc->streams[ss].running = 0; 2076 hdac_poll_reinit(sc); 2077 } 2078 2079 static void 2080 hdac_stream_reset(device_t dev, device_t child, int dir, int stream) 2081 { 2082 struct hdac_softc *sc = device_get_softc(dev); 2083 int timeout = 1000; 2084 int to = timeout; 2085 int ss, off; 2086 uint32_t ctl; 2087 2088 ss = hdac_find_stream(sc, dir, stream); 2089 KASSERT(ss >= 0, 2090 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2091 2092 off = ss << 5; 2093 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2094 ctl |= HDAC_SDCTL_SRST; 2095 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2096 do { 2097 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2098 if (ctl & HDAC_SDCTL_SRST) 2099 break; 2100 DELAY(10); 2101 } while (--to); 2102 if (!(ctl & HDAC_SDCTL_SRST)) 2103 device_printf(dev, "Reset setting timeout\n"); 2104 ctl &= ~HDAC_SDCTL_SRST; 2105 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2106 to = timeout; 2107 do { 2108 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2109 if (!(ctl & HDAC_SDCTL_SRST)) 2110 break; 2111 DELAY(10); 2112 } while (--to); 2113 if (ctl & HDAC_SDCTL_SRST) 2114 device_printf(dev, "Reset timeout!\n"); 2115 } 2116 2117 static uint32_t 2118 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream) 2119 { 2120 struct hdac_softc *sc = device_get_softc(dev); 2121 int ss, off; 2122 2123 ss = hdac_find_stream(sc, dir, stream); 2124 KASSERT(ss >= 0, 2125 ("Reset for not allocated stream (%d/%d)\n", dir, stream)); 2126 2127 off = ss << 5; 2128 return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB)); 2129 } 2130 2131 static int 2132 hdac_unsol_alloc(device_t dev, device_t child, int tag) 2133 { 2134 struct hdac_softc *sc = device_get_softc(dev); 2135 2136 sc->unsol_registered++; 2137 hdac_poll_reinit(sc); 2138 return (tag); 2139 } 2140 2141 static void 2142 hdac_unsol_free(device_t dev, device_t child, int tag) 2143 { 2144 struct hdac_softc *sc = device_get_softc(dev); 2145 2146 sc->unsol_registered--; 2147 hdac_poll_reinit(sc); 2148 } 2149 2150 static device_method_t hdac_methods[] = { 2151 /* device interface */ 2152 DEVMETHOD(device_probe, hdac_probe), 2153 DEVMETHOD(device_attach, hdac_attach), 2154 DEVMETHOD(device_detach, hdac_detach), 2155 DEVMETHOD(device_suspend, hdac_suspend), 2156 DEVMETHOD(device_resume, hdac_resume), 2157 /* Bus interface */ 2158 DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag), 2159 DEVMETHOD(bus_print_child, hdac_print_child), 2160 DEVMETHOD(bus_child_location, hdac_child_location), 2161 DEVMETHOD(bus_child_pnpinfo, hdac_child_pnpinfo_method), 2162 DEVMETHOD(bus_read_ivar, hdac_read_ivar), 2163 DEVMETHOD(hdac_get_mtx, hdac_get_mtx), 2164 DEVMETHOD(hdac_codec_command, hdac_codec_command), 2165 DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc), 2166 DEVMETHOD(hdac_stream_free, hdac_stream_free), 2167 DEVMETHOD(hdac_stream_start, hdac_stream_start), 2168 DEVMETHOD(hdac_stream_stop, hdac_stream_stop), 2169 DEVMETHOD(hdac_stream_reset, hdac_stream_reset), 2170 DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr), 2171 DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc), 2172 DEVMETHOD(hdac_unsol_free, hdac_unsol_free), 2173 DEVMETHOD_END 2174 }; 2175 2176 static driver_t hdac_driver = { 2177 "hdac", 2178 hdac_methods, 2179 sizeof(struct hdac_softc), 2180 }; 2181 2182 DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL); 2183