/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel High Definition Audio (Controller) driver for FreeBSD.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include <dev/sound/pcm/sound.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <dev/sound/pci/hda/hdac_private.h>
#include <dev/sound/pci/hda/hdac_reg.h>
#include <dev/sound/pci/hda/hda_reg.h>
#include <dev/sound/pci/hda/hdac.h>

#define HDA_DRV_TEST_REV	"20120126_0002"

#define hdac_lock(sc)		snd_mtxlock((sc)->lock)
#define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
#define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)

#define HDAC_QUIRK_64BIT	(1 << 0)
#define HDAC_QUIRK_DMAPOS	(1 << 1)
#define HDAC_QUIRK_MSI		(1 << 2)

static const struct {
	const char *key;
	uint32_t value;
} hdac_quirks_tab[] = {
	{ "64bit", HDAC_QUIRK_64BIT },
	{ "dmapos", HDAC_QUIRK_DMAPOS },
	{ "msi", HDAC_QUIRK_MSI },
};

MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");

static const struct {
	uint32_t model;
	const char *desc;
	char quirks_on;
	char quirks_off;
} hdac_devices[] = {
	{ HDA_INTEL_OAK,	"Intel Oaktrail",	0, 0 },
	{ HDA_INTEL_CMLKLP,	"Intel Comet Lake-LP",	0, 0 },
	{ HDA_INTEL_CMLKH,	"Intel Comet Lake-H",	0, 0 },
	{ HDA_INTEL_BAY,	"Intel BayTrail",	0, 0 },
	{ HDA_INTEL_HSW1,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW2,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_HSW3,	"Intel Haswell",	0, 0 },
	{ HDA_INTEL_BDW1,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_BDW2,	"Intel Broadwell",	0, 0 },
	{ HDA_INTEL_BXTNT,	"Intel Broxton-T",	0, 0 },
	{ HDA_INTEL_CPT,	"Intel Cougar Point",	0, 0 },
	{ HDA_INTEL_PATSBURG,	"Intel Patsburg",	0, 0 },
	{ HDA_INTEL_PPT1,	"Intel Panther Point",	0, 0 },
	{ HDA_INTEL_BR,		"Intel Braswell",	0, 0 },
	{ HDA_INTEL_LPT1,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_LPT2,	"Intel Lynx Point",	0, 0 },
	{ HDA_INTEL_WCPT,	"Intel Wildcat Point",	0, 0 },
	{ HDA_INTEL_WELLS1,	"Intel Wellsburg",	0, 0 },
	{ HDA_INTEL_WELLS2,	"Intel Wellsburg",	0, 0 },
	{ HDA_INTEL_LPTLP1,	"Intel Lynx Point-LP",	0, 0 },
	{ HDA_INTEL_LPTLP2,	"Intel Lynx Point-LP",	0, 0 },
	{ HDA_INTEL_SRPTLP,	"Intel Sunrise Point-LP",	0, 0 },
	{ HDA_INTEL_KBLKLP,	"Intel Kaby Lake-LP",	0, 0 },
	{ HDA_INTEL_SRPT,	"Intel Sunrise Point",	0, 0 },
	{ HDA_INTEL_KBLK,	"Intel Kaby Lake",	0, 0 },
	{ HDA_INTEL_KBLKH,	"Intel Kaby Lake-H",	0, 0 },
	{ HDA_INTEL_CFLK,	"Intel Coffee Lake",	0, 0 },
	{ HDA_INTEL_CMLKS,	"Intel Comet Lake-S",	0, 0 },
	{ HDA_INTEL_CNLK,	"Intel Cannon Lake",	0, 0 },
	{ HDA_INTEL_ICLK,	"Intel Ice Lake",	0, 0 },
	{ HDA_INTEL_CMLKLP,	"Intel Comet Lake-LP",	0, 0 },
	{ HDA_INTEL_CMLKH,	"Intel Comet Lake-H",	0, 0 },
	{ HDA_INTEL_TGLK,	"Intel Tiger Lake",	0, 0 },
	{ HDA_INTEL_GMLK,	"Intel Gemini Lake",	0, 0 },
	{ HDA_INTEL_ALLK,	"Intel Alder Lake",	0, 0 },
	{ HDA_INTEL_ALLKM,	"Intel Alder Lake-M",	0, 0 },
	{ HDA_INTEL_ALLKN,	"Intel Alder Lake-N",	0, 0 },
	{ HDA_INTEL_ALLKP1,	"Intel Alder Lake-P",	0, 0 },
	{ HDA_INTEL_ALLKP2,	"Intel Alder Lake-P",	0, 0 },
	{ HDA_INTEL_ALLKPS,	"Intel Alder Lake-PS",	0, 0 },
	{ HDA_INTEL_RPTLK1,	"Intel Raptor Lake-P",	0, 0 },
	{ HDA_INTEL_RPTLK2,	"Intel Raptor Lake-P",	0, 0 },
	{ HDA_INTEL_82801F,	"Intel 82801F",		0, 0 },
	{ HDA_INTEL_63XXESB,	"Intel 631x/632xESB",	0, 0 },
	{ HDA_INTEL_82801G,	"Intel 82801G",		0, 0 },
	{ HDA_INTEL_82801H,	"Intel 82801H",		0, 0 },
	{ HDA_INTEL_82801I,	"Intel 82801I",		0, 0 },
	{ HDA_INTEL_JLK,	"Intel Jasper Lake",	0, 0 },
	{ HDA_INTEL_82801JI,	"Intel 82801JI",	0, 0 },
	{ HDA_INTEL_82801JD,	"Intel 82801JD",	0, 0 },
	{ HDA_INTEL_PCH,	"Intel Ibex Peak",	0, 0 },
	{ HDA_INTEL_PCH2,	"Intel Ibex Peak",	0, 0 },
	{ HDA_INTEL_ELLK,	"Intel Elkhart Lake",	0, 0 },
	{ HDA_INTEL_JLK2,	"Intel Jasper Lake",	0, 0 },
	{ HDA_INTEL_BXTNP,	"Intel Broxton-P",	0, 0 },
	{ HDA_INTEL_SCH,	"Intel SCH",		0, 0 },
	{ HDA_NVIDIA_MCP51,	"NVIDIA MCP51",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP55,	"NVIDIA MCP55",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_MCP61_1,	"NVIDIA MCP61",		0, 0 },
	{ HDA_NVIDIA_MCP61_2,	"NVIDIA MCP61",		0, 0 },
	{ HDA_NVIDIA_MCP65_1,	"NVIDIA MCP65",		0, 0 },
	{ HDA_NVIDIA_MCP65_2,	"NVIDIA MCP65",		0, 0 },
	{ HDA_NVIDIA_MCP67_1,	"NVIDIA MCP67",		0, 0 },
	{ HDA_NVIDIA_MCP67_2,	"NVIDIA MCP67",		0, 0 },
	{ HDA_NVIDIA_MCP73_1,	"NVIDIA MCP73",		0, 0 },
	{ HDA_NVIDIA_MCP73_2,	"NVIDIA MCP73",		0, 0 },
	{ HDA_NVIDIA_MCP78_1,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_2,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_3,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP78_4,	"NVIDIA MCP78",		0, HDAC_QUIRK_64BIT },
	{ HDA_NVIDIA_MCP79_1,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_2,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_3,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP79_4,	"NVIDIA MCP79",		0, 0 },
	{ HDA_NVIDIA_MCP89_1,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_2,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_3,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_MCP89_4,	"NVIDIA MCP89",		0, 0 },
	{ HDA_NVIDIA_0BE2,	"NVIDIA (0x0be2)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE3,	"NVIDIA (0x0be3)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_0BE4,	"NVIDIA (0x0be4)",	0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT100,	"NVIDIA GT100",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT104,	"NVIDIA GT104",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT106,	"NVIDIA GT106",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT108,	"NVIDIA GT108",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GT116,	"NVIDIA GT116",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF119,	"NVIDIA GF119",		0, 0 },
	{ HDA_NVIDIA_GF110_1,	"NVIDIA GF110",		0, HDAC_QUIRK_MSI },
	{ HDA_NVIDIA_GF110_2,	"NVIDIA GF110",		0, HDAC_QUIRK_MSI },
	{ HDA_ATI_SB450,	"ATI SB450",		0, 0 },
	{ HDA_ATI_SB600,	"ATI SB600",		0, 0 },
	{ HDA_ATI_RS600,	"ATI RS600",		0, 0 },
	{ HDA_ATI_RS690,	"ATI RS690",		0, 0 },
	{ HDA_ATI_RS780,	"ATI RS780",		0, 0 },
	{ HDA_ATI_RS880,	"ATI RS880",		0, 0 },
	{ HDA_ATI_R600,		"ATI R600",		0, 0 },
	{ HDA_ATI_RV610,	"ATI RV610",		0, 0 },
	{ HDA_ATI_RV620,	"ATI RV620",		0, 0 },
	{ HDA_ATI_RV630,	"ATI RV630",		0, 0 },
	{ HDA_ATI_RV635,	"ATI RV635",		0, 0 },
	{ HDA_ATI_RV710,	"ATI RV710",		0, 0 },
	{ HDA_ATI_RV730,	"ATI RV730",		0, 0 },
	{ HDA_ATI_RV740,	"ATI RV740",		0, 0 },
	{ HDA_ATI_RV770,	"ATI RV770",		0, 0 },
	{ HDA_ATI_RV810,	"ATI RV810",		0, 0 },
	{ HDA_ATI_RV830,	"ATI RV830",		0, 0 },
	{ HDA_ATI_RV840,	"ATI RV840",		0, 0 },
	{ HDA_ATI_RV870,	"ATI RV870",		0, 0 },
	{ HDA_ATI_RV910,	"ATI RV910",		0, 0 },
	{ HDA_ATI_RV930,	"ATI RV930",		0, 0 },
	{ HDA_ATI_RV940,	"ATI RV940",		0, 0 },
	{ HDA_ATI_RV970,	"ATI RV970",		0, 0 },
	{ HDA_ATI_R1000,	"ATI R1000",		0, 0 },
	{ HDA_ATI_KABINI,	"ATI Kabini",		0, 0 },
	{ HDA_ATI_TRINITY,	"ATI Trinity",		0, 0 },
	{ HDA_AMD_X370,		"AMD X370",		0, 0 },
	{ HDA_AMD_X570,		"AMD X570",		0, 0 },
	{ HDA_AMD_STONEY,	"AMD Stoney",		0, 0 },
	{ HDA_AMD_RAVEN,	"AMD Raven",		0, 0 },
	{ HDA_AMD_HUDSON2,	"AMD Hudson-2",		0, 0 },
	{ HDA_RDC_M3010,	"RDC M3010",		0, 0 },
	{ HDA_VIA_VT82XX,	"VIA VT8251/8237A",	0, 0 },
	{ HDA_VMWARE,		"VMware",		0, 0 },
	{ HDA_SIS_966,		"SiS 966/968",		0, 0 },
	{ HDA_ULI_M5461,	"ULI M5461",		0, 0 },
	{ HDA_CREATIVE_SB1570,	"Creative SB Audigy FX",	0, HDAC_QUIRK_64BIT },
	/* Unknown */
	{ HDA_INTEL_ALL,	"Intel",		0, 0 },
	{ HDA_NVIDIA_ALL,	"NVIDIA",		0, 0 },
	{ HDA_ATI_ALL,		"ATI",			0, 0 },
	{ HDA_AMD_ALL,		"AMD",			0, 0 },
	{ HDA_CREATIVE_ALL,	"Creative",		0, 0 },
	{ HDA_VIA_ALL,		"VIA",			0, 0 },
	{ HDA_VMWARE_ALL,	"VMware",		0, 0 },
	{ HDA_SIS_ALL,		"SiS",			0, 0 },
	{ HDA_ULI_ALL,		"ULI",			0, 0 },
};

static const struct {
	uint16_t vendor;
	uint8_t reg;
	uint8_t mask;
	uint8_t enable;
} hdac_pcie_snoop[] = {
	{ INTEL_VENDORID,	0x00, 0x00, 0x00 },
	{ ATI_VENDORID,		0x42, 0xf8, 0x02 },
	{ AMD_VENDORID,		0x42, 0xf8, 0x02 },
	{ NVIDIA_VENDORID,	0x4e, 0xf0, 0x0f },
};
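
/*
 * The quirk masks in hdac_devices are defaults only: hdac_attach() merges
 * them with the per-unit "msi" hint and with the "config" hint parsed by
 * hdac_config_fetch() below, and consults hdac_pcie_snoop there to enable
 * vendor-specific PCIe snooping instead of falling back to uncacheable DMA.
 */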

/****************************************************************************
 * Function prototypes
 ****************************************************************************/
static void	hdac_intr_handler(void *);
static int	hdac_reset(struct hdac_softc *, bool);
static int	hdac_get_capabilities(struct hdac_softc *);
static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
static int	hdac_dma_alloc(struct hdac_softc *,
		    struct hdac_dma *, bus_size_t);
static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
static int	hdac_mem_alloc(struct hdac_softc *);
static void	hdac_mem_free(struct hdac_softc *);
static int	hdac_irq_alloc(struct hdac_softc *);
static void	hdac_irq_free(struct hdac_softc *);
static void	hdac_corb_init(struct hdac_softc *);
static void	hdac_rirb_init(struct hdac_softc *);
static void	hdac_corb_start(struct hdac_softc *);
static void	hdac_rirb_start(struct hdac_softc *);

static void	hdac_attach2(void *);

static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);

static int	hdac_probe(device_t);
static int	hdac_attach(device_t);
static int	hdac_detach(device_t);
static int	hdac_suspend(device_t);
static int	hdac_resume(device_t);

static int	hdac_rirb_flush(struct hdac_softc *sc);
static int	hdac_unsolq_flush(struct hdac_softc *sc);

/* This function is surely going to make its way into an upper level someday. */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv,
			    hdac_quirks_tab[k].key, len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}
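
/*
 * For illustration: the string parsed above comes from the per-unit
 * "config" hint, so a line such as
 *
 *	hint.hdac.0.config="dmapos,nomsi"
 *
 * in /boot/device.hints would set HDAC_QUIRK_DMAPOS in *on and
 * HDAC_QUIRK_MSI in *off for unit 0.  Valid tokens are the keys of
 * hdac_quirks_tab; a "no" prefix moves the quirk to the "off" mask.
 */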

static void
hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
{
	device_t dev;
	uint8_t rirbsts;
	int i;

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		/*
		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
		 * we will need to check and clear HDAC_STATESTS.
		 * That event is used to report codec status changes such as
		 * a reset or a wake-up event.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
		 * will need to check and clear HDAC_CORBSTS_CMEI in
		 * HDAC_CORBSTS.
		 * That event is used to report CORB memory errors.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
		 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
		 * HDAC_RIRBSTS.
		 * That event is used to report response FIFO overruns.
		 */

		/* Get as many responses as we can. */
		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
			HDAC_WRITE_1(&sc->mem,
			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
			hdac_rirb_flush(sc);
			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
		}
		if (sc->unsolq_rp != sc->unsolq_wp)
			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	}

	if (intsts & HDAC_INTSTS_SIS_MASK) {
		for (i = 0; i < sc->num_ss; i++) {
			if ((intsts & (1 << i)) == 0)
				continue;
			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
			if ((dev = sc->streams[i].dev) != NULL) {
				HDAC_STREAM_INTR(dev,
				    sc->streams[i].dir, sc->streams[i].stream);
			}
		}
	}
}

/****************************************************************************
 * void hdac_intr_handler(void *)
 *
 * Interrupt handler. Processes interrupts received from the hdac.
 ****************************************************************************/
static void
hdac_intr_handler(void *context)
{
	struct hdac_softc *sc;
	uint32_t intsts;

	sc = (struct hdac_softc *)context;

	/*
	 * Loop until HDAC_INTSTS_GIS is clear.
	 * It is plausible that hardware interrupts a host only when GIS goes
	 * from zero to one.  GIS is formed by OR-ing multiple hardware
	 * statuses, so it's possible that a previously cleared status gets set
	 * again while another status has not been cleared yet.  Thus, there
	 * will be no new interrupt as GIS always stayed set.  If we don't
	 * re-examine GIS then we can leave it set and never get an interrupt
	 * again.
	 */
	hdac_lock(sc);
	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) {
		hdac_one_intr(sc, intsts);
		intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
	}
	hdac_unlock(sc);
}

static void
hdac_poll_callback(void *arg)
{
	struct hdac_softc *sc = arg;

	if (sc == NULL)
		return;

	hdac_lock(sc);
	if (sc->polling == 0) {
		hdac_unlock(sc);
		return;
	}
	callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
	hdac_unlock(sc);

	hdac_intr_handler(sc);
}

/****************************************************************************
 * int hdac_reset(hdac_softc *, bool)
 *
 * Reset the hdac to a quiescent and known state.
 ****************************************************************************/
static int
hdac_reset(struct hdac_softc *sc, bool wakeup)
{
	uint32_t gctl;
	int count, i;

	/*
	 * Stop all stream DMA engines.
	 */
	for (i = 0; i < sc->num_iss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_oss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
	for (i = 0; i < sc->num_bss; i++)
		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);

	/*
	 * Stop Control DMA engines.
	 */
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);

	/*
	 * Reset DMA position buffer.
	 */
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);

	/*
	 * Reset the controller. The reset must remain asserted for
	 * a minimum of 100us.
	 */
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (!(gctl & HDAC_GCTL_CRST))
			break;
		DELAY(10);
	} while (--count);
	if (gctl & HDAC_GCTL_CRST) {
		device_printf(sc->dev, "Unable to put hdac in reset\n");
		return (ENXIO);
	}

	/* If wakeup is not requested, leave the controller in the reset state. */
	if (!wakeup)
		return (0);

	DELAY(100);
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (gctl & HDAC_GCTL_CRST)
			break;
		DELAY(10);
	} while (--count);
	if (!(gctl & HDAC_GCTL_CRST)) {
		device_printf(sc->dev, "Device stuck in reset\n");
		return (ENXIO);
	}

	/*
	 * Wait for codecs to finish their own reset sequence. The delay here
	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
	 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established. We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a DMA region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
 ****************************************************************************/
static int
hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
{
	bus_size_t roundsz;
	int result;

	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
	bzero(dma, sizeof(*dma));

	/*
	 * Create a DMA tag
	 */
	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    roundsz,				/* maxsize */
	    1,					/* nsegments */
	    roundsz,				/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &dma->dma_tag);			/* dmat */
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	/*
	 * Allocate DMA memory
	 */
	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
		BUS_DMA_COHERENT),
	    &dma->dma_map);
	if (result != 0) {
		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	dma->dma_size = roundsz;

	/*
	 * Map the memory
	 */
	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
	if (result != 0 || dma->dma_paddr == 0) {
		if (result == 0)
			result = ENOMEM;
		device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
		    __func__, result);
		goto hdac_dma_alloc_fail;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
	);

	return (0);

hdac_dma_alloc_fail:
	hdac_dma_free(sc, dma);

	return (result);
}

/****************************************************************************
 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
 *
 * Free a struct hdac_dma that has been previously allocated via the
 * hdac_dma_alloc function.
 ****************************************************************************/
static void
hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
{
	if (dma->dma_paddr != 0) {
		/* Flush caches */
		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		dma->dma_paddr = 0;
	}
	if (dma->dma_vaddr != NULL) {
		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
		dma->dma_vaddr = NULL;
	}
	if (dma->dma_tag != NULL) {
		bus_dma_tag_destroy(dma->dma_tag);
		dma->dma_tag = NULL;
	}
	dma->dma_size = 0;
}

/****************************************************************************
 * int hdac_mem_alloc(struct hdac_softc *)
 *
 * Allocate all the bus resources necessary to speak with the physical
 * controller.
 ****************************************************************************/
static int
hdac_mem_alloc(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	mem->mem_rid = PCIR_BAR(0);
	mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &mem->mem_rid, RF_ACTIVE);
	if (mem->mem_res == NULL) {
		device_printf(sc->dev,
		    "%s: Unable to allocate memory resource\n", __func__);
		return (ENOMEM);
	}
	mem->mem_tag = rman_get_bustag(mem->mem_res);
	mem->mem_handle = rman_get_bushandle(mem->mem_res);

	return (0);
}

/****************************************************************************
 * void hdac_mem_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_mem_alloc.
 ****************************************************************************/
static void
hdac_mem_free(struct hdac_softc *sc)
{
	struct hdac_mem *mem;

	mem = &sc->mem;
	if (mem->mem_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
		    mem->mem_res);
	mem->mem_res = NULL;
}

/****************************************************************************
 * int hdac_irq_alloc(struct hdac_softc *)
 *
 * Allocate and setup the resources necessary for interrupt handling.
 ****************************************************************************/
static int
hdac_irq_alloc(struct hdac_softc *sc)
{
	struct hdac_irq *irq;
	int result;

	irq = &sc->irq;
	irq->irq_rid = 0x0;

	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
	    (result = pci_msi_count(sc->dev)) == 1 &&
	    pci_alloc_msi(sc->dev, &result) == 0)
		irq->irq_rid = 0x1;

	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (irq->irq_res == NULL) {
		device_printf(sc->dev, "%s: Unable to allocate irq\n",
		    __func__);
		goto hdac_irq_alloc_fail;
	}
	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
	if (result != 0) {
		device_printf(sc->dev,
		    "%s: Unable to setup interrupt handler (%d)\n",
		    __func__, result);
		goto hdac_irq_alloc_fail;
	}

	return (0);

hdac_irq_alloc_fail:
	hdac_irq_free(sc);

	return (ENXIO);
}

/****************************************************************************
 * void hdac_irq_free(struct hdac_softc *)
 *
 * Free up resources previously allocated by hdac_irq_alloc.
 ****************************************************************************/
static void
hdac_irq_free(struct hdac_softc *sc)
{
	struct hdac_irq *irq;

	irq = &sc->irq;
	if (irq->irq_res != NULL && irq->irq_handle != NULL)
		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
	if (irq->irq_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
		    irq->irq_res);
	if (irq->irq_rid == 0x1)
		pci_release_msi(sc->dev);
	irq->irq_handle = NULL;
	irq->irq_res = NULL;
	irq->irq_rid = 0x0;
}

/****************************************************************************
 * void hdac_corb_init(struct hdac_softc *)
 *
 * Initialize the corb registers for operations but do not start it up yet.
 * The CORB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_corb_init(struct hdac_softc *sc)
{
	uint8_t corbsize;
	uint64_t corbpaddr;

	/* Setup the CORB size. */
	switch (sc->corb_size) {
	case 256:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
		break;
	case 16:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
		break;
	case 2:
		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
		break;
	default:
		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);

	/* Setup the CORB Address in the hdac */
	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));

	/* Set the WP and RP */
	sc->corb_wp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
	/*
	 * The HDA specification indicates that the CORBRPRST bit will always
	 * read as zero. Unfortunately, it seems that at least the 82801G
	 * doesn't reset the bit to zero, which stalls the corb engine.
	 * Manually reset the bit to zero before continuing.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);

	/* Enable CORB error reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
#endif
}

/****************************************************************************
 * void hdac_rirb_init(struct hdac_softc *)
 *
 * Initialize the rirb registers for operations but do not start it up yet.
 * The RIRB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_rirb_init(struct hdac_softc *sc)
{
	uint8_t rirbsize;
	uint64_t rirbpaddr;

	/* Setup the RIRB size. */
	switch (sc->rirb_size) {
	case 256:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		break;
	case 16:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
		break;
	case 2:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
		break;
	default:
		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);

	/* Setup the RIRB Address in the hdac */
	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));

	/* Setup the WP and RP */
	sc->rirb_rp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);

	/* Setup the interrupt threshold */
	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);

	/* Enable Overrun and response received reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
#else
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
#endif

	/*
	 * Make sure that the Host CPU cache doesn't contain any dirty
	 * cache lines that fall within the rirb. If I understood correctly,
	 * it should be sufficient to do this only once as the rirb is purely
	 * read-only from now on.
	 */
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
}

/****************************************************************************
 * void hdac_corb_start(hdac_softc *)
 *
 * Startup the corb DMA engine
 ****************************************************************************/
static void
hdac_corb_start(struct hdac_softc *sc)
{
	uint32_t corbctl;

	corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
	corbctl |= HDAC_CORBCTL_CORBRUN;
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
}

/****************************************************************************
 * void hdac_rirb_start(hdac_softc *)
 *
 * Startup the rirb DMA engine
 ****************************************************************************/
static void
hdac_rirb_start(struct hdac_softc *sc)
{
	uint32_t rirbctl;

	rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
	rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
}

static int
hdac_rirb_flush(struct hdac_softc *sc)
{
	struct hdac_rirb *rirb_base, *rirb;
	nid_t cad;
	uint32_t resp, resp_ex;
	uint8_t rirbwp;
	int ret;

	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	ret = 0;
	while (sc->rirb_rp != rirbwp) {
		sc->rirb_rp++;
		sc->rirb_rp %= sc->rirb_size;
		rirb = &rirb_base[sc->rirb_rp];
		resp = le32toh(rirb->response);
		resp_ex = le32toh(rirb->response_ex);
		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
			sc->unsolq[sc->unsolq_wp++] = resp;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
			sc->unsolq[sc->unsolq_wp++] = cad;
			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
		} else if (sc->codecs[cad].pending <= 0) {
			device_printf(sc->dev, "Unexpected unsolicited "
			    "response from address %d: %08x\n", cad, resp);
		} else {
			sc->codecs[cad].response = resp;
			sc->codecs[cad].pending--;
		}
		ret++;
	}

	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
	    BUS_DMASYNC_PREREAD);
	return (ret);
}
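
/*
 * Unsolicited responses are not delivered from here: hdac_rirb_flush()
 * queues them as (response, codec address) pairs in sc->unsolq, and
 * hdac_unsolq_flush() below hands them to the codec driver from the
 * unsolq taskqueue task, outside of interrupt context.
 */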

static int
hdac_unsolq_flush(struct hdac_softc *sc)
{
	device_t child;
	nid_t cad;
	uint32_t resp;
	int ret = 0;

	if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
		sc->unsolq_st = HDAC_UNSOLQ_BUSY;
		while (sc->unsolq_rp != sc->unsolq_wp) {
			resp = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			cad = sc->unsolq[sc->unsolq_rp++];
			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
			if ((child = sc->codecs[cad].dev) != NULL &&
			    device_is_attached(child))
				HDAC_UNSOL_INTR(child, resp);
			ret++;
		}
		sc->unsolq_st = HDAC_UNSOLQ_READY;
	}

	return (ret);
}

/****************************************************************************
 * uint32_t hdac_send_command
 *
 * Wrapper function that sends only one command to a given codec
 ****************************************************************************/
static uint32_t
hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
{
	int timeout;
	uint32_t *corb;

	hdac_lockassert(sc);
	verb &= ~HDA_CMD_CAD_MASK;
	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
	sc->codecs[cad].response = HDA_INVALID;

	sc->codecs[cad].pending++;
	sc->corb_wp++;
	sc->corb_wp %= sc->corb_size;
	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
	corb[sc->corb_wp] = htole32(verb);
	bus_dmamap_sync(sc->corb_dma.dma_tag,
	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);

	timeout = 10000;
	do {
		if (hdac_rirb_flush(sc) == 0)
			DELAY(10);
	} while (sc->codecs[cad].pending != 0 && --timeout);

	if (sc->codecs[cad].pending != 0) {
		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
		    verb, cad);
		sc->codecs[cad].pending = 0;
	}

	if (sc->unsolq_rp != sc->unsolq_wp)
		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
	return (sc->codecs[cad].response);
}
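
/*
 * Typical usage, as in hdac_attach2() and hdac_codec_command() below: with
 * the controller lock held, a codec parameter is read with
 *
 *	resp = hdac_send_command(sc, cad,
 *	    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
 *
 * The codec address is merged into the verb here, so callers pass it
 * separately; HDA_INVALID is returned if the codec does not answer within
 * the polling timeout.
 */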

/****************************************************************************
 * Device Methods
 ****************************************************************************/

/****************************************************************************
 * int hdac_probe(device_t)
 *
 * Probe for the presence of an hdac. If none is found, check for a generic
 * match using the subclass of the device.
 ****************************************************************************/
static int
hdac_probe(device_t dev)
{
	int i, result;
	uint32_t model;
	uint16_t class, subclass;
	char desc[64];

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	bzero(desc, sizeof(desc));
	result = ENXIO;
	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
			result = BUS_PROBE_DEFAULT;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			snprintf(desc, sizeof(desc), "%s (0x%04x)",
			    hdac_devices[i].desc, pci_get_device(dev));
			result = BUS_PROBE_GENERIC;
			break;
		}
	}
	if (result == ENXIO && class == PCIC_MULTIMEDIA &&
	    subclass == PCIS_MULTIMEDIA_HDA) {
		snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
		result = BUS_PROBE_GENERIC;
	}
	if (result != ENXIO)
		device_set_descf(dev, "%s HDA Controller", desc);

	return (result);
}

static void
hdac_unsolq_task(void *context, int pending)
{
	struct hdac_softc *sc;

	sc = (struct hdac_softc *)context;

	hdac_lock(sc);
	hdac_unsolq_flush(sc);
	hdac_unlock(sc);
}

/****************************************************************************
 * int hdac_attach(device_t)
 *
 * Attach the device into the kernel. Interrupts usually won't be enabled
 * when this function is called. Setup everything that doesn't require
 * interrupts and defer probing of codecs until interrupts are enabled.
 ****************************************************************************/
static int
hdac_attach(device_t dev)
{
	struct hdac_softc *sc;
	int result;
	int i, devid = -1;
	uint32_t model;
	uint16_t class, subclass;
	uint16_t vendor;
	uint8_t v;

	sc = device_get_softc(dev);
	HDA_BOOTVERBOSE(
		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
		    pci_get_subvendor(dev), pci_get_subdevice(dev));
		device_printf(dev, "HDA Driver Revision: %s\n",
		    HDA_DRV_TEST_REV);
	);

	model = (uint32_t)pci_get_device(dev) << 16;
	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
	class = pci_get_class(dev);
	subclass = pci_get_subclass(dev);

	for (i = 0; i < nitems(hdac_devices); i++) {
		if (hdac_devices[i].model == model) {
			devid = i;
			break;
		}
		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
		    class == PCIC_MULTIMEDIA &&
		    subclass == PCIS_MULTIMEDIA_HDA) {
			devid = i;
			break;
		}
	}

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
	sc->dev = dev;
	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
	callout_init(&sc->poll_callout, 1);
	for (i = 0; i < HDAC_CODEC_MAX; i++)
		sc->codecs[i].dev = NULL;
	if (devid >= 0) {
		sc->quirks_on = hdac_devices[devid].quirks_on;
		sc->quirks_off = hdac_devices[devid].quirks_off;
	} else {
		sc->quirks_on = 0;
		sc->quirks_off = 0;
	}
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "msi", &i) == 0) {
		if (i == 0)
			sc->quirks_off |= HDAC_QUIRK_MSI;
		else {
			sc->quirks_on |= HDAC_QUIRK_MSI;
			sc->quirks_off &= ~HDAC_QUIRK_MSI;
		}
	}
	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
	HDA_BOOTVERBOSE(
		device_printf(sc->dev,
		    "Config options: on=0x%08x off=0x%08x\n",
		    sc->quirks_on, sc->quirks_off);
	);
	sc->poll_ival = hz;
	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
		sc->polling = 1;
	else
		sc->polling = 0;

	pci_enable_busmaster(dev);

	vendor = pci_get_vendor(dev);
	if (vendor == INTEL_VENDORID) {
		/* TCSEL -> TC0 */
		v = pci_read_config(dev, 0x44, 1);
		pci_write_config(dev, 0x44, v & 0xf8, 1);
		HDA_BOOTHVERBOSE(
			device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
			    pci_read_config(dev, 0x44, 1));
		);
	}

#if defined(__i386__) || defined(__amd64__)
	sc->flags |= HDAC_F_DMA_NOCACHE;

	if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
#else
	sc->flags &= ~HDAC_F_DMA_NOCACHE;
#endif
		/*
		 * Try to enable PCIe snoop to avoid messing around with
		 * uncacheable DMA attribute. Since PCIe snoop register
		 * config is pretty much vendor specific, there are no
		 * general solutions on how to enable it, forcing us (even
		 * Microsoft) to enable uncacheable or write combined DMA
		 * by default.
		 *
		 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
		 */
		for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
			if (hdac_pcie_snoop[i].vendor != vendor)
				continue;
			sc->flags &= ~HDAC_F_DMA_NOCACHE;
			if (hdac_pcie_snoop[i].reg == 0x00)
				break;
			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
			if ((v & hdac_pcie_snoop[i].enable) ==
			    hdac_pcie_snoop[i].enable)
				break;
			v &= hdac_pcie_snoop[i].mask;
			v |= hdac_pcie_snoop[i].enable;
			pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
			if ((v & hdac_pcie_snoop[i].enable) !=
			    hdac_pcie_snoop[i].enable) {
				HDA_BOOTVERBOSE(
					device_printf(dev,
					    "WARNING: Failed to enable PCIe "
					    "snoop!\n");
				);
#if defined(__i386__) || defined(__amd64__)
				sc->flags |= HDAC_F_DMA_NOCACHE;
#endif
			}
			break;
		}
#if defined(__i386__) || defined(__amd64__)
	}
#endif

	HDA_BOOTHVERBOSE(
		device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
		    (sc->flags & HDAC_F_DMA_NOCACHE) ?
		    "Uncacheable" : "PCIe snoop", vendor);
	);

	/* Allocate resources */
	result = hdac_mem_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Get Capabilities */
	result = hdac_get_capabilities(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Allocate CORB, RIRB, POS and BDLs dma memory */
	result = hdac_dma_alloc(sc, &sc->corb_dma,
	    sc->corb_size * sizeof(uint32_t));
	if (result != 0)
		goto hdac_attach_fail;
	result = hdac_dma_alloc(sc, &sc->rirb_dma,
	    sc->rirb_size * sizeof(struct hdac_rirb));
	if (result != 0)
		goto hdac_attach_fail;
	sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
	    M_HDAC, M_ZERO | M_WAITOK);
	for (i = 0; i < sc->num_ss; i++) {
		result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
		    sizeof(struct hdac_bdle) * HDA_BDL_MAX);
		if (result != 0)
			goto hdac_attach_fail;
	}
	if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
		if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
			HDA_BOOTVERBOSE(
				device_printf(dev, "Failed to "
				    "allocate DMA pos buffer "
				    "(non-fatal)\n");
			);
		} else {
			uint64_t addr = sc->pos_dma.dma_paddr;

			HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
			HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
			    (addr & HDAC_DPLBASE_DPLBASE_MASK) |
			    HDAC_DPLBASE_DPLBASE_DMAPBE);
		}
	}

	result = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    HDA_DMA_ALIGNMENT,			/* alignment */
	    0,					/* boundary */
	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL,				/* filtfunc */
	    NULL,				/* filtfuncarg */
	    HDA_BUFSZ_MAX,			/* maxsize */
	    1,					/* nsegments */
	    HDA_BUFSZ_MAX,			/* maxsegsz */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockfuncarg */
	    &sc->chan_dmat);			/* dmat */
	if (result != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
		    __func__, result);
		goto hdac_attach_fail;
	}

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	result = hdac_irq_alloc(sc);
	if (result != 0)
		goto hdac_attach_fail;

	/* Defer the rest of the initialization until interrupts are enabled. */
	sc->intrhook.ich_func = hdac_attach2;
	sc->intrhook.ich_arg = (void *)sc;
	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
		sc->intrhook.ich_func = NULL;
		hdac_attach2((void *)sc);
	}

	return (0);

hdac_attach_fail:
	hdac_irq_free(sc);
	if (sc->streams != NULL)
		for (i = 0; i < sc->num_ss; i++)
			hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	hdac_mem_free(sc);
	snd_mtxfree(sc->lock);

	return (ENXIO);
}

static int
sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t *devlist;
	device_t dev;
	int devcount, i, err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	val = 0;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL || val == 0)
		return (err);

	/* XXX: Temporary. For debugging. */
	if (val == 100) {
		hdac_suspend(dev);
		return (0);
	} else if (val == 101) {
		hdac_resume(dev);
		return (0);
	}

	bus_topo_lock();

	if ((err = device_get_children(dev, &devlist, &devcount)) != 0) {
		bus_topo_unlock();
		return (err);
	}

	hdac_lock(sc);
	for (i = 0; i < devcount; i++)
		HDAC_PINDUMP(devlist[i]);
	hdac_unlock(sc);

	bus_topo_unlock();

	free(devlist, M_TEMP);
	return (0);
}

static int
hdac_mdata_rate(uint16_t fmt)
{
	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
	int rate, bits;

	if (fmt & (1 << 14))
		rate = 44100;
	else
		rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	rate /= ((fmt >> 8) & 0x07) + 1;
	bits = mbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	return (rate * bits);
}

static int
hdac_bdata_rate(uint16_t fmt, int output)
{
	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
	int rate, bits;

	rate = 48000;
	rate *= ((fmt >> 11) & 0x07) + 1;
	bits = bbits[(fmt >> 4) & 0x03];
	bits *= (fmt & 0x0f) + 1;
	if (!output)
		bits = ((bits + 7) & ~0x07) + 10;
	return (rate * bits);
}
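
/*
 * Worked example (illustrative values): a 48 kHz, 16-bit, 2-channel stream
 * is described by fmt = 0x0011 (48 kHz base, no multiplier or divisor,
 * BITS field 1 = 16 bits, CHAN field 1 = 2 channels), so hdac_mdata_rate()
 * returns 48000 * 16 * 2 = 1536000 bits/s.  hdac_poll_reinit() below uses
 * this figure to derive a polling interval from the block size, while
 * hdac_bdata_rate() computes the per-stream link bandwidth that is checked
 * in hdac_stream_alloc().
 */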

static void
hdac_poll_reinit(struct hdac_softc *sc)
{
	int i, pollticks, min = 1000000;
	struct hdac_stream *s;

	if (sc->polling == 0)
		return;
	if (sc->unsol_registered > 0)
		min = hz / 2;
	for (i = 0; i < sc->num_ss; i++) {
		s = &sc->streams[i];
		if (s->running == 0)
			continue;
		pollticks = ((uint64_t)hz * s->blksz) /
		    (hdac_mdata_rate(s->format) / 8);
		pollticks >>= 1;
		if (pollticks > hz)
			pollticks = hz;
		if (pollticks < 1)
			pollticks = 1;
		if (min > pollticks)
			min = pollticks;
	}
	sc->poll_ival = min;
	if (min == 1000000)
		callout_stop(&sc->poll_callout);
	else
		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
}

static int
sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
{
	struct hdac_softc *sc;
	device_t dev;
	uint32_t ctl;
	int err, val;

	dev = oidp->oid_arg1;
	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);
	hdac_lock(sc);
	val = sc->polling;
	hdac_unlock(sc);
	err = sysctl_handle_int(oidp, &val, 0, req);

	if (err != 0 || req->newptr == NULL)
		return (err);
	if (val < 0 || val > 1)
		return (EINVAL);

	hdac_lock(sc);
	if (val != sc->polling) {
		if (val == 0) {
			callout_stop(&sc->poll_callout);
			hdac_unlock(sc);
			callout_drain(&sc->poll_callout);
			hdac_lock(sc);
			sc->polling = 0;
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl |= HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
		} else {
			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
			ctl &= ~HDAC_INTCTL_GIE;
			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
			sc->polling = 1;
			hdac_poll_reinit(sc);
		}
	}
	hdac_unlock(sc);

	return (err);
}

static void
hdac_attach2(void *arg)
{
	struct hdac_softc *sc;
	device_t child;
	uint32_t vendorid, revisionid;
	int i;
	uint16_t statests;

	sc = (struct hdac_softc *)arg;

	hdac_lock(sc);

	/* Remove ourselves from the config hooks */
	if (sc->intrhook.ich_func != NULL) {
		config_intrhook_disestablish(&sc->intrhook);
		sc->intrhook.ich_func = NULL;
	}

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) interrupts. The documentation says that we
	 * should not make any assumptions about the state of this register
	 * and set it explicitly.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);

	/*
	 * Read and clear post-reset SDI wake status.
	 * Each set bit corresponds to a codec that came out of reset.
	 */
	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev,
		    "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	if (sc->polling == 0) {
		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	}
	DELAY(1000);

	HDA_BOOTHVERBOSE(
		device_printf(sc->dev, "Scanning HDA codecs ...\n");
	);
	hdac_unlock(sc);
	for (i = 0; i < HDAC_CODEC_MAX; i++) {
		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
			HDA_BOOTHVERBOSE(
				device_printf(sc->dev,
				    "Found CODEC at address %d\n", i);
			);
			hdac_lock(sc);
			vendorid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
			revisionid = hdac_send_command(sc, i,
			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
			hdac_unlock(sc);
			if (vendorid == HDA_INVALID &&
			    revisionid == HDA_INVALID) {
				device_printf(sc->dev,
				    "CODEC at address %d not responding!\n", i);
				continue;
			}
			sc->codecs[i].vendor_id =
			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
			sc->codecs[i].device_id =
			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
			sc->codecs[i].revision_id =
			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
			sc->codecs[i].stepping_id =
			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
			child = device_add_child(sc->dev, "hdacc", -1);
			if (child == NULL) {
				device_printf(sc->dev,
				    "Failed to add CODEC device\n");
				continue;
			}
			device_set_ivars(child, (void *)(intptr_t)i);
			sc->codecs[i].dev = child;
		}
	}
	bus_generic_attach(sc->dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
	    "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
}

/****************************************************************************
 * int hdac_suspend(device_t)
 *
 * Suspend and power down HDA bus and codecs.
 ****************************************************************************/
static int
hdac_suspend(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend...\n");
	);
	bus_generic_suspend(dev);

	hdac_lock(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	callout_stop(&sc->poll_callout);
	hdac_reset(sc, false);
	hdac_unlock(sc);
	callout_drain(&sc->poll_callout);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Suspend done\n");
	);
	return (0);
}

/****************************************************************************
 * int hdac_resume(device_t)
 *
 * Power up and restore the HDA bus and codec state.
 ****************************************************************************/
static int
hdac_resume(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int error;

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume...\n");
	);
	hdac_lock(sc);

	/* Quiesce everything */
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Reset controller...\n");
	);
	hdac_reset(sc, true);

	/* Initialize the CORB and RIRB */
	hdac_corb_init(sc);
	hdac_rirb_init(sc);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting CORB Engine...\n");
	);
	hdac_corb_start(sc);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Starting RIRB Engine...\n");
	);
	hdac_rirb_start(sc);

	/*
	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
	 * (status change) events. The documentation says that we should
	 * not make any assumptions about the state of this register and
	 * set it explicitly.
	 * Also, clear HDAC_STATESTS.
	 * NB: this needs to be done before the interrupt is enabled as
	 * the handler does not expect this interrupt source.
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	DELAY(1000);
	hdac_poll_reinit(sc);
	hdac_unlock(sc);

	error = bus_generic_resume(dev);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume done\n");
	);
	return (error);
}
1734 ****************************************************************************/ 1735 static int 1736 hdac_detach(device_t dev) 1737 { 1738 struct hdac_softc *sc = device_get_softc(dev); 1739 device_t *devlist; 1740 int cad, i, devcount, error; 1741 1742 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1743 return (error); 1744 for (i = 0; i < devcount; i++) { 1745 cad = (intptr_t)device_get_ivars(devlist[i]); 1746 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1747 free(devlist, M_TEMP); 1748 return (error); 1749 } 1750 sc->codecs[cad].dev = NULL; 1751 } 1752 free(devlist, M_TEMP); 1753 1754 hdac_lock(sc); 1755 hdac_reset(sc, false); 1756 hdac_unlock(sc); 1757 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1758 hdac_irq_free(sc); 1759 1760 for (i = 0; i < sc->num_ss; i++) 1761 hdac_dma_free(sc, &sc->streams[i].bdl); 1762 free(sc->streams, M_HDAC); 1763 hdac_dma_free(sc, &sc->pos_dma); 1764 hdac_dma_free(sc, &sc->rirb_dma); 1765 hdac_dma_free(sc, &sc->corb_dma); 1766 if (sc->chan_dmat != NULL) { 1767 bus_dma_tag_destroy(sc->chan_dmat); 1768 sc->chan_dmat = NULL; 1769 } 1770 hdac_mem_free(sc); 1771 snd_mtxfree(sc->lock); 1772 return (0); 1773 } 1774 1775 static bus_dma_tag_t 1776 hdac_get_dma_tag(device_t dev, device_t child) 1777 { 1778 struct hdac_softc *sc = device_get_softc(dev); 1779 1780 return (sc->chan_dmat); 1781 } 1782 1783 static int 1784 hdac_print_child(device_t dev, device_t child) 1785 { 1786 int retval; 1787 1788 retval = bus_print_child_header(dev, child); 1789 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1790 retval += bus_print_child_footer(dev, child); 1791 1792 return (retval); 1793 } 1794 1795 static int 1796 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1797 { 1798 1799 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1800 return (0); 1801 } 1802 1803 static int 1804 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1805 { 1806 struct hdac_softc *sc = device_get_softc(dev); 1807 nid_t cad = (uintptr_t)device_get_ivars(child); 1808 1809 sbuf_printf(sb, 1810 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1811 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1812 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1813 return (0); 1814 } 1815 1816 static int 1817 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1818 { 1819 struct hdac_softc *sc = device_get_softc(dev); 1820 nid_t cad = (uintptr_t)device_get_ivars(child); 1821 1822 switch (which) { 1823 case HDA_IVAR_CODEC_ID: 1824 *result = cad; 1825 break; 1826 case HDA_IVAR_VENDOR_ID: 1827 *result = sc->codecs[cad].vendor_id; 1828 break; 1829 case HDA_IVAR_DEVICE_ID: 1830 *result = sc->codecs[cad].device_id; 1831 break; 1832 case HDA_IVAR_REVISION_ID: 1833 *result = sc->codecs[cad].revision_id; 1834 break; 1835 case HDA_IVAR_STEPPING_ID: 1836 *result = sc->codecs[cad].stepping_id; 1837 break; 1838 case HDA_IVAR_SUBVENDOR_ID: 1839 *result = pci_get_subvendor(dev); 1840 break; 1841 case HDA_IVAR_SUBDEVICE_ID: 1842 *result = pci_get_subdevice(dev); 1843 break; 1844 case HDA_IVAR_DMA_NOCACHE: 1845 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1846 break; 1847 case HDA_IVAR_STRIPES_MASK: 1848 *result = (1 << (1 << sc->num_sdo)) - 1; 1849 break; 1850 default: 1851 return (ENOENT); 1852 } 1853 return (0); 1854 } 1855 1856 static struct mtx * 1857 hdac_get_mtx(device_t dev, device_t child) 1858 { 1859 struct hdac_softc *sc = device_get_softc(dev); 
static struct mtx *
hdac_get_mtx(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->lock);
}

static uint32_t
hdac_codec_command(device_t dev, device_t child, uint32_t verb)
{

	return (hdac_send_command(device_get_softc(dev),
	    (intptr_t)device_get_ivars(child), verb));
}

static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
	int i, ss;

	ss = -1;
	/* Allocate ISS/OSS first. */
	if (dir == 0) {
		for (i = 0; i < sc->num_iss; i++) {
			if (sc->streams[i].stream == stream) {
				ss = i;
				break;
			}
		}
	} else {
		for (i = 0; i < sc->num_oss; i++) {
			if (sc->streams[i + sc->num_iss].stream == stream) {
				ss = i + sc->num_iss;
				break;
			}
		}
	}
	/* Fallback to BSS. */
	if (ss == -1) {
		for (i = 0; i < sc->num_bss; i++) {
			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
			    == stream) {
				ss = i + sc->num_iss + sc->num_oss;
				break;
			}
		}
	}
	return (ss);
}
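/*
 * Stream slot bookkeeping: sc->streams[] holds the input stream slots (ISS)
 * first, then the output slots (OSS), then the bidirectional slots (BSS).
 * hdac_find_stream() above maps a (direction, stream tag) pair onto that
 * layout, with tag 0 meaning a free slot.  hdac_stream_alloc() below picks a
 * free slot, checks the link bandwidth budget (shared SDO budget for output,
 * per-codec SDI budget for input) and converts the slot index into the
 * stream tag programmed into the hardware: 1..num_iss for ISS, 1..num_oss
 * for OSS, and tags counting down from 15 for BSS.
 */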
: ""); 1936 ); 1937 if (bw + prevbw > maxbw) 1938 return (0); 1939 if (dir == 1) 1940 sc->sdo_bw_used += bw; 1941 else 1942 sc->codecs[cad].sdi_bw_used += bw; 1943 1944 /* Allocate stream number */ 1945 if (ss >= sc->num_iss + sc->num_oss) 1946 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1947 else if (ss >= sc->num_iss) 1948 stream = ss - sc->num_iss + 1; 1949 else 1950 stream = ss + 1; 1951 1952 sc->streams[ss].dev = child; 1953 sc->streams[ss].dir = dir; 1954 sc->streams[ss].stream = stream; 1955 sc->streams[ss].bw = bw; 1956 sc->streams[ss].format = format; 1957 sc->streams[ss].stripe = stripe; 1958 if (dmapos != NULL) { 1959 if (sc->pos_dma.dma_vaddr != NULL) 1960 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1961 else 1962 *dmapos = NULL; 1963 } 1964 return (stream); 1965 } 1966 1967 static void 1968 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1969 { 1970 struct hdac_softc *sc = device_get_softc(dev); 1971 nid_t cad = (uintptr_t)device_get_ivars(child); 1972 int ss; 1973 1974 ss = hdac_find_stream(sc, dir, stream); 1975 KASSERT(ss >= 0, 1976 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1977 if (dir == 1) 1978 sc->sdo_bw_used -= sc->streams[ss].bw; 1979 else 1980 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1981 sc->streams[ss].stream = 0; 1982 sc->streams[ss].dev = NULL; 1983 } 1984 1985 static int 1986 hdac_stream_start(device_t dev, device_t child, int dir, int stream, 1987 bus_addr_t buf, int blksz, int blkcnt) 1988 { 1989 struct hdac_softc *sc = device_get_softc(dev); 1990 struct hdac_bdle *bdle; 1991 uint64_t addr; 1992 int i, ss, off; 1993 uint32_t ctl; 1994 1995 ss = hdac_find_stream(sc, dir, stream); 1996 KASSERT(ss >= 0, 1997 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 1998 1999 addr = (uint64_t)buf; 2000 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 2001 for (i = 0; i < blkcnt; i++, bdle++) { 2002 bdle->addrl = htole32((uint32_t)addr); 2003 bdle->addrh = htole32((uint32_t)(addr >> 32)); 2004 bdle->len = htole32(blksz); 2005 bdle->ioc = htole32(1); 2006 addr += blksz; 2007 } 2008 2009 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2010 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 2011 2012 off = ss << 5; 2013 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 2014 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 2015 addr = sc->streams[ss].bdl.dma_paddr; 2016 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 2017 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 2018 2019 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 2020 if (dir) 2021 ctl |= HDAC_SDCTL2_DIR; 2022 else 2023 ctl &= ~HDAC_SDCTL2_DIR; 2024 ctl &= ~HDAC_SDCTL2_STRM_MASK; 2025 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 2026 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 2027 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 2028 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 2029 2030 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 2031 2032 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2033 ctl |= 1 << ss; 2034 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2035 2036 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 2037 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 2038 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2039 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2040 HDAC_SDCTL_RUN; 2041 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2042 2043 sc->streams[ss].blksz = blksz; 2044 sc->streams[ss].running = 1; 2045 hdac_poll_reinit(sc); 2046 return (0); 2047 } 
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}
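/*
 * Newbus glue: device and bus methods for the HDA controller, plus the
 * HDAC-specific methods consumed by the codec children attached at each
 * codec address (cad).
 */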
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location,	hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo,	hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);