1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 #define hdac_lock(sc) snd_mtxlock((sc)->lock) 55 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) 56 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) 57 58 #define HDAC_QUIRK_64BIT (1 << 0) 59 #define HDAC_QUIRK_DMAPOS (1 << 1) 60 #define HDAC_QUIRK_MSI (1 << 2) 61 62 static const struct { 63 const char *key; 64 uint32_t value; 65 } hdac_quirks_tab[] = { 66 { "64bit", HDAC_QUIRK_64BIT }, 67 { "dmapos", HDAC_QUIRK_DMAPOS }, 68 { "msi", HDAC_QUIRK_MSI }, 69 }; 70 71 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 72 73 static const struct { 74 uint32_t model; 75 const char *desc; 76 char quirks_on; 77 char quirks_off; 78 } hdac_devices[] = { 79 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 80 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 81 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 82 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 83 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 84 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 85 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 87 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 88 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 89 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 90 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 91 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 92 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 93 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, 94 { 
HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 95 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 96 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 97 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 98 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 99 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 100 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 101 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 102 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 103 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 104 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 105 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 106 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 107 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 108 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 109 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 110 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 111 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 112 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 113 { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 }, 114 { HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 }, 115 { HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 }, 116 { HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 }, 117 { HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 }, 118 { HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 }, 119 { HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 }, 120 { HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 }, 121 { HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 }, 122 { HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 }, 123 { HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 }, 124 { HDA_INTEL_LNLP, "Intel Lunar Lake-P", 0, 0 }, 125 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 126 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 127 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 128 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 129 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 130 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 131 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 132 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 133 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 134 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 135 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 136 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 137 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 138 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 139 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 140 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 141 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 142 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 143 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 144 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 145 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 146 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 147 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 148 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 149 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 150 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 151 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 152 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 153 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 154 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 155 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 156 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 157 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 158 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 159 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 160 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 161 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 162 
{ HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 163 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 164 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 165 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 166 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 167 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 168 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 169 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 170 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 171 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 172 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 173 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 174 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 175 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 176 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 177 { HDA_ATI_RS880, "ATI RS880", 0, 0 }, 178 { HDA_ATI_R600, "ATI R600", 0, 0 }, 179 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 180 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 181 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 182 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 183 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 184 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 185 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 186 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 187 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 188 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 189 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 190 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 191 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 192 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 193 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 194 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 195 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 196 { HDA_ATI_OLAND, "ATI Oland", 0, 0 }, 197 { HDA_ATI_KABINI, "ATI Kabini", 0, 0 }, 198 { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 }, 199 { HDA_AMD_X370, "AMD X370", 0, 0 }, 200 { HDA_AMD_X570, "AMD X570", 0, 0 }, 201 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 202 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 203 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 204 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 205 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 206 { HDA_VMWARE, "VMware", 0, 0 }, 207 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 208 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 209 { HDA_CREATIVE_SB1570, "Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT }, 210 /* Unknown */ 211 { HDA_INTEL_ALL, "Intel", 0, 0 }, 212 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 213 { HDA_ATI_ALL, "ATI", 0, 0 }, 214 { HDA_AMD_ALL, "AMD", 0, 0 }, 215 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 216 { HDA_VIA_ALL, "VIA", 0, 0 }, 217 { HDA_VMWARE_ALL, "VMware", 0, 0 }, 218 { HDA_SIS_ALL, "SiS", 0, 0 }, 219 { HDA_ULI_ALL, "ULI", 0, 0 }, 220 }; 221 222 static const struct { 223 uint16_t vendor; 224 uint8_t reg; 225 uint8_t mask; 226 uint8_t enable; 227 } hdac_pcie_snoop[] = { 228 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 229 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 230 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 231 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 232 }; 233 234 /**************************************************************************** 235 * Function prototypes 236 ****************************************************************************/ 237 static void hdac_intr_handler(void *); 238 static int hdac_reset(struct hdac_softc *, bool); 239 static int hdac_get_capabilities(struct hdac_softc *); 240 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 241 static int hdac_dma_alloc(struct hdac_softc *, 242 struct hdac_dma *, bus_size_t); 243 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); 244 static int hdac_mem_alloc(struct hdac_softc *); 245 static void 
hdac_mem_free(struct hdac_softc *); 246 static int hdac_irq_alloc(struct hdac_softc *); 247 static void hdac_irq_free(struct hdac_softc *); 248 static void hdac_corb_init(struct hdac_softc *); 249 static void hdac_rirb_init(struct hdac_softc *); 250 static void hdac_corb_start(struct hdac_softc *); 251 static void hdac_rirb_start(struct hdac_softc *); 252 253 static void hdac_attach2(void *); 254 255 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 256 257 static int hdac_probe(device_t); 258 static int hdac_attach(device_t); 259 static int hdac_detach(device_t); 260 static int hdac_suspend(device_t); 261 static int hdac_resume(device_t); 262 263 static int hdac_rirb_flush(struct hdac_softc *sc); 264 static int hdac_unsolq_flush(struct hdac_softc *sc); 265 266 /* This function surely going to make its way into upper level someday. */ 267 static void 268 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 269 { 270 const char *res = NULL; 271 int i = 0, j, k, len, inv; 272 273 if (resource_string_value(device_get_name(sc->dev), 274 device_get_unit(sc->dev), "config", &res) != 0) 275 return; 276 if (!(res != NULL && strlen(res) > 0)) 277 return; 278 HDA_BOOTVERBOSE( 279 device_printf(sc->dev, "Config options:"); 280 ); 281 for (;;) { 282 while (res[i] != '\0' && 283 (res[i] == ',' || isspace(res[i]) != 0)) 284 i++; 285 if (res[i] == '\0') { 286 HDA_BOOTVERBOSE( 287 printf("\n"); 288 ); 289 return; 290 } 291 j = i; 292 while (res[j] != '\0' && 293 !(res[j] == ',' || isspace(res[j]) != 0)) 294 j++; 295 len = j - i; 296 if (len > 2 && strncmp(res + i, "no", 2) == 0) 297 inv = 2; 298 else 299 inv = 0; 300 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 301 if (strncmp(res + i + inv, 302 hdac_quirks_tab[k].key, len - inv) != 0) 303 continue; 304 if (len - inv != strlen(hdac_quirks_tab[k].key)) 305 continue; 306 HDA_BOOTVERBOSE( 307 printf(" %s%s", (inv != 0) ? "no" : "", 308 hdac_quirks_tab[k].key); 309 ); 310 if (inv == 0) { 311 *on |= hdac_quirks_tab[k].value; 312 *off &= ~hdac_quirks_tab[k].value; 313 } else if (inv != 0) { 314 *off |= hdac_quirks_tab[k].value; 315 *on &= ~hdac_quirks_tab[k].value; 316 } 317 break; 318 } 319 i = j; 320 } 321 } 322 323 static void 324 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 325 { 326 device_t dev; 327 uint8_t rirbsts; 328 int i; 329 330 /* Was this a controller interrupt? */ 331 if (intsts & HDAC_INTSTS_CIS) { 332 /* 333 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then 334 * we will need to check and clear HDAC_STATESTS. 335 * That event is used to report codec status changes such as 336 * a reset or a wake-up event. 337 */ 338 /* 339 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we 340 * will need to check and clear HDAC_CORBSTS_CMEI in 341 * HDAC_CORBSTS. 342 * That event is used to report CORB memory errors. 343 */ 344 /* 345 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we 346 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in 347 * HDAC_RIRBSTS. 348 * That event is used to report response FIFO overruns. 
349 */ 350 351 /* Get as many responses that we can */ 352 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 353 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 354 HDAC_WRITE_1(&sc->mem, 355 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 356 hdac_rirb_flush(sc); 357 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 358 } 359 if (sc->unsolq_rp != sc->unsolq_wp) 360 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 361 } 362 363 if (intsts & HDAC_INTSTS_SIS_MASK) { 364 for (i = 0; i < sc->num_ss; i++) { 365 if ((intsts & (1 << i)) == 0) 366 continue; 367 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 368 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 369 if ((dev = sc->streams[i].dev) != NULL) { 370 HDAC_STREAM_INTR(dev, 371 sc->streams[i].dir, sc->streams[i].stream); 372 } 373 } 374 } 375 } 376 377 /**************************************************************************** 378 * void hdac_intr_handler(void *) 379 * 380 * Interrupt handler. Processes interrupts received from the hdac. 381 ****************************************************************************/ 382 static void 383 hdac_intr_handler(void *context) 384 { 385 struct hdac_softc *sc; 386 uint32_t intsts; 387 388 sc = (struct hdac_softc *)context; 389 390 /* 391 * Loop until HDAC_INTSTS_GIS gets clear. 392 * It is plausible that hardware interrupts a host only when GIS goes 393 * from zero to one. GIS is formed by OR-ing multiple hardware 394 * statuses, so it's possible that a previously cleared status gets set 395 * again while another status has not been cleared yet. Thus, there 396 * will be no new interrupt as GIS always stayed set. If we don't 397 * re-examine GIS then we can leave it set and never get an interrupt 398 * again. 399 */ 400 hdac_lock(sc); 401 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 402 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) { 403 hdac_one_intr(sc, intsts); 404 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 405 } 406 hdac_unlock(sc); 407 } 408 409 static void 410 hdac_poll_callback(void *arg) 411 { 412 struct hdac_softc *sc = arg; 413 414 if (sc == NULL) 415 return; 416 417 hdac_lock(sc); 418 if (sc->polling == 0) { 419 hdac_unlock(sc); 420 return; 421 } 422 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 423 hdac_unlock(sc); 424 425 hdac_intr_handler(sc); 426 } 427 428 /**************************************************************************** 429 * int hdac_reset(hdac_softc *, bool) 430 * 431 * Reset the hdac to a quiescent and known state. 432 ****************************************************************************/ 433 static int 434 hdac_reset(struct hdac_softc *sc, bool wakeup) 435 { 436 uint32_t gctl; 437 int count, i; 438 439 /* 440 * Stop all Streams DMA engine 441 */ 442 for (i = 0; i < sc->num_iss; i++) 443 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 444 for (i = 0; i < sc->num_oss; i++) 445 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 446 for (i = 0; i < sc->num_bss; i++) 447 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 448 449 /* 450 * Stop Control DMA engines. 451 */ 452 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 453 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 454 455 /* 456 * Reset DMA position buffer. 457 */ 458 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 459 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 460 461 /* 462 * Reset the controller. The reset must remain asserted for 463 * a minimum of 100us. 
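 * Each polling loop below allows up to 100ms (10000 iterations of
 * DELAY(10)) for the CRST bit to reflect the requested state; the 100us
 * assertion time itself is provided by the DELAY(100) before the bit is
 * set again.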
464 */ 465 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 466 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 467 count = 10000; 468 do { 469 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 470 if (!(gctl & HDAC_GCTL_CRST)) 471 break; 472 DELAY(10); 473 } while (--count); 474 if (gctl & HDAC_GCTL_CRST) { 475 device_printf(sc->dev, "Unable to put hdac in reset\n"); 476 return (ENXIO); 477 } 478 479 /* If wakeup is not requested - leave the controller in reset state. */ 480 if (!wakeup) 481 return (0); 482 483 DELAY(100); 484 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 485 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 486 count = 10000; 487 do { 488 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 489 if (gctl & HDAC_GCTL_CRST) 490 break; 491 DELAY(10); 492 } while (--count); 493 if (!(gctl & HDAC_GCTL_CRST)) { 494 device_printf(sc->dev, "Device stuck in reset\n"); 495 return (ENXIO); 496 } 497 498 /* 499 * Wait for codecs to finish their own reset sequence. The delay here 500 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 501 */ 502 DELAY(1000); 503 504 return (0); 505 } 506 507 /**************************************************************************** 508 * int hdac_get_capabilities(struct hdac_softc *); 509 * 510 * Retreive the general capabilities of the hdac; 511 * Number of Input Streams 512 * Number of Output Streams 513 * Number of bidirectional Streams 514 * 64bit ready 515 * CORB and RIRB sizes 516 ****************************************************************************/ 517 static int 518 hdac_get_capabilities(struct hdac_softc *sc) 519 { 520 uint16_t gcap; 521 uint8_t corbsize, rirbsize; 522 523 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 524 sc->num_iss = HDAC_GCAP_ISS(gcap); 525 sc->num_oss = HDAC_GCAP_OSS(gcap); 526 sc->num_bss = HDAC_GCAP_BSS(gcap); 527 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 528 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 529 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 530 if (sc->quirks_on & HDAC_QUIRK_64BIT) 531 sc->support_64bit = 1; 532 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 533 sc->support_64bit = 0; 534 535 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 536 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 537 HDAC_CORBSIZE_CORBSZCAP_256) 538 sc->corb_size = 256; 539 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 540 HDAC_CORBSIZE_CORBSZCAP_16) 541 sc->corb_size = 16; 542 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 543 HDAC_CORBSIZE_CORBSZCAP_2) 544 sc->corb_size = 2; 545 else { 546 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 547 __func__, corbsize); 548 return (ENXIO); 549 } 550 551 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 552 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 553 HDAC_RIRBSIZE_RIRBSZCAP_256) 554 sc->rirb_size = 256; 555 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 556 HDAC_RIRBSIZE_RIRBSZCAP_16) 557 sc->rirb_size = 16; 558 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 559 HDAC_RIRBSIZE_RIRBSZCAP_2) 560 sc->rirb_size = 2; 561 else { 562 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 563 __func__, rirbsize); 564 return (ENXIO); 565 } 566 567 HDA_BOOTVERBOSE( 568 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 569 "NSDO %d%s, CORB %d, RIRB %d\n", 570 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 571 sc->support_64bit ? 
", 64bit" : "", 572 sc->corb_size, sc->rirb_size); 573 ); 574 575 return (0); 576 } 577 578 /**************************************************************************** 579 * void hdac_dma_cb 580 * 581 * This function is called by bus_dmamap_load when the mapping has been 582 * established. We just record the physical address of the mapping into 583 * the struct hdac_dma passed in. 584 ****************************************************************************/ 585 static void 586 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 587 { 588 struct hdac_dma *dma; 589 590 if (error == 0) { 591 dma = (struct hdac_dma *)callback_arg; 592 dma->dma_paddr = segs[0].ds_addr; 593 } 594 } 595 596 /**************************************************************************** 597 * int hdac_dma_alloc 598 * 599 * This function allocate and setup a dma region (struct hdac_dma). 600 * It must be freed by a corresponding hdac_dma_free. 601 ****************************************************************************/ 602 static int 603 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 604 { 605 bus_size_t roundsz; 606 int result; 607 608 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 609 bzero(dma, sizeof(*dma)); 610 611 /* 612 * Create a DMA tag 613 */ 614 result = bus_dma_tag_create( 615 bus_get_dma_tag(sc->dev), /* parent */ 616 HDA_DMA_ALIGNMENT, /* alignment */ 617 0, /* boundary */ 618 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 619 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 620 BUS_SPACE_MAXADDR, /* highaddr */ 621 NULL, /* filtfunc */ 622 NULL, /* fistfuncarg */ 623 roundsz, /* maxsize */ 624 1, /* nsegments */ 625 roundsz, /* maxsegsz */ 626 0, /* flags */ 627 NULL, /* lockfunc */ 628 NULL, /* lockfuncarg */ 629 &dma->dma_tag); /* dmat */ 630 if (result != 0) { 631 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 632 __func__, result); 633 goto hdac_dma_alloc_fail; 634 } 635 636 /* 637 * Allocate DMA memory 638 */ 639 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 640 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 641 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 642 BUS_DMA_COHERENT), 643 &dma->dma_map); 644 if (result != 0) { 645 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 646 __func__, result); 647 goto hdac_dma_alloc_fail; 648 } 649 650 dma->dma_size = roundsz; 651 652 /* 653 * Map the memory 654 */ 655 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 656 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 657 if (result != 0 || dma->dma_paddr == 0) { 658 if (result == 0) 659 result = ENOMEM; 660 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 661 __func__, result); 662 goto hdac_dma_alloc_fail; 663 } 664 665 HDA_BOOTHVERBOSE( 666 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 667 __func__, (uintmax_t)size, (uintmax_t)roundsz); 668 ); 669 670 return (0); 671 672 hdac_dma_alloc_fail: 673 hdac_dma_free(sc, dma); 674 675 return (result); 676 } 677 678 /**************************************************************************** 679 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 680 * 681 * Free a struct hdac_dma that has been previously allocated via the 682 * hdac_dma_alloc function. 
683 ****************************************************************************/ 684 static void 685 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 686 { 687 if (dma->dma_paddr != 0) { 688 /* Flush caches */ 689 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 690 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 691 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 692 dma->dma_paddr = 0; 693 } 694 if (dma->dma_vaddr != NULL) { 695 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 696 dma->dma_vaddr = NULL; 697 } 698 if (dma->dma_tag != NULL) { 699 bus_dma_tag_destroy(dma->dma_tag); 700 dma->dma_tag = NULL; 701 } 702 dma->dma_size = 0; 703 } 704 705 /**************************************************************************** 706 * int hdac_mem_alloc(struct hdac_softc *) 707 * 708 * Allocate all the bus resources necessary to speak with the physical 709 * controller. 710 ****************************************************************************/ 711 static int 712 hdac_mem_alloc(struct hdac_softc *sc) 713 { 714 struct hdac_mem *mem; 715 716 mem = &sc->mem; 717 mem->mem_rid = PCIR_BAR(0); 718 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 719 &mem->mem_rid, RF_ACTIVE); 720 if (mem->mem_res == NULL) { 721 device_printf(sc->dev, 722 "%s: Unable to allocate memory resource\n", __func__); 723 return (ENOMEM); 724 } 725 mem->mem_tag = rman_get_bustag(mem->mem_res); 726 mem->mem_handle = rman_get_bushandle(mem->mem_res); 727 728 return (0); 729 } 730 731 /**************************************************************************** 732 * void hdac_mem_free(struct hdac_softc *) 733 * 734 * Free up resources previously allocated by hdac_mem_alloc. 735 ****************************************************************************/ 736 static void 737 hdac_mem_free(struct hdac_softc *sc) 738 { 739 struct hdac_mem *mem; 740 741 mem = &sc->mem; 742 if (mem->mem_res != NULL) 743 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 744 mem->mem_res); 745 mem->mem_res = NULL; 746 } 747 748 /**************************************************************************** 749 * int hdac_irq_alloc(struct hdac_softc *) 750 * 751 * Allocate and setup the resources necessary for interrupt handling. 752 ****************************************************************************/ 753 static int 754 hdac_irq_alloc(struct hdac_softc *sc) 755 { 756 struct hdac_irq *irq; 757 int result; 758 759 irq = &sc->irq; 760 irq->irq_rid = 0x0; 761 762 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 763 (result = pci_msi_count(sc->dev)) == 1 && 764 pci_alloc_msi(sc->dev, &result) == 0) 765 irq->irq_rid = 0x1; 766 767 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 768 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 769 if (irq->irq_res == NULL) { 770 device_printf(sc->dev, "%s: Unable to allocate irq\n", 771 __func__); 772 goto hdac_irq_alloc_fail; 773 } 774 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 775 NULL, hdac_intr_handler, sc, &irq->irq_handle); 776 if (result != 0) { 777 device_printf(sc->dev, 778 "%s: Unable to setup interrupt handler (%d)\n", 779 __func__, result); 780 goto hdac_irq_alloc_fail; 781 } 782 783 return (0); 784 785 hdac_irq_alloc_fail: 786 hdac_irq_free(sc); 787 788 return (ENXIO); 789 } 790 791 /**************************************************************************** 792 * void hdac_irq_free(struct hdac_softc *) 793 * 794 * Free up resources previously allocated by hdac_irq_alloc. 
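 *
 * Teardown mirrors hdac_irq_alloc(): the interrupt handler is removed
 * before the IRQ resource is released, and the MSI message (rid 0x1, if
 * one was allocated) is released last.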
795 ****************************************************************************/ 796 static void 797 hdac_irq_free(struct hdac_softc *sc) 798 { 799 struct hdac_irq *irq; 800 801 irq = &sc->irq; 802 if (irq->irq_res != NULL && irq->irq_handle != NULL) 803 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 804 if (irq->irq_res != NULL) 805 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 806 irq->irq_res); 807 if (irq->irq_rid == 0x1) 808 pci_release_msi(sc->dev); 809 irq->irq_handle = NULL; 810 irq->irq_res = NULL; 811 irq->irq_rid = 0x0; 812 } 813 814 /**************************************************************************** 815 * void hdac_corb_init(struct hdac_softc *) 816 * 817 * Initialize the corb registers for operations but do not start it up yet. 818 * The CORB engine must not be running when this function is called. 819 ****************************************************************************/ 820 static void 821 hdac_corb_init(struct hdac_softc *sc) 822 { 823 uint8_t corbsize; 824 uint64_t corbpaddr; 825 826 /* Setup the CORB size. */ 827 switch (sc->corb_size) { 828 case 256: 829 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 830 break; 831 case 16: 832 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 833 break; 834 case 2: 835 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 836 break; 837 default: 838 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 839 } 840 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 841 842 /* Setup the CORB Address in the hdac */ 843 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 844 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 845 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 846 847 /* Set the WP and RP */ 848 sc->corb_wp = 0; 849 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 850 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 851 /* 852 * The HDA specification indicates that the CORBRPRST bit will always 853 * read as zero. Unfortunately, it seems that at least the 82801G 854 * doesn't reset the bit to zero, which stalls the corb engine. 855 * manually reset the bit to zero before continuing. 856 */ 857 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 858 859 /* Enable CORB error reporting */ 860 #if 0 861 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 862 #endif 863 } 864 865 /**************************************************************************** 866 * void hdac_rirb_init(struct hdac_softc *) 867 * 868 * Initialize the rirb registers for operations but do not start it up yet. 869 * The RIRB engine must not be running when this function is called. 870 ****************************************************************************/ 871 static void 872 hdac_rirb_init(struct hdac_softc *sc) 873 { 874 uint8_t rirbsize; 875 uint64_t rirbpaddr; 876 877 /* Setup the RIRB size. 
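 * The value written must be one of the sizes advertised by RIRBSIZE and
 * chosen in hdac_get_capabilities(): 256, 16 or 2 entries.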
*/ 878 switch (sc->rirb_size) { 879 case 256: 880 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 881 break; 882 case 16: 883 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 884 break; 885 case 2: 886 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 887 break; 888 default: 889 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 890 } 891 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 892 893 /* Setup the RIRB Address in the hdac */ 894 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 895 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 896 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 897 898 /* Setup the WP and RP */ 899 sc->rirb_rp = 0; 900 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 901 902 /* Setup the interrupt threshold */ 903 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 904 905 /* Enable Overrun and response received reporting */ 906 #if 0 907 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 908 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 909 #else 910 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 911 #endif 912 913 /* 914 * Make sure that the Host CPU cache doesn't contain any dirty 915 * cache lines that falls in the rirb. If I understood correctly, it 916 * should be sufficient to do this only once as the rirb is purely 917 * read-only from now on. 918 */ 919 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 920 BUS_DMASYNC_PREREAD); 921 } 922 923 /**************************************************************************** 924 * void hdac_corb_start(hdac_softc *) 925 * 926 * Startup the corb DMA engine 927 ****************************************************************************/ 928 static void 929 hdac_corb_start(struct hdac_softc *sc) 930 { 931 uint32_t corbctl; 932 933 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 934 corbctl |= HDAC_CORBCTL_CORBRUN; 935 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 936 } 937 938 /**************************************************************************** 939 * void hdac_rirb_start(hdac_softc *) 940 * 941 * Startup the rirb DMA engine 942 ****************************************************************************/ 943 static void 944 hdac_rirb_start(struct hdac_softc *sc) 945 { 946 uint32_t rirbctl; 947 948 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 949 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 950 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 951 } 952 953 static int 954 hdac_rirb_flush(struct hdac_softc *sc) 955 { 956 struct hdac_rirb *rirb_base, *rirb; 957 nid_t cad; 958 uint32_t resp, resp_ex; 959 uint8_t rirbwp; 960 int ret; 961 962 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 963 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 964 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 965 BUS_DMASYNC_POSTREAD); 966 967 ret = 0; 968 while (sc->rirb_rp != rirbwp) { 969 sc->rirb_rp++; 970 sc->rirb_rp %= sc->rirb_size; 971 rirb = &rirb_base[sc->rirb_rp]; 972 resp = le32toh(rirb->response); 973 resp_ex = le32toh(rirb->response_ex); 974 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 975 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 976 sc->unsolq[sc->unsolq_wp++] = resp; 977 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 978 sc->unsolq[sc->unsolq_wp++] = cad; 979 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 980 } else if (sc->codecs[cad].pending <= 0) { 981 device_printf(sc->dev, "Unexpected unsolicited " 982 "response from address %d: %08x\n", cad, resp); 983 } else { 984 sc->codecs[cad].response = resp; 985 
sc->codecs[cad].pending--; 986 } 987 ret++; 988 } 989 990 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 991 BUS_DMASYNC_PREREAD); 992 return (ret); 993 } 994 995 static int 996 hdac_unsolq_flush(struct hdac_softc *sc) 997 { 998 device_t child; 999 nid_t cad; 1000 uint32_t resp; 1001 int ret = 0; 1002 1003 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 1004 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 1005 while (sc->unsolq_rp != sc->unsolq_wp) { 1006 resp = sc->unsolq[sc->unsolq_rp++]; 1007 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1008 cad = sc->unsolq[sc->unsolq_rp++]; 1009 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1010 if ((child = sc->codecs[cad].dev) != NULL && 1011 device_is_attached(child)) 1012 HDAC_UNSOL_INTR(child, resp); 1013 ret++; 1014 } 1015 sc->unsolq_st = HDAC_UNSOLQ_READY; 1016 } 1017 1018 return (ret); 1019 } 1020 1021 /**************************************************************************** 1022 * uint32_t hdac_send_command 1023 * 1024 * Wrapper function that sends only one command to a given codec 1025 ****************************************************************************/ 1026 static uint32_t 1027 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 1028 { 1029 int timeout; 1030 uint32_t *corb; 1031 1032 hdac_lockassert(sc); 1033 verb &= ~HDA_CMD_CAD_MASK; 1034 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1035 sc->codecs[cad].response = HDA_INVALID; 1036 1037 sc->codecs[cad].pending++; 1038 sc->corb_wp++; 1039 sc->corb_wp %= sc->corb_size; 1040 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1041 bus_dmamap_sync(sc->corb_dma.dma_tag, 1042 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1043 corb[sc->corb_wp] = htole32(verb); 1044 bus_dmamap_sync(sc->corb_dma.dma_tag, 1045 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1046 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1047 1048 timeout = 10000; 1049 do { 1050 if (hdac_rirb_flush(sc) == 0) 1051 DELAY(10); 1052 } while (sc->codecs[cad].pending != 0 && --timeout); 1053 1054 if (sc->codecs[cad].pending != 0) { 1055 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1056 verb, cad); 1057 sc->codecs[cad].pending = 0; 1058 } 1059 1060 if (sc->unsolq_rp != sc->unsolq_wp) 1061 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1062 return (sc->codecs[cad].response); 1063 } 1064 1065 /**************************************************************************** 1066 * Device Methods 1067 ****************************************************************************/ 1068 1069 /**************************************************************************** 1070 * int hdac_probe(device_t) 1071 * 1072 * Probe for the presence of an hdac. If none is found, check for a generic 1073 * match using the subclass of the device. 
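 *
 * The 32-bit model value compared against hdac_devices[] is composed as
 * (PCI device ID << 16) | PCI vendor ID; the wildcard table entries are
 * matched via HDA_DEV_MATCH() together with the multimedia/HDA class and
 * subclass check.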
1074 ****************************************************************************/ 1075 static int 1076 hdac_probe(device_t dev) 1077 { 1078 int i, result; 1079 uint32_t model; 1080 uint16_t class, subclass; 1081 char desc[64]; 1082 1083 model = (uint32_t)pci_get_device(dev) << 16; 1084 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1085 class = pci_get_class(dev); 1086 subclass = pci_get_subclass(dev); 1087 1088 bzero(desc, sizeof(desc)); 1089 result = ENXIO; 1090 for (i = 0; i < nitems(hdac_devices); i++) { 1091 if (hdac_devices[i].model == model) { 1092 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1093 result = BUS_PROBE_DEFAULT; 1094 break; 1095 } 1096 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1097 class == PCIC_MULTIMEDIA && 1098 subclass == PCIS_MULTIMEDIA_HDA) { 1099 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1100 hdac_devices[i].desc, pci_get_device(dev)); 1101 result = BUS_PROBE_GENERIC; 1102 break; 1103 } 1104 } 1105 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1106 subclass == PCIS_MULTIMEDIA_HDA) { 1107 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1108 result = BUS_PROBE_GENERIC; 1109 } 1110 if (result != ENXIO) 1111 device_set_descf(dev, "%s HDA Controller", desc); 1112 1113 return (result); 1114 } 1115 1116 static void 1117 hdac_unsolq_task(void *context, int pending) 1118 { 1119 struct hdac_softc *sc; 1120 1121 sc = (struct hdac_softc *)context; 1122 1123 hdac_lock(sc); 1124 hdac_unsolq_flush(sc); 1125 hdac_unlock(sc); 1126 } 1127 1128 /**************************************************************************** 1129 * int hdac_attach(device_t) 1130 * 1131 * Attach the device into the kernel. Interrupts usually won't be enabled 1132 * when this function is called. Setup everything that doesn't require 1133 * interrupts and defer probing of codecs until interrupts are enabled. 
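 *
 * In outline: look up quirks for the device, choose a DMA coherency
 * strategy (PCIe snoop where the chipset supports it, otherwise
 * uncacheable memory), map the registers, read the capabilities, allocate
 * CORB/RIRB/BDL memory, reset the controller and hook up the interrupt;
 * codec probing itself is deferred to hdac_attach2().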
1134 ****************************************************************************/ 1135 static int 1136 hdac_attach(device_t dev) 1137 { 1138 struct hdac_softc *sc; 1139 int result; 1140 int i, devid = -1; 1141 uint32_t model; 1142 uint16_t class, subclass; 1143 uint16_t vendor; 1144 uint8_t v; 1145 1146 sc = device_get_softc(dev); 1147 HDA_BOOTVERBOSE( 1148 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", 1149 pci_get_subvendor(dev), pci_get_subdevice(dev)); 1150 device_printf(dev, "HDA Driver Revision: %s\n", 1151 HDA_DRV_TEST_REV); 1152 ); 1153 1154 model = (uint32_t)pci_get_device(dev) << 16; 1155 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1156 class = pci_get_class(dev); 1157 subclass = pci_get_subclass(dev); 1158 1159 for (i = 0; i < nitems(hdac_devices); i++) { 1160 if (hdac_devices[i].model == model) { 1161 devid = i; 1162 break; 1163 } 1164 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1165 class == PCIC_MULTIMEDIA && 1166 subclass == PCIS_MULTIMEDIA_HDA) { 1167 devid = i; 1168 break; 1169 } 1170 } 1171 1172 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); 1173 sc->dev = dev; 1174 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); 1175 callout_init(&sc->poll_callout, 1); 1176 for (i = 0; i < HDAC_CODEC_MAX; i++) 1177 sc->codecs[i].dev = NULL; 1178 if (devid >= 0) { 1179 sc->quirks_on = hdac_devices[devid].quirks_on; 1180 sc->quirks_off = hdac_devices[devid].quirks_off; 1181 } else { 1182 sc->quirks_on = 0; 1183 sc->quirks_off = 0; 1184 } 1185 if (resource_int_value(device_get_name(dev), 1186 device_get_unit(dev), "msi", &i) == 0) { 1187 if (i == 0) 1188 sc->quirks_off |= HDAC_QUIRK_MSI; 1189 else { 1190 sc->quirks_on |= HDAC_QUIRK_MSI; 1191 sc->quirks_off &= ~HDAC_QUIRK_MSI; 1192 } 1193 } 1194 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); 1195 HDA_BOOTVERBOSE( 1196 device_printf(sc->dev, 1197 "Config options: on=0x%08x off=0x%08x\n", 1198 sc->quirks_on, sc->quirks_off); 1199 ); 1200 sc->poll_ival = hz; 1201 if (resource_int_value(device_get_name(dev), 1202 device_get_unit(dev), "polling", &i) == 0 && i != 0) 1203 sc->polling = 1; 1204 else 1205 sc->polling = 0; 1206 1207 pci_enable_busmaster(dev); 1208 1209 vendor = pci_get_vendor(dev); 1210 if (vendor == INTEL_VENDORID) { 1211 /* TCSEL -> TC0 */ 1212 v = pci_read_config(dev, 0x44, 1); 1213 pci_write_config(dev, 0x44, v & 0xf8, 1); 1214 HDA_BOOTHVERBOSE( 1215 device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v, 1216 pci_read_config(dev, 0x44, 1)); 1217 ); 1218 } 1219 1220 #if defined(__i386__) || defined(__amd64__) 1221 sc->flags |= HDAC_F_DMA_NOCACHE; 1222 1223 if (resource_int_value(device_get_name(dev), 1224 device_get_unit(dev), "snoop", &i) == 0 && i != 0) { 1225 #else 1226 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1227 #endif 1228 /* 1229 * Try to enable PCIe snoop to avoid messing around with 1230 * uncacheable DMA attribute. Since PCIe snoop register 1231 * config is pretty much vendor specific, there are no 1232 * general solutions on how to enable it, forcing us (even 1233 * Microsoft) to enable uncacheable or write combined DMA 1234 * by default.
1235 * 1236 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1237 */ 1238 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1239 if (hdac_pcie_snoop[i].vendor != vendor) 1240 continue; 1241 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1242 if (hdac_pcie_snoop[i].reg == 0x00) 1243 break; 1244 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1245 if ((v & hdac_pcie_snoop[i].enable) == 1246 hdac_pcie_snoop[i].enable) 1247 break; 1248 v &= hdac_pcie_snoop[i].mask; 1249 v |= hdac_pcie_snoop[i].enable; 1250 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1251 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1252 if ((v & hdac_pcie_snoop[i].enable) != 1253 hdac_pcie_snoop[i].enable) { 1254 HDA_BOOTVERBOSE( 1255 device_printf(dev, 1256 "WARNING: Failed to enable PCIe " 1257 "snoop!\n"); 1258 ); 1259 #if defined(__i386__) || defined(__amd64__) 1260 sc->flags |= HDAC_F_DMA_NOCACHE; 1261 #endif 1262 } 1263 break; 1264 } 1265 #if defined(__i386__) || defined(__amd64__) 1266 } 1267 #endif 1268 1269 HDA_BOOTHVERBOSE( 1270 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1271 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1272 "Uncacheable" : "PCIe snoop", vendor); 1273 ); 1274 1275 /* Allocate resources */ 1276 result = hdac_mem_alloc(sc); 1277 if (result != 0) 1278 goto hdac_attach_fail; 1279 1280 /* Get Capabilities */ 1281 result = hdac_get_capabilities(sc); 1282 if (result != 0) 1283 goto hdac_attach_fail; 1284 1285 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1286 result = hdac_dma_alloc(sc, &sc->corb_dma, 1287 sc->corb_size * sizeof(uint32_t)); 1288 if (result != 0) 1289 goto hdac_attach_fail; 1290 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1291 sc->rirb_size * sizeof(struct hdac_rirb)); 1292 if (result != 0) 1293 goto hdac_attach_fail; 1294 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1295 M_HDAC, M_ZERO | M_WAITOK); 1296 for (i = 0; i < sc->num_ss; i++) { 1297 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1298 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1299 if (result != 0) 1300 goto hdac_attach_fail; 1301 } 1302 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1303 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1304 HDA_BOOTVERBOSE( 1305 device_printf(dev, "Failed to " 1306 "allocate DMA pos buffer " 1307 "(non-fatal)\n"); 1308 ); 1309 } else { 1310 uint64_t addr = sc->pos_dma.dma_paddr; 1311 1312 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1313 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1314 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1315 HDAC_DPLBASE_DPLBASE_DMAPBE); 1316 } 1317 } 1318 1319 result = bus_dma_tag_create( 1320 bus_get_dma_tag(sc->dev), /* parent */ 1321 HDA_DMA_ALIGNMENT, /* alignment */ 1322 0, /* boundary */ 1323 (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : 1324 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1325 BUS_SPACE_MAXADDR, /* highaddr */ 1326 NULL, /* filtfunc */ 1327 NULL, /* fistfuncarg */ 1328 HDA_BUFSZ_MAX, /* maxsize */ 1329 1, /* nsegments */ 1330 HDA_BUFSZ_MAX, /* maxsegsz */ 1331 0, /* flags */ 1332 NULL, /* lockfunc */ 1333 NULL, /* lockfuncarg */ 1334 &sc->chan_dmat); /* dmat */ 1335 if (result != 0) { 1336 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1337 __func__, result); 1338 goto hdac_attach_fail; 1339 } 1340 1341 /* Quiesce everything */ 1342 HDA_BOOTHVERBOSE( 1343 device_printf(dev, "Reset controller...\n"); 1344 ); 1345 hdac_reset(sc, true); 1346 1347 /* Initialize the CORB and RIRB */ 1348 hdac_corb_init(sc); 1349 hdac_rirb_init(sc); 1350 1351 result = hdac_irq_alloc(sc); 1352 if (result != 0) 1353 goto hdac_attach_fail; 1354 1355 /* Defer remaining of initialization until interrupts are enabled */ 1356 sc->intrhook.ich_func = hdac_attach2; 1357 sc->intrhook.ich_arg = (void *)sc; 1358 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1359 sc->intrhook.ich_func = NULL; 1360 hdac_attach2((void *)sc); 1361 } 1362 1363 return (0); 1364 1365 hdac_attach_fail: 1366 hdac_irq_free(sc); 1367 if (sc->streams != NULL) 1368 for (i = 0; i < sc->num_ss; i++) 1369 hdac_dma_free(sc, &sc->streams[i].bdl); 1370 free(sc->streams, M_HDAC); 1371 hdac_dma_free(sc, &sc->rirb_dma); 1372 hdac_dma_free(sc, &sc->corb_dma); 1373 hdac_mem_free(sc); 1374 snd_mtxfree(sc->lock); 1375 1376 return (ENXIO); 1377 } 1378 1379 static int 1380 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1381 { 1382 struct hdac_softc *sc; 1383 device_t *devlist; 1384 device_t dev; 1385 int devcount, i, err, val; 1386 1387 dev = oidp->oid_arg1; 1388 sc = device_get_softc(dev); 1389 if (sc == NULL) 1390 return (EINVAL); 1391 val = 0; 1392 err = sysctl_handle_int(oidp, &val, 0, req); 1393 if (err != 0 || req->newptr == NULL || val == 0) 1394 return (err); 1395 1396 /* XXX: Temporary. For debugging. 
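 * Writing 100 forces hdac_suspend() and 101 forces hdac_resume().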
*/ 1397 if (val == 100) { 1398 hdac_suspend(dev); 1399 return (0); 1400 } else if (val == 101) { 1401 hdac_resume(dev); 1402 return (0); 1403 } 1404 1405 bus_topo_lock(); 1406 1407 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) { 1408 bus_topo_unlock(); 1409 return (err); 1410 } 1411 1412 hdac_lock(sc); 1413 for (i = 0; i < devcount; i++) 1414 HDAC_PINDUMP(devlist[i]); 1415 hdac_unlock(sc); 1416 1417 bus_topo_unlock(); 1418 1419 free(devlist, M_TEMP); 1420 return (0); 1421 } 1422 1423 static int 1424 hdac_mdata_rate(uint16_t fmt) 1425 { 1426 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1427 int rate, bits; 1428 1429 if (fmt & (1 << 14)) 1430 rate = 44100; 1431 else 1432 rate = 48000; 1433 rate *= ((fmt >> 11) & 0x07) + 1; 1434 rate /= ((fmt >> 8) & 0x07) + 1; 1435 bits = mbits[(fmt >> 4) & 0x03]; 1436 bits *= (fmt & 0x0f) + 1; 1437 return (rate * bits); 1438 } 1439 1440 static int 1441 hdac_bdata_rate(uint16_t fmt, int output) 1442 { 1443 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1444 int rate, bits; 1445 1446 rate = 48000; 1447 rate *= ((fmt >> 11) & 0x07) + 1; 1448 bits = bbits[(fmt >> 4) & 0x03]; 1449 bits *= (fmt & 0x0f) + 1; 1450 if (!output) 1451 bits = ((bits + 7) & ~0x07) + 10; 1452 return (rate * bits); 1453 } 1454 1455 static void 1456 hdac_poll_reinit(struct hdac_softc *sc) 1457 { 1458 int i, pollticks, min = 1000000; 1459 struct hdac_stream *s; 1460 1461 if (sc->polling == 0) 1462 return; 1463 if (sc->unsol_registered > 0) 1464 min = hz / 2; 1465 for (i = 0; i < sc->num_ss; i++) { 1466 s = &sc->streams[i]; 1467 if (s->running == 0) 1468 continue; 1469 pollticks = ((uint64_t)hz * s->blksz) / 1470 (hdac_mdata_rate(s->format) / 8); 1471 pollticks >>= 1; 1472 if (pollticks > hz) 1473 pollticks = hz; 1474 if (pollticks < 1) 1475 pollticks = 1; 1476 if (min > pollticks) 1477 min = pollticks; 1478 } 1479 sc->poll_ival = min; 1480 if (min == 1000000) 1481 callout_stop(&sc->poll_callout); 1482 else 1483 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1484 } 1485 1486 static int 1487 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1488 { 1489 struct hdac_softc *sc; 1490 device_t dev; 1491 uint32_t ctl; 1492 int err, val; 1493 1494 dev = oidp->oid_arg1; 1495 sc = device_get_softc(dev); 1496 if (sc == NULL) 1497 return (EINVAL); 1498 hdac_lock(sc); 1499 val = sc->polling; 1500 hdac_unlock(sc); 1501 err = sysctl_handle_int(oidp, &val, 0, req); 1502 1503 if (err != 0 || req->newptr == NULL) 1504 return (err); 1505 if (val < 0 || val > 1) 1506 return (EINVAL); 1507 1508 hdac_lock(sc); 1509 if (val != sc->polling) { 1510 if (val == 0) { 1511 callout_stop(&sc->poll_callout); 1512 hdac_unlock(sc); 1513 callout_drain(&sc->poll_callout); 1514 hdac_lock(sc); 1515 sc->polling = 0; 1516 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1517 ctl |= HDAC_INTCTL_GIE; 1518 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1519 } else { 1520 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1521 ctl &= ~HDAC_INTCTL_GIE; 1522 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1523 sc->polling = 1; 1524 hdac_poll_reinit(sc); 1525 } 1526 } 1527 hdac_unlock(sc); 1528 1529 return (err); 1530 } 1531 1532 static void 1533 hdac_attach2(void *arg) 1534 { 1535 struct hdac_softc *sc; 1536 device_t child; 1537 uint32_t vendorid, revisionid; 1538 int i; 1539 uint16_t statests; 1540 1541 sc = (struct hdac_softc *)arg; 1542 1543 hdac_lock(sc); 1544 1545 /* Remove ourselves from the config hooks */ 1546 if (sc->intrhook.ich_func != NULL) { 1547 config_intrhook_disestablish(&sc->intrhook); 
1548 sc->intrhook.ich_func = NULL; 1549 } 1550 1551 HDA_BOOTHVERBOSE( 1552 device_printf(sc->dev, "Starting CORB Engine...\n"); 1553 ); 1554 hdac_corb_start(sc); 1555 HDA_BOOTHVERBOSE( 1556 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1557 ); 1558 hdac_rirb_start(sc); 1559 1560 /* 1561 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1562 * (status change) interrupts. The documentation says that we 1563 * should not make any assumptions about the state of this register 1564 * and set it explicitly. 1565 * NB: this needs to be done before the interrupt is enabled as 1566 * the handler does not expect this interrupt source. 1567 */ 1568 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1569 1570 /* 1571 * Read and clear post-reset SDI wake status. 1572 * Each set bit corresponds to a codec that came out of reset. 1573 */ 1574 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1575 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1576 1577 HDA_BOOTHVERBOSE( 1578 device_printf(sc->dev, 1579 "Enabling controller interrupt...\n"); 1580 ); 1581 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1582 HDAC_GCTL_UNSOL); 1583 if (sc->polling == 0) { 1584 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1585 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1586 } 1587 DELAY(1000); 1588 1589 HDA_BOOTHVERBOSE( 1590 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1591 ); 1592 hdac_unlock(sc); 1593 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1594 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1595 HDA_BOOTHVERBOSE( 1596 device_printf(sc->dev, 1597 "Found CODEC at address %d\n", i); 1598 ); 1599 hdac_lock(sc); 1600 vendorid = hdac_send_command(sc, i, 1601 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1602 revisionid = hdac_send_command(sc, i, 1603 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1604 hdac_unlock(sc); 1605 if (vendorid == HDA_INVALID && 1606 revisionid == HDA_INVALID) { 1607 device_printf(sc->dev, 1608 "CODEC at address %d not responding!\n", i); 1609 continue; 1610 } 1611 sc->codecs[i].vendor_id = 1612 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1613 sc->codecs[i].device_id = 1614 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1615 sc->codecs[i].revision_id = 1616 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1617 sc->codecs[i].stepping_id = 1618 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1619 child = device_add_child(sc->dev, "hdacc", -1); 1620 if (child == NULL) { 1621 device_printf(sc->dev, 1622 "Failed to add CODEC device\n"); 1623 continue; 1624 } 1625 device_set_ivars(child, (void *)(intptr_t)i); 1626 sc->codecs[i].dev = child; 1627 } 1628 } 1629 bus_generic_attach(sc->dev); 1630 1631 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1632 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1633 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1634 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1635 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1636 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1637 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1638 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1639 } 1640 1641 /**************************************************************************** 1642 * int hdac_suspend(device_t) 1643 * 1644 * Suspend and power down HDA bus and codecs. 
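 *
 * bus_generic_suspend() suspends the codec children first; the controller
 * is then put back into reset, and the polling callout and the
 * unsolicited-event task are drained.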
1645 ****************************************************************************/ 1646 static int 1647 hdac_suspend(device_t dev) 1648 { 1649 struct hdac_softc *sc = device_get_softc(dev); 1650 1651 HDA_BOOTHVERBOSE( 1652 device_printf(dev, "Suspend...\n"); 1653 ); 1654 bus_generic_suspend(dev); 1655 1656 hdac_lock(sc); 1657 HDA_BOOTHVERBOSE( 1658 device_printf(dev, "Reset controller...\n"); 1659 ); 1660 callout_stop(&sc->poll_callout); 1661 hdac_reset(sc, false); 1662 hdac_unlock(sc); 1663 callout_drain(&sc->poll_callout); 1664 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1665 HDA_BOOTHVERBOSE( 1666 device_printf(dev, "Suspend done\n"); 1667 ); 1668 return (0); 1669 } 1670 1671 /**************************************************************************** 1672 * int hdac_resume(device_t) 1673 * 1674 * Powerup and restore HDA bus and codecs state. 1675 ****************************************************************************/ 1676 static int 1677 hdac_resume(device_t dev) 1678 { 1679 struct hdac_softc *sc = device_get_softc(dev); 1680 int error; 1681 1682 HDA_BOOTHVERBOSE( 1683 device_printf(dev, "Resume...\n"); 1684 ); 1685 hdac_lock(sc); 1686 1687 /* Quiesce everything */ 1688 HDA_BOOTHVERBOSE( 1689 device_printf(dev, "Reset controller...\n"); 1690 ); 1691 hdac_reset(sc, true); 1692 1693 /* Initialize the CORB and RIRB */ 1694 hdac_corb_init(sc); 1695 hdac_rirb_init(sc); 1696 1697 HDA_BOOTHVERBOSE( 1698 device_printf(dev, "Starting CORB Engine...\n"); 1699 ); 1700 hdac_corb_start(sc); 1701 HDA_BOOTHVERBOSE( 1702 device_printf(dev, "Starting RIRB Engine...\n"); 1703 ); 1704 hdac_rirb_start(sc); 1705 1706 /* 1707 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1708 * (status change) events. The documentation says that we should 1709 * not make any assumptions about the state of this register and 1710 * set it explicitly. 1711 * Also, clear HDAC_STATESTS. 1712 * NB: this needs to be done before the interrupt is enabled as 1713 * the handler does not expect this interrupt source. 1714 */ 1715 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1716 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1717 1718 HDA_BOOTHVERBOSE( 1719 device_printf(dev, "Enabling controller interrupt...\n"); 1720 ); 1721 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1722 HDAC_GCTL_UNSOL); 1723 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1724 DELAY(1000); 1725 hdac_poll_reinit(sc); 1726 hdac_unlock(sc); 1727 1728 error = bus_generic_resume(dev); 1729 HDA_BOOTHVERBOSE( 1730 device_printf(dev, "Resume done\n"); 1731 ); 1732 return (error); 1733 } 1734 1735 /**************************************************************************** 1736 * int hdac_detach(device_t) 1737 * 1738 * Detach and free up resources utilized by the hdac device. 
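 *
 * The codec children are deleted first; the controller is then reset and
 * its IRQ, stream BDLs, position/RIRB/CORB buffers, DMA tag, register
 * mapping and mutex are released.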
1739 ****************************************************************************/ 1740 static int 1741 hdac_detach(device_t dev) 1742 { 1743 struct hdac_softc *sc = device_get_softc(dev); 1744 device_t *devlist; 1745 int cad, i, devcount, error; 1746 1747 if ((error = device_get_children(dev, &devlist, &devcount)) != 0) 1748 return (error); 1749 for (i = 0; i < devcount; i++) { 1750 cad = (intptr_t)device_get_ivars(devlist[i]); 1751 if ((error = device_delete_child(dev, devlist[i])) != 0) { 1752 free(devlist, M_TEMP); 1753 return (error); 1754 } 1755 sc->codecs[cad].dev = NULL; 1756 } 1757 free(devlist, M_TEMP); 1758 1759 hdac_lock(sc); 1760 hdac_reset(sc, false); 1761 hdac_unlock(sc); 1762 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1763 hdac_irq_free(sc); 1764 1765 for (i = 0; i < sc->num_ss; i++) 1766 hdac_dma_free(sc, &sc->streams[i].bdl); 1767 free(sc->streams, M_HDAC); 1768 hdac_dma_free(sc, &sc->pos_dma); 1769 hdac_dma_free(sc, &sc->rirb_dma); 1770 hdac_dma_free(sc, &sc->corb_dma); 1771 if (sc->chan_dmat != NULL) { 1772 bus_dma_tag_destroy(sc->chan_dmat); 1773 sc->chan_dmat = NULL; 1774 } 1775 hdac_mem_free(sc); 1776 snd_mtxfree(sc->lock); 1777 return (0); 1778 } 1779 1780 static bus_dma_tag_t 1781 hdac_get_dma_tag(device_t dev, device_t child) 1782 { 1783 struct hdac_softc *sc = device_get_softc(dev); 1784 1785 return (sc->chan_dmat); 1786 } 1787 1788 static int 1789 hdac_print_child(device_t dev, device_t child) 1790 { 1791 int retval; 1792 1793 retval = bus_print_child_header(dev, child); 1794 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1795 retval += bus_print_child_footer(dev, child); 1796 1797 return (retval); 1798 } 1799 1800 static int 1801 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1802 { 1803 1804 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1805 return (0); 1806 } 1807 1808 static int 1809 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1810 { 1811 struct hdac_softc *sc = device_get_softc(dev); 1812 nid_t cad = (uintptr_t)device_get_ivars(child); 1813 1814 sbuf_printf(sb, 1815 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1816 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1817 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1818 return (0); 1819 } 1820 1821 static int 1822 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1823 { 1824 struct hdac_softc *sc = device_get_softc(dev); 1825 nid_t cad = (uintptr_t)device_get_ivars(child); 1826 1827 switch (which) { 1828 case HDA_IVAR_CODEC_ID: 1829 *result = cad; 1830 break; 1831 case HDA_IVAR_VENDOR_ID: 1832 *result = sc->codecs[cad].vendor_id; 1833 break; 1834 case HDA_IVAR_DEVICE_ID: 1835 *result = sc->codecs[cad].device_id; 1836 break; 1837 case HDA_IVAR_REVISION_ID: 1838 *result = sc->codecs[cad].revision_id; 1839 break; 1840 case HDA_IVAR_STEPPING_ID: 1841 *result = sc->codecs[cad].stepping_id; 1842 break; 1843 case HDA_IVAR_SUBVENDOR_ID: 1844 *result = pci_get_subvendor(dev); 1845 break; 1846 case HDA_IVAR_SUBDEVICE_ID: 1847 *result = pci_get_subdevice(dev); 1848 break; 1849 case HDA_IVAR_DMA_NOCACHE: 1850 *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; 1851 break; 1852 case HDA_IVAR_STRIPES_MASK: 1853 *result = (1 << (1 << sc->num_sdo)) - 1; 1854 break; 1855 default: 1856 return (ENOENT); 1857 } 1858 return (0); 1859 } 1860 1861 static struct mtx * 1862 hdac_get_mtx(device_t dev, device_t child) 1863 { 1864 struct hdac_softc *sc = device_get_softc(dev); 
static struct mtx *
hdac_get_mtx(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->lock);
}

static uint32_t
hdac_codec_command(device_t dev, device_t child, uint32_t verb)
{

	return (hdac_send_command(device_get_softc(dev),
	    (intptr_t)device_get_ivars(child), verb));
}

static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
	int i, ss;

	ss = -1;
	/* Allocate ISS/OSS first. */
	if (dir == 0) {
		for (i = 0; i < sc->num_iss; i++) {
			if (sc->streams[i].stream == stream) {
				ss = i;
				break;
			}
		}
	} else {
		for (i = 0; i < sc->num_oss; i++) {
			if (sc->streams[i + sc->num_iss].stream == stream) {
				ss = i + sc->num_iss;
				break;
			}
		}
	}
	/* Fallback to BSS. */
	if (ss == -1) {
		for (i = 0; i < sc->num_bss; i++) {
			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
			    == stream) {
				ss = i + sc->num_iss + sc->num_oss;
				break;
			}
		}
	}
	return (ss);
}
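/*
 * Note on the sc->streams[] layout used by hdac_find_stream() and the
 * allocator below: input (ISS) slots come first, then output (OSS), then
 * bidirectional (BSS) slots.  A lookup with stream == 0 finds a free slot,
 * since a freed slot has its stream tag cleared back to 0.  Illustrative
 * mapping of slots to stream tags (assuming a controller with 4 ISS,
 * 4 OSS and 2 BSS): slots 0..3 get input tags 1..4, slots 4..7 get output
 * tags 1..4, and slots 8..9 get tags 15 and 14.
 */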
: ""); 1941 ); 1942 if (bw + prevbw > maxbw) 1943 return (0); 1944 if (dir == 1) 1945 sc->sdo_bw_used += bw; 1946 else 1947 sc->codecs[cad].sdi_bw_used += bw; 1948 1949 /* Allocate stream number */ 1950 if (ss >= sc->num_iss + sc->num_oss) 1951 stream = 15 - (ss - sc->num_iss - sc->num_oss); 1952 else if (ss >= sc->num_iss) 1953 stream = ss - sc->num_iss + 1; 1954 else 1955 stream = ss + 1; 1956 1957 sc->streams[ss].dev = child; 1958 sc->streams[ss].dir = dir; 1959 sc->streams[ss].stream = stream; 1960 sc->streams[ss].bw = bw; 1961 sc->streams[ss].format = format; 1962 sc->streams[ss].stripe = stripe; 1963 if (dmapos != NULL) { 1964 if (sc->pos_dma.dma_vaddr != NULL) 1965 *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); 1966 else 1967 *dmapos = NULL; 1968 } 1969 return (stream); 1970 } 1971 1972 static void 1973 hdac_stream_free(device_t dev, device_t child, int dir, int stream) 1974 { 1975 struct hdac_softc *sc = device_get_softc(dev); 1976 nid_t cad = (uintptr_t)device_get_ivars(child); 1977 int ss; 1978 1979 ss = hdac_find_stream(sc, dir, stream); 1980 KASSERT(ss >= 0, 1981 ("Free for not allocated stream (%d/%d)\n", dir, stream)); 1982 if (dir == 1) 1983 sc->sdo_bw_used -= sc->streams[ss].bw; 1984 else 1985 sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; 1986 sc->streams[ss].stream = 0; 1987 sc->streams[ss].dev = NULL; 1988 } 1989 1990 static int 1991 hdac_stream_start(device_t dev, device_t child, int dir, int stream, 1992 bus_addr_t buf, int blksz, int blkcnt) 1993 { 1994 struct hdac_softc *sc = device_get_softc(dev); 1995 struct hdac_bdle *bdle; 1996 uint64_t addr; 1997 int i, ss, off; 1998 uint32_t ctl; 1999 2000 ss = hdac_find_stream(sc, dir, stream); 2001 KASSERT(ss >= 0, 2002 ("Start for not allocated stream (%d/%d)\n", dir, stream)); 2003 2004 addr = (uint64_t)buf; 2005 bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; 2006 for (i = 0; i < blkcnt; i++, bdle++) { 2007 bdle->addrl = htole32((uint32_t)addr); 2008 bdle->addrh = htole32((uint32_t)(addr >> 32)); 2009 bdle->len = htole32(blksz); 2010 bdle->ioc = htole32(1); 2011 addr += blksz; 2012 } 2013 2014 bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, 2015 sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); 2016 2017 off = ss << 5; 2018 HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); 2019 HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); 2020 addr = sc->streams[ss].bdl.dma_paddr; 2021 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); 2022 HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); 2023 2024 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); 2025 if (dir) 2026 ctl |= HDAC_SDCTL2_DIR; 2027 else 2028 ctl &= ~HDAC_SDCTL2_DIR; 2029 ctl &= ~HDAC_SDCTL2_STRM_MASK; 2030 ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; 2031 ctl &= ~HDAC_SDCTL2_STRIPE_MASK; 2032 ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; 2033 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); 2034 2035 HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); 2036 2037 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 2038 ctl |= 1 << ss; 2039 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 2040 2041 HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, 2042 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 2043 ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); 2044 ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | 2045 HDAC_SDCTL_RUN; 2046 HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); 2047 2048 sc->streams[ss].blksz = blksz; 2049 sc->streams[ss].running = 1; 2050 hdac_poll_reinit(sc); 2051 return (0); 2052 } 
static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}
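/*
 * Illustrative child-side usage (a sketch only; the wrapper names are
 * assumed to be the kobj-generated macros for the hdac_* methods declared
 * below, and "dev", "fmt", "stripe", "buf_paddr", "blksz" and "blkcnt" are
 * hypothetical child-driver variables):
 *
 *	stream = HDAC_STREAM_ALLOC(device_get_parent(dev), dev, 1, fmt,
 *	    stripe, &dmapos);
 *	if (stream != 0)
 *		HDAC_STREAM_START(device_get_parent(dev), dev, 1, stream,
 *		    buf_paddr, blksz, blkcnt);
 *
 * A return value of 0 from the allocator means no free stream descriptor
 * or no spare link bandwidth, so the child must not program the stream.
 */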
static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location,	hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo,	hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);