1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca> 5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org> 6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Intel High Definition Audio (Controller) driver for FreeBSD. 33 */ 34 35 #ifdef HAVE_KERNEL_OPTION_HEADERS 36 #include "opt_snd.h" 37 #endif 38 39 #include <dev/sound/pcm/sound.h> 40 #include <dev/pci/pcireg.h> 41 #include <dev/pci/pcivar.h> 42 43 #include <sys/ctype.h> 44 #include <sys/endian.h> 45 #include <sys/taskqueue.h> 46 47 #include <dev/sound/pci/hda/hdac_private.h> 48 #include <dev/sound/pci/hda/hdac_reg.h> 49 #include <dev/sound/pci/hda/hda_reg.h> 50 #include <dev/sound/pci/hda/hdac.h> 51 52 #define HDA_DRV_TEST_REV "20120126_0002" 53 54 #define hdac_lock(sc) mtx_lock(&(sc)->lock) 55 #define hdac_unlock(sc) mtx_unlock(&(sc)->lock) 56 #define hdac_lockassert(sc) mtx_assert(&(sc)->lock, MA_OWNED) 57 58 #define HDAC_QUIRK_64BIT (1 << 0) 59 #define HDAC_QUIRK_DMAPOS (1 << 1) 60 #define HDAC_QUIRK_MSI (1 << 2) 61 62 static const struct { 63 const char *key; 64 uint32_t value; 65 } hdac_quirks_tab[] = { 66 { "64bit", HDAC_QUIRK_64BIT }, 67 { "dmapos", HDAC_QUIRK_DMAPOS }, 68 { "msi", HDAC_QUIRK_MSI }, 69 }; 70 71 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); 72 73 static const struct { 74 uint32_t model; 75 const char *desc; 76 char quirks_on; 77 char quirks_off; 78 } hdac_devices[] = { 79 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, 80 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, 81 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, 82 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, 83 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, 84 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, 85 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, 86 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, 87 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, 88 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, 89 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, 90 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, 91 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, 92 { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, 93 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, 94 { 
HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, 95 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, 96 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, 97 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, 98 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, 99 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, 100 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, 101 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, 102 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, 103 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, 104 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, 105 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, 106 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, 107 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, 108 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, 109 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, 110 { HDA_INTEL_TGLKH, "Intel Tiger Lake-H", 0, 0 }, 111 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, 112 { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 }, 113 { HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 }, 114 { HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 }, 115 { HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 }, 116 { HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 }, 117 { HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 }, 118 { HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 }, 119 { HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 }, 120 { HDA_INTEL_RPTLK3, "Intel Raptor Lake-S", 0, 0 }, 121 { HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 }, 122 { HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 }, 123 { HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 }, 124 { HDA_INTEL_LNLP, "Intel Lunar Lake-P", 0, 0 }, 125 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, 126 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, 127 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, 128 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, 129 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, 130 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, 131 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, 132 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, 133 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, 134 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, 135 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, 136 { HDA_INTEL_ELLK2, "Intel Elkhart Lake", 0, 0 }, 137 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, 138 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, 139 { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, 140 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, 141 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, 142 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, 143 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, 144 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, 145 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, 146 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, 147 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, 148 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, 149 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, 150 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 151 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 152 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 153 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, 154 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, 155 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, 156 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, 157 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, 158 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, 159 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, 160 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, 161 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, 162 { 
HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, 163 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, 164 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, 165 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, 166 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, 167 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, 168 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, 169 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, 170 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, 171 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 172 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, 173 { HDA_ATI_RAVEN, "ATI Raven", 0, 0 }, 174 { HDA_ATI_SB450, "ATI SB450", 0, 0 }, 175 { HDA_ATI_SB600, "ATI SB600", 0, 0 }, 176 { HDA_ATI_RS600, "ATI RS600", 0, 0 }, 177 { HDA_ATI_RS690, "ATI RS690", 0, 0 }, 178 { HDA_ATI_RS780, "ATI RS780", 0, 0 }, 179 { HDA_ATI_RS880, "ATI RS880", 0, 0 }, 180 { HDA_ATI_R600, "ATI R600", 0, 0 }, 181 { HDA_ATI_RV610, "ATI RV610", 0, 0 }, 182 { HDA_ATI_RV620, "ATI RV620", 0, 0 }, 183 { HDA_ATI_RV630, "ATI RV630", 0, 0 }, 184 { HDA_ATI_RV635, "ATI RV635", 0, 0 }, 185 { HDA_ATI_RV710, "ATI RV710", 0, 0 }, 186 { HDA_ATI_RV730, "ATI RV730", 0, 0 }, 187 { HDA_ATI_RV740, "ATI RV740", 0, 0 }, 188 { HDA_ATI_RV770, "ATI RV770", 0, 0 }, 189 { HDA_ATI_RV810, "ATI RV810", 0, 0 }, 190 { HDA_ATI_RV830, "ATI RV830", 0, 0 }, 191 { HDA_ATI_RV840, "ATI RV840", 0, 0 }, 192 { HDA_ATI_RV870, "ATI RV870", 0, 0 }, 193 { HDA_ATI_RV910, "ATI RV910", 0, 0 }, 194 { HDA_ATI_RV930, "ATI RV930", 0, 0 }, 195 { HDA_ATI_RV940, "ATI RV940", 0, 0 }, 196 { HDA_ATI_RV970, "ATI RV970", 0, 0 }, 197 { HDA_ATI_R1000, "ATI R1000", 0, 0 }, 198 { HDA_ATI_OLAND, "ATI Oland", 0, 0 }, 199 { HDA_ATI_KABINI, "ATI Kabini", 0, 0 }, 200 { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 }, 201 { HDA_AMD_X370, "AMD X370", 0, 0 }, 202 { HDA_AMD_X570, "AMD X570", 0, 0 }, 203 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, 204 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, 205 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, 206 { HDA_RDC_M3010, "RDC M3010", 0, 0 }, 207 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, 208 { HDA_VMWARE, "VMware", 0, 0 }, 209 { HDA_SIS_966, "SiS 966/968", 0, 0 }, 210 { HDA_ULI_M5461, "ULI M5461", 0, 0 }, 211 { HDA_CREATIVE_SB1570, "Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT }, 212 /* Unknown */ 213 { HDA_INTEL_ALL, "Intel", 0, 0 }, 214 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, 215 { HDA_ATI_ALL, "ATI", 0, 0 }, 216 { HDA_AMD_ALL, "AMD", 0, 0 }, 217 { HDA_CREATIVE_ALL, "Creative", 0, 0 }, 218 { HDA_VIA_ALL, "VIA", 0, 0 }, 219 { HDA_VMWARE_ALL, "VMware", 0, 0 }, 220 { HDA_SIS_ALL, "SiS", 0, 0 }, 221 { HDA_ULI_ALL, "ULI", 0, 0 }, 222 }; 223 224 static const struct { 225 uint16_t vendor; 226 uint8_t reg; 227 uint8_t mask; 228 uint8_t enable; 229 } hdac_pcie_snoop[] = { 230 { INTEL_VENDORID, 0x00, 0x00, 0x00 }, 231 { ATI_VENDORID, 0x42, 0xf8, 0x02 }, 232 { AMD_VENDORID, 0x42, 0xf8, 0x02 }, 233 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, 234 }; 235 236 /**************************************************************************** 237 * Function prototypes 238 ****************************************************************************/ 239 static void hdac_intr_handler(void *); 240 static int hdac_reset(struct hdac_softc *, bool); 241 static int hdac_get_capabilities(struct hdac_softc *); 242 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); 243 static int hdac_dma_alloc(struct hdac_softc *, 244 struct hdac_dma *, bus_size_t); 245 static void hdac_dma_free(struct 
hdac_softc *, struct hdac_dma *); 246 static int hdac_mem_alloc(struct hdac_softc *); 247 static void hdac_mem_free(struct hdac_softc *); 248 static int hdac_irq_alloc(struct hdac_softc *); 249 static void hdac_irq_free(struct hdac_softc *); 250 static void hdac_corb_init(struct hdac_softc *); 251 static void hdac_rirb_init(struct hdac_softc *); 252 static void hdac_corb_start(struct hdac_softc *); 253 static void hdac_rirb_start(struct hdac_softc *); 254 255 static void hdac_attach2(void *); 256 257 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); 258 259 static int hdac_probe(device_t); 260 static int hdac_attach(device_t); 261 static int hdac_detach(device_t); 262 static int hdac_suspend(device_t); 263 static int hdac_resume(device_t); 264 265 static int hdac_rirb_flush(struct hdac_softc *sc); 266 static int hdac_unsolq_flush(struct hdac_softc *sc); 267 268 /* This function surely going to make its way into upper level someday. */ 269 static void 270 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) 271 { 272 const char *res = NULL; 273 int i = 0, j, k, len, inv; 274 275 if (resource_string_value(device_get_name(sc->dev), 276 device_get_unit(sc->dev), "config", &res) != 0) 277 return; 278 if (!(res != NULL && strlen(res) > 0)) 279 return; 280 HDA_BOOTVERBOSE( 281 device_printf(sc->dev, "Config options:"); 282 ); 283 for (;;) { 284 while (res[i] != '\0' && 285 (res[i] == ',' || isspace(res[i]) != 0)) 286 i++; 287 if (res[i] == '\0') { 288 HDA_BOOTVERBOSE( 289 printf("\n"); 290 ); 291 return; 292 } 293 j = i; 294 while (res[j] != '\0' && 295 !(res[j] == ',' || isspace(res[j]) != 0)) 296 j++; 297 len = j - i; 298 if (len > 2 && strncmp(res + i, "no", 2) == 0) 299 inv = 2; 300 else 301 inv = 0; 302 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { 303 if (strncmp(res + i + inv, 304 hdac_quirks_tab[k].key, len - inv) != 0) 305 continue; 306 if (len - inv != strlen(hdac_quirks_tab[k].key)) 307 continue; 308 HDA_BOOTVERBOSE( 309 printf(" %s%s", (inv != 0) ? "no" : "", 310 hdac_quirks_tab[k].key); 311 ); 312 if (inv == 0) { 313 *on |= hdac_quirks_tab[k].value; 314 *off &= ~hdac_quirks_tab[k].value; 315 } else if (inv != 0) { 316 *off |= hdac_quirks_tab[k].value; 317 *on &= ~hdac_quirks_tab[k].value; 318 } 319 break; 320 } 321 i = j; 322 } 323 } 324 325 static void 326 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) 327 { 328 device_t dev; 329 uint8_t rirbsts; 330 int i; 331 332 /* Was this a controller interrupt? */ 333 if (intsts & HDAC_INTSTS_CIS) { 334 /* 335 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then 336 * we will need to check and clear HDAC_STATESTS. 337 * That event is used to report codec status changes such as 338 * a reset or a wake-up event. 339 */ 340 /* 341 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we 342 * will need to check and clear HDAC_CORBSTS_CMEI in 343 * HDAC_CORBSTS. 344 * That event is used to report CORB memory errors. 345 */ 346 /* 347 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we 348 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in 349 * HDAC_RIRBSTS. 350 * That event is used to report response FIFO overruns. 
351 */ 352 353 /* Get as many responses that we can */ 354 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 355 while (rirbsts & HDAC_RIRBSTS_RINTFL) { 356 HDAC_WRITE_1(&sc->mem, 357 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); 358 hdac_rirb_flush(sc); 359 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); 360 } 361 if (sc->unsolq_rp != sc->unsolq_wp) 362 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 363 } 364 365 if (intsts & HDAC_INTSTS_SIS_MASK) { 366 for (i = 0; i < sc->num_ss; i++) { 367 if ((intsts & (1 << i)) == 0) 368 continue; 369 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, 370 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); 371 if ((dev = sc->streams[i].dev) != NULL) { 372 HDAC_STREAM_INTR(dev, 373 sc->streams[i].dir, sc->streams[i].stream); 374 } 375 } 376 } 377 } 378 379 /**************************************************************************** 380 * void hdac_intr_handler(void *) 381 * 382 * Interrupt handler. Processes interrupts received from the hdac. 383 ****************************************************************************/ 384 static void 385 hdac_intr_handler(void *context) 386 { 387 struct hdac_softc *sc; 388 uint32_t intsts; 389 390 sc = (struct hdac_softc *)context; 391 392 /* 393 * Loop until HDAC_INTSTS_GIS gets clear. 394 * It is plausible that hardware interrupts a host only when GIS goes 395 * from zero to one. GIS is formed by OR-ing multiple hardware 396 * statuses, so it's possible that a previously cleared status gets set 397 * again while another status has not been cleared yet. Thus, there 398 * will be no new interrupt as GIS always stayed set. If we don't 399 * re-examine GIS then we can leave it set and never get an interrupt 400 * again. 401 */ 402 hdac_lock(sc); 403 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 404 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) { 405 hdac_one_intr(sc, intsts); 406 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); 407 } 408 hdac_unlock(sc); 409 } 410 411 static void 412 hdac_poll_callback(void *arg) 413 { 414 struct hdac_softc *sc = arg; 415 416 if (sc == NULL) 417 return; 418 419 hdac_lock(sc); 420 if (sc->polling == 0) { 421 hdac_unlock(sc); 422 return; 423 } 424 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); 425 hdac_unlock(sc); 426 427 hdac_intr_handler(sc); 428 } 429 430 /**************************************************************************** 431 * int hdac_reset(hdac_softc *, bool) 432 * 433 * Reset the hdac to a quiescent and known state. 434 ****************************************************************************/ 435 static int 436 hdac_reset(struct hdac_softc *sc, bool wakeup) 437 { 438 uint32_t gctl; 439 int count, i; 440 441 /* 442 * Stop all Streams DMA engine 443 */ 444 for (i = 0; i < sc->num_iss; i++) 445 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); 446 for (i = 0; i < sc->num_oss; i++) 447 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); 448 for (i = 0; i < sc->num_bss; i++) 449 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); 450 451 /* 452 * Stop Control DMA engines. 453 */ 454 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); 455 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); 456 457 /* 458 * Reset DMA position buffer. 459 */ 460 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); 461 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); 462 463 /* 464 * Reset the controller. The reset must remain asserted for 465 * a minimum of 100us. 
466 */ 467 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 468 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); 469 count = 10000; 470 do { 471 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 472 if (!(gctl & HDAC_GCTL_CRST)) 473 break; 474 DELAY(10); 475 } while (--count); 476 if (gctl & HDAC_GCTL_CRST) { 477 device_printf(sc->dev, "Unable to put hdac in reset\n"); 478 return (ENXIO); 479 } 480 481 /* If wakeup is not requested - leave the controller in reset state. */ 482 if (!wakeup) 483 return (0); 484 485 DELAY(100); 486 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 487 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); 488 count = 10000; 489 do { 490 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); 491 if (gctl & HDAC_GCTL_CRST) 492 break; 493 DELAY(10); 494 } while (--count); 495 if (!(gctl & HDAC_GCTL_CRST)) { 496 device_printf(sc->dev, "Device stuck in reset\n"); 497 return (ENXIO); 498 } 499 500 /* 501 * Wait for codecs to finish their own reset sequence. The delay here 502 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). 503 */ 504 DELAY(1000); 505 506 return (0); 507 } 508 509 /**************************************************************************** 510 * int hdac_get_capabilities(struct hdac_softc *); 511 * 512 * Retreive the general capabilities of the hdac; 513 * Number of Input Streams 514 * Number of Output Streams 515 * Number of bidirectional Streams 516 * 64bit ready 517 * CORB and RIRB sizes 518 ****************************************************************************/ 519 static int 520 hdac_get_capabilities(struct hdac_softc *sc) 521 { 522 uint16_t gcap; 523 uint8_t corbsize, rirbsize; 524 525 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); 526 sc->num_iss = HDAC_GCAP_ISS(gcap); 527 sc->num_oss = HDAC_GCAP_OSS(gcap); 528 sc->num_bss = HDAC_GCAP_BSS(gcap); 529 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; 530 sc->num_sdo = HDAC_GCAP_NSDO(gcap); 531 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; 532 if (sc->quirks_on & HDAC_QUIRK_64BIT) 533 sc->support_64bit = 1; 534 else if (sc->quirks_off & HDAC_QUIRK_64BIT) 535 sc->support_64bit = 0; 536 537 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); 538 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == 539 HDAC_CORBSIZE_CORBSZCAP_256) 540 sc->corb_size = 256; 541 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == 542 HDAC_CORBSIZE_CORBSZCAP_16) 543 sc->corb_size = 16; 544 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == 545 HDAC_CORBSIZE_CORBSZCAP_2) 546 sc->corb_size = 2; 547 else { 548 device_printf(sc->dev, "%s: Invalid corb size (%x)\n", 549 __func__, corbsize); 550 return (ENXIO); 551 } 552 553 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); 554 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == 555 HDAC_RIRBSIZE_RIRBSZCAP_256) 556 sc->rirb_size = 256; 557 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == 558 HDAC_RIRBSIZE_RIRBSZCAP_16) 559 sc->rirb_size = 16; 560 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == 561 HDAC_RIRBSIZE_RIRBSZCAP_2) 562 sc->rirb_size = 2; 563 else { 564 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", 565 __func__, rirbsize); 566 return (ENXIO); 567 } 568 569 HDA_BOOTVERBOSE( 570 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " 571 "NSDO %d%s, CORB %d, RIRB %d\n", 572 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, 573 sc->support_64bit ? 
", 64bit" : "", 574 sc->corb_size, sc->rirb_size); 575 ); 576 577 return (0); 578 } 579 580 /**************************************************************************** 581 * void hdac_dma_cb 582 * 583 * This function is called by bus_dmamap_load when the mapping has been 584 * established. We just record the physical address of the mapping into 585 * the struct hdac_dma passed in. 586 ****************************************************************************/ 587 static void 588 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) 589 { 590 struct hdac_dma *dma; 591 592 if (error == 0) { 593 dma = (struct hdac_dma *)callback_arg; 594 dma->dma_paddr = segs[0].ds_addr; 595 } 596 } 597 598 /**************************************************************************** 599 * int hdac_dma_alloc 600 * 601 * This function allocate and setup a dma region (struct hdac_dma). 602 * It must be freed by a corresponding hdac_dma_free. 603 ****************************************************************************/ 604 static int 605 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) 606 { 607 bus_size_t roundsz; 608 int result; 609 610 roundsz = roundup2(size, HDA_DMA_ALIGNMENT); 611 bzero(dma, sizeof(*dma)); 612 613 /* 614 * Create a DMA tag 615 */ 616 result = bus_dma_tag_create( 617 bus_get_dma_tag(sc->dev), /* parent */ 618 HDA_DMA_ALIGNMENT, /* alignment */ 619 0, /* boundary */ 620 (sc->support_64bit) ? BUS_SPACE_MAXADDR : 621 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 622 BUS_SPACE_MAXADDR, /* highaddr */ 623 NULL, /* filtfunc */ 624 NULL, /* fistfuncarg */ 625 roundsz, /* maxsize */ 626 1, /* nsegments */ 627 roundsz, /* maxsegsz */ 628 0, /* flags */ 629 NULL, /* lockfunc */ 630 NULL, /* lockfuncarg */ 631 &dma->dma_tag); /* dmat */ 632 if (result != 0) { 633 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", 634 __func__, result); 635 goto hdac_dma_alloc_fail; 636 } 637 638 /* 639 * Allocate DMA memory 640 */ 641 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 642 BUS_DMA_NOWAIT | BUS_DMA_ZERO | 643 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : 644 BUS_DMA_COHERENT), 645 &dma->dma_map); 646 if (result != 0) { 647 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", 648 __func__, result); 649 goto hdac_dma_alloc_fail; 650 } 651 652 dma->dma_size = roundsz; 653 654 /* 655 * Map the memory 656 */ 657 result = bus_dmamap_load(dma->dma_tag, dma->dma_map, 658 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); 659 if (result != 0 || dma->dma_paddr == 0) { 660 if (result == 0) 661 result = ENOMEM; 662 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", 663 __func__, result); 664 goto hdac_dma_alloc_fail; 665 } 666 667 HDA_BOOTHVERBOSE( 668 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", 669 __func__, (uintmax_t)size, (uintmax_t)roundsz); 670 ); 671 672 return (0); 673 674 hdac_dma_alloc_fail: 675 hdac_dma_free(sc, dma); 676 677 return (result); 678 } 679 680 /**************************************************************************** 681 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) 682 * 683 * Free a struct hdac_dma that has been previously allocated via the 684 * hdac_dma_alloc function. 
685 ****************************************************************************/ 686 static void 687 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) 688 { 689 if (dma->dma_paddr != 0) { 690 /* Flush caches */ 691 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 692 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 693 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 694 dma->dma_paddr = 0; 695 } 696 if (dma->dma_vaddr != NULL) { 697 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 698 dma->dma_vaddr = NULL; 699 } 700 if (dma->dma_tag != NULL) { 701 bus_dma_tag_destroy(dma->dma_tag); 702 dma->dma_tag = NULL; 703 } 704 dma->dma_size = 0; 705 } 706 707 /**************************************************************************** 708 * int hdac_mem_alloc(struct hdac_softc *) 709 * 710 * Allocate all the bus resources necessary to speak with the physical 711 * controller. 712 ****************************************************************************/ 713 static int 714 hdac_mem_alloc(struct hdac_softc *sc) 715 { 716 struct hdac_mem *mem; 717 718 mem = &sc->mem; 719 mem->mem_rid = PCIR_BAR(0); 720 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 721 &mem->mem_rid, RF_ACTIVE); 722 if (mem->mem_res == NULL) { 723 device_printf(sc->dev, 724 "%s: Unable to allocate memory resource\n", __func__); 725 return (ENOMEM); 726 } 727 mem->mem_tag = rman_get_bustag(mem->mem_res); 728 mem->mem_handle = rman_get_bushandle(mem->mem_res); 729 730 return (0); 731 } 732 733 /**************************************************************************** 734 * void hdac_mem_free(struct hdac_softc *) 735 * 736 * Free up resources previously allocated by hdac_mem_alloc. 737 ****************************************************************************/ 738 static void 739 hdac_mem_free(struct hdac_softc *sc) 740 { 741 struct hdac_mem *mem; 742 743 mem = &sc->mem; 744 if (mem->mem_res != NULL) 745 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, 746 mem->mem_res); 747 mem->mem_res = NULL; 748 } 749 750 /**************************************************************************** 751 * int hdac_irq_alloc(struct hdac_softc *) 752 * 753 * Allocate and setup the resources necessary for interrupt handling. 754 ****************************************************************************/ 755 static int 756 hdac_irq_alloc(struct hdac_softc *sc) 757 { 758 struct hdac_irq *irq; 759 int result; 760 761 irq = &sc->irq; 762 irq->irq_rid = 0x0; 763 764 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && 765 (result = pci_msi_count(sc->dev)) == 1 && 766 pci_alloc_msi(sc->dev, &result) == 0) 767 irq->irq_rid = 0x1; 768 769 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 770 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); 771 if (irq->irq_res == NULL) { 772 device_printf(sc->dev, "%s: Unable to allocate irq\n", 773 __func__); 774 goto hdac_irq_alloc_fail; 775 } 776 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, 777 NULL, hdac_intr_handler, sc, &irq->irq_handle); 778 if (result != 0) { 779 device_printf(sc->dev, 780 "%s: Unable to setup interrupt handler (%d)\n", 781 __func__, result); 782 goto hdac_irq_alloc_fail; 783 } 784 785 return (0); 786 787 hdac_irq_alloc_fail: 788 hdac_irq_free(sc); 789 790 return (ENXIO); 791 } 792 793 /**************************************************************************** 794 * void hdac_irq_free(struct hdac_softc *) 795 * 796 * Free up resources previously allocated by hdac_irq_alloc. 
797 ****************************************************************************/ 798 static void 799 hdac_irq_free(struct hdac_softc *sc) 800 { 801 struct hdac_irq *irq; 802 803 irq = &sc->irq; 804 if (irq->irq_res != NULL && irq->irq_handle != NULL) 805 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); 806 if (irq->irq_res != NULL) 807 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, 808 irq->irq_res); 809 if (irq->irq_rid == 0x1) 810 pci_release_msi(sc->dev); 811 irq->irq_handle = NULL; 812 irq->irq_res = NULL; 813 irq->irq_rid = 0x0; 814 } 815 816 /**************************************************************************** 817 * void hdac_corb_init(struct hdac_softc *) 818 * 819 * Initialize the corb registers for operations but do not start it up yet. 820 * The CORB engine must not be running when this function is called. 821 ****************************************************************************/ 822 static void 823 hdac_corb_init(struct hdac_softc *sc) 824 { 825 uint8_t corbsize; 826 uint64_t corbpaddr; 827 828 /* Setup the CORB size. */ 829 switch (sc->corb_size) { 830 case 256: 831 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); 832 break; 833 case 16: 834 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); 835 break; 836 case 2: 837 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); 838 break; 839 default: 840 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); 841 } 842 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); 843 844 /* Setup the CORB Address in the hdac */ 845 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; 846 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); 847 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); 848 849 /* Set the WP and RP */ 850 sc->corb_wp = 0; 851 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 852 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); 853 /* 854 * The HDA specification indicates that the CORBRPRST bit will always 855 * read as zero. Unfortunately, it seems that at least the 82801G 856 * doesn't reset the bit to zero, which stalls the corb engine. 857 * manually reset the bit to zero before continuing. 858 */ 859 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); 860 861 /* Enable CORB error reporting */ 862 #if 0 863 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); 864 #endif 865 } 866 867 /**************************************************************************** 868 * void hdac_rirb_init(struct hdac_softc *) 869 * 870 * Initialize the rirb registers for operations but do not start it up yet. 871 * The RIRB engine must not be running when this function is called. 872 ****************************************************************************/ 873 static void 874 hdac_rirb_init(struct hdac_softc *sc) 875 { 876 uint8_t rirbsize; 877 uint64_t rirbpaddr; 878 879 /* Setup the RIRB size. 
*/ 880 switch (sc->rirb_size) { 881 case 256: 882 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); 883 break; 884 case 16: 885 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); 886 break; 887 case 2: 888 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); 889 break; 890 default: 891 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); 892 } 893 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); 894 895 /* Setup the RIRB Address in the hdac */ 896 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; 897 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); 898 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); 899 900 /* Setup the WP and RP */ 901 sc->rirb_rp = 0; 902 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); 903 904 /* Setup the interrupt threshold */ 905 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); 906 907 /* Enable Overrun and response received reporting */ 908 #if 0 909 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 910 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); 911 #else 912 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); 913 #endif 914 915 /* 916 * Make sure that the Host CPU cache doesn't contain any dirty 917 * cache lines that falls in the rirb. If I understood correctly, it 918 * should be sufficient to do this only once as the rirb is purely 919 * read-only from now on. 920 */ 921 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 922 BUS_DMASYNC_PREREAD); 923 } 924 925 /**************************************************************************** 926 * void hdac_corb_start(hdac_softc *) 927 * 928 * Startup the corb DMA engine 929 ****************************************************************************/ 930 static void 931 hdac_corb_start(struct hdac_softc *sc) 932 { 933 uint32_t corbctl; 934 935 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); 936 corbctl |= HDAC_CORBCTL_CORBRUN; 937 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); 938 } 939 940 /**************************************************************************** 941 * void hdac_rirb_start(hdac_softc *) 942 * 943 * Startup the rirb DMA engine 944 ****************************************************************************/ 945 static void 946 hdac_rirb_start(struct hdac_softc *sc) 947 { 948 uint32_t rirbctl; 949 950 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); 951 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; 952 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); 953 } 954 955 static int 956 hdac_rirb_flush(struct hdac_softc *sc) 957 { 958 struct hdac_rirb *rirb_base, *rirb; 959 nid_t cad; 960 uint32_t resp, resp_ex; 961 uint8_t rirbwp; 962 int ret; 963 964 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; 965 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); 966 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 967 BUS_DMASYNC_POSTREAD); 968 969 ret = 0; 970 while (sc->rirb_rp != rirbwp) { 971 sc->rirb_rp++; 972 sc->rirb_rp %= sc->rirb_size; 973 rirb = &rirb_base[sc->rirb_rp]; 974 resp = le32toh(rirb->response); 975 resp_ex = le32toh(rirb->response_ex); 976 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); 977 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { 978 sc->unsolq[sc->unsolq_wp++] = resp; 979 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 980 sc->unsolq[sc->unsolq_wp++] = cad; 981 sc->unsolq_wp %= HDAC_UNSOLQ_MAX; 982 } else if (sc->codecs[cad].pending <= 0) { 983 device_printf(sc->dev, "Unexpected unsolicited " 984 "response from address %d: %08x\n", cad, resp); 985 } else { 986 sc->codecs[cad].response = resp; 987 
sc->codecs[cad].pending--; 988 } 989 ret++; 990 } 991 992 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, 993 BUS_DMASYNC_PREREAD); 994 return (ret); 995 } 996 997 static int 998 hdac_unsolq_flush(struct hdac_softc *sc) 999 { 1000 device_t child; 1001 nid_t cad; 1002 uint32_t resp; 1003 int ret = 0; 1004 1005 if (sc->unsolq_st == HDAC_UNSOLQ_READY) { 1006 sc->unsolq_st = HDAC_UNSOLQ_BUSY; 1007 while (sc->unsolq_rp != sc->unsolq_wp) { 1008 resp = sc->unsolq[sc->unsolq_rp++]; 1009 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1010 cad = sc->unsolq[sc->unsolq_rp++]; 1011 sc->unsolq_rp %= HDAC_UNSOLQ_MAX; 1012 if ((child = sc->codecs[cad].dev) != NULL && 1013 device_is_attached(child)) 1014 HDAC_UNSOL_INTR(child, resp); 1015 ret++; 1016 } 1017 sc->unsolq_st = HDAC_UNSOLQ_READY; 1018 } 1019 1020 return (ret); 1021 } 1022 1023 /**************************************************************************** 1024 * uint32_t hdac_send_command 1025 * 1026 * Wrapper function that sends only one command to a given codec 1027 ****************************************************************************/ 1028 static uint32_t 1029 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) 1030 { 1031 int timeout; 1032 uint32_t *corb; 1033 1034 hdac_lockassert(sc); 1035 verb &= ~HDA_CMD_CAD_MASK; 1036 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; 1037 sc->codecs[cad].response = HDA_INVALID; 1038 1039 sc->codecs[cad].pending++; 1040 sc->corb_wp++; 1041 sc->corb_wp %= sc->corb_size; 1042 corb = (uint32_t *)sc->corb_dma.dma_vaddr; 1043 bus_dmamap_sync(sc->corb_dma.dma_tag, 1044 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); 1045 corb[sc->corb_wp] = htole32(verb); 1046 bus_dmamap_sync(sc->corb_dma.dma_tag, 1047 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); 1048 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); 1049 1050 timeout = 10000; 1051 do { 1052 if (hdac_rirb_flush(sc) == 0) 1053 DELAY(10); 1054 } while (sc->codecs[cad].pending != 0 && --timeout); 1055 1056 if (sc->codecs[cad].pending != 0) { 1057 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", 1058 verb, cad); 1059 sc->codecs[cad].pending = 0; 1060 } 1061 1062 if (sc->unsolq_rp != sc->unsolq_wp) 1063 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); 1064 return (sc->codecs[cad].response); 1065 } 1066 1067 /**************************************************************************** 1068 * Device Methods 1069 ****************************************************************************/ 1070 1071 /**************************************************************************** 1072 * int hdac_probe(device_t) 1073 * 1074 * Probe for the presence of an hdac. If none is found, check for a generic 1075 * match using the subclass of the device. 
1076 ****************************************************************************/ 1077 static int 1078 hdac_probe(device_t dev) 1079 { 1080 int i, result; 1081 uint32_t model; 1082 uint16_t class, subclass; 1083 char desc[64]; 1084 1085 model = (uint32_t)pci_get_device(dev) << 16; 1086 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1087 class = pci_get_class(dev); 1088 subclass = pci_get_subclass(dev); 1089 1090 bzero(desc, sizeof(desc)); 1091 result = ENXIO; 1092 for (i = 0; i < nitems(hdac_devices); i++) { 1093 if (hdac_devices[i].model == model) { 1094 strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); 1095 result = BUS_PROBE_DEFAULT; 1096 break; 1097 } 1098 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1099 class == PCIC_MULTIMEDIA && 1100 subclass == PCIS_MULTIMEDIA_HDA) { 1101 snprintf(desc, sizeof(desc), "%s (0x%04x)", 1102 hdac_devices[i].desc, pci_get_device(dev)); 1103 result = BUS_PROBE_GENERIC; 1104 break; 1105 } 1106 } 1107 if (result == ENXIO && class == PCIC_MULTIMEDIA && 1108 subclass == PCIS_MULTIMEDIA_HDA) { 1109 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); 1110 result = BUS_PROBE_GENERIC; 1111 } 1112 if (result != ENXIO) 1113 device_set_descf(dev, "%s HDA Controller", desc); 1114 1115 return (result); 1116 } 1117 1118 static void 1119 hdac_unsolq_task(void *context, int pending) 1120 { 1121 struct hdac_softc *sc; 1122 1123 sc = (struct hdac_softc *)context; 1124 1125 hdac_lock(sc); 1126 hdac_unsolq_flush(sc); 1127 hdac_unlock(sc); 1128 } 1129 1130 /**************************************************************************** 1131 * int hdac_attach(device_t) 1132 * 1133 * Attach the device into the kernel. Interrupts usually won't be enabled 1134 * when this function is called. Setup everything that doesn't require 1135 * interrupts and defer probing of codecs until interrupts are enabled. 
1136 ****************************************************************************/ 1137 static int 1138 hdac_attach(device_t dev) 1139 { 1140 struct hdac_softc *sc; 1141 int result; 1142 int i, devid = -1; 1143 uint32_t model; 1144 uint16_t class, subclass; 1145 uint16_t vendor; 1146 uint8_t v; 1147 1148 sc = device_get_softc(dev); 1149 HDA_BOOTVERBOSE( 1150 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", 1151 pci_get_subvendor(dev), pci_get_subdevice(dev)); 1152 device_printf(dev, "HDA Driver Revision: %s\n", 1153 HDA_DRV_TEST_REV); 1154 ); 1155 1156 model = (uint32_t)pci_get_device(dev) << 16; 1157 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; 1158 class = pci_get_class(dev); 1159 subclass = pci_get_subclass(dev); 1160 1161 for (i = 0; i < nitems(hdac_devices); i++) { 1162 if (hdac_devices[i].model == model) { 1163 devid = i; 1164 break; 1165 } 1166 if (HDA_DEV_MATCH(hdac_devices[i].model, model) && 1167 class == PCIC_MULTIMEDIA && 1168 subclass == PCIS_MULTIMEDIA_HDA) { 1169 devid = i; 1170 break; 1171 } 1172 } 1173 1174 mtx_init(&sc->lock, device_get_nameunit(dev), "HDA driver mutex", 1175 MTX_DEF); 1176 sc->dev = dev; 1177 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); 1178 callout_init(&sc->poll_callout, 1); 1179 for (i = 0; i < HDAC_CODEC_MAX; i++) 1180 sc->codecs[i].dev = NULL; 1181 if (devid >= 0) { 1182 sc->quirks_on = hdac_devices[devid].quirks_on; 1183 sc->quirks_off = hdac_devices[devid].quirks_off; 1184 } else { 1185 sc->quirks_on = 0; 1186 sc->quirks_off = 0; 1187 } 1188 if (resource_int_value(device_get_name(dev), 1189 device_get_unit(dev), "msi", &i) == 0) { 1190 if (i == 0) 1191 sc->quirks_off |= HDAC_QUIRK_MSI; 1192 else { 1193 sc->quirks_on |= HDAC_QUIRK_MSI; 1194 sc->quirks_off |= ~HDAC_QUIRK_MSI; 1195 } 1196 } 1197 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); 1198 HDA_BOOTVERBOSE( 1199 device_printf(sc->dev, 1200 "Config options: on=0x%08x off=0x%08x\n", 1201 sc->quirks_on, sc->quirks_off); 1202 ); 1203 sc->poll_ival = hz; 1204 if (resource_int_value(device_get_name(dev), 1205 device_get_unit(dev), "polling", &i) == 0 && i != 0) 1206 sc->polling = 1; 1207 else 1208 sc->polling = 0; 1209 1210 pci_enable_busmaster(dev); 1211 1212 vendor = pci_get_vendor(dev); 1213 if (vendor == INTEL_VENDORID) { 1214 /* TCSEL -> TC0 */ 1215 v = pci_read_config(dev, 0x44, 1); 1216 pci_write_config(dev, 0x44, v & 0xf8, 1); 1217 HDA_BOOTHVERBOSE( 1218 device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v, 1219 pci_read_config(dev, 0x44, 1)); 1220 ); 1221 } 1222 1223 #if defined(__i386__) || defined(__amd64__) 1224 sc->flags |= HDAC_F_DMA_NOCACHE; 1225 1226 if (resource_int_value(device_get_name(dev), 1227 device_get_unit(dev), "snoop", &i) == 0 && i != 0) { 1228 #else 1229 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1230 #endif 1231 /* 1232 * Try to enable PCIe snoop to avoid messing around with 1233 * uncacheable DMA attribute. Since PCIe snoop register 1234 * config is pretty much vendor specific, there are no 1235 * general solutions on how to enable it, forcing us (even 1236 * Microsoft) to enable uncacheable or write combined DMA 1237 * by default. 
1238 * 1239 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx 1240 */ 1241 for (i = 0; i < nitems(hdac_pcie_snoop); i++) { 1242 if (hdac_pcie_snoop[i].vendor != vendor) 1243 continue; 1244 sc->flags &= ~HDAC_F_DMA_NOCACHE; 1245 if (hdac_pcie_snoop[i].reg == 0x00) 1246 break; 1247 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1248 if ((v & hdac_pcie_snoop[i].enable) == 1249 hdac_pcie_snoop[i].enable) 1250 break; 1251 v &= hdac_pcie_snoop[i].mask; 1252 v |= hdac_pcie_snoop[i].enable; 1253 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); 1254 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); 1255 if ((v & hdac_pcie_snoop[i].enable) != 1256 hdac_pcie_snoop[i].enable) { 1257 HDA_BOOTVERBOSE( 1258 device_printf(dev, 1259 "WARNING: Failed to enable PCIe " 1260 "snoop!\n"); 1261 ); 1262 #if defined(__i386__) || defined(__amd64__) 1263 sc->flags |= HDAC_F_DMA_NOCACHE; 1264 #endif 1265 } 1266 break; 1267 } 1268 #if defined(__i386__) || defined(__amd64__) 1269 } 1270 #endif 1271 1272 HDA_BOOTHVERBOSE( 1273 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", 1274 (sc->flags & HDAC_F_DMA_NOCACHE) ? 1275 "Uncacheable" : "PCIe snoop", vendor); 1276 ); 1277 1278 /* Allocate resources */ 1279 result = hdac_mem_alloc(sc); 1280 if (result != 0) 1281 goto hdac_attach_fail; 1282 1283 /* Get Capabilities */ 1284 hdac_reset(sc, 1); 1285 result = hdac_get_capabilities(sc); 1286 if (result != 0) 1287 goto hdac_attach_fail; 1288 1289 /* Allocate CORB, RIRB, POS and BDLs dma memory */ 1290 result = hdac_dma_alloc(sc, &sc->corb_dma, 1291 sc->corb_size * sizeof(uint32_t)); 1292 if (result != 0) 1293 goto hdac_attach_fail; 1294 result = hdac_dma_alloc(sc, &sc->rirb_dma, 1295 sc->rirb_size * sizeof(struct hdac_rirb)); 1296 if (result != 0) 1297 goto hdac_attach_fail; 1298 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, 1299 M_HDAC, M_ZERO | M_WAITOK); 1300 for (i = 0; i < sc->num_ss; i++) { 1301 result = hdac_dma_alloc(sc, &sc->streams[i].bdl, 1302 sizeof(struct hdac_bdle) * HDA_BDL_MAX); 1303 if (result != 0) 1304 goto hdac_attach_fail; 1305 } 1306 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { 1307 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { 1308 HDA_BOOTVERBOSE( 1309 device_printf(dev, "Failed to " 1310 "allocate DMA pos buffer " 1311 "(non-fatal)\n"); 1312 ); 1313 } else { 1314 uint64_t addr = sc->pos_dma.dma_paddr; 1315 1316 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); 1317 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 1318 (addr & HDAC_DPLBASE_DPLBASE_MASK) | 1319 HDAC_DPLBASE_DPLBASE_DMAPBE); 1320 } 1321 } 1322 1323 result = bus_dma_tag_create( 1324 bus_get_dma_tag(sc->dev), /* parent */ 1325 HDA_DMA_ALIGNMENT, /* alignment */ 1326 0, /* boundary */ 1327 (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : 1328 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1329 BUS_SPACE_MAXADDR, /* highaddr */ 1330 NULL, /* filtfunc */ 1331 NULL, /* fistfuncarg */ 1332 HDA_BUFSZ_MAX, /* maxsize */ 1333 1, /* nsegments */ 1334 HDA_BUFSZ_MAX, /* maxsegsz */ 1335 0, /* flags */ 1336 NULL, /* lockfunc */ 1337 NULL, /* lockfuncarg */ 1338 &sc->chan_dmat); /* dmat */ 1339 if (result != 0) { 1340 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", 1341 __func__, result); 1342 goto hdac_attach_fail; 1343 } 1344 1345 /* Quiesce everything */ 1346 HDA_BOOTHVERBOSE( 1347 device_printf(dev, "Reset controller...\n"); 1348 ); 1349 hdac_reset(sc, true); 1350 1351 /* Initialize the CORB and RIRB */ 1352 hdac_corb_init(sc); 1353 hdac_rirb_init(sc); 1354 1355 result = hdac_irq_alloc(sc); 1356 if (result != 0) 1357 goto hdac_attach_fail; 1358 1359 /* Defer remaining of initialization until interrupts are enabled */ 1360 sc->intrhook.ich_func = hdac_attach2; 1361 sc->intrhook.ich_arg = (void *)sc; 1362 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { 1363 sc->intrhook.ich_func = NULL; 1364 hdac_attach2((void *)sc); 1365 } 1366 1367 return (0); 1368 1369 hdac_attach_fail: 1370 hdac_irq_free(sc); 1371 if (sc->streams != NULL) 1372 for (i = 0; i < sc->num_ss; i++) 1373 hdac_dma_free(sc, &sc->streams[i].bdl); 1374 free(sc->streams, M_HDAC); 1375 hdac_dma_free(sc, &sc->rirb_dma); 1376 hdac_dma_free(sc, &sc->corb_dma); 1377 hdac_mem_free(sc); 1378 mtx_destroy(&sc->lock); 1379 1380 return (ENXIO); 1381 } 1382 1383 static int 1384 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) 1385 { 1386 struct hdac_softc *sc; 1387 device_t *devlist; 1388 device_t dev; 1389 int devcount, i, err, val; 1390 1391 dev = oidp->oid_arg1; 1392 sc = device_get_softc(dev); 1393 if (sc == NULL) 1394 return (EINVAL); 1395 val = 0; 1396 err = sysctl_handle_int(oidp, &val, 0, req); 1397 if (err != 0 || req->newptr == NULL || val == 0) 1398 return (err); 1399 1400 /* XXX: Temporary. For debugging. 
*/ 1401 if (val == 100) { 1402 hdac_suspend(dev); 1403 return (0); 1404 } else if (val == 101) { 1405 hdac_resume(dev); 1406 return (0); 1407 } 1408 1409 bus_topo_lock(); 1410 1411 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) { 1412 bus_topo_unlock(); 1413 return (err); 1414 } 1415 1416 hdac_lock(sc); 1417 for (i = 0; i < devcount; i++) 1418 HDAC_PINDUMP(devlist[i]); 1419 hdac_unlock(sc); 1420 1421 bus_topo_unlock(); 1422 1423 free(devlist, M_TEMP); 1424 return (0); 1425 } 1426 1427 static int 1428 hdac_mdata_rate(uint16_t fmt) 1429 { 1430 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; 1431 int rate, bits; 1432 1433 if (fmt & (1 << 14)) 1434 rate = 44100; 1435 else 1436 rate = 48000; 1437 rate *= ((fmt >> 11) & 0x07) + 1; 1438 rate /= ((fmt >> 8) & 0x07) + 1; 1439 bits = mbits[(fmt >> 4) & 0x03]; 1440 bits *= (fmt & 0x0f) + 1; 1441 return (rate * bits); 1442 } 1443 1444 static int 1445 hdac_bdata_rate(uint16_t fmt, int output) 1446 { 1447 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; 1448 int rate, bits; 1449 1450 rate = 48000; 1451 rate *= ((fmt >> 11) & 0x07) + 1; 1452 bits = bbits[(fmt >> 4) & 0x03]; 1453 bits *= (fmt & 0x0f) + 1; 1454 if (!output) 1455 bits = ((bits + 7) & ~0x07) + 10; 1456 return (rate * bits); 1457 } 1458 1459 static void 1460 hdac_poll_reinit(struct hdac_softc *sc) 1461 { 1462 int i, pollticks, min = 1000000; 1463 struct hdac_stream *s; 1464 1465 if (sc->polling == 0) 1466 return; 1467 if (sc->unsol_registered > 0) 1468 min = hz / 2; 1469 for (i = 0; i < sc->num_ss; i++) { 1470 s = &sc->streams[i]; 1471 if (s->running == 0) 1472 continue; 1473 pollticks = ((uint64_t)hz * s->blksz) / 1474 (hdac_mdata_rate(s->format) / 8); 1475 pollticks >>= 1; 1476 if (pollticks > hz) 1477 pollticks = hz; 1478 if (pollticks < 1) 1479 pollticks = 1; 1480 if (min > pollticks) 1481 min = pollticks; 1482 } 1483 sc->poll_ival = min; 1484 if (min == 1000000) 1485 callout_stop(&sc->poll_callout); 1486 else 1487 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); 1488 } 1489 1490 static int 1491 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) 1492 { 1493 struct hdac_softc *sc; 1494 device_t dev; 1495 uint32_t ctl; 1496 int err, val; 1497 1498 dev = oidp->oid_arg1; 1499 sc = device_get_softc(dev); 1500 if (sc == NULL) 1501 return (EINVAL); 1502 hdac_lock(sc); 1503 val = sc->polling; 1504 hdac_unlock(sc); 1505 err = sysctl_handle_int(oidp, &val, 0, req); 1506 1507 if (err != 0 || req->newptr == NULL) 1508 return (err); 1509 if (val < 0 || val > 1) 1510 return (EINVAL); 1511 1512 hdac_lock(sc); 1513 if (val != sc->polling) { 1514 if (val == 0) { 1515 callout_stop(&sc->poll_callout); 1516 hdac_unlock(sc); 1517 callout_drain(&sc->poll_callout); 1518 hdac_lock(sc); 1519 sc->polling = 0; 1520 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1521 ctl |= HDAC_INTCTL_GIE; 1522 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1523 } else { 1524 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); 1525 ctl &= ~HDAC_INTCTL_GIE; 1526 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); 1527 sc->polling = 1; 1528 hdac_poll_reinit(sc); 1529 } 1530 } 1531 hdac_unlock(sc); 1532 1533 return (err); 1534 } 1535 1536 static void 1537 hdac_attach2(void *arg) 1538 { 1539 struct hdac_softc *sc; 1540 device_t child; 1541 uint32_t vendorid, revisionid; 1542 int i; 1543 uint16_t statests; 1544 1545 sc = (struct hdac_softc *)arg; 1546 1547 hdac_lock(sc); 1548 1549 /* Remove ourselves from the config hooks */ 1550 if (sc->intrhook.ich_func != NULL) { 1551 config_intrhook_disestablish(&sc->intrhook); 
1552 sc->intrhook.ich_func = NULL; 1553 } 1554 1555 HDA_BOOTHVERBOSE( 1556 device_printf(sc->dev, "Starting CORB Engine...\n"); 1557 ); 1558 hdac_corb_start(sc); 1559 HDA_BOOTHVERBOSE( 1560 device_printf(sc->dev, "Starting RIRB Engine...\n"); 1561 ); 1562 hdac_rirb_start(sc); 1563 1564 /* 1565 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1566 * (status change) interrupts. The documentation says that we 1567 * should not make any assumptions about the state of this register 1568 * and set it explicitly. 1569 * NB: this needs to be done before the interrupt is enabled as 1570 * the handler does not expect this interrupt source. 1571 */ 1572 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1573 1574 /* 1575 * Read and clear post-reset SDI wake status. 1576 * Each set bit corresponds to a codec that came out of reset. 1577 */ 1578 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); 1579 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); 1580 1581 HDA_BOOTHVERBOSE( 1582 device_printf(sc->dev, 1583 "Enabling controller interrupt...\n"); 1584 ); 1585 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1586 HDAC_GCTL_UNSOL); 1587 if (sc->polling == 0) { 1588 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, 1589 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1590 } 1591 DELAY(1000); 1592 1593 HDA_BOOTHVERBOSE( 1594 device_printf(sc->dev, "Scanning HDA codecs ...\n"); 1595 ); 1596 hdac_unlock(sc); 1597 for (i = 0; i < HDAC_CODEC_MAX; i++) { 1598 if (HDAC_STATESTS_SDIWAKE(statests, i)) { 1599 HDA_BOOTHVERBOSE( 1600 device_printf(sc->dev, 1601 "Found CODEC at address %d\n", i); 1602 ); 1603 hdac_lock(sc); 1604 vendorid = hdac_send_command(sc, i, 1605 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); 1606 revisionid = hdac_send_command(sc, i, 1607 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); 1608 hdac_unlock(sc); 1609 if (vendorid == HDA_INVALID && 1610 revisionid == HDA_INVALID) { 1611 device_printf(sc->dev, 1612 "CODEC at address %d not responding!\n", i); 1613 continue; 1614 } 1615 sc->codecs[i].vendor_id = 1616 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); 1617 sc->codecs[i].device_id = 1618 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); 1619 sc->codecs[i].revision_id = 1620 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); 1621 sc->codecs[i].stepping_id = 1622 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); 1623 child = device_add_child(sc->dev, "hdacc", DEVICE_UNIT_ANY); 1624 if (child == NULL) { 1625 device_printf(sc->dev, 1626 "Failed to add CODEC device\n"); 1627 continue; 1628 } 1629 device_set_ivars(child, (void *)(intptr_t)i); 1630 sc->codecs[i].dev = child; 1631 } 1632 } 1633 bus_attach_children(sc->dev); 1634 1635 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1636 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1637 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1638 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); 1639 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), 1640 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, 1641 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, 1642 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); 1643 } 1644 1645 /**************************************************************************** 1646 * int hdac_shutdown(device_t) 1647 * 1648 * Power down HDA bus and codecs. 
1649 ****************************************************************************/ 1650 static int 1651 hdac_shutdown(device_t dev) 1652 { 1653 struct hdac_softc *sc = device_get_softc(dev); 1654 1655 HDA_BOOTHVERBOSE( 1656 device_printf(dev, "Shutdown...\n"); 1657 ); 1658 callout_drain(&sc->poll_callout); 1659 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1660 bus_generic_shutdown(dev); 1661 1662 hdac_lock(sc); 1663 HDA_BOOTHVERBOSE( 1664 device_printf(dev, "Reset controller...\n"); 1665 ); 1666 hdac_reset(sc, false); 1667 hdac_unlock(sc); 1668 HDA_BOOTHVERBOSE( 1669 device_printf(dev, "Shutdown done\n"); 1670 ); 1671 return (0); 1672 } 1673 1674 /**************************************************************************** 1675 * int hdac_suspend(device_t) 1676 * 1677 * Suspend and power down HDA bus and codecs. 1678 ****************************************************************************/ 1679 static int 1680 hdac_suspend(device_t dev) 1681 { 1682 struct hdac_softc *sc = device_get_softc(dev); 1683 1684 HDA_BOOTHVERBOSE( 1685 device_printf(dev, "Suspend...\n"); 1686 ); 1687 bus_generic_suspend(dev); 1688 1689 hdac_lock(sc); 1690 HDA_BOOTHVERBOSE( 1691 device_printf(dev, "Reset controller...\n"); 1692 ); 1693 callout_stop(&sc->poll_callout); 1694 hdac_reset(sc, false); 1695 hdac_unlock(sc); 1696 callout_drain(&sc->poll_callout); 1697 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1698 HDA_BOOTHVERBOSE( 1699 device_printf(dev, "Suspend done\n"); 1700 ); 1701 return (0); 1702 } 1703 1704 /**************************************************************************** 1705 * int hdac_resume(device_t) 1706 * 1707 * Powerup and restore HDA bus and codecs state. 1708 ****************************************************************************/ 1709 static int 1710 hdac_resume(device_t dev) 1711 { 1712 struct hdac_softc *sc = device_get_softc(dev); 1713 int error; 1714 1715 HDA_BOOTHVERBOSE( 1716 device_printf(dev, "Resume...\n"); 1717 ); 1718 hdac_lock(sc); 1719 1720 /* Quiesce everything */ 1721 HDA_BOOTHVERBOSE( 1722 device_printf(dev, "Reset controller...\n"); 1723 ); 1724 hdac_reset(sc, true); 1725 1726 /* Initialize the CORB and RIRB */ 1727 hdac_corb_init(sc); 1728 hdac_rirb_init(sc); 1729 1730 HDA_BOOTHVERBOSE( 1731 device_printf(dev, "Starting CORB Engine...\n"); 1732 ); 1733 hdac_corb_start(sc); 1734 HDA_BOOTHVERBOSE( 1735 device_printf(dev, "Starting RIRB Engine...\n"); 1736 ); 1737 hdac_rirb_start(sc); 1738 1739 /* 1740 * Clear HDAC_WAKEEN as at present we have no use for SDI wake 1741 * (status change) events. The documentation says that we should 1742 * not make any assumptions about the state of this register and 1743 * set it explicitly. 1744 * Also, clear HDAC_STATESTS. 1745 * NB: this needs to be done before the interrupt is enabled as 1746 * the handler does not expect this interrupt source. 
1747 */ 1748 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); 1749 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); 1750 1751 HDA_BOOTHVERBOSE( 1752 device_printf(dev, "Enabling controller interrupt...\n"); 1753 ); 1754 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | 1755 HDAC_GCTL_UNSOL); 1756 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); 1757 DELAY(1000); 1758 hdac_poll_reinit(sc); 1759 hdac_unlock(sc); 1760 1761 error = bus_generic_resume(dev); 1762 HDA_BOOTHVERBOSE( 1763 device_printf(dev, "Resume done\n"); 1764 ); 1765 return (error); 1766 } 1767 1768 /**************************************************************************** 1769 * int hdac_detach(device_t) 1770 * 1771 * Detach and free up resources utilized by the hdac device. 1772 ****************************************************************************/ 1773 static int 1774 hdac_detach(device_t dev) 1775 { 1776 struct hdac_softc *sc = device_get_softc(dev); 1777 int i, error; 1778 1779 callout_drain(&sc->poll_callout); 1780 hdac_irq_free(sc); 1781 taskqueue_drain(taskqueue_thread, &sc->unsolq_task); 1782 1783 error = bus_generic_detach(dev); 1784 if (error != 0) 1785 return (error); 1786 1787 hdac_lock(sc); 1788 hdac_reset(sc, false); 1789 hdac_unlock(sc); 1790 1791 for (i = 0; i < sc->num_ss; i++) 1792 hdac_dma_free(sc, &sc->streams[i].bdl); 1793 free(sc->streams, M_HDAC); 1794 hdac_dma_free(sc, &sc->pos_dma); 1795 hdac_dma_free(sc, &sc->rirb_dma); 1796 hdac_dma_free(sc, &sc->corb_dma); 1797 if (sc->chan_dmat != NULL) { 1798 bus_dma_tag_destroy(sc->chan_dmat); 1799 sc->chan_dmat = NULL; 1800 } 1801 hdac_mem_free(sc); 1802 mtx_destroy(&sc->lock); 1803 return (0); 1804 } 1805 1806 static bus_dma_tag_t 1807 hdac_get_dma_tag(device_t dev, device_t child) 1808 { 1809 struct hdac_softc *sc = device_get_softc(dev); 1810 1811 return (sc->chan_dmat); 1812 } 1813 1814 static int 1815 hdac_print_child(device_t dev, device_t child) 1816 { 1817 int retval; 1818 1819 retval = bus_print_child_header(dev, child); 1820 retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); 1821 retval += bus_print_child_footer(dev, child); 1822 1823 return (retval); 1824 } 1825 1826 static int 1827 hdac_child_location(device_t dev, device_t child, struct sbuf *sb) 1828 { 1829 1830 sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); 1831 return (0); 1832 } 1833 1834 static int 1835 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) 1836 { 1837 struct hdac_softc *sc = device_get_softc(dev); 1838 nid_t cad = (uintptr_t)device_get_ivars(child); 1839 1840 sbuf_printf(sb, 1841 "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", 1842 sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, 1843 sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); 1844 return (0); 1845 } 1846 1847 static int 1848 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1849 { 1850 struct hdac_softc *sc = device_get_softc(dev); 1851 nid_t cad = (uintptr_t)device_get_ivars(child); 1852 1853 switch (which) { 1854 case HDA_IVAR_CODEC_ID: 1855 *result = cad; 1856 break; 1857 case HDA_IVAR_VENDOR_ID: 1858 *result = sc->codecs[cad].vendor_id; 1859 break; 1860 case HDA_IVAR_DEVICE_ID: 1861 *result = sc->codecs[cad].device_id; 1862 break; 1863 case HDA_IVAR_REVISION_ID: 1864 *result = sc->codecs[cad].revision_id; 1865 break; 1866 case HDA_IVAR_STEPPING_ID: 1867 *result = sc->codecs[cad].stepping_id; 1868 break; 1869 case HDA_IVAR_SUBVENDOR_ID: 1870 
static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
	int i, ss;

	ss = -1;
	/* Allocate ISS/OSS first. */
	if (dir == 0) {
		for (i = 0; i < sc->num_iss; i++) {
			if (sc->streams[i].stream == stream) {
				ss = i;
				break;
			}
		}
	} else {
		for (i = 0; i < sc->num_oss; i++) {
			if (sc->streams[i + sc->num_iss].stream == stream) {
				ss = i + sc->num_iss;
				break;
			}
		}
	}
	/* Fallback to BSS. */
	if (ss == -1) {
		for (i = 0; i < sc->num_bss; i++) {
			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
			    == stream) {
				ss = i + sc->num_iss + sc->num_oss;
				break;
			}
		}
	}
	return (ss);
}

static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if found nothing. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/* Allocate stream number */
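	/*
	 * ISS and OSS slots get stream tags counted upward from 1 within
	 * their group, while BSS slots are numbered downward from 15
	 * (keeping them clear of the dedicated slots' tags).  Tag 0 is
	 * reserved to mark a free slot.
	 */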
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int ss;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
	if (dir == 1)
		sc->sdo_bw_used -= sc->streams[ss].bw;
	else
		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
	sc->streams[ss].stream = 0;
	sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

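	/*
	 * Fill the buffer descriptor list with blkcnt equal-sized
	 * entries covering the DMA buffer at 'buf'.  IOC is set on
	 * every entry, so the controller raises an interrupt after
	 * each completed block.
	 */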
	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}

static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

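	/*
	 * Stream reset handshake: assert SRST and wait for the bit to
	 * read back as set, then deassert it and wait for it to clear
	 * again.  Each phase is polled for up to timeout * 10us.
	 */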
	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}

static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdac_probe),
	DEVMETHOD(device_attach,	hdac_attach),
	DEVMETHOD(device_detach,	hdac_detach),
	DEVMETHOD(device_shutdown,	hdac_shutdown),
	DEVMETHOD(device_suspend,	hdac_suspend),
	DEVMETHOD(device_resume,	hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
	DEVMETHOD(bus_print_child,	hdac_print_child),
	DEVMETHOD(bus_child_location,	hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo,	hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);