1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Intel High Definition Audio (Controller) driver for FreeBSD.
33 */
34
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_snd.h"
37 #endif
38
39 #include <dev/sound/pcm/sound.h>
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42
43 #include <sys/ctype.h>
44 #include <sys/endian.h>
45 #include <sys/taskqueue.h>
46
47 #include <dev/sound/pci/hda/hdac_private.h>
48 #include <dev/sound/pci/hda/hdac_reg.h>
49 #include <dev/sound/pci/hda/hda_reg.h>
50 #include <dev/sound/pci/hda/hdac.h>
51
52 #define HDA_DRV_TEST_REV "20120126_0002"
53
54 #define hdac_lock(sc) snd_mtxlock((sc)->lock)
55 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock)
56 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock)
57
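/*
 * Controller quirk bits: HDAC_QUIRK_64BIT forces (or, via quirks_off,
 * denies) 64-bit DMA addressing, HDAC_QUIRK_DMAPOS enables use of the DMA
 * position buffer, and HDAC_QUIRK_MSI controls whether MSI interrupts are
 * used.  Bits are set per device in hdac_devices[] below or via hints.
 */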
58 #define HDAC_QUIRK_64BIT (1 << 0)
59 #define HDAC_QUIRK_DMAPOS (1 << 1)
60 #define HDAC_QUIRK_MSI (1 << 2)
61
62 static const struct {
63 const char *key;
64 uint32_t value;
65 } hdac_quirks_tab[] = {
66 { "64bit", HDAC_QUIRK_64BIT },
67 { "dmapos", HDAC_QUIRK_DMAPOS },
68 { "msi", HDAC_QUIRK_MSI },
69 };
70
71 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");
72
73 static const struct {
74 uint32_t model;
75 const char *desc;
76 char quirks_on;
77 char quirks_off;
78 } hdac_devices[] = {
79 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 },
80 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
81 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
82 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 },
83 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 },
84 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 },
85 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 },
86 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 },
87 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 },
88 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 },
89 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 },
90 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 },
91 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 },
92 { HDA_INTEL_BR, "Intel Braswell", 0, 0 },
93 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 },
94 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 },
95 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 },
96 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 },
97 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 },
98 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 },
99 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 },
100 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 },
101 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 },
102 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 },
103 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 },
104 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 },
105 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 },
106 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 },
107 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 },
108 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 },
109 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 },
110 { HDA_INTEL_TGLKH, "Intel Tiger Lake-H", 0, 0 },
111 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 },
112 { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 },
113 { HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 },
114 { HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 },
115 { HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 },
116 { HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 },
117 { HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 },
118 { HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 },
119 { HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 },
120 { HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 },
121 { HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 },
122 { HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 },
123 { HDA_INTEL_LNLP, "Intel Lunar Lake-P", 0, 0 },
124 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 },
125 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 },
126 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 },
127 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 },
128 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 },
129 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 },
130 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 },
131 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 },
132 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
133 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
134 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
135 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
136 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
137 { HDA_INTEL_SCH, "Intel SCH", 0, 0 },
138 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI },
139 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI },
140 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 },
141 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 },
142 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 },
143 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 },
144 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 },
145 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 },
146 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 },
147 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 },
148 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
149 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
150 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
151 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
152 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 },
153 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 },
154 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 },
155 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 },
156 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 },
157 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 },
158 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 },
159 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 },
160 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI },
161 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI },
162 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI },
163 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI },
164 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI },
165 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI },
166 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI },
167 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI },
168 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 },
169 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
170 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
171 { HDA_ATI_SB450, "ATI SB450", 0, 0 },
172 { HDA_ATI_SB600, "ATI SB600", 0, 0 },
173 { HDA_ATI_RS600, "ATI RS600", 0, 0 },
174 { HDA_ATI_RS690, "ATI RS690", 0, 0 },
175 { HDA_ATI_RS780, "ATI RS780", 0, 0 },
176 { HDA_ATI_RS880, "ATI RS880", 0, 0 },
177 { HDA_ATI_R600, "ATI R600", 0, 0 },
178 { HDA_ATI_RV610, "ATI RV610", 0, 0 },
179 { HDA_ATI_RV620, "ATI RV620", 0, 0 },
180 { HDA_ATI_RV630, "ATI RV630", 0, 0 },
181 { HDA_ATI_RV635, "ATI RV635", 0, 0 },
182 { HDA_ATI_RV710, "ATI RV710", 0, 0 },
183 { HDA_ATI_RV730, "ATI RV730", 0, 0 },
184 { HDA_ATI_RV740, "ATI RV740", 0, 0 },
185 { HDA_ATI_RV770, "ATI RV770", 0, 0 },
186 { HDA_ATI_RV810, "ATI RV810", 0, 0 },
187 { HDA_ATI_RV830, "ATI RV830", 0, 0 },
188 { HDA_ATI_RV840, "ATI RV840", 0, 0 },
189 { HDA_ATI_RV870, "ATI RV870", 0, 0 },
190 { HDA_ATI_RV910, "ATI RV910", 0, 0 },
191 { HDA_ATI_RV930, "ATI RV930", 0, 0 },
192 { HDA_ATI_RV940, "ATI RV940", 0, 0 },
193 { HDA_ATI_RV970, "ATI RV970", 0, 0 },
194 { HDA_ATI_R1000, "ATI R1000", 0, 0 },
195 { HDA_ATI_OLAND, "ATI Oland", 0, 0 },
196 { HDA_ATI_KABINI, "ATI Kabini", 0, 0 },
197 { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 },
198 { HDA_AMD_X370, "AMD X370", 0, 0 },
199 { HDA_AMD_X570, "AMD X570", 0, 0 },
200 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 },
201 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 },
202 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 },
203 { HDA_RDC_M3010, "RDC M3010", 0, 0 },
204 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 },
205 { HDA_VMWARE, "VMware", 0, 0 },
206 { HDA_SIS_966, "SiS 966/968", 0, 0 },
207 { HDA_ULI_M5461, "ULI M5461", 0, 0 },
208 { HDA_CREATIVE_SB1570, "Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT },
209 /* Unknown */
210 { HDA_INTEL_ALL, "Intel", 0, 0 },
211 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 },
212 { HDA_ATI_ALL, "ATI", 0, 0 },
213 { HDA_AMD_ALL, "AMD", 0, 0 },
214 { HDA_CREATIVE_ALL, "Creative", 0, 0 },
215 { HDA_VIA_ALL, "VIA", 0, 0 },
216 { HDA_VMWARE_ALL, "VMware", 0, 0 },
217 { HDA_SIS_ALL, "SiS", 0, 0 },
218 { HDA_ULI_ALL, "ULI", 0, 0 },
219 };
220
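/*
 * Vendor-specific PCI configuration registers used by hdac_attach() to
 * enable PCIe snooping of HDA DMA traffic: "reg" is the config-space
 * offset, "mask" selects the bits to preserve and "enable" the bits to
 * set; a zero register means no programming is needed for that vendor.
 */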
221 static const struct {
222 uint16_t vendor;
223 uint8_t reg;
224 uint8_t mask;
225 uint8_t enable;
226 } hdac_pcie_snoop[] = {
227 { INTEL_VENDORID, 0x00, 0x00, 0x00 },
228 { ATI_VENDORID, 0x42, 0xf8, 0x02 },
229 { AMD_VENDORID, 0x42, 0xf8, 0x02 },
230 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
231 };
232
233 /****************************************************************************
234 * Function prototypes
235 ****************************************************************************/
236 static void hdac_intr_handler(void *);
237 static int hdac_reset(struct hdac_softc *, bool);
238 static int hdac_get_capabilities(struct hdac_softc *);
239 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
240 static int hdac_dma_alloc(struct hdac_softc *,
241 struct hdac_dma *, bus_size_t);
242 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
243 static int hdac_mem_alloc(struct hdac_softc *);
244 static void hdac_mem_free(struct hdac_softc *);
245 static int hdac_irq_alloc(struct hdac_softc *);
246 static void hdac_irq_free(struct hdac_softc *);
247 static void hdac_corb_init(struct hdac_softc *);
248 static void hdac_rirb_init(struct hdac_softc *);
249 static void hdac_corb_start(struct hdac_softc *);
250 static void hdac_rirb_start(struct hdac_softc *);
251
252 static void hdac_attach2(void *);
253
254 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
255
256 static int hdac_probe(device_t);
257 static int hdac_attach(device_t);
258 static int hdac_detach(device_t);
259 static int hdac_suspend(device_t);
260 static int hdac_resume(device_t);
261
262 static int hdac_rirb_flush(struct hdac_softc *sc);
263 static int hdac_unsolq_flush(struct hdac_softc *sc);
264
265 /* This function will surely make its way into an upper level someday. */
266 static void
267 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
268 {
269 const char *res = NULL;
270 int i = 0, j, k, len, inv;
271
272 if (resource_string_value(device_get_name(sc->dev),
273 device_get_unit(sc->dev), "config", &res) != 0)
274 return;
275 if (!(res != NULL && strlen(res) > 0))
276 return;
277 HDA_BOOTVERBOSE(
278 device_printf(sc->dev, "Config options:");
279 );
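/*
 * Tokenize the comma/whitespace-separated "config" hint; a "no" prefix
 * on a known keyword clears the quirk instead of setting it.
 */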
280 for (;;) {
281 while (res[i] != '\0' &&
282 (res[i] == ',' || isspace(res[i]) != 0))
283 i++;
284 if (res[i] == '\0') {
285 HDA_BOOTVERBOSE(
286 printf("\n");
287 );
288 return;
289 }
290 j = i;
291 while (res[j] != '\0' &&
292 !(res[j] == ',' || isspace(res[j]) != 0))
293 j++;
294 len = j - i;
295 if (len > 2 && strncmp(res + i, "no", 2) == 0)
296 inv = 2;
297 else
298 inv = 0;
299 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
300 if (strncmp(res + i + inv,
301 hdac_quirks_tab[k].key, len - inv) != 0)
302 continue;
303 if (len - inv != strlen(hdac_quirks_tab[k].key))
304 continue;
305 HDA_BOOTVERBOSE(
306 printf(" %s%s", (inv != 0) ? "no" : "",
307 hdac_quirks_tab[k].key);
308 );
309 if (inv == 0) {
310 *on |= hdac_quirks_tab[k].value;
311 *off &= ~hdac_quirks_tab[k].value;
312 } else if (inv != 0) {
313 *off |= hdac_quirks_tab[k].value;
314 *on &= ~hdac_quirks_tab[k].value;
315 }
316 break;
317 }
318 i = j;
319 }
320 }
321
322 static void
323 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
324 {
325 device_t dev;
326 uint8_t rirbsts;
327 int i;
328
329 /* Was this a controller interrupt? */
330 if (intsts & HDAC_INTSTS_CIS) {
331 /*
332 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
333 * we will need to check and clear HDAC_STATESTS.
334 * That event is used to report codec status changes such as
335 * a reset or a wake-up event.
336 */
337 /*
338 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
339 * will need to check and clear HDAC_CORBSTS_CMEI in
340 * HDAC_CORBSTS.
341 * That event is used to report CORB memory errors.
342 */
343 /*
344 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
345 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
346 * HDAC_RIRBSTS.
347 * That event is used to report response FIFO overruns.
348 */
349
350 /* Get as many responses as we can */
351 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
352 while (rirbsts & HDAC_RIRBSTS_RINTFL) {
353 HDAC_WRITE_1(&sc->mem,
354 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
355 hdac_rirb_flush(sc);
356 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
357 }
358 if (sc->unsolq_rp != sc->unsolq_wp)
359 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
360 }
361
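/*
 * Stream interrupts: one HDAC_INTSTS bit per stream descriptor, in
 * input, output, bidirectional order.
 */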
362 if (intsts & HDAC_INTSTS_SIS_MASK) {
363 for (i = 0; i < sc->num_ss; i++) {
364 if ((intsts & (1 << i)) == 0)
365 continue;
366 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
367 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
368 if ((dev = sc->streams[i].dev) != NULL) {
369 HDAC_STREAM_INTR(dev,
370 sc->streams[i].dir, sc->streams[i].stream);
371 }
372 }
373 }
374 }
375
376 /****************************************************************************
377 * void hdac_intr_handler(void *)
378 *
379 * Interrupt handler. Processes interrupts received from the hdac.
380 ****************************************************************************/
381 static void
382 hdac_intr_handler(void *context)
383 {
384 struct hdac_softc *sc;
385 uint32_t intsts;
386
387 sc = (struct hdac_softc *)context;
388
389 /*
390 * Loop until HDAC_INTSTS_GIS reads clear.
391 * It is plausible that the hardware interrupts the host only when GIS
392 * goes from zero to one. GIS is formed by OR-ing multiple hardware
393 * statuses, so it's possible that a previously cleared status gets set
394 * again while another status has not been cleared yet. Thus, there
395 * would be no new interrupt because GIS has stayed set. If we don't
396 * re-examine GIS, we can leave it set and never get an interrupt
397 * again.
398 */
399 hdac_lock(sc);
400 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
401 while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) {
402 hdac_one_intr(sc, intsts);
403 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
404 }
405 hdac_unlock(sc);
406 }
407
408 static void
409 hdac_poll_callback(void *arg)
410 {
411 struct hdac_softc *sc = arg;
412
413 if (sc == NULL)
414 return;
415
416 hdac_lock(sc);
417 if (sc->polling == 0) {
418 hdac_unlock(sc);
419 return;
420 }
421 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
422 hdac_unlock(sc);
423
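/* hdac_intr_handler() takes the softc lock itself, so call it unlocked. */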
424 hdac_intr_handler(sc);
425 }
426
427 /****************************************************************************
428 * int hdac_reset(hdac_softc *, bool)
429 *
430 * Reset the hdac to a quiescent and known state.
431 ****************************************************************************/
432 static int
433 hdac_reset(struct hdac_softc *sc, bool wakeup)
434 {
435 uint32_t gctl;
436 int count, i;
437
438 /*
439 * Stop all Streams DMA engine
440 */
441 for (i = 0; i < sc->num_iss; i++)
442 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
443 for (i = 0; i < sc->num_oss; i++)
444 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
445 for (i = 0; i < sc->num_bss; i++)
446 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);
447
448 /*
449 * Stop Control DMA engines.
450 */
451 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
452 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);
453
454 /*
455 * Reset DMA position buffer.
456 */
457 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
458 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);
459
460 /*
461 * Reset the controller. The reset must remain asserted for
462 * a minimum of 100us.
463 */
464 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
465 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
466 count = 10000;
467 do {
468 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
469 if (!(gctl & HDAC_GCTL_CRST))
470 break;
471 DELAY(10);
472 } while (--count);
473 if (gctl & HDAC_GCTL_CRST) {
474 device_printf(sc->dev, "Unable to put hdac in reset\n");
475 return (ENXIO);
476 }
477
478 /* If wakeup is not requested, leave the controller in the reset state. */
479 if (!wakeup)
480 return (0);
481
482 DELAY(100);
483 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
484 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
485 count = 10000;
486 do {
487 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
488 if (gctl & HDAC_GCTL_CRST)
489 break;
490 DELAY(10);
491 } while (--count);
492 if (!(gctl & HDAC_GCTL_CRST)) {
493 device_printf(sc->dev, "Device stuck in reset\n");
494 return (ENXIO);
495 }
496
497 /*
498 * Wait for codecs to finish their own reset sequence. The delay here
499 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
500 */
501 DELAY(1000);
502
503 return (0);
504 }
505
506 /****************************************************************************
507 * int hdac_get_capabilities(struct hdac_softc *);
508 *
509 * Retrieve the general capabilities of the hdac:
510 * Number of Input Streams
511 * Number of Output Streams
512 * Number of bidirectional Streams
513 * 64bit ready
514 * CORB and RIRB sizes
515 ****************************************************************************/
516 static int
517 hdac_get_capabilities(struct hdac_softc *sc)
518 {
519 uint16_t gcap;
520 uint8_t corbsize, rirbsize;
521
522 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
523 sc->num_iss = HDAC_GCAP_ISS(gcap);
524 sc->num_oss = HDAC_GCAP_OSS(gcap);
525 sc->num_bss = HDAC_GCAP_BSS(gcap);
526 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
527 sc->num_sdo = HDAC_GCAP_NSDO(gcap);
528 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
529 if (sc->quirks_on & HDAC_QUIRK_64BIT)
530 sc->support_64bit = 1;
531 else if (sc->quirks_off & HDAC_QUIRK_64BIT)
532 sc->support_64bit = 0;
533
534 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
535 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
536 HDAC_CORBSIZE_CORBSZCAP_256)
537 sc->corb_size = 256;
538 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
539 HDAC_CORBSIZE_CORBSZCAP_16)
540 sc->corb_size = 16;
541 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
542 HDAC_CORBSIZE_CORBSZCAP_2)
543 sc->corb_size = 2;
544 else {
545 device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
546 __func__, corbsize);
547 return (ENXIO);
548 }
549
550 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
551 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
552 HDAC_RIRBSIZE_RIRBSZCAP_256)
553 sc->rirb_size = 256;
554 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
555 HDAC_RIRBSIZE_RIRBSZCAP_16)
556 sc->rirb_size = 16;
557 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
558 HDAC_RIRBSIZE_RIRBSZCAP_2)
559 sc->rirb_size = 2;
560 else {
561 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
562 __func__, rirbsize);
563 return (ENXIO);
564 }
565
566 HDA_BOOTVERBOSE(
567 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
568 "NSDO %d%s, CORB %d, RIRB %d\n",
569 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
570 sc->support_64bit ? ", 64bit" : "",
571 sc->corb_size, sc->rirb_size);
572 );
573
574 return (0);
575 }
576
577 /****************************************************************************
578 * void hdac_dma_cb
579 *
580 * This function is called by bus_dmamap_load when the mapping has been
581 * established. We just record the physical address of the mapping into
582 * the struct hdac_dma passed in.
583 ****************************************************************************/
584 static void
585 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
586 {
587 struct hdac_dma *dma;
588
589 if (error == 0) {
590 dma = (struct hdac_dma *)callback_arg;
591 dma->dma_paddr = segs[0].ds_addr;
592 }
593 }
594
595 /****************************************************************************
596 * int hdac_dma_alloc
597 *
598 * This function allocates and sets up a DMA region (struct hdac_dma).
599 * It must be freed by a corresponding hdac_dma_free.
600 ****************************************************************************/
601 static int
602 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
603 {
604 bus_size_t roundsz;
605 int result;
606
607 roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
608 bzero(dma, sizeof(*dma));
609
610 /*
611 * Create a DMA tag
612 */
613 result = bus_dma_tag_create(
614 bus_get_dma_tag(sc->dev), /* parent */
615 HDA_DMA_ALIGNMENT, /* alignment */
616 0, /* boundary */
617 (sc->support_64bit) ? BUS_SPACE_MAXADDR :
618 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
619 BUS_SPACE_MAXADDR, /* highaddr */
620 NULL, /* filtfunc */
621 NULL, /* filtfuncarg */
622 roundsz, /* maxsize */
623 1, /* nsegments */
624 roundsz, /* maxsegsz */
625 0, /* flags */
626 NULL, /* lockfunc */
627 NULL, /* lockfuncarg */
628 &dma->dma_tag); /* dmat */
629 if (result != 0) {
630 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
631 __func__, result);
632 goto hdac_dma_alloc_fail;
633 }
634
635 /*
636 * Allocate DMA memory
637 */
638 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
639 BUS_DMA_NOWAIT | BUS_DMA_ZERO |
640 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
641 BUS_DMA_COHERENT),
642 &dma->dma_map);
643 if (result != 0) {
644 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
645 __func__, result);
646 goto hdac_dma_alloc_fail;
647 }
648
649 dma->dma_size = roundsz;
650
651 /*
652 * Map the memory
653 */
654 result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
655 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
656 if (result != 0 || dma->dma_paddr == 0) {
657 if (result == 0)
658 result = ENOMEM;
659 device_printf(sc->dev, "%s: bus_dmamap_load failed (%d)\n",
660 __func__, result);
661 goto hdac_dma_alloc_fail;
662 }
663
664 HDA_BOOTHVERBOSE(
665 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
666 __func__, (uintmax_t)size, (uintmax_t)roundsz);
667 );
668
669 return (0);
670
671 hdac_dma_alloc_fail:
672 hdac_dma_free(sc, dma);
673
674 return (result);
675 }
676
677 /****************************************************************************
678 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
679 *
680 * Free a struct hdac_dma that has been previously allocated via the
681 * hdac_dma_alloc function.
682 ****************************************************************************/
683 static void
684 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
685 {
686 if (dma->dma_paddr != 0) {
687 /* Flush caches */
688 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
689 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
690 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
691 dma->dma_paddr = 0;
692 }
693 if (dma->dma_vaddr != NULL) {
694 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
695 dma->dma_vaddr = NULL;
696 }
697 if (dma->dma_tag != NULL) {
698 bus_dma_tag_destroy(dma->dma_tag);
699 dma->dma_tag = NULL;
700 }
701 dma->dma_size = 0;
702 }
703
704 /****************************************************************************
705 * int hdac_mem_alloc(struct hdac_softc *)
706 *
707 * Allocate all the bus resources necessary to speak with the physical
708 * controller.
709 ****************************************************************************/
710 static int
711 hdac_mem_alloc(struct hdac_softc *sc)
712 {
713 struct hdac_mem *mem;
714
715 mem = &sc->mem;
716 mem->mem_rid = PCIR_BAR(0);
717 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
718 &mem->mem_rid, RF_ACTIVE);
719 if (mem->mem_res == NULL) {
720 device_printf(sc->dev,
721 "%s: Unable to allocate memory resource\n", __func__);
722 return (ENOMEM);
723 }
724 mem->mem_tag = rman_get_bustag(mem->mem_res);
725 mem->mem_handle = rman_get_bushandle(mem->mem_res);
726
727 return (0);
728 }
729
730 /****************************************************************************
731 * void hdac_mem_free(struct hdac_softc *)
732 *
733 * Free up resources previously allocated by hdac_mem_alloc.
734 ****************************************************************************/
735 static void
736 hdac_mem_free(struct hdac_softc *sc)
737 {
738 struct hdac_mem *mem;
739
740 mem = &sc->mem;
741 if (mem->mem_res != NULL)
742 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
743 mem->mem_res);
744 mem->mem_res = NULL;
745 }
746
747 /****************************************************************************
748 * int hdac_irq_alloc(struct hdac_softc *)
749 *
750 * Allocate and setup the resources necessary for interrupt handling.
751 ****************************************************************************/
752 static int
753 hdac_irq_alloc(struct hdac_softc *sc)
754 {
755 struct hdac_irq *irq;
756 int result;
757
758 irq = &sc->irq;
759 irq->irq_rid = 0x0;
760
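/* Prefer a single MSI vector when supported and not disabled by quirk. */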
761 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
762 (result = pci_msi_count(sc->dev)) == 1 &&
763 pci_alloc_msi(sc->dev, &result) == 0)
764 irq->irq_rid = 0x1;
765
766 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
767 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
768 if (irq->irq_res == NULL) {
769 device_printf(sc->dev, "%s: Unable to allocate irq\n",
770 __func__);
771 goto hdac_irq_alloc_fail;
772 }
773 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
774 NULL, hdac_intr_handler, sc, &irq->irq_handle);
775 if (result != 0) {
776 device_printf(sc->dev,
777 "%s: Unable to setup interrupt handler (%d)\n",
778 __func__, result);
779 goto hdac_irq_alloc_fail;
780 }
781
782 return (0);
783
784 hdac_irq_alloc_fail:
785 hdac_irq_free(sc);
786
787 return (ENXIO);
788 }
789
790 /****************************************************************************
791 * void hdac_irq_free(struct hdac_softc *)
792 *
793 * Free up resources previously allocated by hdac_irq_alloc.
794 ****************************************************************************/
795 static void
796 hdac_irq_free(struct hdac_softc *sc)
797 {
798 struct hdac_irq *irq;
799
800 irq = &sc->irq;
801 if (irq->irq_res != NULL && irq->irq_handle != NULL)
802 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
803 if (irq->irq_res != NULL)
804 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
805 irq->irq_res);
806 if (irq->irq_rid == 0x1)
807 pci_release_msi(sc->dev);
808 irq->irq_handle = NULL;
809 irq->irq_res = NULL;
810 irq->irq_rid = 0x0;
811 }
812
813 /****************************************************************************
814 * void hdac_corb_init(struct hdac_softc *)
815 *
816 * Initialize the CORB registers for operation, but do not start the engine yet.
817 * The CORB engine must not be running when this function is called.
818 ****************************************************************************/
819 static void
820 hdac_corb_init(struct hdac_softc *sc)
821 {
822 uint8_t corbsize;
823 uint64_t corbpaddr;
824
825 /* Setup the CORB size. */
826 switch (sc->corb_size) {
827 case 256:
828 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
829 break;
830 case 16:
831 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
832 break;
833 case 2:
834 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
835 break;
836 default:
837 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
838 }
839 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);
840
841 /* Setup the CORB Address in the hdac */
842 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
843 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
844 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));
845
846 /* Set the WP and RP */
847 sc->corb_wp = 0;
848 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
849 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
850 /*
851 * The HDA specification indicates that the CORBRPRST bit will always
852 * read as zero. Unfortunately, it seems that at least the 82801G
853 * doesn't reset the bit to zero, which stalls the CORB engine.
854 * Manually reset the bit to zero before continuing.
855 */
856 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);
857
858 /* Enable CORB error reporting */
859 #if 0
860 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
861 #endif
862 }
863
864 /****************************************************************************
865 * void hdac_rirb_init(struct hdac_softc *)
866 *
867 * Initialize the RIRB registers for operation, but do not start the engine yet.
868 * The RIRB engine must not be running when this function is called.
869 ****************************************************************************/
870 static void
871 hdac_rirb_init(struct hdac_softc *sc)
872 {
873 uint8_t rirbsize;
874 uint64_t rirbpaddr;
875
876 /* Setup the RIRB size. */
877 switch (sc->rirb_size) {
878 case 256:
879 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
880 break;
881 case 16:
882 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
883 break;
884 case 2:
885 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
886 break;
887 default:
888 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
889 }
890 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);
891
892 /* Setup the RIRB Address in the hdac */
893 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
894 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
895 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));
896
897 /* Setup the WP and RP */
898 sc->rirb_rp = 0;
899 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);
900
901 /* Setup the interrupt threshold */
902 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);
903
904 /* Enable Overrun and response received reporting */
905 #if 0
906 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
907 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
908 #else
909 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
910 #endif
911
912 /*
913 * Make sure that the host CPU cache doesn't contain any dirty
914 * cache lines that fall within the RIRB. If I understood correctly, it
915 * should be sufficient to do this only once as the rirb is purely
916 * read-only from now on.
917 */
918 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
919 BUS_DMASYNC_PREREAD);
920 }
921
922 /****************************************************************************
923 * void hdac_corb_start(hdac_softc *)
924 *
925 * Start up the CORB DMA engine
926 ****************************************************************************/
927 static void
928 hdac_corb_start(struct hdac_softc *sc)
929 {
930 uint32_t corbctl;
931
932 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
933 corbctl |= HDAC_CORBCTL_CORBRUN;
934 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
935 }
936
937 /****************************************************************************
938 * void hdac_rirb_start(hdac_softc *)
939 *
940 * Start up the RIRB DMA engine
941 ****************************************************************************/
942 static void
943 hdac_rirb_start(struct hdac_softc *sc)
944 {
945 uint32_t rirbctl;
946
947 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
948 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
949 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
950 }
951
952 static int
953 hdac_rirb_flush(struct hdac_softc *sc)
954 {
955 struct hdac_rirb *rirb_base, *rirb;
956 nid_t cad;
957 uint32_t resp, resp_ex;
958 uint8_t rirbwp;
959 int ret;
960
961 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
962 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
963 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
964 BUS_DMASYNC_POSTREAD);
965
966 ret = 0;
967 while (sc->rirb_rp != rirbwp) {
968 sc->rirb_rp++;
969 sc->rirb_rp %= sc->rirb_size;
970 rirb = &rirb_base[sc->rirb_rp];
971 resp = le32toh(rirb->response);
972 resp_ex = le32toh(rirb->response_ex);
973 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
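/*
 * Unsolicited responses are queued as (response, codec address) pairs
 * for the unsolq task; solicited responses complete the oldest pending
 * command for that codec.
 */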
974 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
975 sc->unsolq[sc->unsolq_wp++] = resp;
976 sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
977 sc->unsolq[sc->unsolq_wp++] = cad;
978 sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
979 } else if (sc->codecs[cad].pending <= 0) {
980 device_printf(sc->dev, "Unexpected unsolicited "
981 "response from address %d: %08x\n", cad, resp);
982 } else {
983 sc->codecs[cad].response = resp;
984 sc->codecs[cad].pending--;
985 }
986 ret++;
987 }
988
989 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
990 BUS_DMASYNC_PREREAD);
991 return (ret);
992 }
993
994 static int
995 hdac_unsolq_flush(struct hdac_softc *sc)
996 {
997 device_t child;
998 nid_t cad;
999 uint32_t resp;
1000 int ret = 0;
1001
1002 if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
1003 sc->unsolq_st = HDAC_UNSOLQ_BUSY;
1004 while (sc->unsolq_rp != sc->unsolq_wp) {
1005 resp = sc->unsolq[sc->unsolq_rp++];
1006 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1007 cad = sc->unsolq[sc->unsolq_rp++];
1008 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1009 if ((child = sc->codecs[cad].dev) != NULL &&
1010 device_is_attached(child))
1011 HDAC_UNSOL_INTR(child, resp);
1012 ret++;
1013 }
1014 sc->unsolq_st = HDAC_UNSOLQ_READY;
1015 }
1016
1017 return (ret);
1018 }
1019
1020 /****************************************************************************
1021 * uint32_t hdac_send_command
1022 *
1023 * Wrapper function that sends only one command to a given codec
1024 ****************************************************************************/
1025 static uint32_t
1026 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
1027 {
1028 int timeout;
1029 uint32_t *corb;
1030
1031 hdac_lockassert(sc);
1032 verb &= ~HDA_CMD_CAD_MASK;
1033 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
1034 sc->codecs[cad].response = HDA_INVALID;
1035
1036 sc->codecs[cad].pending++;
1037 sc->corb_wp++;
1038 sc->corb_wp %= sc->corb_size;
1039 corb = (uint32_t *)sc->corb_dma.dma_vaddr;
1040 bus_dmamap_sync(sc->corb_dma.dma_tag,
1041 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
1042 corb[sc->corb_wp] = htole32(verb);
1043 bus_dmamap_sync(sc->corb_dma.dma_tag,
1044 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
1045 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
1046
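/* Poll the RIRB for the response; give up after roughly 100 ms. */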
1047 timeout = 10000;
1048 do {
1049 if (hdac_rirb_flush(sc) == 0)
1050 DELAY(10);
1051 } while (sc->codecs[cad].pending != 0 && --timeout);
1052
1053 if (sc->codecs[cad].pending != 0) {
1054 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
1055 verb, cad);
1056 sc->codecs[cad].pending = 0;
1057 }
1058
1059 if (sc->unsolq_rp != sc->unsolq_wp)
1060 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
1061 return (sc->codecs[cad].response);
1062 }
1063
1064 /****************************************************************************
1065 * Device Methods
1066 ****************************************************************************/
1067
1068 /****************************************************************************
1069 * int hdac_probe(device_t)
1070 *
1071 * Probe for the presence of an hdac. If none is found, check for a generic
1072 * match using the subclass of the device.
1073 ****************************************************************************/
1074 static int
1075 hdac_probe(device_t dev)
1076 {
1077 int i, result;
1078 uint32_t model;
1079 uint16_t class, subclass;
1080 char desc[64];
1081
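/*
 * Device-table entries encode the PCI device ID in the upper 16 bits of
 * "model" and the vendor ID in the lower 16 bits.
 */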
1082 model = (uint32_t)pci_get_device(dev) << 16;
1083 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1084 class = pci_get_class(dev);
1085 subclass = pci_get_subclass(dev);
1086
1087 bzero(desc, sizeof(desc));
1088 result = ENXIO;
1089 for (i = 0; i < nitems(hdac_devices); i++) {
1090 if (hdac_devices[i].model == model) {
1091 strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
1092 result = BUS_PROBE_DEFAULT;
1093 break;
1094 }
1095 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1096 class == PCIC_MULTIMEDIA &&
1097 subclass == PCIS_MULTIMEDIA_HDA) {
1098 snprintf(desc, sizeof(desc), "%s (0x%04x)",
1099 hdac_devices[i].desc, pci_get_device(dev));
1100 result = BUS_PROBE_GENERIC;
1101 break;
1102 }
1103 }
1104 if (result == ENXIO && class == PCIC_MULTIMEDIA &&
1105 subclass == PCIS_MULTIMEDIA_HDA) {
1106 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
1107 result = BUS_PROBE_GENERIC;
1108 }
1109 if (result != ENXIO)
1110 device_set_descf(dev, "%s HDA Controller", desc);
1111
1112 return (result);
1113 }
1114
1115 static void
1116 hdac_unsolq_task(void *context, int pending)
1117 {
1118 struct hdac_softc *sc;
1119
1120 sc = (struct hdac_softc *)context;
1121
1122 hdac_lock(sc);
1123 hdac_unsolq_flush(sc);
1124 hdac_unlock(sc);
1125 }
1126
1127 /****************************************************************************
1128 * int hdac_attach(device_t)
1129 *
1130 * Attach the device to the kernel. Interrupts usually won't be enabled
1131 * when this function is called. Setup everything that doesn't require
1132 * interrupts and defer probing of codecs until interrupts are enabled.
1133 ****************************************************************************/
1134 static int
1135 hdac_attach(device_t dev)
1136 {
1137 struct hdac_softc *sc;
1138 int result;
1139 int i, devid = -1;
1140 uint32_t model;
1141 uint16_t class, subclass;
1142 uint16_t vendor;
1143 uint8_t v;
1144
1145 sc = device_get_softc(dev);
1146 HDA_BOOTVERBOSE(
1147 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
1148 pci_get_subvendor(dev), pci_get_subdevice(dev));
1149 device_printf(dev, "HDA Driver Revision: %s\n",
1150 HDA_DRV_TEST_REV);
1151 );
1152
1153 model = (uint32_t)pci_get_device(dev) << 16;
1154 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1155 class = pci_get_class(dev);
1156 subclass = pci_get_subclass(dev);
1157
1158 for (i = 0; i < nitems(hdac_devices); i++) {
1159 if (hdac_devices[i].model == model) {
1160 devid = i;
1161 break;
1162 }
1163 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1164 class == PCIC_MULTIMEDIA &&
1165 subclass == PCIS_MULTIMEDIA_HDA) {
1166 devid = i;
1167 break;
1168 }
1169 }
1170
1171 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
1172 sc->dev = dev;
1173 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
1174 callout_init(&sc->poll_callout, 1);
1175 for (i = 0; i < HDAC_CODEC_MAX; i++)
1176 sc->codecs[i].dev = NULL;
1177 if (devid >= 0) {
1178 sc->quirks_on = hdac_devices[devid].quirks_on;
1179 sc->quirks_off = hdac_devices[devid].quirks_off;
1180 } else {
1181 sc->quirks_on = 0;
1182 sc->quirks_off = 0;
1183 }
1184 if (resource_int_value(device_get_name(dev),
1185 device_get_unit(dev), "msi", &i) == 0) {
1186 if (i == 0)
1187 sc->quirks_off |= HDAC_QUIRK_MSI;
1188 else {
1189 sc->quirks_on |= HDAC_QUIRK_MSI;
1190 sc->quirks_off &= ~HDAC_QUIRK_MSI;
1191 }
1192 }
1193 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
1194 HDA_BOOTVERBOSE(
1195 device_printf(sc->dev,
1196 "Config options: on=0x%08x off=0x%08x\n",
1197 sc->quirks_on, sc->quirks_off);
1198 );
1199 sc->poll_ival = hz;
1200 if (resource_int_value(device_get_name(dev),
1201 device_get_unit(dev), "polling", &i) == 0 && i != 0)
1202 sc->polling = 1;
1203 else
1204 sc->polling = 0;
1205
1206 pci_enable_busmaster(dev);
1207
1208 vendor = pci_get_vendor(dev);
1209 if (vendor == INTEL_VENDORID) {
1210 /* TCSEL -> TC0 */
1211 v = pci_read_config(dev, 0x44, 1);
1212 pci_write_config(dev, 0x44, v & 0xf8, 1);
1213 HDA_BOOTHVERBOSE(
1214 device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
1215 pci_read_config(dev, 0x44, 1));
1216 );
1217 }
1218
1219 #if defined(__i386__) || defined(__amd64__)
1220 sc->flags |= HDAC_F_DMA_NOCACHE;
1221
1222 if (resource_int_value(device_get_name(dev),
1223 device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
1224 #else
1225 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1226 #endif
1227 /*
1228 * Try to enable PCIe snoop to avoid messing around with
1229 * uncacheable DMA attribute. Since PCIe snoop register
1230 * config is pretty much vendor specific, there is no
1231 * general solution for enabling it, forcing us (even
1232 * Microsoft) to enable uncacheable or write-combined DMA
1233 * by default.
1234 *
1235 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
1236 */
1237 for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
1238 if (hdac_pcie_snoop[i].vendor != vendor)
1239 continue;
1240 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1241 if (hdac_pcie_snoop[i].reg == 0x00)
1242 break;
1243 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1244 if ((v & hdac_pcie_snoop[i].enable) ==
1245 hdac_pcie_snoop[i].enable)
1246 break;
1247 v &= hdac_pcie_snoop[i].mask;
1248 v |= hdac_pcie_snoop[i].enable;
1249 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
1250 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1251 if ((v & hdac_pcie_snoop[i].enable) !=
1252 hdac_pcie_snoop[i].enable) {
1253 HDA_BOOTVERBOSE(
1254 device_printf(dev,
1255 "WARNING: Failed to enable PCIe "
1256 "snoop!\n");
1257 );
1258 #if defined(__i386__) || defined(__amd64__)
1259 sc->flags |= HDAC_F_DMA_NOCACHE;
1260 #endif
1261 }
1262 break;
1263 }
1264 #if defined(__i386__) || defined(__amd64__)
1265 }
1266 #endif
1267
1268 HDA_BOOTHVERBOSE(
1269 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
1270 (sc->flags & HDAC_F_DMA_NOCACHE) ?
1271 "Uncacheable" : "PCIe snoop", vendor);
1272 );
1273
1274 /* Allocate resources */
1275 result = hdac_mem_alloc(sc);
1276 if (result != 0)
1277 goto hdac_attach_fail;
1278
1279 /* Get Capabilities */
1280 result = hdac_get_capabilities(sc);
1281 if (result != 0)
1282 goto hdac_attach_fail;
1283
1284 /* Allocate CORB, RIRB, POS and BDLs dma memory */
1285 result = hdac_dma_alloc(sc, &sc->corb_dma,
1286 sc->corb_size * sizeof(uint32_t));
1287 if (result != 0)
1288 goto hdac_attach_fail;
1289 result = hdac_dma_alloc(sc, &sc->rirb_dma,
1290 sc->rirb_size * sizeof(struct hdac_rirb));
1291 if (result != 0)
1292 goto hdac_attach_fail;
1293 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
1294 M_HDAC, M_ZERO | M_WAITOK);
1295 for (i = 0; i < sc->num_ss; i++) {
1296 result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
1297 sizeof(struct hdac_bdle) * HDA_BDL_MAX);
1298 if (result != 0)
1299 goto hdac_attach_fail;
1300 }
1301 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
1302 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
1303 HDA_BOOTVERBOSE(
1304 device_printf(dev, "Failed to "
1305 "allocate DMA pos buffer "
1306 "(non-fatal)\n");
1307 );
1308 } else {
1309 uint64_t addr = sc->pos_dma.dma_paddr;
1310
1311 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
1312 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
1313 (addr & HDAC_DPLBASE_DPLBASE_MASK) |
1314 HDAC_DPLBASE_DPLBASE_DMAPBE);
1315 }
1316 }
1317
1318 result = bus_dma_tag_create(
1319 bus_get_dma_tag(sc->dev), /* parent */
1320 HDA_DMA_ALIGNMENT, /* alignment */
1321 0, /* boundary */
1322 (sc->support_64bit) ? BUS_SPACE_MAXADDR :
1323 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1324 BUS_SPACE_MAXADDR, /* highaddr */
1325 NULL, /* filtfunc */
1326 NULL, /* filtfuncarg */
1327 HDA_BUFSZ_MAX, /* maxsize */
1328 1, /* nsegments */
1329 HDA_BUFSZ_MAX, /* maxsegsz */
1330 0, /* flags */
1331 NULL, /* lockfunc */
1332 NULL, /* lockfuncarg */
1333 &sc->chan_dmat); /* dmat */
1334 if (result != 0) {
1335 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
1336 __func__, result);
1337 goto hdac_attach_fail;
1338 }
1339
1340 /* Quiesce everything */
1341 HDA_BOOTHVERBOSE(
1342 device_printf(dev, "Reset controller...\n");
1343 );
1344 hdac_reset(sc, true);
1345
1346 /* Initialize the CORB and RIRB */
1347 hdac_corb_init(sc);
1348 hdac_rirb_init(sc);
1349
1350 result = hdac_irq_alloc(sc);
1351 if (result != 0)
1352 goto hdac_attach_fail;
1353
1354 /* Defer the remainder of initialization until interrupts are enabled */
1355 sc->intrhook.ich_func = hdac_attach2;
1356 sc->intrhook.ich_arg = (void *)sc;
1357 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
1358 sc->intrhook.ich_func = NULL;
1359 hdac_attach2((void *)sc);
1360 }
1361
1362 return (0);
1363
1364 hdac_attach_fail:
1365 hdac_irq_free(sc);
1366 if (sc->streams != NULL)
1367 for (i = 0; i < sc->num_ss; i++)
1368 hdac_dma_free(sc, &sc->streams[i].bdl);
1369 free(sc->streams, M_HDAC);
1370 hdac_dma_free(sc, &sc->rirb_dma);
1371 hdac_dma_free(sc, &sc->corb_dma);
1372 hdac_mem_free(sc);
1373 snd_mtxfree(sc->lock);
1374
1375 return (ENXIO);
1376 }
1377
1378 static int
1379 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
1380 {
1381 struct hdac_softc *sc;
1382 device_t *devlist;
1383 device_t dev;
1384 int devcount, i, err, val;
1385
1386 dev = oidp->oid_arg1;
1387 sc = device_get_softc(dev);
1388 if (sc == NULL)
1389 return (EINVAL);
1390 val = 0;
1391 err = sysctl_handle_int(oidp, &val, 0, req);
1392 if (err != 0 || req->newptr == NULL || val == 0)
1393 return (err);
1394
1395 /* XXX: Temporary. For debugging. */
1396 if (val == 100) {
1397 hdac_suspend(dev);
1398 return (0);
1399 } else if (val == 101) {
1400 hdac_resume(dev);
1401 return (0);
1402 }
1403
1404 bus_topo_lock();
1405
1406 if ((err = device_get_children(dev, &devlist, &devcount)) != 0) {
1407 bus_topo_unlock();
1408 return (err);
1409 }
1410
1411 hdac_lock(sc);
1412 for (i = 0; i < devcount; i++)
1413 HDAC_PINDUMP(devlist[i]);
1414 hdac_unlock(sc);
1415
1416 bus_topo_unlock();
1417
1418 free(devlist, M_TEMP);
1419 return (0);
1420 }
1421
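/*
 * Estimate the link data rate, in bits per second, implied by a stream
 * format word: base rate (bit 14), rate multiplier (bits 13:11), rate
 * divisor (bits 10:8), container size (bits 6:4) and channel count
 * (bits 3:0).  hdac_bdata_rate() below gives the corresponding memory
 * buffer rate.
 */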
1422 static int
1423 hdac_mdata_rate(uint16_t fmt)
1424 {
1425 static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
1426 int rate, bits;
1427
1428 if (fmt & (1 << 14))
1429 rate = 44100;
1430 else
1431 rate = 48000;
1432 rate *= ((fmt >> 11) & 0x07) + 1;
1433 rate /= ((fmt >> 8) & 0x07) + 1;
1434 bits = mbits[(fmt >> 4) & 0x03];
1435 bits *= (fmt & 0x0f) + 1;
1436 return (rate * bits);
1437 }
1438
1439 static int
1440 hdac_bdata_rate(uint16_t fmt, int output)
1441 {
1442 static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
1443 int rate, bits;
1444
1445 rate = 48000;
1446 rate *= ((fmt >> 11) & 0x07) + 1;
1447 bits = bbits[(fmt >> 4) & 0x03];
1448 bits *= (fmt & 0x0f) + 1;
1449 if (!output)
1450 bits = ((bits + 7) & ~0x07) + 10;
1451 return (rate * bits);
1452 }
1453
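/*
 * Recompute the polling interval: half the time needed to transfer one
 * block on the most demanding running stream, clamped to [1, hz] ticks;
 * hz/2 when only unsolicited responses are expected, and stop the callout
 * entirely when there is nothing to poll.
 */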
1454 static void
1455 hdac_poll_reinit(struct hdac_softc *sc)
1456 {
1457 int i, pollticks, min = 1000000;
1458 struct hdac_stream *s;
1459
1460 if (sc->polling == 0)
1461 return;
1462 if (sc->unsol_registered > 0)
1463 min = hz / 2;
1464 for (i = 0; i < sc->num_ss; i++) {
1465 s = &sc->streams[i];
1466 if (s->running == 0)
1467 continue;
1468 pollticks = ((uint64_t)hz * s->blksz) /
1469 (hdac_mdata_rate(s->format) / 8);
1470 pollticks >>= 1;
1471 if (pollticks > hz)
1472 pollticks = hz;
1473 if (pollticks < 1)
1474 pollticks = 1;
1475 if (min > pollticks)
1476 min = pollticks;
1477 }
1478 sc->poll_ival = min;
1479 if (min == 1000000)
1480 callout_stop(&sc->poll_callout);
1481 else
1482 callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
1483 }
1484
1485 static int
1486 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
1487 {
1488 struct hdac_softc *sc;
1489 device_t dev;
1490 uint32_t ctl;
1491 int err, val;
1492
1493 dev = oidp->oid_arg1;
1494 sc = device_get_softc(dev);
1495 if (sc == NULL)
1496 return (EINVAL);
1497 hdac_lock(sc);
1498 val = sc->polling;
1499 hdac_unlock(sc);
1500 err = sysctl_handle_int(oidp, &val, 0, req);
1501
1502 if (err != 0 || req->newptr == NULL)
1503 return (err);
1504 if (val < 0 || val > 1)
1505 return (EINVAL);
1506
1507 hdac_lock(sc);
1508 if (val != sc->polling) {
1509 if (val == 0) {
1510 callout_stop(&sc->poll_callout);
1511 hdac_unlock(sc);
1512 callout_drain(&sc->poll_callout);
1513 hdac_lock(sc);
1514 sc->polling = 0;
1515 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1516 ctl |= HDAC_INTCTL_GIE;
1517 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1518 } else {
1519 ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1520 ctl &= ~HDAC_INTCTL_GIE;
1521 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1522 sc->polling = 1;
1523 hdac_poll_reinit(sc);
1524 }
1525 }
1526 hdac_unlock(sc);
1527
1528 return (err);
1529 }
1530
1531 static void
1532 hdac_attach2(void *arg)
1533 {
1534 struct hdac_softc *sc;
1535 device_t child;
1536 uint32_t vendorid, revisionid;
1537 int i;
1538 uint16_t statests;
1539
1540 sc = (struct hdac_softc *)arg;
1541
1542 hdac_lock(sc);
1543
1544 /* Remove ourselves from the config hooks */
1545 if (sc->intrhook.ich_func != NULL) {
1546 config_intrhook_disestablish(&sc->intrhook);
1547 sc->intrhook.ich_func = NULL;
1548 }
1549
1550 HDA_BOOTHVERBOSE(
1551 device_printf(sc->dev, "Starting CORB Engine...\n");
1552 );
1553 hdac_corb_start(sc);
1554 HDA_BOOTHVERBOSE(
1555 device_printf(sc->dev, "Starting RIRB Engine...\n");
1556 );
1557 hdac_rirb_start(sc);
1558
1559 /*
1560 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1561 * (status change) interrupts. The documentation says that we
1562 * should not make any assumptions about the state of this register
1563 * and set it explicitly.
1564 * NB: this needs to be done before the interrupt is enabled as
1565 * the handler does not expect this interrupt source.
1566 */
1567 HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
1568
1569 /*
1570 * Read and clear post-reset SDI wake status.
1571 * Each set bit corresponds to a codec that came out of reset.
1572 */
1573 statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
1574 HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);
1575
1576 HDA_BOOTHVERBOSE(
1577 device_printf(sc->dev,
1578 "Enabling controller interrupt...\n");
1579 );
1580 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1581 HDAC_GCTL_UNSOL);
1582 if (sc->polling == 0) {
1583 HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
1584 HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1585 }
1586 DELAY(1000);
1587
1588 HDA_BOOTHVERBOSE(
1589 device_printf(sc->dev, "Scanning HDA codecs ...\n");
1590 );
1591 hdac_unlock(sc);
1592 for (i = 0; i < HDAC_CODEC_MAX; i++) {
1593 if (HDAC_STATESTS_SDIWAKE(statests, i)) {
1594 HDA_BOOTHVERBOSE(
1595 device_printf(sc->dev,
1596 "Found CODEC at address %d\n", i);
1597 );
1598 hdac_lock(sc);
1599 vendorid = hdac_send_command(sc, i,
1600 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
1601 revisionid = hdac_send_command(sc, i,
1602 HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
1603 hdac_unlock(sc);
1604 if (vendorid == HDA_INVALID &&
1605 revisionid == HDA_INVALID) {
1606 device_printf(sc->dev,
1607 "CODEC at address %d not responding!\n", i);
1608 continue;
1609 }
1610 sc->codecs[i].vendor_id =
1611 HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
1612 sc->codecs[i].device_id =
1613 HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
1614 sc->codecs[i].revision_id =
1615 HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
1616 sc->codecs[i].stepping_id =
1617 HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
1618 child = device_add_child(sc->dev, "hdacc", DEVICE_UNIT_ANY);
1619 if (child == NULL) {
1620 device_printf(sc->dev,
1621 "Failed to add CODEC device\n");
1622 continue;
1623 }
1624 device_set_ivars(child, (void *)(intptr_t)i);
1625 sc->codecs[i].dev = child;
1626 }
1627 }
1628 bus_generic_attach(sc->dev);
1629
1630 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1631 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1632 "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1633 sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
1634 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1635 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1636 "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1637 sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
1638 }
1639
1640 /****************************************************************************
1641 * int hdac_suspend(device_t)
1642 *
1643 * Suspend and power down HDA bus and codecs.
1644 ****************************************************************************/
1645 static int
1646 hdac_suspend(device_t dev)
1647 {
1648 struct hdac_softc *sc = device_get_softc(dev);
1649
1650 HDA_BOOTHVERBOSE(
1651 device_printf(dev, "Suspend...\n");
1652 );
1653 bus_generic_suspend(dev);
1654
1655 hdac_lock(sc);
1656 HDA_BOOTHVERBOSE(
1657 device_printf(dev, "Reset controller...\n");
1658 );
1659 callout_stop(&sc->poll_callout);
1660 hdac_reset(sc, false);
1661 hdac_unlock(sc);
1662 callout_drain(&sc->poll_callout);
1663 taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1664 HDA_BOOTHVERBOSE(
1665 device_printf(dev, "Suspend done\n");
1666 );
1667 return (0);
1668 }
1669
1670 /****************************************************************************
1671 * int hdac_resume(device_t)
1672 *
1673 * Power up and restore the HDA bus and codec state.
1674 ****************************************************************************/
1675 static int
1676 hdac_resume(device_t dev)
1677 {
1678 struct hdac_softc *sc = device_get_softc(dev);
1679 int error;
1680
1681 HDA_BOOTHVERBOSE(
1682 device_printf(dev, "Resume...\n");
1683 );
1684 hdac_lock(sc);
1685
1686 /* Quiesce everything */
1687 HDA_BOOTHVERBOSE(
1688 device_printf(dev, "Reset controller...\n");
1689 );
1690 hdac_reset(sc, true);
1691
1692 /* Initialize the CORB and RIRB */
1693 hdac_corb_init(sc);
1694 hdac_rirb_init(sc);
1695
1696 HDA_BOOTHVERBOSE(
1697 device_printf(dev, "Starting CORB Engine...\n");
1698 );
1699 hdac_corb_start(sc);
1700 HDA_BOOTHVERBOSE(
1701 device_printf(dev, "Starting RIRB Engine...\n");
1702 );
1703 hdac_rirb_start(sc);
1704
1705 /*
1706 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1707 * (status change) events. The documentation says that we should
1708 * not make any assumptions about the state of this register and
1709 * set it explicitly.
1710 * Also, clear HDAC_STATESTS.
1711 * NB: this needs to be done before the interrupt is enabled as
1712 * the handler does not expect this interrupt source.
1713 */
	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Enabling controller interrupt...\n");
	);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
	    HDAC_GCTL_UNSOL);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
	DELAY(1000);
	hdac_poll_reinit(sc);
	hdac_unlock(sc);

	error = bus_generic_resume(dev);
	HDA_BOOTHVERBOSE(
		device_printf(dev, "Resume done\n");
	);
	return (error);
}

/****************************************************************************
 * int hdac_detach(device_t)
 *
 * Detach and free up resources utilized by the hdac device.
 ****************************************************************************/
static int
hdac_detach(device_t dev)
{
	struct hdac_softc *sc = device_get_softc(dev);
	device_t *devlist;
	int cad, i, devcount, error;

	if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
		return (error);
	for (i = 0; i < devcount; i++) {
		cad = (intptr_t)device_get_ivars(devlist[i]);
		if ((error = device_delete_child(dev, devlist[i])) != 0) {
			free(devlist, M_TEMP);
			return (error);
		}
		sc->codecs[cad].dev = NULL;
	}
	free(devlist, M_TEMP);

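	/*
	 * With the codec children gone, quiesce the controller and release
	 * the remaining resources, roughly in the reverse order of their
	 * allocation during attach.
	 */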
	hdac_lock(sc);
	hdac_reset(sc, false);
	hdac_unlock(sc);
	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
	hdac_irq_free(sc);

	for (i = 0; i < sc->num_ss; i++)
		hdac_dma_free(sc, &sc->streams[i].bdl);
	free(sc->streams, M_HDAC);
	hdac_dma_free(sc, &sc->pos_dma);
	hdac_dma_free(sc, &sc->rirb_dma);
	hdac_dma_free(sc, &sc->corb_dma);
	if (sc->chan_dmat != NULL) {
		bus_dma_tag_destroy(sc->chan_dmat);
		sc->chan_dmat = NULL;
	}
	hdac_mem_free(sc);
	snd_mtxfree(sc->lock);
	return (0);
}

static bus_dma_tag_t
hdac_get_dma_tag(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->chan_dmat);
}

static int
hdac_print_child(device_t dev, device_t child)
{
	int retval;

	retval = bus_print_child_header(dev, child);
	retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

static int
hdac_child_location(device_t dev, device_t child, struct sbuf *sb)
{

	sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child));
	return (0);
}

static int
hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);

	sbuf_printf(sb,
	    "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
	    sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
	    sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
	return (0);
}

static int
hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);

	switch (which) {
	case HDA_IVAR_CODEC_ID:
		*result = cad;
		break;
	case HDA_IVAR_VENDOR_ID:
		*result = sc->codecs[cad].vendor_id;
		break;
	case HDA_IVAR_DEVICE_ID:
		*result = sc->codecs[cad].device_id;
		break;
	case HDA_IVAR_REVISION_ID:
		*result = sc->codecs[cad].revision_id;
		break;
	case HDA_IVAR_STEPPING_ID:
		*result = sc->codecs[cad].stepping_id;
		break;
	case HDA_IVAR_SUBVENDOR_ID:
		*result = pci_get_subvendor(dev);
		break;
	case HDA_IVAR_SUBDEVICE_ID:
		*result = pci_get_subdevice(dev);
		break;
	case HDA_IVAR_DMA_NOCACHE:
		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
		break;
	case HDA_IVAR_STRIPES_MASK:
		*result = (1 << (1 << sc->num_sdo)) - 1;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}

static struct mtx *
hdac_get_mtx(device_t dev, device_t child)
{
	struct hdac_softc *sc = device_get_softc(dev);

	return (sc->lock);
}

static uint32_t
hdac_codec_command(device_t dev, device_t child, uint32_t verb)
{

	return (hdac_send_command(device_get_softc(dev),
	    (intptr_t)device_get_ivars(child), verb));
}

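/*
 * Map a (direction, stream tag) pair to a stream engine index: input
 * streams are looked up among the ISS engines, output streams among the
 * OSS engines, with the bidirectional (BSS) engines as a fallback for
 * either direction. A tag of 0 locates a free engine; -1 is returned
 * when nothing matches.
 */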
static int
hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
{
	int i, ss;

	ss = -1;
	/* Allocate ISS/OSS first. */
	if (dir == 0) {
		for (i = 0; i < sc->num_iss; i++) {
			if (sc->streams[i].stream == stream) {
				ss = i;
				break;
			}
		}
	} else {
		for (i = 0; i < sc->num_oss; i++) {
			if (sc->streams[i + sc->num_iss].stream == stream) {
				ss = i + sc->num_iss;
				break;
			}
		}
	}
	/* Fallback to BSS. */
	if (ss == -1) {
		for (i = 0; i < sc->num_bss; i++) {
			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
			    == stream) {
				ss = i + sc->num_iss + sc->num_oss;
				break;
			}
		}
	}
	return (ss);
}

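/*
 * Allocate a stream engine for a codec child: find a free engine, check
 * that the link has bandwidth left for the requested format, pick a
 * stream tag, and record the stream state. Returns the stream tag, or 0
 * if no engine or bandwidth is available.
 */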
static int
hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
    uint32_t **dmapos)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int stream, ss, bw, maxbw, prevbw;

	/* Look for empty stream. */
	ss = hdac_find_stream(sc, dir, 0);

	/* Return if nothing was found. */
	if (ss < 0)
		return (0);

	/* Check bus bandwidth. */
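	/*
	 * The ceilings below follow from the HDA link frame format: frames
	 * run at 48 kHz, each SDO line carries up to 960 payload bits per
	 * frame, and each codec's SDI line carries up to 464, so the limits
	 * are 48000 * 960 bits/s per SDO line and 48000 * 464 bits/s per
	 * codec on input.
	 */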
	bw = hdac_bdata_rate(format, dir);
	if (dir == 1) {
		bw *= 1 << (sc->num_sdo - stripe);
		prevbw = sc->sdo_bw_used;
		maxbw = 48000 * 960 * (1 << sc->num_sdo);
	} else {
		prevbw = sc->codecs[cad].sdi_bw_used;
		maxbw = 48000 * 464;
	}
	HDA_BOOTHVERBOSE(
		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
		    (bw + prevbw) / 1000, maxbw / 1000,
		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
	);
	if (bw + prevbw > maxbw)
		return (0);
	if (dir == 1)
		sc->sdo_bw_used += bw;
	else
		sc->codecs[cad].sdi_bw_used += bw;

	/* Allocate stream number */
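	/*
	 * ISS and OSS engines each get tags counting up from 1 (input and
	 * output stream tags are separate namespaces on the link), while
	 * BSS engines take tags counting down from 15, presumably to keep
	 * clear of both ranges.
	 */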
	if (ss >= sc->num_iss + sc->num_oss)
		stream = 15 - (ss - sc->num_iss - sc->num_oss);
	else if (ss >= sc->num_iss)
		stream = ss - sc->num_iss + 1;
	else
		stream = ss + 1;

	sc->streams[ss].dev = child;
	sc->streams[ss].dir = dir;
	sc->streams[ss].stream = stream;
	sc->streams[ss].bw = bw;
	sc->streams[ss].format = format;
	sc->streams[ss].stripe = stripe;
	if (dmapos != NULL) {
		if (sc->pos_dma.dma_vaddr != NULL)
			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
		else
			*dmapos = NULL;
	}
	return (stream);
}

static void
hdac_stream_free(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	nid_t cad = (uintptr_t)device_get_ivars(child);
	int ss;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
	if (dir == 1)
		sc->sdo_bw_used -= sc->streams[ss].bw;
	else
		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
	sc->streams[ss].stream = 0;
	sc->streams[ss].dev = NULL;
}

static int
hdac_stream_start(device_t dev, device_t child, int dir, int stream,
    bus_addr_t buf, int blksz, int blkcnt)
{
	struct hdac_softc *sc = device_get_softc(dev);
	struct hdac_bdle *bdle;
	uint64_t addr;
	int i, ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Start for not allocated stream (%d/%d)\n", dir, stream));

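	/*
	 * Fill the Buffer Descriptor List: one entry per block, each
	 * pointing at blksz bytes of the buffer, with IOC set so the
	 * controller interrupts at every block boundary.
	 */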
	addr = (uint64_t)buf;
	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
	for (i = 0; i < blkcnt; i++, bdle++) {
		bdle->addrl = htole32((uint32_t)addr);
		bdle->addrh = htole32((uint32_t)(addr >> 32));
		bdle->len = htole32(blksz);
		bdle->ioc = htole32(1);
		addr += blksz;
	}

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);

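	/*
	 * Stream descriptor registers live in 0x20-byte windows, one per
	 * engine; "off" selects this engine's window and is added to the
	 * per-register HDAC_SD* offsets below.
	 */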
	off = ss << 5;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
	addr = sc->streams[ss].bdl.dma_paddr;
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));

	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
	if (dir)
		ctl |= HDAC_SDCTL2_DIR;
	else
		ctl &= ~HDAC_SDCTL2_DIR;
	ctl &= ~HDAC_SDCTL2_STRM_MASK;
	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);

	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}

static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);

	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);

	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

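	/*
	 * Stream reset is a two-phase handshake: set SRST and wait for the
	 * bit to read back as set, then clear it and wait for it to read
	 * back as clear, polling for up to ~10 ms in each phase.
	 */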
	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Reset setting timeout\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Reset timeout!\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

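/*
 * Unsolicited-response bookkeeping: the controller only counts how many
 * handlers the codec children have registered so that polling can be
 * adjusted; the tag itself is returned unchanged.
 */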
static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}

static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe, hdac_probe),
	DEVMETHOD(device_attach, hdac_attach),
	DEVMETHOD(device_detach, hdac_detach),
	DEVMETHOD(device_suspend, hdac_suspend),
	DEVMETHOD(device_resume, hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag),
	DEVMETHOD(bus_print_child, hdac_print_child),
	DEVMETHOD(bus_child_location, hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo, hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar, hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx, hdac_get_mtx),
	DEVMETHOD(hdac_codec_command, hdac_codec_command),
	DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc),
	DEVMETHOD(hdac_stream_free, hdac_stream_free),
	DEVMETHOD(hdac_stream_start, hdac_stream_start),
	DEVMETHOD(hdac_stream_stop, hdac_stream_stop),
	DEVMETHOD(hdac_stream_reset, hdac_stream_reset),
	DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr),
	DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc),
	DEVMETHOD(hdac_unsol_free, hdac_unsol_free),
	DEVMETHOD_END
};

static driver_t hdac_driver = {
	"hdac",
	hdac_methods,
	sizeof(struct hdac_softc),
};

DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);