xref: /freebsd/sys/dev/sound/pci/hda/hdac.c (revision 43be2d7aaf25b719aec8f49aab110c0061f1edec)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
5  * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
6  * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Intel High Definition Audio (Controller) driver for FreeBSD.
33  */
34 
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_snd.h"
37 #endif
38 
39 #include <dev/sound/pcm/sound.h>
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 
43 #include <sys/ctype.h>
44 #include <sys/endian.h>
45 #include <sys/taskqueue.h>
46 
47 #include <dev/sound/pci/hda/hdac_private.h>
48 #include <dev/sound/pci/hda/hdac_reg.h>
49 #include <dev/sound/pci/hda/hda_reg.h>
50 #include <dev/sound/pci/hda/hdac.h>
51 
52 #define HDA_DRV_TEST_REV	"20120126_0002"
53 
54 #define hdac_lock(sc)		snd_mtxlock((sc)->lock)
55 #define hdac_unlock(sc)		snd_mtxunlock((sc)->lock)
56 #define hdac_lockassert(sc)	snd_mtxassert((sc)->lock)
57 
58 #define HDAC_QUIRK_64BIT	(1 << 0)
59 #define HDAC_QUIRK_DMAPOS	(1 << 1)
60 #define HDAC_QUIRK_MSI		(1 << 2)
61 
62 static const struct {
63 	const char *key;
64 	uint32_t value;
65 } hdac_quirks_tab[] = {
66 	{ "64bit", HDAC_QUIRK_64BIT },
67 	{ "dmapos", HDAC_QUIRK_DMAPOS },
68 	{ "msi", HDAC_QUIRK_MSI },
69 };
70 
71 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");
72 
73 static const struct {
74 	uint32_t	model;
75 	const char	*desc;
76 	char		quirks_on;
77 	char		quirks_off;
78 } hdac_devices[] = {
79 	{ HDA_INTEL_OAK,     "Intel Oaktrail",	0, 0 },
80 	{ HDA_INTEL_CMLKLP,  "Intel Comet Lake-LP",	0, 0 },
81 	{ HDA_INTEL_CMLKH,   "Intel Comet Lake-H",	0, 0 },
82 	{ HDA_INTEL_BAY,     "Intel BayTrail",	0, 0 },
83 	{ HDA_INTEL_HSW1,    "Intel Haswell",	0, 0 },
84 	{ HDA_INTEL_HSW2,    "Intel Haswell",	0, 0 },
85 	{ HDA_INTEL_HSW3,    "Intel Haswell",	0, 0 },
86 	{ HDA_INTEL_BDW1,    "Intel Broadwell",	0, 0 },
87 	{ HDA_INTEL_BDW2,    "Intel Broadwell",	0, 0 },
88 	{ HDA_INTEL_BXTNT,   "Intel Broxton-T",	0, 0 },
89 	{ HDA_INTEL_CPT,     "Intel Cougar Point",	0, 0 },
90 	{ HDA_INTEL_PATSBURG,"Intel Patsburg",  0, 0 },
91 	{ HDA_INTEL_PPT1,    "Intel Panther Point",	0, 0 },
92 	{ HDA_INTEL_BR,      "Intel Braswell",	0, 0 },
93 	{ HDA_INTEL_LPT1,    "Intel Lynx Point",	0, 0 },
94 	{ HDA_INTEL_LPT2,    "Intel Lynx Point",	0, 0 },
95 	{ HDA_INTEL_WCPT,    "Intel Wildcat Point",	0, 0 },
96 	{ HDA_INTEL_WELLS1,  "Intel Wellsburg",	0, 0 },
97 	{ HDA_INTEL_WELLS2,  "Intel Wellsburg",	0, 0 },
98 	{ HDA_INTEL_LPTLP1,  "Intel Lynx Point-LP",	0, 0 },
99 	{ HDA_INTEL_LPTLP2,  "Intel Lynx Point-LP",	0, 0 },
100 	{ HDA_INTEL_SRPTLP,  "Intel Sunrise Point-LP",	0, 0 },
101 	{ HDA_INTEL_KBLKLP,  "Intel Kaby Lake-LP",	0, 0 },
102 	{ HDA_INTEL_SRPT,    "Intel Sunrise Point",	0, 0 },
103 	{ HDA_INTEL_KBLK,    "Intel Kaby Lake",	0, 0 },
104 	{ HDA_INTEL_KBLKH,   "Intel Kaby Lake-H",	0, 0 },
105 	{ HDA_INTEL_CFLK,    "Intel Coffee Lake",	0, 0 },
106 	{ HDA_INTEL_CMLKS,   "Intel Comet Lake-S",	0, 0 },
107 	{ HDA_INTEL_CNLK,    "Intel Cannon Lake",	0, 0 },
108 	{ HDA_INTEL_ICLK,    "Intel Ice Lake",		0, 0 },
109 	{ HDA_INTEL_CMLKLP,  "Intel Comet Lake-LP",	0, 0 },
110 	{ HDA_INTEL_CMLKH,   "Intel Comet Lake-H",	0, 0 },
111 	{ HDA_INTEL_TGLK,    "Intel Tiger Lake",	0, 0 },
112 	{ HDA_INTEL_GMLK,    "Intel Gemini Lake",	0, 0 },
113 	{ HDA_INTEL_ALLK,    "Intel Alder Lake",	0, 0 },
114 	{ HDA_INTEL_ALLKM,   "Intel Alder Lake-M",	0, 0 },
115 	{ HDA_INTEL_ALLKN,   "Intel Alder Lake-N",	0, 0 },
116 	{ HDA_INTEL_ALLKP1,  "Intel Alder Lake-P",	0, 0 },
117 	{ HDA_INTEL_ALLKP2,  "Intel Alder Lake-P",	0, 0 },
118 	{ HDA_INTEL_ALLKPS,  "Intel Alder Lake-PS",	0, 0 },
119 	{ HDA_INTEL_RPTLK1,  "Intel Raptor Lake-P",	0, 0 },
120 	{ HDA_INTEL_RPTLK2,  "Intel Raptor Lake-P",	0, 0 },
121 	{ HDA_INTEL_82801F,  "Intel 82801F",	0, 0 },
122 	{ HDA_INTEL_63XXESB, "Intel 631x/632xESB",	0, 0 },
123 	{ HDA_INTEL_82801G,  "Intel 82801G",	0, 0 },
124 	{ HDA_INTEL_82801H,  "Intel 82801H",	0, 0 },
125 	{ HDA_INTEL_82801I,  "Intel 82801I",	0, 0 },
126 	{ HDA_INTEL_JLK,     "Intel Jasper Lake",	0, 0 },
127 	{ HDA_INTEL_82801JI, "Intel 82801JI",	0, 0 },
128 	{ HDA_INTEL_82801JD, "Intel 82801JD",	0, 0 },
129 	{ HDA_INTEL_PCH,     "Intel Ibex Peak",	0, 0 },
130 	{ HDA_INTEL_PCH2,    "Intel Ibex Peak",	0, 0 },
131 	{ HDA_INTEL_ELLK,    "Intel Elkhart Lake",	0, 0 },
132 	{ HDA_INTEL_JLK2,    "Intel Jasper Lake",	0, 0 },
133 	{ HDA_INTEL_BXTNP,   "Intel Broxton-P",	0, 0 },
134 	{ HDA_INTEL_SCH,     "Intel SCH",	0, 0 },
135 	{ HDA_NVIDIA_MCP51,  "NVIDIA MCP51",	0, HDAC_QUIRK_MSI },
136 	{ HDA_NVIDIA_MCP55,  "NVIDIA MCP55",	0, HDAC_QUIRK_MSI },
137 	{ HDA_NVIDIA_MCP61_1, "NVIDIA MCP61",	0, 0 },
138 	{ HDA_NVIDIA_MCP61_2, "NVIDIA MCP61",	0, 0 },
139 	{ HDA_NVIDIA_MCP65_1, "NVIDIA MCP65",	0, 0 },
140 	{ HDA_NVIDIA_MCP65_2, "NVIDIA MCP65",	0, 0 },
141 	{ HDA_NVIDIA_MCP67_1, "NVIDIA MCP67",	0, 0 },
142 	{ HDA_NVIDIA_MCP67_2, "NVIDIA MCP67",	0, 0 },
143 	{ HDA_NVIDIA_MCP73_1, "NVIDIA MCP73",	0, 0 },
144 	{ HDA_NVIDIA_MCP73_2, "NVIDIA MCP73",	0, 0 },
145 	{ HDA_NVIDIA_MCP78_1, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
146 	{ HDA_NVIDIA_MCP78_2, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
147 	{ HDA_NVIDIA_MCP78_3, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
148 	{ HDA_NVIDIA_MCP78_4, "NVIDIA MCP78",	0, HDAC_QUIRK_64BIT },
149 	{ HDA_NVIDIA_MCP79_1, "NVIDIA MCP79",	0, 0 },
150 	{ HDA_NVIDIA_MCP79_2, "NVIDIA MCP79",	0, 0 },
151 	{ HDA_NVIDIA_MCP79_3, "NVIDIA MCP79",	0, 0 },
152 	{ HDA_NVIDIA_MCP79_4, "NVIDIA MCP79",	0, 0 },
153 	{ HDA_NVIDIA_MCP89_1, "NVIDIA MCP89",	0, 0 },
154 	{ HDA_NVIDIA_MCP89_2, "NVIDIA MCP89",	0, 0 },
155 	{ HDA_NVIDIA_MCP89_3, "NVIDIA MCP89",	0, 0 },
156 	{ HDA_NVIDIA_MCP89_4, "NVIDIA MCP89",	0, 0 },
157 	{ HDA_NVIDIA_0BE2,   "NVIDIA (0x0be2)",	0, HDAC_QUIRK_MSI },
158 	{ HDA_NVIDIA_0BE3,   "NVIDIA (0x0be3)",	0, HDAC_QUIRK_MSI },
159 	{ HDA_NVIDIA_0BE4,   "NVIDIA (0x0be4)",	0, HDAC_QUIRK_MSI },
160 	{ HDA_NVIDIA_GT100,  "NVIDIA GT100",	0, HDAC_QUIRK_MSI },
161 	{ HDA_NVIDIA_GT104,  "NVIDIA GT104",	0, HDAC_QUIRK_MSI },
162 	{ HDA_NVIDIA_GT106,  "NVIDIA GT106",	0, HDAC_QUIRK_MSI },
163 	{ HDA_NVIDIA_GT108,  "NVIDIA GT108",	0, HDAC_QUIRK_MSI },
164 	{ HDA_NVIDIA_GT116,  "NVIDIA GT116",	0, HDAC_QUIRK_MSI },
165 	{ HDA_NVIDIA_GF119,  "NVIDIA GF119",	0, 0 },
166 	{ HDA_NVIDIA_GF110_1, "NVIDIA GF110",	0, HDAC_QUIRK_MSI },
167 	{ HDA_NVIDIA_GF110_2, "NVIDIA GF110",	0, HDAC_QUIRK_MSI },
168 	{ HDA_ATI_SB450,     "ATI SB450",	0, 0 },
169 	{ HDA_ATI_SB600,     "ATI SB600",	0, 0 },
170 	{ HDA_ATI_RS600,     "ATI RS600",	0, 0 },
171 	{ HDA_ATI_RS690,     "ATI RS690",	0, 0 },
172 	{ HDA_ATI_RS780,     "ATI RS780",	0, 0 },
173 	{ HDA_ATI_RS880,     "ATI RS880",	0, 0 },
174 	{ HDA_ATI_R600,      "ATI R600",	0, 0 },
175 	{ HDA_ATI_RV610,     "ATI RV610",	0, 0 },
176 	{ HDA_ATI_RV620,     "ATI RV620",	0, 0 },
177 	{ HDA_ATI_RV630,     "ATI RV630",	0, 0 },
178 	{ HDA_ATI_RV635,     "ATI RV635",	0, 0 },
179 	{ HDA_ATI_RV710,     "ATI RV710",	0, 0 },
180 	{ HDA_ATI_RV730,     "ATI RV730",	0, 0 },
181 	{ HDA_ATI_RV740,     "ATI RV740",	0, 0 },
182 	{ HDA_ATI_RV770,     "ATI RV770",	0, 0 },
183 	{ HDA_ATI_RV810,     "ATI RV810",	0, 0 },
184 	{ HDA_ATI_RV830,     "ATI RV830",	0, 0 },
185 	{ HDA_ATI_RV840,     "ATI RV840",	0, 0 },
186 	{ HDA_ATI_RV870,     "ATI RV870",	0, 0 },
187 	{ HDA_ATI_RV910,     "ATI RV910",	0, 0 },
188 	{ HDA_ATI_RV930,     "ATI RV930",	0, 0 },
189 	{ HDA_ATI_RV940,     "ATI RV940",	0, 0 },
190 	{ HDA_ATI_RV970,     "ATI RV970",	0, 0 },
191 	{ HDA_ATI_R1000,     "ATI R1000",	0, 0 },
192 	{ HDA_ATI_KABINI,    "ATI Kabini",	0, 0 },
193 	{ HDA_ATI_TRINITY,   "ATI Trinity",	0, 0 },
194 	{ HDA_AMD_X370,      "AMD X370",	0, 0 },
195 	{ HDA_AMD_X570,      "AMD X570",	0, 0 },
196 	{ HDA_AMD_STONEY,    "AMD Stoney",	0, 0 },
197 	{ HDA_AMD_RAVEN,     "AMD Raven",	0, 0 },
198 	{ HDA_AMD_HUDSON2,   "AMD Hudson-2",	0, 0 },
199 	{ HDA_RDC_M3010,     "RDC M3010",	0, 0 },
200 	{ HDA_VIA_VT82XX,    "VIA VT8251/8237A",0, 0 },
201 	{ HDA_VMWARE,        "VMware",		0, 0 },
202 	{ HDA_SIS_966,       "SiS 966/968",	0, 0 },
203 	{ HDA_ULI_M5461,     "ULI M5461",	0, 0 },
204 	/* Unknown */
205 	{ HDA_INTEL_ALL,  "Intel",		0, 0 },
206 	{ HDA_NVIDIA_ALL, "NVIDIA",		0, 0 },
207 	{ HDA_ATI_ALL,    "ATI",		0, 0 },
208 	{ HDA_AMD_ALL,    "AMD",		0, 0 },
209 	{ HDA_CREATIVE_ALL,    "Creative",	0, 0 },
210 	{ HDA_VIA_ALL,    "VIA",		0, 0 },
211 	{ HDA_VMWARE_ALL, "VMware",		0, 0 },
212 	{ HDA_SIS_ALL,    "SiS",		0, 0 },
213 	{ HDA_ULI_ALL,    "ULI",		0, 0 },
214 };
215 
216 static const struct {
217 	uint16_t vendor;
218 	uint8_t reg;
219 	uint8_t mask;
220 	uint8_t enable;
221 } hdac_pcie_snoop[] = {
222 	{  INTEL_VENDORID, 0x00, 0x00, 0x00 },
223 	{    ATI_VENDORID, 0x42, 0xf8, 0x02 },
224 	{    AMD_VENDORID, 0x42, 0xf8, 0x02 },
225 	{ NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
226 };
227 
228 /****************************************************************************
229  * Function prototypes
230  ****************************************************************************/
231 static void	hdac_intr_handler(void *);
232 static int	hdac_reset(struct hdac_softc *, bool);
233 static int	hdac_get_capabilities(struct hdac_softc *);
234 static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
235 static int	hdac_dma_alloc(struct hdac_softc *,
236 					struct hdac_dma *, bus_size_t);
237 static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
238 static int	hdac_mem_alloc(struct hdac_softc *);
239 static void	hdac_mem_free(struct hdac_softc *);
240 static int	hdac_irq_alloc(struct hdac_softc *);
241 static void	hdac_irq_free(struct hdac_softc *);
242 static void	hdac_corb_init(struct hdac_softc *);
243 static void	hdac_rirb_init(struct hdac_softc *);
244 static void	hdac_corb_start(struct hdac_softc *);
245 static void	hdac_rirb_start(struct hdac_softc *);
246 
247 static void	hdac_attach2(void *);
248 
249 static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
250 
251 static int	hdac_probe(device_t);
252 static int	hdac_attach(device_t);
253 static int	hdac_detach(device_t);
254 static int	hdac_suspend(device_t);
255 static int	hdac_resume(device_t);
256 
257 static int	hdac_rirb_flush(struct hdac_softc *sc);
258 static int	hdac_unsolq_flush(struct hdac_softc *sc);
259 
260 /* This function surely going to make its way into upper level someday. */
261 static void
262 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
263 {
264 	const char *res = NULL;
265 	int i = 0, j, k, len, inv;
266 
267 	if (resource_string_value(device_get_name(sc->dev),
268 	    device_get_unit(sc->dev), "config", &res) != 0)
269 		return;
270 	if (!(res != NULL && strlen(res) > 0))
271 		return;
272 	HDA_BOOTVERBOSE(
273 		device_printf(sc->dev, "Config options:");
274 	);
275 	for (;;) {
276 		while (res[i] != '\0' &&
277 		    (res[i] == ',' || isspace(res[i]) != 0))
278 			i++;
279 		if (res[i] == '\0') {
280 			HDA_BOOTVERBOSE(
281 				printf("\n");
282 			);
283 			return;
284 		}
285 		j = i;
286 		while (res[j] != '\0' &&
287 		    !(res[j] == ',' || isspace(res[j]) != 0))
288 			j++;
289 		len = j - i;
290 		if (len > 2 && strncmp(res + i, "no", 2) == 0)
291 			inv = 2;
292 		else
293 			inv = 0;
294 		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
295 			if (strncmp(res + i + inv,
296 			    hdac_quirks_tab[k].key, len - inv) != 0)
297 				continue;
298 			if (len - inv != strlen(hdac_quirks_tab[k].key))
299 				continue;
300 			HDA_BOOTVERBOSE(
301 				printf(" %s%s", (inv != 0) ? "no" : "",
302 				    hdac_quirks_tab[k].key);
303 			);
304 			if (inv == 0) {
305 				*on |= hdac_quirks_tab[k].value;
306 				*off &= ~hdac_quirks_tab[k].value;
307 			} else if (inv != 0) {
308 				*off |= hdac_quirks_tab[k].value;
309 				*on &= ~hdac_quirks_tab[k].value;
310 			}
311 			break;
312 		}
313 		i = j;
314 	}
315 }
316 
317 static void
318 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
319 {
320 	device_t dev;
321 	uint8_t rirbsts;
322 	int i;
323 
324 	/* Was this a controller interrupt? */
325 	if (intsts & HDAC_INTSTS_CIS) {
326 		/*
327 		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then
328 		 * we will need to check and clear HDAC_STATESTS.
329 		 * That event is used to report codec status changes such as
330 		 * a reset or a wake-up event.
331 		 */
332 		/*
333 		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
334 		 * will need to check and clear HDAC_CORBSTS_CMEI in
335 		 * HDAC_CORBSTS.
336 		 * That event is used to report CORB memory errors.
337 		 */
338 		/*
339 		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we
340 		 * will need to check and clear HDAC_RIRBSTS_RIRBOIS in
341 		 * HDAC_RIRBSTS.
342 		 * That event is used to report response FIFO overruns.
343 		 */
344 
345 		/* Get as many responses that we can */
346 		rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
347 		while (rirbsts & HDAC_RIRBSTS_RINTFL) {
348 			HDAC_WRITE_1(&sc->mem,
349 			    HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
350 			hdac_rirb_flush(sc);
351 			rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
352 		}
353 		if (sc->unsolq_rp != sc->unsolq_wp)
354 			taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
355 	}
356 
357 	if (intsts & HDAC_INTSTS_SIS_MASK) {
358 		for (i = 0; i < sc->num_ss; i++) {
359 			if ((intsts & (1 << i)) == 0)
360 				continue;
361 			HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
362 			    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
363 			if ((dev = sc->streams[i].dev) != NULL) {
364 				HDAC_STREAM_INTR(dev,
365 				    sc->streams[i].dir, sc->streams[i].stream);
366 			}
367 		}
368 	}
369 }
370 
371 /****************************************************************************
372  * void hdac_intr_handler(void *)
373  *
374  * Interrupt handler. Processes interrupts received from the hdac.
375  ****************************************************************************/
376 static void
377 hdac_intr_handler(void *context)
378 {
379 	struct hdac_softc *sc;
380 	uint32_t intsts;
381 
382 	sc = (struct hdac_softc *)context;
383 
384 	/*
385 	 * Loop until HDAC_INTSTS_GIS gets clear.
386 	 * It is plausible that hardware interrupts a host only when GIS goes
387 	 * from zero to one.  GIS is formed by OR-ing multiple hardware
388 	 * statuses, so it's possible that a previously cleared status gets set
389 	 * again while another status has not been cleared yet.  Thus, there
390 	 * will be no new interrupt as GIS always stayed set.  If we don't
391 	 * re-examine GIS then we can leave it set and never get an interrupt
392 	 * again.
393 	 */
394 	hdac_lock(sc);
395 	intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
396 	while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) {
397 		hdac_one_intr(sc, intsts);
398 		intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
399 	}
400 	hdac_unlock(sc);
401 }
402 
403 static void
404 hdac_poll_callback(void *arg)
405 {
406 	struct hdac_softc *sc = arg;
407 
408 	if (sc == NULL)
409 		return;
410 
411 	hdac_lock(sc);
412 	if (sc->polling == 0) {
413 		hdac_unlock(sc);
414 		return;
415 	}
416 	callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
417 	hdac_unlock(sc);
418 
419 	hdac_intr_handler(sc);
420 }
421 
422 /****************************************************************************
423  * int hdac_reset(hdac_softc *, bool)
424  *
425  * Reset the hdac to a quiescent and known state.
426  ****************************************************************************/
427 static int
428 hdac_reset(struct hdac_softc *sc, bool wakeup)
429 {
430 	uint32_t gctl;
431 	int count, i;
432 
433 	/*
434 	 * Stop all Streams DMA engine
435 	 */
436 	for (i = 0; i < sc->num_iss; i++)
437 		HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
438 	for (i = 0; i < sc->num_oss; i++)
439 		HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
440 	for (i = 0; i < sc->num_bss; i++)
441 		HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);
442 
443 	/*
444 	 * Stop Control DMA engines.
445 	 */
446 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
447 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);
448 
449 	/*
450 	 * Reset DMA position buffer.
451 	 */
452 	HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
453 	HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);
454 
455 	/*
456 	 * Reset the controller. The reset must remain asserted for
457 	 * a minimum of 100us.
458 	 */
459 	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
460 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
461 	count = 10000;
462 	do {
463 		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
464 		if (!(gctl & HDAC_GCTL_CRST))
465 			break;
466 		DELAY(10);
467 	} while (--count);
468 	if (gctl & HDAC_GCTL_CRST) {
469 		device_printf(sc->dev, "Unable to put hdac in reset\n");
470 		return (ENXIO);
471 	}
472 
473 	/* If wakeup is not requested - leave the controller in reset state. */
474 	if (!wakeup)
475 		return (0);
476 
477 	DELAY(100);
478 	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
479 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
480 	count = 10000;
481 	do {
482 		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
483 		if (gctl & HDAC_GCTL_CRST)
484 			break;
485 		DELAY(10);
486 	} while (--count);
487 	if (!(gctl & HDAC_GCTL_CRST)) {
488 		device_printf(sc->dev, "Device stuck in reset\n");
489 		return (ENXIO);
490 	}
491 
492 	/*
493 	 * Wait for codecs to finish their own reset sequence. The delay here
494 	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
495 	 */
496 	DELAY(1000);
497 
498 	return (0);
499 }
500 
501 /****************************************************************************
502  * int hdac_get_capabilities(struct hdac_softc *);
503  *
504  * Retreive the general capabilities of the hdac;
505  *	Number of Input Streams
506  *	Number of Output Streams
507  *	Number of bidirectional Streams
508  *	64bit ready
509  *	CORB and RIRB sizes
510  ****************************************************************************/
511 static int
512 hdac_get_capabilities(struct hdac_softc *sc)
513 {
514 	uint16_t gcap;
515 	uint8_t corbsize, rirbsize;
516 
517 	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
518 	sc->num_iss = HDAC_GCAP_ISS(gcap);
519 	sc->num_oss = HDAC_GCAP_OSS(gcap);
520 	sc->num_bss = HDAC_GCAP_BSS(gcap);
521 	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
522 	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
523 	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
524 	if (sc->quirks_on & HDAC_QUIRK_64BIT)
525 		sc->support_64bit = 1;
526 	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
527 		sc->support_64bit = 0;
528 
529 	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
530 	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
531 	    HDAC_CORBSIZE_CORBSZCAP_256)
532 		sc->corb_size = 256;
533 	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
534 	    HDAC_CORBSIZE_CORBSZCAP_16)
535 		sc->corb_size = 16;
536 	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
537 	    HDAC_CORBSIZE_CORBSZCAP_2)
538 		sc->corb_size = 2;
539 	else {
540 		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
541 		    __func__, corbsize);
542 		return (ENXIO);
543 	}
544 
545 	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
546 	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
547 	    HDAC_RIRBSIZE_RIRBSZCAP_256)
548 		sc->rirb_size = 256;
549 	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
550 	    HDAC_RIRBSIZE_RIRBSZCAP_16)
551 		sc->rirb_size = 16;
552 	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
553 	    HDAC_RIRBSIZE_RIRBSZCAP_2)
554 		sc->rirb_size = 2;
555 	else {
556 		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
557 		    __func__, rirbsize);
558 		return (ENXIO);
559 	}
560 
561 	HDA_BOOTVERBOSE(
562 		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
563 		    "NSDO %d%s, CORB %d, RIRB %d\n",
564 		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
565 		    sc->support_64bit ? ", 64bit" : "",
566 		    sc->corb_size, sc->rirb_size);
567 	);
568 
569 	return (0);
570 }
571 
572 /****************************************************************************
573  * void hdac_dma_cb
574  *
575  * This function is called by bus_dmamap_load when the mapping has been
576  * established. We just record the physical address of the mapping into
577  * the struct hdac_dma passed in.
578  ****************************************************************************/
579 static void
580 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
581 {
582 	struct hdac_dma *dma;
583 
584 	if (error == 0) {
585 		dma = (struct hdac_dma *)callback_arg;
586 		dma->dma_paddr = segs[0].ds_addr;
587 	}
588 }
589 
590 /****************************************************************************
591  * int hdac_dma_alloc
592  *
593  * This function allocate and setup a dma region (struct hdac_dma).
594  * It must be freed by a corresponding hdac_dma_free.
595  ****************************************************************************/
596 static int
597 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
598 {
599 	bus_size_t roundsz;
600 	int result;
601 
602 	roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
603 	bzero(dma, sizeof(*dma));
604 
605 	/*
606 	 * Create a DMA tag
607 	 */
608 	result = bus_dma_tag_create(
609 	    bus_get_dma_tag(sc->dev),		/* parent */
610 	    HDA_DMA_ALIGNMENT,			/* alignment */
611 	    0,					/* boundary */
612 	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
613 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
614 	    BUS_SPACE_MAXADDR,			/* highaddr */
615 	    NULL,				/* filtfunc */
616 	    NULL,				/* fistfuncarg */
617 	    roundsz,				/* maxsize */
618 	    1,					/* nsegments */
619 	    roundsz,				/* maxsegsz */
620 	    0,					/* flags */
621 	    NULL,				/* lockfunc */
622 	    NULL,				/* lockfuncarg */
623 	    &dma->dma_tag);			/* dmat */
624 	if (result != 0) {
625 		device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
626 		    __func__, result);
627 		goto hdac_dma_alloc_fail;
628 	}
629 
630 	/*
631 	 * Allocate DMA memory
632 	 */
633 	result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
634 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
635 	    ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
636 	     BUS_DMA_COHERENT),
637 	    &dma->dma_map);
638 	if (result != 0) {
639 		device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
640 		    __func__, result);
641 		goto hdac_dma_alloc_fail;
642 	}
643 
644 	dma->dma_size = roundsz;
645 
646 	/*
647 	 * Map the memory
648 	 */
649 	result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
650 	    (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
651 	if (result != 0 || dma->dma_paddr == 0) {
652 		if (result == 0)
653 			result = ENOMEM;
654 		device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n",
655 		    __func__, result);
656 		goto hdac_dma_alloc_fail;
657 	}
658 
659 	HDA_BOOTHVERBOSE(
660 		device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
661 		    __func__, (uintmax_t)size, (uintmax_t)roundsz);
662 	);
663 
664 	return (0);
665 
666 hdac_dma_alloc_fail:
667 	hdac_dma_free(sc, dma);
668 
669 	return (result);
670 }
671 
672 /****************************************************************************
673  * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
674  *
675  * Free a struct hdac_dma that has been previously allocated via the
676  * hdac_dma_alloc function.
677  ****************************************************************************/
678 static void
679 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
680 {
681 	if (dma->dma_paddr != 0) {
682 		/* Flush caches */
683 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
684 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
685 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
686 		dma->dma_paddr = 0;
687 	}
688 	if (dma->dma_vaddr != NULL) {
689 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
690 		dma->dma_vaddr = NULL;
691 	}
692 	if (dma->dma_tag != NULL) {
693 		bus_dma_tag_destroy(dma->dma_tag);
694 		dma->dma_tag = NULL;
695 	}
696 	dma->dma_size = 0;
697 }
698 
699 /****************************************************************************
700  * int hdac_mem_alloc(struct hdac_softc *)
701  *
702  * Allocate all the bus resources necessary to speak with the physical
703  * controller.
704  ****************************************************************************/
705 static int
706 hdac_mem_alloc(struct hdac_softc *sc)
707 {
708 	struct hdac_mem *mem;
709 
710 	mem = &sc->mem;
711 	mem->mem_rid = PCIR_BAR(0);
712 	mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
713 	    &mem->mem_rid, RF_ACTIVE);
714 	if (mem->mem_res == NULL) {
715 		device_printf(sc->dev,
716 		    "%s: Unable to allocate memory resource\n", __func__);
717 		return (ENOMEM);
718 	}
719 	mem->mem_tag = rman_get_bustag(mem->mem_res);
720 	mem->mem_handle = rman_get_bushandle(mem->mem_res);
721 
722 	return (0);
723 }
724 
725 /****************************************************************************
726  * void hdac_mem_free(struct hdac_softc *)
727  *
728  * Free up resources previously allocated by hdac_mem_alloc.
729  ****************************************************************************/
730 static void
731 hdac_mem_free(struct hdac_softc *sc)
732 {
733 	struct hdac_mem *mem;
734 
735 	mem = &sc->mem;
736 	if (mem->mem_res != NULL)
737 		bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
738 		    mem->mem_res);
739 	mem->mem_res = NULL;
740 }
741 
742 /****************************************************************************
743  * int hdac_irq_alloc(struct hdac_softc *)
744  *
745  * Allocate and setup the resources necessary for interrupt handling.
746  ****************************************************************************/
747 static int
748 hdac_irq_alloc(struct hdac_softc *sc)
749 {
750 	struct hdac_irq *irq;
751 	int result;
752 
753 	irq = &sc->irq;
754 	irq->irq_rid = 0x0;
755 
756 	if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
757 	    (result = pci_msi_count(sc->dev)) == 1 &&
758 	    pci_alloc_msi(sc->dev, &result) == 0)
759 		irq->irq_rid = 0x1;
760 
761 	irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
762 	    &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
763 	if (irq->irq_res == NULL) {
764 		device_printf(sc->dev, "%s: Unable to allocate irq\n",
765 		    __func__);
766 		goto hdac_irq_alloc_fail;
767 	}
768 	result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
769 	    NULL, hdac_intr_handler, sc, &irq->irq_handle);
770 	if (result != 0) {
771 		device_printf(sc->dev,
772 		    "%s: Unable to setup interrupt handler (%d)\n",
773 		    __func__, result);
774 		goto hdac_irq_alloc_fail;
775 	}
776 
777 	return (0);
778 
779 hdac_irq_alloc_fail:
780 	hdac_irq_free(sc);
781 
782 	return (ENXIO);
783 }
784 
785 /****************************************************************************
786  * void hdac_irq_free(struct hdac_softc *)
787  *
788  * Free up resources previously allocated by hdac_irq_alloc.
789  ****************************************************************************/
790 static void
791 hdac_irq_free(struct hdac_softc *sc)
792 {
793 	struct hdac_irq *irq;
794 
795 	irq = &sc->irq;
796 	if (irq->irq_res != NULL && irq->irq_handle != NULL)
797 		bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
798 	if (irq->irq_res != NULL)
799 		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
800 		    irq->irq_res);
801 	if (irq->irq_rid == 0x1)
802 		pci_release_msi(sc->dev);
803 	irq->irq_handle = NULL;
804 	irq->irq_res = NULL;
805 	irq->irq_rid = 0x0;
806 }
807 
808 /****************************************************************************
809  * void hdac_corb_init(struct hdac_softc *)
810  *
811  * Initialize the corb registers for operations but do not start it up yet.
812  * The CORB engine must not be running when this function is called.
813  ****************************************************************************/
814 static void
815 hdac_corb_init(struct hdac_softc *sc)
816 {
817 	uint8_t corbsize;
818 	uint64_t corbpaddr;
819 
820 	/* Setup the CORB size. */
821 	switch (sc->corb_size) {
822 	case 256:
823 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
824 		break;
825 	case 16:
826 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
827 		break;
828 	case 2:
829 		corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
830 		break;
831 	default:
832 		panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
833 	}
834 	HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);
835 
836 	/* Setup the CORB Address in the hdac */
837 	corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
838 	HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
839 	HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));
840 
841 	/* Set the WP and RP */
842 	sc->corb_wp = 0;
843 	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
844 	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
845 	/*
846 	 * The HDA specification indicates that the CORBRPRST bit will always
847 	 * read as zero. Unfortunately, it seems that at least the 82801G
848 	 * doesn't reset the bit to zero, which stalls the corb engine.
849 	 * manually reset the bit to zero before continuing.
850 	 */
851 	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);
852 
853 	/* Enable CORB error reporting */
854 #if 0
855 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
856 #endif
857 }
858 
859 /****************************************************************************
860  * void hdac_rirb_init(struct hdac_softc *)
861  *
862  * Initialize the rirb registers for operations but do not start it up yet.
863  * The RIRB engine must not be running when this function is called.
864  ****************************************************************************/
865 static void
866 hdac_rirb_init(struct hdac_softc *sc)
867 {
868 	uint8_t rirbsize;
869 	uint64_t rirbpaddr;
870 
871 	/* Setup the RIRB size. */
872 	switch (sc->rirb_size) {
873 	case 256:
874 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
875 		break;
876 	case 16:
877 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
878 		break;
879 	case 2:
880 		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
881 		break;
882 	default:
883 		panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
884 	}
885 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);
886 
887 	/* Setup the RIRB Address in the hdac */
888 	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
889 	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
890 	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));
891 
892 	/* Setup the WP and RP */
893 	sc->rirb_rp = 0;
894 	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);
895 
896 	/* Setup the interrupt threshold */
897 	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);
898 
899 	/* Enable Overrun and response received reporting */
900 #if 0
901 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
902 	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
903 #else
904 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
905 #endif
906 
907 	/*
908 	 * Make sure that the Host CPU cache doesn't contain any dirty
909 	 * cache lines that falls in the rirb. If I understood correctly, it
910 	 * should be sufficient to do this only once as the rirb is purely
911 	 * read-only from now on.
912 	 */
913 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
914 	    BUS_DMASYNC_PREREAD);
915 }
916 
917 /****************************************************************************
918  * void hdac_corb_start(hdac_softc *)
919  *
920  * Startup the corb DMA engine
921  ****************************************************************************/
922 static void
923 hdac_corb_start(struct hdac_softc *sc)
924 {
925 	uint32_t corbctl;
926 
927 	corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
928 	corbctl |= HDAC_CORBCTL_CORBRUN;
929 	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
930 }
931 
932 /****************************************************************************
933  * void hdac_rirb_start(hdac_softc *)
934  *
935  * Startup the rirb DMA engine
936  ****************************************************************************/
937 static void
938 hdac_rirb_start(struct hdac_softc *sc)
939 {
940 	uint32_t rirbctl;
941 
942 	rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
943 	rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
944 	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
945 }
946 
947 static int
948 hdac_rirb_flush(struct hdac_softc *sc)
949 {
950 	struct hdac_rirb *rirb_base, *rirb;
951 	nid_t cad;
952 	uint32_t resp, resp_ex;
953 	uint8_t rirbwp;
954 	int ret;
955 
956 	rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
957 	rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
958 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
959 	    BUS_DMASYNC_POSTREAD);
960 
961 	ret = 0;
962 	while (sc->rirb_rp != rirbwp) {
963 		sc->rirb_rp++;
964 		sc->rirb_rp %= sc->rirb_size;
965 		rirb = &rirb_base[sc->rirb_rp];
966 		resp = le32toh(rirb->response);
967 		resp_ex = le32toh(rirb->response_ex);
968 		cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
969 		if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
970 			sc->unsolq[sc->unsolq_wp++] = resp;
971 			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
972 			sc->unsolq[sc->unsolq_wp++] = cad;
973 			sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
974 		} else if (sc->codecs[cad].pending <= 0) {
975 			device_printf(sc->dev, "Unexpected unsolicited "
976 			    "response from address %d: %08x\n", cad, resp);
977 		} else {
978 			sc->codecs[cad].response = resp;
979 			sc->codecs[cad].pending--;
980 		}
981 		ret++;
982 	}
983 
984 	bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
985 	    BUS_DMASYNC_PREREAD);
986 	return (ret);
987 }
988 
989 static int
990 hdac_unsolq_flush(struct hdac_softc *sc)
991 {
992 	device_t child;
993 	nid_t cad;
994 	uint32_t resp;
995 	int ret = 0;
996 
997 	if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
998 		sc->unsolq_st = HDAC_UNSOLQ_BUSY;
999 		while (sc->unsolq_rp != sc->unsolq_wp) {
1000 			resp = sc->unsolq[sc->unsolq_rp++];
1001 			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1002 			cad = sc->unsolq[sc->unsolq_rp++];
1003 			sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
1004 			if ((child = sc->codecs[cad].dev) != NULL &&
1005 			    device_is_attached(child))
1006 				HDAC_UNSOL_INTR(child, resp);
1007 			ret++;
1008 		}
1009 		sc->unsolq_st = HDAC_UNSOLQ_READY;
1010 	}
1011 
1012 	return (ret);
1013 }
1014 
1015 /****************************************************************************
1016  * uint32_t hdac_send_command
1017  *
1018  * Wrapper function that sends only one command to a given codec
1019  ****************************************************************************/
1020 static uint32_t
1021 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
1022 {
1023 	int timeout;
1024 	uint32_t *corb;
1025 
1026 	hdac_lockassert(sc);
1027 	verb &= ~HDA_CMD_CAD_MASK;
1028 	verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
1029 	sc->codecs[cad].response = HDA_INVALID;
1030 
1031 	sc->codecs[cad].pending++;
1032 	sc->corb_wp++;
1033 	sc->corb_wp %= sc->corb_size;
1034 	corb = (uint32_t *)sc->corb_dma.dma_vaddr;
1035 	bus_dmamap_sync(sc->corb_dma.dma_tag,
1036 	    sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
1037 	corb[sc->corb_wp] = htole32(verb);
1038 	bus_dmamap_sync(sc->corb_dma.dma_tag,
1039 	    sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
1040 	HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
1041 
1042 	timeout = 10000;
1043 	do {
1044 		if (hdac_rirb_flush(sc) == 0)
1045 			DELAY(10);
1046 	} while (sc->codecs[cad].pending != 0 && --timeout);
1047 
1048 	if (sc->codecs[cad].pending != 0) {
1049 		device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
1050 		    verb, cad);
1051 		sc->codecs[cad].pending = 0;
1052 	}
1053 
1054 	if (sc->unsolq_rp != sc->unsolq_wp)
1055 		taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
1056 	return (sc->codecs[cad].response);
1057 }
1058 
1059 /****************************************************************************
1060  * Device Methods
1061  ****************************************************************************/
1062 
1063 /****************************************************************************
1064  * int hdac_probe(device_t)
1065  *
1066  * Probe for the presence of an hdac. If none is found, check for a generic
1067  * match using the subclass of the device.
1068  ****************************************************************************/
1069 static int
1070 hdac_probe(device_t dev)
1071 {
1072 	int i, result;
1073 	uint32_t model;
1074 	uint16_t class, subclass;
1075 	char desc[64];
1076 
1077 	model = (uint32_t)pci_get_device(dev) << 16;
1078 	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1079 	class = pci_get_class(dev);
1080 	subclass = pci_get_subclass(dev);
1081 
1082 	bzero(desc, sizeof(desc));
1083 	result = ENXIO;
1084 	for (i = 0; i < nitems(hdac_devices); i++) {
1085 		if (hdac_devices[i].model == model) {
1086 			strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
1087 			result = BUS_PROBE_DEFAULT;
1088 			break;
1089 		}
1090 		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1091 		    class == PCIC_MULTIMEDIA &&
1092 		    subclass == PCIS_MULTIMEDIA_HDA) {
1093 			snprintf(desc, sizeof(desc), "%s (0x%04x)",
1094 			    hdac_devices[i].desc, pci_get_device(dev));
1095 			result = BUS_PROBE_GENERIC;
1096 			break;
1097 		}
1098 	}
1099 	if (result == ENXIO && class == PCIC_MULTIMEDIA &&
1100 	    subclass == PCIS_MULTIMEDIA_HDA) {
1101 		snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
1102 		result = BUS_PROBE_GENERIC;
1103 	}
1104 	if (result != ENXIO)
1105 		device_set_descf(dev, "%s HDA Controller", desc);
1106 
1107 	return (result);
1108 }
1109 
1110 static void
1111 hdac_unsolq_task(void *context, int pending)
1112 {
1113 	struct hdac_softc *sc;
1114 
1115 	sc = (struct hdac_softc *)context;
1116 
1117 	hdac_lock(sc);
1118 	hdac_unsolq_flush(sc);
1119 	hdac_unlock(sc);
1120 }
1121 
1122 /****************************************************************************
1123  * int hdac_attach(device_t)
1124  *
1125  * Attach the device into the kernel. Interrupts usually won't be enabled
1126  * when this function is called. Setup everything that doesn't require
1127  * interrupts and defer probing of codecs until interrupts are enabled.
1128  ****************************************************************************/
1129 static int
1130 hdac_attach(device_t dev)
1131 {
1132 	struct hdac_softc *sc;
1133 	int result;
1134 	int i, devid = -1;
1135 	uint32_t model;
1136 	uint16_t class, subclass;
1137 	uint16_t vendor;
1138 	uint8_t v;
1139 
1140 	sc = device_get_softc(dev);
1141 	HDA_BOOTVERBOSE(
1142 		device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
1143 		    pci_get_subvendor(dev), pci_get_subdevice(dev));
1144 		device_printf(dev, "HDA Driver Revision: %s\n",
1145 		    HDA_DRV_TEST_REV);
1146 	);
1147 
1148 	model = (uint32_t)pci_get_device(dev) << 16;
1149 	model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1150 	class = pci_get_class(dev);
1151 	subclass = pci_get_subclass(dev);
1152 
1153 	for (i = 0; i < nitems(hdac_devices); i++) {
1154 		if (hdac_devices[i].model == model) {
1155 			devid = i;
1156 			break;
1157 		}
1158 		if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1159 		    class == PCIC_MULTIMEDIA &&
1160 		    subclass == PCIS_MULTIMEDIA_HDA) {
1161 			devid = i;
1162 			break;
1163 		}
1164 	}
1165 
1166 	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
1167 	sc->dev = dev;
1168 	TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
1169 	callout_init(&sc->poll_callout, 1);
1170 	for (i = 0; i < HDAC_CODEC_MAX; i++)
1171 		sc->codecs[i].dev = NULL;
1172 	if (devid >= 0) {
1173 		sc->quirks_on = hdac_devices[devid].quirks_on;
1174 		sc->quirks_off = hdac_devices[devid].quirks_off;
1175 	} else {
1176 		sc->quirks_on = 0;
1177 		sc->quirks_off = 0;
1178 	}
1179 	if (resource_int_value(device_get_name(dev),
1180 	    device_get_unit(dev), "msi", &i) == 0) {
1181 		if (i == 0)
1182 			sc->quirks_off |= HDAC_QUIRK_MSI;
1183 		else {
1184 			sc->quirks_on |= HDAC_QUIRK_MSI;
1185 			sc->quirks_off |= ~HDAC_QUIRK_MSI;
1186 		}
1187 	}
1188 	hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
1189 	HDA_BOOTVERBOSE(
1190 		device_printf(sc->dev,
1191 		    "Config options: on=0x%08x off=0x%08x\n",
1192 		    sc->quirks_on, sc->quirks_off);
1193 	);
1194 	sc->poll_ival = hz;
1195 	if (resource_int_value(device_get_name(dev),
1196 	    device_get_unit(dev), "polling", &i) == 0 && i != 0)
1197 		sc->polling = 1;
1198 	else
1199 		sc->polling = 0;
1200 
1201 	pci_enable_busmaster(dev);
1202 
1203 	vendor = pci_get_vendor(dev);
1204 	if (vendor == INTEL_VENDORID) {
1205 		/* TCSEL -> TC0 */
1206 		v = pci_read_config(dev, 0x44, 1);
1207 		pci_write_config(dev, 0x44, v & 0xf8, 1);
1208 		HDA_BOOTHVERBOSE(
1209 			device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v,
1210 			    pci_read_config(dev, 0x44, 1));
1211 		);
1212 	}
1213 
1214 #if defined(__i386__) || defined(__amd64__)
1215 	sc->flags |= HDAC_F_DMA_NOCACHE;
1216 
1217 	if (resource_int_value(device_get_name(dev),
1218 	    device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
1219 #else
1220 	sc->flags &= ~HDAC_F_DMA_NOCACHE;
1221 #endif
1222 		/*
1223 		 * Try to enable PCIe snoop to avoid messing around with
1224 		 * uncacheable DMA attribute. Since PCIe snoop register
1225 		 * config is pretty much vendor specific, there are no
1226 		 * general solutions on how to enable it, forcing us (even
1227 		 * Microsoft) to enable uncacheable or write combined DMA
1228 		 * by default.
1229 		 *
1230 		 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
1231 		 */
1232 		for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
1233 			if (hdac_pcie_snoop[i].vendor != vendor)
1234 				continue;
1235 			sc->flags &= ~HDAC_F_DMA_NOCACHE;
1236 			if (hdac_pcie_snoop[i].reg == 0x00)
1237 				break;
1238 			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1239 			if ((v & hdac_pcie_snoop[i].enable) ==
1240 			    hdac_pcie_snoop[i].enable)
1241 				break;
1242 			v &= hdac_pcie_snoop[i].mask;
1243 			v |= hdac_pcie_snoop[i].enable;
1244 			pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
1245 			v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1246 			if ((v & hdac_pcie_snoop[i].enable) !=
1247 			    hdac_pcie_snoop[i].enable) {
1248 				HDA_BOOTVERBOSE(
1249 					device_printf(dev,
1250 					    "WARNING: Failed to enable PCIe "
1251 					    "snoop!\n");
1252 				);
1253 #if defined(__i386__) || defined(__amd64__)
1254 				sc->flags |= HDAC_F_DMA_NOCACHE;
1255 #endif
1256 			}
1257 			break;
1258 		}
1259 #if defined(__i386__) || defined(__amd64__)
1260 	}
1261 #endif
1262 
1263 	HDA_BOOTHVERBOSE(
1264 		device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
1265 		    (sc->flags & HDAC_F_DMA_NOCACHE) ?
1266 		    "Uncacheable" : "PCIe snoop", vendor);
1267 	);
1268 
1269 	/* Allocate resources */
1270 	result = hdac_mem_alloc(sc);
1271 	if (result != 0)
1272 		goto hdac_attach_fail;
1273 
1274 	/* Get Capabilities */
1275 	result = hdac_get_capabilities(sc);
1276 	if (result != 0)
1277 		goto hdac_attach_fail;
1278 
1279 	/* Allocate CORB, RIRB, POS and BDLs dma memory */
1280 	result = hdac_dma_alloc(sc, &sc->corb_dma,
1281 	    sc->corb_size * sizeof(uint32_t));
1282 	if (result != 0)
1283 		goto hdac_attach_fail;
1284 	result = hdac_dma_alloc(sc, &sc->rirb_dma,
1285 	    sc->rirb_size * sizeof(struct hdac_rirb));
1286 	if (result != 0)
1287 		goto hdac_attach_fail;
1288 	sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
1289 	    M_HDAC, M_ZERO | M_WAITOK);
1290 	for (i = 0; i < sc->num_ss; i++) {
1291 		result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
1292 		    sizeof(struct hdac_bdle) * HDA_BDL_MAX);
1293 		if (result != 0)
1294 			goto hdac_attach_fail;
1295 	}
1296 	if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
1297 		if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
1298 			HDA_BOOTVERBOSE(
1299 				device_printf(dev, "Failed to "
1300 				    "allocate DMA pos buffer "
1301 				    "(non-fatal)\n");
1302 			);
1303 		} else {
1304 			uint64_t addr = sc->pos_dma.dma_paddr;
1305 
1306 			HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
1307 			HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
1308 			    (addr & HDAC_DPLBASE_DPLBASE_MASK) |
1309 			    HDAC_DPLBASE_DPLBASE_DMAPBE);
1310 		}
1311 	}
1312 
1313 	result = bus_dma_tag_create(
1314 	    bus_get_dma_tag(sc->dev),		/* parent */
1315 	    HDA_DMA_ALIGNMENT,			/* alignment */
1316 	    0,					/* boundary */
1317 	    (sc->support_64bit) ? BUS_SPACE_MAXADDR :
1318 		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1319 	    BUS_SPACE_MAXADDR,			/* highaddr */
1320 	    NULL,				/* filtfunc */
1321 	    NULL,				/* fistfuncarg */
1322 	    HDA_BUFSZ_MAX,			/* maxsize */
1323 	    1,					/* nsegments */
1324 	    HDA_BUFSZ_MAX,			/* maxsegsz */
1325 	    0,					/* flags */
1326 	    NULL,				/* lockfunc */
1327 	    NULL,				/* lockfuncarg */
1328 	    &sc->chan_dmat);			/* dmat */
1329 	if (result != 0) {
1330 		device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
1331 		     __func__, result);
1332 		goto hdac_attach_fail;
1333 	}
1334 
1335 	/* Quiesce everything */
1336 	HDA_BOOTHVERBOSE(
1337 		device_printf(dev, "Reset controller...\n");
1338 	);
1339 	hdac_reset(sc, true);
1340 
1341 	/* Initialize the CORB and RIRB */
1342 	hdac_corb_init(sc);
1343 	hdac_rirb_init(sc);
1344 
1345 	result = hdac_irq_alloc(sc);
1346 	if (result != 0)
1347 		goto hdac_attach_fail;
1348 
1349 	/* Defer remaining of initialization until interrupts are enabled */
1350 	sc->intrhook.ich_func = hdac_attach2;
1351 	sc->intrhook.ich_arg = (void *)sc;
1352 	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
1353 		sc->intrhook.ich_func = NULL;
1354 		hdac_attach2((void *)sc);
1355 	}
1356 
1357 	return (0);
1358 
1359 hdac_attach_fail:
1360 	hdac_irq_free(sc);
1361 	if (sc->streams != NULL)
1362 		for (i = 0; i < sc->num_ss; i++)
1363 			hdac_dma_free(sc, &sc->streams[i].bdl);
1364 	free(sc->streams, M_HDAC);
1365 	hdac_dma_free(sc, &sc->rirb_dma);
1366 	hdac_dma_free(sc, &sc->corb_dma);
1367 	hdac_mem_free(sc);
1368 	snd_mtxfree(sc->lock);
1369 
1370 	return (ENXIO);
1371 }
1372 
1373 static int
1374 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
1375 {
1376 	struct hdac_softc *sc;
1377 	device_t *devlist;
1378 	device_t dev;
1379 	int devcount, i, err, val;
1380 
1381 	dev = oidp->oid_arg1;
1382 	sc = device_get_softc(dev);
1383 	if (sc == NULL)
1384 		return (EINVAL);
1385 	val = 0;
1386 	err = sysctl_handle_int(oidp, &val, 0, req);
1387 	if (err != 0 || req->newptr == NULL || val == 0)
1388 		return (err);
1389 
1390 	/* XXX: Temporary. For debugging. */
1391 	if (val == 100) {
1392 		hdac_suspend(dev);
1393 		return (0);
1394 	} else if (val == 101) {
1395 		hdac_resume(dev);
1396 		return (0);
1397 	}
1398 
1399 	bus_topo_lock();
1400 
1401 	if ((err = device_get_children(dev, &devlist, &devcount)) != 0) {
1402 		bus_topo_unlock();
1403 		return (err);
1404 	}
1405 
1406 	hdac_lock(sc);
1407 	for (i = 0; i < devcount; i++)
1408 		HDAC_PINDUMP(devlist[i]);
1409 	hdac_unlock(sc);
1410 
1411 	bus_topo_unlock();
1412 
1413 	free(devlist, M_TEMP);
1414 	return (0);
1415 }
1416 
1417 static int
1418 hdac_mdata_rate(uint16_t fmt)
1419 {
1420 	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
1421 	int rate, bits;
1422 
1423 	if (fmt & (1 << 14))
1424 		rate = 44100;
1425 	else
1426 		rate = 48000;
1427 	rate *= ((fmt >> 11) & 0x07) + 1;
1428 	rate /= ((fmt >> 8) & 0x07) + 1;
1429 	bits = mbits[(fmt >> 4) & 0x03];
1430 	bits *= (fmt & 0x0f) + 1;
1431 	return (rate * bits);
1432 }
1433 
1434 static int
1435 hdac_bdata_rate(uint16_t fmt, int output)
1436 {
1437 	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
1438 	int rate, bits;
1439 
1440 	rate = 48000;
1441 	rate *= ((fmt >> 11) & 0x07) + 1;
1442 	bits = bbits[(fmt >> 4) & 0x03];
1443 	bits *= (fmt & 0x0f) + 1;
1444 	if (!output)
1445 		bits = ((bits + 7) & ~0x07) + 10;
1446 	return (rate * bits);
1447 }
1448 
1449 static void
1450 hdac_poll_reinit(struct hdac_softc *sc)
1451 {
1452 	int i, pollticks, min = 1000000;
1453 	struct hdac_stream *s;
1454 
1455 	if (sc->polling == 0)
1456 		return;
1457 	if (sc->unsol_registered > 0)
1458 		min = hz / 2;
1459 	for (i = 0; i < sc->num_ss; i++) {
1460 		s = &sc->streams[i];
1461 		if (s->running == 0)
1462 			continue;
1463 		pollticks = ((uint64_t)hz * s->blksz) /
1464 		    (hdac_mdata_rate(s->format) / 8);
1465 		pollticks >>= 1;
1466 		if (pollticks > hz)
1467 			pollticks = hz;
1468 		if (pollticks < 1)
1469 			pollticks = 1;
1470 		if (min > pollticks)
1471 			min = pollticks;
1472 	}
1473 	sc->poll_ival = min;
1474 	if (min == 1000000)
1475 		callout_stop(&sc->poll_callout);
1476 	else
1477 		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
1478 }
1479 
1480 static int
1481 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
1482 {
1483 	struct hdac_softc *sc;
1484 	device_t dev;
1485 	uint32_t ctl;
1486 	int err, val;
1487 
1488 	dev = oidp->oid_arg1;
1489 	sc = device_get_softc(dev);
1490 	if (sc == NULL)
1491 		return (EINVAL);
1492 	hdac_lock(sc);
1493 	val = sc->polling;
1494 	hdac_unlock(sc);
1495 	err = sysctl_handle_int(oidp, &val, 0, req);
1496 
1497 	if (err != 0 || req->newptr == NULL)
1498 		return (err);
1499 	if (val < 0 || val > 1)
1500 		return (EINVAL);
1501 
1502 	hdac_lock(sc);
1503 	if (val != sc->polling) {
1504 		if (val == 0) {
1505 			callout_stop(&sc->poll_callout);
1506 			hdac_unlock(sc);
1507 			callout_drain(&sc->poll_callout);
1508 			hdac_lock(sc);
1509 			sc->polling = 0;
1510 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1511 			ctl |= HDAC_INTCTL_GIE;
1512 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1513 		} else {
1514 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1515 			ctl &= ~HDAC_INTCTL_GIE;
1516 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1517 			sc->polling = 1;
1518 			hdac_poll_reinit(sc);
1519 		}
1520 	}
1521 	hdac_unlock(sc);
1522 
1523 	return (err);
1524 }
1525 
1526 static void
1527 hdac_attach2(void *arg)
1528 {
1529 	struct hdac_softc *sc;
1530 	device_t child;
1531 	uint32_t vendorid, revisionid;
1532 	int i;
1533 	uint16_t statests;
1534 
1535 	sc = (struct hdac_softc *)arg;
1536 
1537 	hdac_lock(sc);
1538 
1539 	/* Remove ourselves from the config hooks */
1540 	if (sc->intrhook.ich_func != NULL) {
1541 		config_intrhook_disestablish(&sc->intrhook);
1542 		sc->intrhook.ich_func = NULL;
1543 	}
1544 
1545 	HDA_BOOTHVERBOSE(
1546 		device_printf(sc->dev, "Starting CORB Engine...\n");
1547 	);
1548 	hdac_corb_start(sc);
1549 	HDA_BOOTHVERBOSE(
1550 		device_printf(sc->dev, "Starting RIRB Engine...\n");
1551 	);
1552 	hdac_rirb_start(sc);
1553 
1554 	/*
1555 	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1556 	 * (status change) interrupts.  The documentation says that we
1557 	 * should not make any assumptions about the state of this register
1558 	 * and set it explicitly.
1559 	 * NB: this needs to be done before the interrupt is enabled as
1560 	 * the handler does not expect this interrupt source.
1561 	 */
1562 	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
1563 
1564 	/*
1565 	 * Read and clear post-reset SDI wake status.
1566 	 * Each set bit corresponds to a codec that came out of reset.
1567 	 */
1568 	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
1569 	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests);
1570 
1571 	HDA_BOOTHVERBOSE(
1572 		device_printf(sc->dev,
1573 		    "Enabling controller interrupt...\n");
1574 	);
1575 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1576 	    HDAC_GCTL_UNSOL);
1577 	if (sc->polling == 0) {
1578 		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
1579 		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1580 	}
1581 	DELAY(1000);
1582 
1583 	HDA_BOOTHVERBOSE(
1584 		device_printf(sc->dev, "Scanning HDA codecs ...\n");
1585 	);
1586 	hdac_unlock(sc);
1587 	for (i = 0; i < HDAC_CODEC_MAX; i++) {
1588 		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
1589 			HDA_BOOTHVERBOSE(
1590 				device_printf(sc->dev,
1591 				    "Found CODEC at address %d\n", i);
1592 			);
1593 			hdac_lock(sc);
1594 			vendorid = hdac_send_command(sc, i,
1595 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
1596 			revisionid = hdac_send_command(sc, i,
1597 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
1598 			hdac_unlock(sc);
1599 			if (vendorid == HDA_INVALID &&
1600 			    revisionid == HDA_INVALID) {
1601 				device_printf(sc->dev,
1602 				    "CODEC at address %d not responding!\n", i);
1603 				continue;
1604 			}
1605 			sc->codecs[i].vendor_id =
1606 			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
1607 			sc->codecs[i].device_id =
1608 			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
1609 			sc->codecs[i].revision_id =
1610 			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
1611 			sc->codecs[i].stepping_id =
1612 			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
1613 			child = device_add_child(sc->dev, "hdacc", -1);
1614 			if (child == NULL) {
1615 				device_printf(sc->dev,
1616 				    "Failed to add CODEC device\n");
1617 				continue;
1618 			}
1619 			device_set_ivars(child, (void *)(intptr_t)i);
1620 			sc->codecs[i].dev = child;
1621 		}
1622 	}
1623 	bus_generic_attach(sc->dev);
1624 
1625 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1626 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1627 	    "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1628 	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
1629 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1630 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1631 	    "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev,
1632 	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
1633 }
1634 
1635 /****************************************************************************
1636  * int hdac_suspend(device_t)
1637  *
1638  * Suspend and power down HDA bus and codecs.
1639  ****************************************************************************/
1640 static int
1641 hdac_suspend(device_t dev)
1642 {
1643 	struct hdac_softc *sc = device_get_softc(dev);
1644 
1645 	HDA_BOOTHVERBOSE(
1646 		device_printf(dev, "Suspend...\n");
1647 	);
1648 	bus_generic_suspend(dev);
1649 
1650 	hdac_lock(sc);
1651 	HDA_BOOTHVERBOSE(
1652 		device_printf(dev, "Reset controller...\n");
1653 	);
1654 	callout_stop(&sc->poll_callout);
1655 	hdac_reset(sc, false);
1656 	hdac_unlock(sc);
1657 	callout_drain(&sc->poll_callout);
1658 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1659 	HDA_BOOTHVERBOSE(
1660 		device_printf(dev, "Suspend done\n");
1661 	);
1662 	return (0);
1663 }
1664 
1665 /****************************************************************************
1666  * int hdac_resume(device_t)
1667  *
1668  * Powerup and restore HDA bus and codecs state.
1669  ****************************************************************************/
1670 static int
1671 hdac_resume(device_t dev)
1672 {
1673 	struct hdac_softc *sc = device_get_softc(dev);
1674 	int error;
1675 
1676 	HDA_BOOTHVERBOSE(
1677 		device_printf(dev, "Resume...\n");
1678 	);
1679 	hdac_lock(sc);
1680 
1681 	/* Quiesce everything */
1682 	HDA_BOOTHVERBOSE(
1683 		device_printf(dev, "Reset controller...\n");
1684 	);
1685 	hdac_reset(sc, true);
1686 
1687 	/* Initialize the CORB and RIRB */
1688 	hdac_corb_init(sc);
1689 	hdac_rirb_init(sc);
1690 
1691 	HDA_BOOTHVERBOSE(
1692 		device_printf(dev, "Starting CORB Engine...\n");
1693 	);
1694 	hdac_corb_start(sc);
1695 	HDA_BOOTHVERBOSE(
1696 		device_printf(dev, "Starting RIRB Engine...\n");
1697 	);
1698 	hdac_rirb_start(sc);
1699 
1700 	/*
1701 	 * Clear HDAC_WAKEEN as at present we have no use for SDI wake
1702 	 * (status change) events.  The documentation says that we should
1703 	 * not make any assumptions about the state of this register and
1704 	 * set it explicitly.
1705 	 * Also, clear HDAC_STATESTS.
1706 	 * NB: this needs to be done before the interrupt is enabled as
1707 	 * the handler does not expect this interrupt source.
1708 	 */
1709 	HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0);
1710 	HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK);
1711 
1712 	HDA_BOOTHVERBOSE(
1713 		device_printf(dev, "Enabling controller interrupt...\n");
1714 	);
1715 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1716 	    HDAC_GCTL_UNSOL);
1717 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1718 	DELAY(1000);
1719 	hdac_poll_reinit(sc);
1720 	hdac_unlock(sc);
1721 
1722 	error = bus_generic_resume(dev);
1723 	HDA_BOOTHVERBOSE(
1724 		device_printf(dev, "Resume done\n");
1725 	);
1726 	return (error);
1727 }
1728 
1729 /****************************************************************************
1730  * int hdac_detach(device_t)
1731  *
1732  * Detach and free up resources utilized by the hdac device.
1733  ****************************************************************************/
1734 static int
1735 hdac_detach(device_t dev)
1736 {
1737 	struct hdac_softc *sc = device_get_softc(dev);
1738 	device_t *devlist;
1739 	int cad, i, devcount, error;
1740 
1741 	if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
1742 		return (error);
1743 	for (i = 0; i < devcount; i++) {
1744 		cad = (intptr_t)device_get_ivars(devlist[i]);
1745 		if ((error = device_delete_child(dev, devlist[i])) != 0) {
1746 			free(devlist, M_TEMP);
1747 			return (error);
1748 		}
1749 		sc->codecs[cad].dev = NULL;
1750 	}
1751 	free(devlist, M_TEMP);
1752 
1753 	hdac_lock(sc);
1754 	hdac_reset(sc, false);
1755 	hdac_unlock(sc);
1756 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1757 	hdac_irq_free(sc);
1758 
1759 	for (i = 0; i < sc->num_ss; i++)
1760 		hdac_dma_free(sc, &sc->streams[i].bdl);
1761 	free(sc->streams, M_HDAC);
1762 	hdac_dma_free(sc, &sc->pos_dma);
1763 	hdac_dma_free(sc, &sc->rirb_dma);
1764 	hdac_dma_free(sc, &sc->corb_dma);
1765 	if (sc->chan_dmat != NULL) {
1766 		bus_dma_tag_destroy(sc->chan_dmat);
1767 		sc->chan_dmat = NULL;
1768 	}
1769 	hdac_mem_free(sc);
1770 	snd_mtxfree(sc->lock);
1771 	return (0);
1772 }
1773 
1774 static bus_dma_tag_t
1775 hdac_get_dma_tag(device_t dev, device_t child)
1776 {
1777 	struct hdac_softc *sc = device_get_softc(dev);
1778 
1779 	return (sc->chan_dmat);
1780 }
1781 
1782 static int
1783 hdac_print_child(device_t dev, device_t child)
1784 {
1785 	int retval;
1786 
1787 	retval = bus_print_child_header(dev, child);
1788 	retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
1789 	retval += bus_print_child_footer(dev, child);
1790 
1791 	return (retval);
1792 }
1793 
1794 static int
1795 hdac_child_location(device_t dev, device_t child, struct sbuf *sb)
1796 {
1797 
1798 	sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child));
1799 	return (0);
1800 }
1801 
1802 static int
1803 hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
1804 {
1805 	struct hdac_softc *sc = device_get_softc(dev);
1806 	nid_t cad = (uintptr_t)device_get_ivars(child);
1807 
1808 	sbuf_printf(sb,
1809 	    "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
1810 	    sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
1811 	    sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
1812 	return (0);
1813 }
1814 
1815 static int
1816 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1817 {
1818 	struct hdac_softc *sc = device_get_softc(dev);
1819 	nid_t cad = (uintptr_t)device_get_ivars(child);
1820 
1821 	switch (which) {
1822 	case HDA_IVAR_CODEC_ID:
1823 		*result = cad;
1824 		break;
1825 	case HDA_IVAR_VENDOR_ID:
1826 		*result = sc->codecs[cad].vendor_id;
1827 		break;
1828 	case HDA_IVAR_DEVICE_ID:
1829 		*result = sc->codecs[cad].device_id;
1830 		break;
1831 	case HDA_IVAR_REVISION_ID:
1832 		*result = sc->codecs[cad].revision_id;
1833 		break;
1834 	case HDA_IVAR_STEPPING_ID:
1835 		*result = sc->codecs[cad].stepping_id;
1836 		break;
1837 	case HDA_IVAR_SUBVENDOR_ID:
1838 		*result = pci_get_subvendor(dev);
1839 		break;
1840 	case HDA_IVAR_SUBDEVICE_ID:
1841 		*result = pci_get_subdevice(dev);
1842 		break;
1843 	case HDA_IVAR_DMA_NOCACHE:
1844 		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
1845 		break;
1846 	case HDA_IVAR_STRIPES_MASK:
1847 		*result = (1 << (1 << sc->num_sdo)) - 1;
1848 		break;
1849 	default:
1850 		return (ENOENT);
1851 	}
1852 	return (0);
1853 }
1854 
1855 static struct mtx *
1856 hdac_get_mtx(device_t dev, device_t child)
1857 {
1858 	struct hdac_softc *sc = device_get_softc(dev);
1859 
1860 	return (sc->lock);
1861 }
1862 
1863 static uint32_t
1864 hdac_codec_command(device_t dev, device_t child, uint32_t verb)
1865 {
1866 
1867 	return (hdac_send_command(device_get_softc(dev),
1868 	    (intptr_t)device_get_ivars(child), verb));
1869 }
1870 
1871 static int
1872 hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
1873 {
1874 	int i, ss;
1875 
1876 	ss = -1;
1877 	/* Allocate ISS/OSS first. */
1878 	if (dir == 0) {
1879 		for (i = 0; i < sc->num_iss; i++) {
1880 			if (sc->streams[i].stream == stream) {
1881 				ss = i;
1882 				break;
1883 			}
1884 		}
1885 	} else {
1886 		for (i = 0; i < sc->num_oss; i++) {
1887 			if (sc->streams[i + sc->num_iss].stream == stream) {
1888 				ss = i + sc->num_iss;
1889 				break;
1890 			}
1891 		}
1892 	}
1893 	/* Fallback to BSS. */
1894 	if (ss == -1) {
1895 		for (i = 0; i < sc->num_bss; i++) {
1896 			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
1897 			    == stream) {
1898 				ss = i + sc->num_iss + sc->num_oss;
1899 				break;
1900 			}
1901 		}
1902 	}
1903 	return (ss);
1904 }
1905 
1906 static int
1907 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
1908     uint32_t **dmapos)
1909 {
1910 	struct hdac_softc *sc = device_get_softc(dev);
1911 	nid_t cad = (uintptr_t)device_get_ivars(child);
1912 	int stream, ss, bw, maxbw, prevbw;
1913 
1914 	/* Look for empty stream. */
1915 	ss = hdac_find_stream(sc, dir, 0);
1916 
1917 	/* Return if found nothing. */
1918 	if (ss < 0)
1919 		return (0);
1920 
1921 	/* Check bus bandwidth. */
1922 	bw = hdac_bdata_rate(format, dir);
1923 	if (dir == 1) {
1924 		bw *= 1 << (sc->num_sdo - stripe);
1925 		prevbw = sc->sdo_bw_used;
1926 		maxbw = 48000 * 960 * (1 << sc->num_sdo);
1927 	} else {
1928 		prevbw = sc->codecs[cad].sdi_bw_used;
1929 		maxbw = 48000 * 464;
1930 	}
1931 	HDA_BOOTHVERBOSE(
1932 		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
1933 		    (bw + prevbw) / 1000, maxbw / 1000,
1934 		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
1935 	);
1936 	if (bw + prevbw > maxbw)
1937 		return (0);
1938 	if (dir == 1)
1939 		sc->sdo_bw_used += bw;
1940 	else
1941 		sc->codecs[cad].sdi_bw_used += bw;
1942 
1943 	/* Allocate stream number */
1944 	if (ss >= sc->num_iss + sc->num_oss)
1945 		stream = 15 - (ss - sc->num_iss - sc->num_oss);
1946 	else if (ss >= sc->num_iss)
1947 		stream = ss - sc->num_iss + 1;
1948 	else
1949 		stream = ss + 1;
1950 
1951 	sc->streams[ss].dev = child;
1952 	sc->streams[ss].dir = dir;
1953 	sc->streams[ss].stream = stream;
1954 	sc->streams[ss].bw = bw;
1955 	sc->streams[ss].format = format;
1956 	sc->streams[ss].stripe = stripe;
1957 	if (dmapos != NULL) {
1958 		if (sc->pos_dma.dma_vaddr != NULL)
1959 			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
1960 		else
1961 			*dmapos = NULL;
1962 	}
1963 	return (stream);
1964 }
1965 
1966 static void
1967 hdac_stream_free(device_t dev, device_t child, int dir, int stream)
1968 {
1969 	struct hdac_softc *sc = device_get_softc(dev);
1970 	nid_t cad = (uintptr_t)device_get_ivars(child);
1971 	int ss;
1972 
1973 	ss = hdac_find_stream(sc, dir, stream);
1974 	KASSERT(ss >= 0,
1975 	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
1976 	if (dir == 1)
1977 		sc->sdo_bw_used -= sc->streams[ss].bw;
1978 	else
1979 		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
1980 	sc->streams[ss].stream = 0;
1981 	sc->streams[ss].dev = NULL;
1982 }
1983 
1984 static int
1985 hdac_stream_start(device_t dev, device_t child, int dir, int stream,
1986     bus_addr_t buf, int blksz, int blkcnt)
1987 {
1988 	struct hdac_softc *sc = device_get_softc(dev);
1989 	struct hdac_bdle *bdle;
1990 	uint64_t addr;
1991 	int i, ss, off;
1992 	uint32_t ctl;
1993 
1994 	ss = hdac_find_stream(sc, dir, stream);
1995 	KASSERT(ss >= 0,
1996 	    ("Start for not allocated stream (%d/%d)\n", dir, stream));
1997 
1998 	addr = (uint64_t)buf;
1999 	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
2000 	for (i = 0; i < blkcnt; i++, bdle++) {
2001 		bdle->addrl = htole32((uint32_t)addr);
2002 		bdle->addrh = htole32((uint32_t)(addr >> 32));
2003 		bdle->len = htole32(blksz);
2004 		bdle->ioc = htole32(1);
2005 		addr += blksz;
2006 	}
2007 
2008 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
2009 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);
2010 
2011 	off = ss << 5;
2012 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
2013 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
2014 	addr = sc->streams[ss].bdl.dma_paddr;
2015 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
2016 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));
2017 
2018 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
2019 	if (dir)
2020 		ctl |= HDAC_SDCTL2_DIR;
2021 	else
2022 		ctl &= ~HDAC_SDCTL2_DIR;
2023 	ctl &= ~HDAC_SDCTL2_STRM_MASK;
2024 	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
2025 	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
2026 	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
2027 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);
2028 
2029 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);
2030 
2031 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2032 	ctl |= 1 << ss;
2033 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2034 
2035 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
2036 	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
2037 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2038 	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2039 	    HDAC_SDCTL_RUN;
2040 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2041 
2042 	sc->streams[ss].blksz = blksz;
2043 	sc->streams[ss].running = 1;
2044 	hdac_poll_reinit(sc);
2045 	return (0);
2046 }
2047 
2048 static void
2049 hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
2050 {
2051 	struct hdac_softc *sc = device_get_softc(dev);
2052 	int ss, off;
2053 	uint32_t ctl;
2054 
2055 	ss = hdac_find_stream(sc, dir, stream);
2056 	KASSERT(ss >= 0,
2057 	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));
2058 
2059 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
2060 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);
2061 
2062 	off = ss << 5;
2063 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2064 	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2065 	    HDAC_SDCTL_RUN);
2066 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2067 
2068 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2069 	ctl &= ~(1 << ss);
2070 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2071 
2072 	sc->streams[ss].running = 0;
2073 	hdac_poll_reinit(sc);
2074 }
2075 
2076 static void
2077 hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
2078 {
2079 	struct hdac_softc *sc = device_get_softc(dev);
2080 	int timeout = 1000;
2081 	int to = timeout;
2082 	int ss, off;
2083 	uint32_t ctl;
2084 
2085 	ss = hdac_find_stream(sc, dir, stream);
2086 	KASSERT(ss >= 0,
2087 	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2088 
2089 	off = ss << 5;
2090 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2091 	ctl |= HDAC_SDCTL_SRST;
2092 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2093 	do {
2094 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2095 		if (ctl & HDAC_SDCTL_SRST)
2096 			break;
2097 		DELAY(10);
2098 	} while (--to);
2099 	if (!(ctl & HDAC_SDCTL_SRST))
2100 		device_printf(dev, "Reset setting timeout\n");
2101 	ctl &= ~HDAC_SDCTL_SRST;
2102 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2103 	to = timeout;
2104 	do {
2105 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2106 		if (!(ctl & HDAC_SDCTL_SRST))
2107 			break;
2108 		DELAY(10);
2109 	} while (--to);
2110 	if (ctl & HDAC_SDCTL_SRST)
2111 		device_printf(dev, "Reset timeout!\n");
2112 }
2113 
2114 static uint32_t
2115 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
2116 {
2117 	struct hdac_softc *sc = device_get_softc(dev);
2118 	int ss, off;
2119 
2120 	ss = hdac_find_stream(sc, dir, stream);
2121 	KASSERT(ss >= 0,
2122 	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2123 
2124 	off = ss << 5;
2125 	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
2126 }
2127 
2128 static int
2129 hdac_unsol_alloc(device_t dev, device_t child, int tag)
2130 {
2131 	struct hdac_softc *sc = device_get_softc(dev);
2132 
2133 	sc->unsol_registered++;
2134 	hdac_poll_reinit(sc);
2135 	return (tag);
2136 }
2137 
2138 static void
2139 hdac_unsol_free(device_t dev, device_t child, int tag)
2140 {
2141 	struct hdac_softc *sc = device_get_softc(dev);
2142 
2143 	sc->unsol_registered--;
2144 	hdac_poll_reinit(sc);
2145 }
2146 
2147 static device_method_t hdac_methods[] = {
2148 	/* device interface */
2149 	DEVMETHOD(device_probe,		hdac_probe),
2150 	DEVMETHOD(device_attach,	hdac_attach),
2151 	DEVMETHOD(device_detach,	hdac_detach),
2152 	DEVMETHOD(device_suspend,	hdac_suspend),
2153 	DEVMETHOD(device_resume,	hdac_resume),
2154 	/* Bus interface */
2155 	DEVMETHOD(bus_get_dma_tag,	hdac_get_dma_tag),
2156 	DEVMETHOD(bus_print_child,	hdac_print_child),
2157 	DEVMETHOD(bus_child_location,	hdac_child_location),
2158 	DEVMETHOD(bus_child_pnpinfo,	hdac_child_pnpinfo_method),
2159 	DEVMETHOD(bus_read_ivar,	hdac_read_ivar),
2160 	DEVMETHOD(hdac_get_mtx,		hdac_get_mtx),
2161 	DEVMETHOD(hdac_codec_command,	hdac_codec_command),
2162 	DEVMETHOD(hdac_stream_alloc,	hdac_stream_alloc),
2163 	DEVMETHOD(hdac_stream_free,	hdac_stream_free),
2164 	DEVMETHOD(hdac_stream_start,	hdac_stream_start),
2165 	DEVMETHOD(hdac_stream_stop,	hdac_stream_stop),
2166 	DEVMETHOD(hdac_stream_reset,	hdac_stream_reset),
2167 	DEVMETHOD(hdac_stream_getptr,	hdac_stream_getptr),
2168 	DEVMETHOD(hdac_unsol_alloc,	hdac_unsol_alloc),
2169 	DEVMETHOD(hdac_unsol_free,	hdac_unsol_free),
2170 	DEVMETHOD_END
2171 };
2172 
2173 static driver_t hdac_driver = {
2174 	"hdac",
2175 	hdac_methods,
2176 	sizeof(struct hdac_softc),
2177 };
2178 
2179 DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
2180