1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Implementation of primary alsa driver code base for Intel HD Audio.
5 *
6 * Copyright(c) 2004 Intel Corporation
7 *
8 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9 * PeiSen Hou <pshou@realtek.com.tw>
10 */
11
12 #include <linux/clocksource.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19
20 #ifdef CONFIG_X86
21 /* for art-tsc conversion */
22 #include <asm/tsc.h>
23 #endif
24
25 #include <sound/core.h>
26 #include <sound/initval.h>
27 #include <sound/pcm_params.h>
28 #include "hda_controller.h"
29 #include "hda_local.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "hda_controller_trace.h"
33
/* DSP lock helpers */
/*
 * Serialize stream ownership between normal PCM use and the DSP loader;
 * thin wrappers so callers can pass an azx_dev directly.
 */
#define dsp_lock(dev)		snd_hdac_dsp_lock(azx_stream(dev))
#define dsp_unlock(dev)		snd_hdac_dsp_unlock(azx_stream(dev))
#define dsp_is_locked(dev)	snd_hdac_stream_is_locked(azx_stream(dev))
38
39 /* assign a stream for the PCM */
40 static inline struct azx_dev *
azx_assign_device(struct azx * chip,struct snd_pcm_substream * substream)41 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
42 {
43 struct hdac_stream *s;
44
45 s = snd_hdac_stream_assign(azx_bus(chip), substream);
46 if (!s)
47 return NULL;
48 return stream_to_azx_dev(s);
49 }
50
51 /* release the assigned stream */
/* release the assigned stream */
/* counterpart of azx_assign_device(); gives the stream back to the pool */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	snd_hdac_stream_release(azx_stream(azx_dev));
}
56
/*
 * Return the codec-side stream info (playback or capture half) that
 * belongs to this substream.
 */
static inline struct hda_pcm_stream *
to_hda_pcm_stream(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	return &apcm->info->stream[substream->stream];
}
63
/*
 * Fold the codec-reported delay into a link timestamp.  For capture the
 * codec delay is added to the timestamp; for playback it is subtracted,
 * clamped at zero.  Without a get_delay hook the timestamp is unchanged.
 */
static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
				u64 nsec)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	u64 delay_frames, delay_nsec;

	if (!hinfo->ops.get_delay)
		return nsec;

	/* convert the codec delay from frames to nanoseconds */
	delay_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
	delay_nsec = div_u64(delay_frames * 1000000000LL,
			     substream->runtime->rate);

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		return nsec + delay_nsec;

	return nsec > delay_nsec ? nsec - delay_nsec : 0;
}
83
84 /*
85 * PCM ops
86 */
87
/*
 * PCM close callback: release the controller stream, run the codec's
 * close hook, then drop the power and PCM references taken at open time.
 */
static int azx_pcm_close(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);

	trace_azx_pcm_close(chip, azx_dev);
	mutex_lock(&chip->open_mutex);
	azx_release_device(azx_dev);
	if (hinfo->ops.close)
		hinfo->ops.close(hinfo, apcm->codec, substream);
	/* balances snd_hda_power_up() in azx_pcm_open() */
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	/* balances snd_hda_codec_pcm_get() in azx_pcm_open() */
	snd_hda_codec_pcm_put(apcm->info);
	return 0;
}
105
/*
 * hw_params callback: record the buffer geometry in the hdac_stream and
 * build the BDL entries.  Returns -EBUSY while the stream is locked by
 * the DSP loader, -ENOMEM if too many BDL entries would be required.
 */
static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hdac_stream *hdas = azx_stream(azx_dev);
	int ret = 0;

	trace_azx_pcm_hw_params(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		ret = -EBUSY;
		goto unlock;
	}

	/* Set up BDLEs here, return -ENOMEM if too many BDLEs are required */
	hdas->bufsize = params_buffer_bytes(hw_params);
	hdas->period_bytes = params_period_bytes(hw_params);
	/* the format value is computed later, in azx_pcm_prepare() */
	hdas->format_val = 0;
	/* period wakeups are dropped only when both hw info and app flags agree */
	hdas->no_period_wakeup =
		(hw_params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
		(hw_params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
	if (snd_hdac_stream_setup_periods(hdas) < 0)
		ret = -ENOMEM;

 unlock:
	dsp_unlock(azx_dev);
	return ret;
}
136
/*
 * hw_free callback: clean up the stream registers/BDL (unless the stream
 * is currently held by the DSP loader) and let the codec clean up, then
 * clear the prepared flag so a new prepare is forced before triggering.
 */
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

	/* reset BDL address */
	dsp_lock(azx_dev);
	if (!dsp_is_locked(azx_dev))
		snd_hdac_stream_cleanup(azx_stream(azx_dev));

	snd_hda_codec_cleanup(apcm->codec, hinfo, substream);

	azx_stream(azx_dev)->prepared = 0;
	dsp_unlock(azx_dev);
	return 0;
}
154
/*
 * prepare callback: reset the stream, derive the HD-audio format value
 * from the runtime parameters (honoring any SPDIF controls on this NID),
 * program the controller stream registers and let the codec prepare its
 * converters.  Marks the stream as prepared on success.
 */
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int format_val, stream_tag, bits;
	int err;
	struct hda_spdif_out *spdif =
		snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
	unsigned short ctls = spdif ? spdif->ctls : 0;

	trace_azx_pcm_prepare(chip, azx_dev);
	dsp_lock(azx_dev);
	if (dsp_is_locked(azx_dev)) {
		err = -EBUSY;
		goto unlock;
	}

	snd_hdac_stream_reset(azx_stream(azx_dev));
	bits = snd_hdac_stream_format_bits(runtime->format, SNDRV_PCM_SUBFORMAT_STD, hinfo->maxbps);

	format_val = snd_hdac_spdif_stream_format(runtime->channels, bits, runtime->rate, ctls);
	if (!format_val) {
		dev_err(chip->card->dev,
			"invalid format_val, rate=%d, ch=%d, format=%d\n",
			runtime->rate, runtime->channels, runtime->format);
		err = -EINVAL;
		goto unlock;
	}

	err = snd_hdac_stream_set_params(azx_stream(azx_dev), format_val);
	if (err < 0)
		goto unlock;

	snd_hdac_stream_setup(azx_stream(azx_dev), false);

	stream_tag = azx_dev->core.stream_tag;
	/* CA-IBG chips need the playback stream starting from 1 */
	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
	    stream_tag > chip->capture_streams)
		stream_tag -= chip->capture_streams;
	err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				    azx_dev->core.format_val, substream);

 unlock:
	/* -EBUSY/-EINVAL jumps land here too; only mark prepared when err == 0 */
	if (!err)
		azx_stream(azx_dev)->prepared = 1;
	dsp_unlock(azx_dev);
	return err;
}
207
/*
 * trigger callback: start or stop this substream together with all
 * substreams linked to it, using the controller's stream-sync register
 * so the DMA engines change state in lock-step.  Runs in atomic context;
 * only the bus register spinlock is taken here.
 */
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct hdac_bus *bus = azx_bus(chip);
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	struct hdac_stream *hstr;
	bool start;
	int sbits = 0;	/* bitmask of stream indices taking part */
	int sync_reg;

	azx_dev = get_azx_dev(substream);
	trace_azx_pcm_trigger(chip, azx_dev, cmd);

	hstr = azx_stream(azx_dev);
	/* older chips expose the SSYNC register at a different offset */
	if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
		sync_reg = AZX_REG_OLD_SSYNC;
	else
		sync_reg = AZX_REG_SSYNC;

	if (dsp_is_locked(azx_dev) || !hstr->prepared)
		return -EPIPE;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = true;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = false;
		break;
	default:
		return -EINVAL;
	}

	/* collect the linked substreams belonging to this card */
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		sbits |= 1 << azx_dev->core.index;
		snd_pcm_trigger_done(s, substream);
	}

	spin_lock(&bus->reg_lock);

	/* first, set SYNC bits of corresponding streams */
	snd_hdac_stream_sync_trigger(hstr, true, sbits, sync_reg);

	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		azx_dev = get_azx_dev(s);
		if (start) {
			/* flag for the position-check logic after (re)start */
			azx_dev->insufficient = 1;
			snd_hdac_stream_start(azx_stream(azx_dev));
		} else {
			snd_hdac_stream_stop(azx_stream(azx_dev));
		}
	}
	spin_unlock(&bus->reg_lock);

	/* wait until all streams report the requested run state */
	snd_hdac_stream_sync(hstr, start, sbits);

	spin_lock(&bus->reg_lock);
	/* reset SYNC bits */
	snd_hdac_stream_sync_trigger(hstr, false, sbits, sync_reg);
	if (start)
		snd_hdac_stream_timecounter_init(hstr, sbits);
	spin_unlock(&bus->reg_lock);
	return 0;
}
283
/* get the current DMA position from the LPIB stream register */
unsigned int azx_get_pos_lpib(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_lpib);
289
/* get the current DMA position from the position buffer in memory */
unsigned int azx_get_pos_posbuf(struct azx *chip, struct azx_dev *azx_dev)
{
	return snd_hdac_stream_get_pos_posbuf(azx_stream(azx_dev));
}
EXPORT_SYMBOL_GPL(azx_get_pos_posbuf);
295
/*
 * Return the current stream position in bytes, using the chip-specific
 * position callback when set, otherwise the position buffer.  Also
 * refreshes runtime->delay from the chip and codec delay callbacks.
 */
unsigned int azx_get_position(struct azx *chip,
			      struct azx_dev *azx_dev)
{
	struct snd_pcm_substream *substream = azx_dev->core.substream;
	unsigned int pos;
	int stream = substream->stream;
	int delay = 0;

	if (chip->get_position[stream])
		pos = chip->get_position[stream](chip, azx_dev);
	else /* use the position buffer as default */
		pos = azx_get_pos_posbuf(chip, azx_dev);

	/* fold out-of-range readings back to the start of the buffer */
	if (pos >= azx_dev->core.bufsize)
		pos = 0;

	if (substream->runtime) {
		struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
		struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);

		if (chip->get_delay[stream])
			delay += chip->get_delay[stream](chip, azx_dev, pos);
		if (hinfo->ops.get_delay)
			delay += hinfo->ops.get_delay(hinfo, apcm->codec,
						      substream);
		substream->runtime->delay = delay;
	}

	trace_azx_get_position(chip, azx_dev, pos, delay);
	return pos;
}
EXPORT_SYMBOL_GPL(azx_get_position);
328
azx_pcm_pointer(struct snd_pcm_substream * substream)329 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
330 {
331 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
332 struct azx *chip = apcm->chip;
333 struct azx_dev *azx_dev = get_azx_dev(substream);
334 return bytes_to_frames(substream->runtime,
335 azx_get_position(chip, azx_dev));
336 }
337
/*
 * azx_scale64: Scale base by mult/div while not overflowing sanely
 *
 * Derived from scale64_check_overflow in kernel/time/timekeeping.c
 *
 * The timestamps for a 48kHz stream can overflow after (2^64/10^9)/48K
 * which is about 384307 seconds, i.e. ~4.5 days.
 *
 * This scales the calculation so that overflow will happen but after 2^64 /
 * 48000 secs, which is pretty large!
 *
 * In the calculation below:
 * base may overflow, but since there isn't any additional division
 * performed on base it's OK
 * rem can't overflow because both are 32-bit values
 */
354
355 #ifdef CONFIG_X86
/*
 * Compute base * num / den without losing precision: split base into
 * quotient and remainder by den, scale both by num, and recombine.
 */
static u64 azx_scale64(u64 base, u32 num, u32 den)
{
	u64 quot = base;
	u64 rem;

	/* do_div() divides in place and hands back the remainder */
	rem = do_div(quot, den);

	quot *= num;
	rem *= num;
	do_div(rem, den);

	return quot + rem;
}
369
/*
 * Capture a correlated (device time, system counter) sample from the
 * controller's timestamping unit for get_device_system_crosststamp():
 * the link position counter is converted to nanoseconds and returned in
 * *device, the matching TSC/ART value in system->cycles.  Samples taken
 * too close to a wall-frame rollover are retried with growing delays.
 * Returns 0 on success, -EIO on capture timeout or persistent rollover.
 */
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	struct snd_pcm_substream *substream = ctx;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime;
	u64 ll_counter, ll_counter_l, ll_counter_h;
	u64 tsc_counter, tsc_counter_l, tsc_counter_h;
	u32 wallclk_ctr, wallclk_cycles;
	bool direction;
	u32 dma_select;
	u32 timeout;
	u32 retry_count = 0;

	runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		direction = 1;
	else
		direction = 0;

	/* 0th stream tag is not used, so DMA ch 0 is for 1st stream tag */
	do {
		timeout = 100;
		/* select direction and DMA channel to be captured */
		dma_select = (direction << GTSCC_CDMAS_DMA_DIR_SHIFT) |
					(azx_dev->core.stream_tag - 1);
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, dma_select);

		/* Enable the capture */
		snd_hdac_chip_updatel(azx_bus(chip), GTSCC, 0, GTSCC_TSCCI_MASK);

		/* poll until the hardware signals capture-done */
		while (timeout) {
			if (snd_hdac_chip_readl(azx_bus(chip), GTSCC) &
						GTSCC_TSCCD_MASK)
				break;

			timeout--;
		}

		if (!timeout) {
			dev_err(chip->card->dev, "GTSCC capture Timedout!\n");
			return -EIO;
		}

		/* Read wall clock counter */
		wallclk_ctr = snd_hdac_chip_readl(azx_bus(chip), WALFCC);

		/* Read TSC counter */
		tsc_counter_l = snd_hdac_chip_readl(azx_bus(chip), TSCCL);
		tsc_counter_h = snd_hdac_chip_readl(azx_bus(chip), TSCCU);

		/* Read Link counter */
		ll_counter_l = snd_hdac_chip_readl(azx_bus(chip), LLPCL);
		ll_counter_h = snd_hdac_chip_readl(azx_bus(chip), LLPCU);

		/* Ack: registers read done */
		snd_hdac_chip_writel(azx_bus(chip), GTSCC, GTSCC_TSCCD_SHIFT);

		/* assemble the 64-bit counters from their 32-bit halves */
		tsc_counter = (tsc_counter_h << TSCCU_CCU_SHIFT) |
						tsc_counter_l;

		ll_counter = (ll_counter_h << LLPC_CCU_SHIFT) | ll_counter_l;
		wallclk_cycles = wallclk_ctr & WALFCC_CIF_MASK;

		/*
		 * An error occurs near frame "rollover". The clocks in
		 * frame value indicates whether this error may have
		 * occurred. Here we use the value of 10 i.e.,
		 * HDA_MAX_CYCLE_OFFSET
		 */
		if (wallclk_cycles < HDA_MAX_CYCLE_VALUE - HDA_MAX_CYCLE_OFFSET
					&& wallclk_cycles > HDA_MAX_CYCLE_OFFSET)
			break;

		/*
		 * Sleep before we read again, else we may again get
		 * value near to MAX_CYCLE. Try to sleep for different
		 * amount of time so we dont hit the same number again
		 */
		udelay(retry_count++);

	} while (retry_count != HDA_MAX_CYCLE_READ_RETRY);

	if (retry_count == HDA_MAX_CYCLE_READ_RETRY) {
		dev_err_ratelimited(chip->card->dev,
			"Error in WALFCC cycle count\n");
		return -EIO;
	}

	/* link counter in frames -> ns, plus the sub-frame cycle fraction */
	*device = ns_to_ktime(azx_scale64(ll_counter,
				NSEC_PER_SEC, runtime->rate));
	*device = ktime_add_ns(*device, (wallclk_cycles * NSEC_PER_SEC) /
			       ((HDA_MAX_CYCLE_VALUE + 1) * runtime->rate));

	system->cycles = tsc_counter;
	system->cs_id = CSID_X86_ART;

	return 0;
}
471
472 #else
/* non-x86 stub: cross-timestamping needs the x86 ART counter */
static int azx_get_sync_time(ktime_t *device,
			     struct system_counterval_t *system, void *ctx)
{
	return -ENXIO;
}
478 #endif
479
/*
 * Ask the timekeeping core for a correlated device/system timestamp,
 * using azx_get_sync_time() as the device sampler.
 */
static int azx_get_crosststamp(struct snd_pcm_substream *substream,
			      struct system_device_crosststamp *xtstamp)
{
	return get_device_system_crosststamp(azx_get_sync_time,
					substream, NULL, xtstamp);
}
486
is_link_time_supported(struct snd_pcm_runtime * runtime,struct snd_pcm_audio_tstamp_config * ts)487 static inline bool is_link_time_supported(struct snd_pcm_runtime *runtime,
488 struct snd_pcm_audio_tstamp_config *ts)
489 {
490 if (runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME)
491 if (ts->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED)
492 return true;
493
494 return false;
495 }
496
/*
 * get_time_info callback: fill in the system and audio timestamps for
 * the requested timestamp type.  LINK timestamps come from the stream's
 * timecounter; LINK_SYNCHRONIZED from a device/system cross-timestamp;
 * anything else falls back to the default reporting.
 */
static int azx_get_time_info(struct snd_pcm_substream *substream,
			struct timespec64 *system_ts, struct timespec64 *audio_ts,
			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct azx_dev *azx_dev = get_azx_dev(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct system_device_crosststamp xtstamp;
	int ret;
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&azx_dev->core.tc);
		if (audio_tstamp_config->report_delay)
			nsec = azx_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec64(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of structure is valid */
		audio_tstamp_report->accuracy = 42; /* 24 MHz WallClock == 42ns resolution */

	} else if (is_link_time_supported(runtime, audio_tstamp_config)) {

		ret = azx_get_crosststamp(substream, &xtstamp);
		if (ret)
			return ret;

		/* pick the system clock matching the tstamp type in use */
		switch (runtime->tstamp_type) {
		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
			/* plain MONOTONIC cannot be derived from the cross-timestamp */
			return -EINVAL;

		case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
			*system_ts = ktime_to_timespec64(xtstamp.sys_monoraw);
			break;

		default:
			*system_ts = ktime_to_timespec64(xtstamp.sys_realtime);
			break;

		}

		*audio_ts = ktime_to_timespec64(xtstamp.device);

		audio_tstamp_report->actual_type =
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED;
		audio_tstamp_report->accuracy_report = 1;
		/* 24 MHz WallClock == 42ns resolution */
		audio_tstamp_report->accuracy = 42;

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}
557
/*
 * Default hardware description exposed to userspace; the per-codec
 * fields (formats, rates, channels) are overwritten in azx_pcm_open().
 */
static const struct snd_pcm_hardware azx_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* No full-resume yet implemented */
				 /* SNDRV_PCM_INFO_RESUME |*/
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START |
				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		AZX_MAX_FRAG,
	.fifo_size =		0,
};
583
/*
 * PCM open callback: take codec PCM and power references, claim a
 * controller stream, merge the codec's capabilities into the runtime
 * hardware description, apply buffer constraints and call the codec's
 * open hook.  All acquired resources are released on any failure path.
 */
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	int buff_step;

	snd_hda_codec_pcm_get(apcm->info);
	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	trace_azx_pcm_open(chip, azx_dev);
	if (azx_dev == NULL) {
		err = -EBUSY;
		goto unlock;
	}
	runtime->private_data = azx_dev;

	/* start from the generic template, then merge codec capabilities */
	runtime->hw = azx_pcm_hw;
	if (chip->gts_present)
		runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
	runtime->hw.channels_min = hinfo->channels_min;
	runtime->hw.channels_max = hinfo->channels_max;
	runtime->hw.formats = hinfo->formats;
	runtime->hw.rates = hinfo->rates;
	snd_pcm_limit_hw_rates(runtime);
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);

	/* avoid wrap-around with wall-clock */
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
				     20,
				     178000000);

	if (chip->align_buffer_size)
		/* constrain buffer sizes to be multiple of 128
		   bytes. This is more efficient in terms of memory
		   access but isn't required by the HDA spec and
		   prevents users from specifying exact period/buffer
		   sizes. For example for 44.1kHz, a period size set
		   to 20ms will be rounded to 19.59ms. */
		buff_step = 128;
	else
		/* Don't enforce steps on buffer sizes, still need to
		   be multiple of 4 bytes (HDA spec). Tested on Intel
		   HDA controllers, may not work on all devices where
		   option needs to be disabled */
		buff_step = 4;

	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   buff_step);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   buff_step);
	snd_hda_power_up(apcm->codec);
	if (hinfo->ops.open)
		err = hinfo->ops.open(hinfo, apcm->codec, substream);
	else
		err = -ENODEV;
	if (err < 0) {
		azx_release_device(azx_dev);
		goto powerdown;
	}
	/* the open hook may have narrowed the rates; re-apply the limits */
	snd_pcm_limit_hw_rates(runtime);
	/* sanity check */
	if (snd_BUG_ON(!runtime->hw.channels_min) ||
	    snd_BUG_ON(!runtime->hw.channels_max) ||
	    snd_BUG_ON(!runtime->hw.formats) ||
	    snd_BUG_ON(!runtime->hw.rates)) {
		azx_release_device(azx_dev);
		if (hinfo->ops.close)
			hinfo->ops.close(hinfo, apcm->codec, substream);
		err = -EINVAL;
		goto powerdown;
	}

	/* disable LINK_ATIME timestamps for capture streams
	   until we figure out how to handle digital inputs */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
	}

	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;

 powerdown:
	snd_hda_power_down(apcm->codec);
 unlock:
	mutex_unlock(&chip->open_mutex);
	snd_hda_codec_pcm_put(apcm->info);
	return err;
}
678
/* PCM ops bound to every substream in snd_hda_attach_pcm_stream() */
static const struct snd_pcm_ops azx_pcm_ops = {
	.open = azx_pcm_open,
	.close = azx_pcm_close,
	.hw_params = azx_pcm_hw_params,
	.hw_free = azx_pcm_hw_free,
	.prepare = azx_pcm_prepare,
	.trigger = azx_pcm_trigger,
	.pointer = azx_pcm_pointer,
	.get_time_info =  azx_get_time_info,
};
689
azx_pcm_free(struct snd_pcm * pcm)690 static void azx_pcm_free(struct snd_pcm *pcm)
691 {
692 struct azx_pcm *apcm = pcm->private_data;
693 if (apcm) {
694 list_del(&apcm->list);
695 apcm->info->pcm = NULL;
696 kfree(apcm);
697 }
698 }
699
700 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
701
/*
 * Create and register a snd_pcm instance for the given codec PCM: reject
 * duplicate device numbers, allocate the azx_pcm wrapper, hook up the
 * PCM ops for each direction that has substreams, and set up managed
 * buffer preallocation (capped at MAX_PREALLOC_SIZE).
 */
int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
			      struct hda_pcm *cpcm)
{
	struct hdac_bus *bus = &_bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct snd_pcm *pcm;
	struct azx_pcm *apcm;
	int pcm_dev = cpcm->device;
	unsigned int size;
	int s, err;
	int type = SNDRV_DMA_TYPE_DEV_SG;

	/* refuse to create two PCMs with the same device number */
	list_for_each_entry(apcm, &chip->pcm_list, list) {
		if (apcm->pcm->device == pcm_dev) {
			dev_err(chip->card->dev, "PCM %d already exists\n",
				pcm_dev);
			return -EBUSY;
		}
	}
	err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
			  cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
			  cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
			  &pcm);
	if (err < 0)
		return err;
	strscpy(pcm->name, cpcm->name, sizeof(pcm->name));
	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
	if (apcm == NULL) {
		snd_device_free(chip->card, pcm);
		return -ENOMEM;
	}
	apcm->chip = chip;
	apcm->pcm = pcm;
	apcm->codec = codec;
	apcm->info = cpcm;
	pcm->private_data = apcm;
	/* azx_pcm_free() unlinks and frees apcm when the pcm goes away */
	pcm->private_free = azx_pcm_free;
	if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
		pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	list_add_tail(&apcm->list, &chip->pcm_list);
	cpcm->pcm = pcm;
	for (s = 0; s < 2; s++) {
		if (cpcm->stream[s].substreams)
			snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
	}
	/* buffer pre-allocation */
	size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
	if (size > MAX_PREALLOC_SIZE)
		size = MAX_PREALLOC_SIZE;
	/* write-combined buffers for chips that need uncached memory */
	if (chip->uc_buffer)
		type = SNDRV_DMA_TYPE_DEV_WC_SG;
	snd_pcm_set_managed_buffer_all(pcm, type, chip->card->dev,
				       size, MAX_PREALLOC_SIZE);
	return 0;
}
757
/*
 * Extract the codec address from a verb (bits 31:28).  An out-of-range
 * address triggers a warning and falls back to codec 0.
 */
static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (addr < AZX_MAX_CODECS)
		return addr;

	snd_BUG();
	return 0;
}
769
770 /* receive a response */
/*
 * Receive a response via the RIRB, with an escalating fallback ladder on
 * timeout: first switch the bus to polling mode, then disable MSI, then
 * (outside codec probing) either request a bus reset or drop to
 * single-command mode.  Returns 0 on success, -EAGAIN to ask the caller
 * to retry after a bus reset, or -EIO when no recovery is possible.
 */
static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct azx *chip = bus_to_azx(bus);
	struct hda_bus *hbus = &chip->bus;
	int err;

 again:
	err = snd_hdac_bus_get_response(bus, addr, res);
	if (!err)
		return 0;

	if (hbus->no_response_fallback)
		return -EIO;

	/* fallback 1: IRQ delivery may be broken; try polling */
	if (!bus->polling_mode) {
		dev_warn(chip->card->dev,
			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		bus->polling_mode = 1;
		goto again;
	}

	/* fallback 2: MSI may be broken; fall back to legacy interrupts */
	if (chip->msi) {
		dev_warn(chip->card->dev,
			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
			 bus->last_cmd[addr]);
		if (chip->ops->disable_msi_reset_irq &&
		    chip->ops->disable_msi_reset_irq(chip) < 0)
			return -EIO;
		goto again;
	}

	if (chip->probing) {
		/* If this critical timeout happens during the codec probing
		 * phase, this is likely an access to a non-existing codec
		 * slot.  Better to return an error and reset the system.
		 */
		return -EIO;
	}

	/* no fallback mechanism? */
	if (!chip->fallback_to_single_cmd)
		return -EIO;

	/* a fatal communication error; need either to reset or to fallback
	 * to the single_cmd mode
	 */
	if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
		hbus->response_reset = 1;
		dev_err(chip->card->dev,
			"No response from codec, resetting bus: last cmd=0x%08x\n",
			bus->last_cmd[addr]);
		return -EAGAIN; /* give a chance to retry */
	}

	dev_err(chip->card->dev,
		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
		bus->last_cmd[addr]);
	chip->single_cmd = 1;
	hbus->response_reset = 0;
	snd_hdac_bus_stop_cmd_io(bus);
	return -EIO;
}
835
836 /*
837 * Use the single immediate command instead of CORB/RIRB for simplicity
838 *
839 * Note: according to Intel, this is not preferred use. The command was
840 * intended for the BIOS only, and may get confused with unsolicited
841 * responses. So, we shouldn't use it for normal operation from the
842 * driver.
843 * I left the codes, however, for debugging/testing purposes.
844 */
845
846 /* receive a response */
/*
 * Poll the immediate-response register for up to ~50us until the valid
 * bit is set, stashing the response in the rirb.res slot for the codec
 * address.  Returns 0 on success, -EIO on timeout (slot set to -1).
 */
static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
{
	int timeout = 50;

	while (timeout--) {
		/* check IRV busy bit */
		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
			/* reuse rirb.res as the response return value */
			azx_bus(chip)->rirb.res[addr] = azx_readl(chip, IR);
			return 0;
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
			azx_readw(chip, IRS));
	azx_bus(chip)->rirb.res[addr] = -1;
	return -EIO;
}
866
867 /* send a command */
/*
 * Send a verb through the immediate command register: wait up to ~50us
 * for the busy bit to clear, write the verb, set busy, then wait for the
 * response.  Returns -EIO when the interface never becomes free.
 */
static int azx_single_send_cmd(struct hdac_bus *bus, u32 val)
{
	struct azx *chip = bus_to_azx(bus);
	unsigned int addr = azx_command_addr(val);
	int timeout = 50;

	/* remembered for the timeout diagnostics in the RIRB path */
	bus->last_cmd[azx_command_addr(val)] = val;
	while (timeout--) {
		/* check ICB busy bit */
		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
			/* Clear IRV valid bit */
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_VALID);
			azx_writel(chip, IC, val);
			azx_writew(chip, IRS, azx_readw(chip, IRS) |
				   AZX_IRS_BUSY);
			return azx_single_wait_for_response(chip, addr);
		}
		udelay(1);
	}
	if (printk_ratelimit())
		dev_dbg(chip->card->dev,
			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
			azx_readw(chip, IRS), val);
	return -EIO;
}
894
895 /* receive a response */
/* receive a response */
/* return the response cached by azx_single_wait_for_response() */
static int azx_single_get_response(struct hdac_bus *bus, unsigned int addr,
				   unsigned int *res)
{
	if (res)
		*res = bus->rirb.res[addr];
	return 0;
}
903
904 /*
905 * The below are the main callbacks from hda_codec.
906 *
907 * They are just the skeleton to call sub-callbacks according to the
908 * current setting of chip->single_cmd.
909 */
910
911 /* send a command */
azx_send_cmd(struct hdac_bus * bus,unsigned int val)912 static int azx_send_cmd(struct hdac_bus *bus, unsigned int val)
913 {
914 struct azx *chip = bus_to_azx(bus);
915
916 if (chip->disabled)
917 return 0;
918 if (chip->single_cmd || bus->use_pio_for_commands)
919 return azx_single_send_cmd(bus, val);
920 else
921 return snd_hdac_bus_send_cmd(bus, val);
922 }
923
924 /* get a response */
azx_get_response(struct hdac_bus * bus,unsigned int addr,unsigned int * res)925 static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
926 unsigned int *res)
927 {
928 struct azx *chip = bus_to_azx(bus);
929
930 if (chip->disabled)
931 return 0;
932 if (chip->single_cmd || bus->use_pio_for_commands)
933 return azx_single_get_response(bus, addr, res);
934 else
935 return azx_rirb_get_response(bus, addr, res);
936 }
937
/* command/response ops installed on the hdac_bus core */
static const struct hdac_bus_ops bus_core_ops = {
	.command = azx_send_cmd,
	.get_response = azx_get_response,
};
942
943 #ifdef CONFIG_SND_HDA_DSP_LOADER
944 /*
945 * DSP loading code (e.g. for CA0132)
946 */
947
948 /* use the first stream for loading DSP */
/* use the first stream for loading DSP */
/*
 * Find the stream reserved for DSP loading: the first playback stream
 * (index == playback_index_offset).  Returns NULL if none exists.
 */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
	struct hdac_bus *bus = azx_bus(chip);
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list)
		if (s->index == chip->playback_index_offset)
			return stream_to_azx_dev(s);

	return NULL;
}
961
/*
 * Prepare the DSP-loader stream: snapshot the azx_dev state if the
 * stream is currently opened by a PCM (restored on failure here and in
 * snd_hda_codec_load_dsp_cleanup()), then allocate the transfer buffer
 * and set up the stream via snd_hdac_dsp_prepare().
 */
int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
				   unsigned int byte_size,
				   struct snd_dma_buffer *bufp)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev;
	struct hdac_stream *hstr;
	bool saved = false;
	int err;

	azx_dev = azx_get_dsp_loader_dev(chip);
	hstr = azx_stream(azx_dev);
	spin_lock_irq(&bus->reg_lock);
	if (hstr->opened) {
		/* stream is in PCM use; save its state for later restore */
		chip->saved_azx_dev = *azx_dev;
		saved = true;
	}
	spin_unlock_irq(&bus->reg_lock);

	err = snd_hdac_dsp_prepare(hstr, format, byte_size, bufp);
	if (err < 0) {
		spin_lock_irq(&bus->reg_lock);
		if (saved)
			*azx_dev = chip->saved_azx_dev;
		spin_unlock_irq(&bus->reg_lock);
		return err;
	}

	/* force a fresh PCM prepare after the loader releases the stream */
	hstr->prepared = 0;
	return err;
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_prepare);
995
/* start/stop the DMA transfer on the DSP-loader stream */
void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);

	snd_hdac_dsp_trigger(azx_stream(azx_dev), start);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_trigger);
1005
/* release the DSP loader stream and restore its saved PCM state */
void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
				    struct snd_dma_buffer *dmab)
{
	struct hdac_bus *bus = &codec->bus->core;
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
	struct hdac_stream *hstr = azx_stream(azx_dev);

	/* nothing to do unless a DSP buffer was allocated and the stream
	 * is still locked for DSP loading
	 */
	if (!dmab->area || !hstr->locked)
		return;

	snd_hdac_dsp_cleanup(hstr, dmab);
	spin_lock_irq(&bus->reg_lock);
	/* restore the state snapshotted by ..._load_dsp_prepare() if the
	 * stream was opened for normal PCM use at that time
	 */
	if (hstr->opened)
		*azx_dev = chip->saved_azx_dev;
	hstr->locked = false;
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hda_codec_load_dsp_cleanup);
1025 #endif /* CONFIG_SND_HDA_DSP_LOADER */
1026
1027 /*
1028 * reset and start the controller registers
1029 */
/*
 * reset and start the controller registers
 */
void azx_init_chip(struct azx *chip, bool full_reset)
{
	/* nothing more to do when the core helper performed no (re-)init */
	if (!snd_hdac_bus_init_chip(azx_bus(chip), full_reset))
		return;

	/* correct RINTCNT for CXT */
	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
		azx_writew(chip, RINTCNT, 0xc0);
}
EXPORT_SYMBOL_GPL(azx_init_chip);
1039
/* stop all controller streams via the hdac core helper */
void azx_stop_all_streams(struct azx *chip)
{
	snd_hdac_stop_streams(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_all_streams);
1047
/* stop the controller chip via the hdac core helper */
void azx_stop_chip(struct azx *chip)
{
	snd_hdac_bus_stop_chip(azx_bus(chip));
}
EXPORT_SYMBOL_GPL(azx_stop_chip);
1053
1054 /*
1055 * interrupt handler
1056 */
/*
 * Per-stream interrupt callback, invoked from
 * snd_hdac_bus_handle_stream_irq() with bus->reg_lock held.
 */
static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
{
	struct azx *chip = bus_to_azx(bus);
	struct azx_dev *azx_dev = stream_to_azx_dev(s);

	/* check whether this IRQ is really acceptable */
	if (!chip->ops->position_check ||
	    chip->ops->position_check(chip, azx_dev)) {
		/* reg_lock is dropped across the PCM callback and re-taken
		 * afterwards — presumably to avoid lock nesting with the
		 * PCM core inside snd_pcm_period_elapsed()
		 */
		spin_unlock(&bus->reg_lock);
		snd_pcm_period_elapsed(azx_stream(azx_dev)->substream);
		spin_lock(&bus->reg_lock);
	}
}
1070
/*
 * Main controller interrupt handler.
 *
 * Repeatedly reads INTSTS, dispatching stream interrupts through
 * stream_update() and servicing RIRB (codec response) interrupts, until
 * nothing is active or the retry limit is hit.
 */
irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct hdac_bus *bus = azx_bus(chip);
	u32 status;
	bool active, handled = false;
	int repeat = 0; /* count for avoiding endless loop */

	/* not ours while the device is not runtime-active */
	if (azx_has_pm_runtime(chip))
		if (!pm_runtime_active(chip->card->dev))
			return IRQ_NONE;

	spin_lock(&bus->reg_lock);

	if (chip->disabled)
		goto unlock;

	do {
		status = azx_readl(chip, INTSTS);
		/* 0xffffffff reads back when the device has dropped off the
		 * bus — treat it like no pending interrupt
		 */
		if (status == 0 || status == 0xffffffff)
			break;

		handled = true;
		active = false;
		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
			active = true;

		status = azx_readb(chip, RIRBSTS);
		if (status & RIRB_INT_MASK) {
			/*
			 * Clearing the interrupt status here ensures that no
			 * interrupt gets masked after the RIRB wp is read in
			 * snd_hdac_bus_update_rirb. This avoids a possible
			 * race condition where codec response in RIRB may
			 * remain unserviced by IRQ, eventually falling back
			 * to polling mode in azx_rirb_get_response.
			 */
			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
			active = true;
			if (status & RIRB_INT_RESPONSE) {
				/* CXT chips need a short delay before the
				 * RIRB can be read back reliably
				 */
				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
					udelay(80);
				snd_hdac_bus_update_rirb(bus);
			}
		}
	} while (active && ++repeat < 10);

 unlock:
	spin_unlock(&bus->reg_lock);

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);
1124
1125 /*
1126 * Codec initerface
1127 */
1128
1129 /*
1130 * Probe the given codec address
1131 */
/*
 * Probe the given codec address: read the vendor-ID parameter of the
 * root node and see whether any response comes back.
 * Returns 0 when a codec responds, -EIO otherwise.
 */
static int probe_codec(struct azx *chip, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	struct hdac_bus *bus = azx_bus(chip);
	int err;
	unsigned int res = -1;

	mutex_lock(&bus->cmd_mutex);
	/* NOTE(review): chip->probing presumably relaxes error handling in
	 * the response path while probing — confirm against azx_get_response
	 */
	chip->probing = 1;
	azx_send_cmd(bus, cmd);
	err = azx_get_response(bus, addr, &res);
	chip->probing = 0;
	mutex_unlock(&bus->cmd_mutex);
	if (err < 0 || res == -1)
		return -EIO;
	dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
	return 0;
}
1151
/* full bus reset: stop and re-init the controller, then reset codecs
 * if the controller chip came back up initialized
 */
void snd_hda_bus_reset(struct hda_bus *bus)
{
	struct azx *chip = bus_to_azx(&bus->core);

	/* NOTE(review): in_reset presumably fences off concurrent bus
	 * access during the reset — confirm in the callers
	 */
	bus->in_reset = 1;
	azx_stop_chip(chip);
	azx_init_chip(chip, true);
	if (bus->core.chip_init)
		snd_hda_bus_reset_codecs(bus);
	bus->in_reset = 0;
}
1163
1164 /* HD-audio bus initialization */
/* HD-audio bus initialization: set up the core bus and translate the
 * controller's driver_caps flags into core bus options
 */
int azx_bus_init(struct azx *chip, const char *model)
{
	struct hda_bus *bus = &chip->bus;
	int err;

	err = snd_hdac_bus_init(&bus->core, chip->card->dev, &bus_core_ops);
	if (err < 0)
		return err;

	bus->card = chip->card;
	mutex_init(&bus->prepare_mutex);
	bus->pci = chip->pci;
	bus->modelname = model;
	bus->mixer_assigned = -1;
	bus->core.snoop = azx_snoop(chip);
	/* the position buffer is needed unless both directions read the
	 * position directly from LPIB
	 */
	if (chip->get_position[0] != azx_get_pos_lpib ||
	    chip->get_position[1] != azx_get_pos_lpib)
		bus->core.use_posbuf = true;
	bus->core.bdl_pos_adj = chip->bdl_pos_adj;
	if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
		bus->core.corbrp_self_clear = true;

	if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
		bus->core.align_bdle_4k = true;

	if (chip->driver_caps & AZX_DCAPS_PIO_COMMANDS)
		bus->core.use_pio_for_commands = true;

	/* enable sync_write flag for stable communication as default */
	bus->core.sync_write = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(azx_bus_init);
1199
1200 /* Probe codecs */
/* Probe codecs: check each enabled slot for a responding codec, then
 * create codec instances for the slots that answered.
 * Returns 0 when at least one codec was created, -ENXIO otherwise.
 */
int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
{
	struct hdac_bus *bus = azx_bus(chip);
	int c, codecs, err;

	codecs = 0;
	if (!max_slots)
		max_slots = AZX_DEFAULT_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			if (probe_codec(chip, c) < 0) {
				/* Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(chip->card->dev,
					 "Codec #%d probe error; disabling it...\n", c);
				bus->codec_mask &= ~(1 << c);
				/* no codecs */
				if (bus->codec_mask == 0)
					break;
				/* More badly, accessing to a non-existing
				 * codec often screws up the controller chip,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller chip to
				 * get back to the sanity state.
				 */
				azx_stop_chip(chip);
				azx_init_chip(chip, true);
			}
		}
	}

	/* Then create codec instances */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) {
			struct hda_codec *codec;
			err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
			if (err < 0)
				continue;	/* skip the slot, keep trying others */
			/* propagate controller-level settings to the codec */
			codec->jackpoll_interval = chip->jackpoll_interval;
			codec->beep_mode = chip->beep_mode;
			codec->ctl_dev_id = chip->ctl_dev_id;
			codecs++;
		}
	}
	if (!codecs) {
		dev_err(chip->card->dev, "no codecs initialized\n");
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(azx_probe_codecs);
1256
1257 /* configure each codec instance */
azx_codec_configure(struct azx * chip)1258 int azx_codec_configure(struct azx *chip)
1259 {
1260 struct hda_codec *codec, *next;
1261 int success = 0;
1262
1263 list_for_each_codec(codec, &chip->bus) {
1264 if (!snd_hda_codec_configure(codec))
1265 success++;
1266 }
1267
1268 if (success) {
1269 /* unregister failed codecs if any codec has been probed */
1270 list_for_each_codec_safe(codec, next, &chip->bus) {
1271 if (!codec->configured) {
1272 codec_err(codec, "Unable to configure, disabling\n");
1273 snd_hdac_device_unregister(&codec->core);
1274 }
1275 }
1276 }
1277
1278 return success ? 0 : -ENODEV;
1279 }
1280 EXPORT_SYMBOL_GPL(azx_codec_configure);
1281
stream_direction(struct azx * chip,unsigned char index)1282 static int stream_direction(struct azx *chip, unsigned char index)
1283 {
1284 if (index >= chip->capture_index_offset &&
1285 index < chip->capture_index_offset + chip->capture_streams)
1286 return SNDRV_PCM_STREAM_CAPTURE;
1287 return SNDRV_PCM_STREAM_PLAYBACK;
1288 }
1289
1290 /* initialize SD streams */
/* initialize SD streams */
int azx_init_streams(struct azx *chip)
{
	int i;
	int stream_tags[2] = { 0, 0 };	/* next tag to hand out, per direction */

	/* initialize each stream (aka device)
	 * assign the starting bdl address to each stream (device)
	 * and initialize
	 */
	for (i = 0; i < chip->num_streams; i++) {
		struct azx_dev *azx_dev = kzalloc(sizeof(*azx_dev), GFP_KERNEL);
		int dir, tag;

		/* NOTE(review): streams allocated so far are not freed here;
		 * presumably the caller cleans up via azx_free_streams() —
		 * confirm in the callers
		 */
		if (!azx_dev)
			return -ENOMEM;

		dir = stream_direction(chip, i);
		/* stream tag must be unique throughout
		 * the stream direction group,
		 * valid values 1...15
		 * use separate stream tag if the flag
		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
		 */
		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
			tag = ++stream_tags[dir];
		else
			tag = i + 1;
		snd_hdac_stream_init(azx_bus(chip), azx_stream(azx_dev),
				     i, dir, tag);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(azx_init_streams);
1325
azx_free_streams(struct azx * chip)1326 void azx_free_streams(struct azx *chip)
1327 {
1328 struct hdac_bus *bus = azx_bus(chip);
1329 struct hdac_stream *s;
1330
1331 while (!list_empty(&bus->stream_list)) {
1332 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
1333 list_del(&s->list);
1334 kfree(stream_to_azx_dev(s));
1335 }
1336 }
1337 EXPORT_SYMBOL_GPL(azx_free_streams);
1338