xref: /linux/sound/core/pcm_lib.c (revision 011be872643446a9b7c4485cfc8b5f50b0f93a13)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  *                   Abramo Bagnara <abramo@alsa-project.org>
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
20 
21 #include "pcm_local.h"
22 
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
26 #else
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
31 #endif
32 
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35 
36 /*
37  * fill ring buffer with silence
38  * runtime->silence_start: starting pointer to silence area
39  * runtime->silence_filled: size filled with silence
40  * runtime->silence_threshold: threshold from application
41  * runtime->silence_size: maximal size from application
42  *
43  * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
44  */
45 void snd_pcm_playback_silence(struct snd_pcm_substream *substream)
46 {
47 	struct snd_pcm_runtime *runtime = substream->runtime;
48 	snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
49 	snd_pcm_sframes_t added, hw_avail, frames;
50 	snd_pcm_uframes_t noise_dist, ofs, transfer;
51 	int err;
52 
53 	added = appl_ptr - runtime->silence_start;
54 	if (added) {
55 		if (added < 0)
56 			added += runtime->boundary;
57 		if (added < runtime->silence_filled)
58 			runtime->silence_filled -= added;
59 		else
60 			runtime->silence_filled = 0;
61 		runtime->silence_start = appl_ptr;
62 	}
63 
64 	// This will "legitimately" turn negative on underrun, and will be mangled
65 	// into a huge number by the boundary crossing handling. The initial state
66 	// might also be not quite sane. The code below MUST account for these cases.
67 	hw_avail = appl_ptr - runtime->status->hw_ptr;
68 	if (hw_avail < 0)
69 		hw_avail += runtime->boundary;
70 
71 	noise_dist = hw_avail + runtime->silence_filled;
72 	if (runtime->silence_size < runtime->boundary) {
73 		frames = runtime->silence_threshold - noise_dist;
74 		if (frames <= 0)
75 			return;
76 		if (frames > runtime->silence_size)
77 			frames = runtime->silence_size;
78 	} else {
79 		frames = runtime->buffer_size - noise_dist;
80 		if (frames <= 0)
81 			return;
82 	}
83 
84 	if (snd_BUG_ON(frames > runtime->buffer_size))
85 		return;
86 	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
87 	do {
88 		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
89 		err = fill_silence_frames(substream, ofs, transfer);
90 		snd_BUG_ON(err < 0);
91 		runtime->silence_filled += transfer;
92 		frames -= transfer;
93 		ofs = 0;
94 	} while (frames > 0);
95 	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
96 }
97 
98 #ifdef CONFIG_SND_DEBUG
99 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
100 			   char *name, size_t len)
101 {
102 	snprintf(name, len, "pcmC%dD%d%c:%d",
103 		 substream->pcm->card->number,
104 		 substream->pcm->device,
105 		 substream->stream ? 'c' : 'p',
106 		 substream->number);
107 }
108 EXPORT_SYMBOL(snd_pcm_debug_name);
109 #endif
110 
111 #define XRUN_DEBUG_BASIC	(1<<0)
112 #define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
113 #define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
114 
115 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
116 
117 #define xrun_debug(substream, mask) \
118 			((substream)->pstr->xrun_debug & (mask))
119 #else
120 #define xrun_debug(substream, mask)	0
121 #endif
122 
123 #define dump_stack_on_xrun(substream) do {			\
124 		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
125 			dump_stack();				\
126 	} while (0)
127 
128 /* call with stream lock held */
129 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
130 {
131 	struct snd_pcm_runtime *runtime = substream->runtime;
132 
133 	trace_xrun(substream);
134 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
135 		struct timespec64 tstamp;
136 
137 		snd_pcm_gettime(runtime, &tstamp);
138 		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
139 		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
140 	}
141 	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
142 	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
143 		char name[16];
144 		snd_pcm_debug_name(substream, name, sizeof(name));
145 		pcm_warn(substream->pcm, "XRUN: %s\n", name);
146 		dump_stack_on_xrun(substream);
147 	}
148 }
149 
150 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
151 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
152 	do {								\
153 		trace_hw_ptr_error(substream, reason);	\
154 		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
155 			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
156 					   (in_interrupt) ? 'Q' : 'P', ##args);	\
157 			dump_stack_on_xrun(substream);			\
158 		}							\
159 	} while (0)
160 
161 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
162 
163 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
164 
165 #endif
166 
167 int snd_pcm_update_state(struct snd_pcm_substream *substream,
168 			 struct snd_pcm_runtime *runtime)
169 {
170 	snd_pcm_uframes_t avail;
171 
172 	avail = snd_pcm_avail(substream);
173 	if (avail > runtime->avail_max)
174 		runtime->avail_max = avail;
175 	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
176 		if (avail >= runtime->buffer_size) {
177 			snd_pcm_drain_done(substream);
178 			return -EPIPE;
179 		}
180 	} else {
181 		if (avail >= runtime->stop_threshold) {
182 			__snd_pcm_xrun(substream);
183 			return -EPIPE;
184 		}
185 	}
186 	if (runtime->twake) {
187 		if (avail >= runtime->twake)
188 			wake_up(&runtime->tsleep);
189 	} else if (avail >= runtime->control->avail_min)
190 		wake_up(&runtime->sleep);
191 	return 0;
192 }
193 
194 static void update_audio_tstamp(struct snd_pcm_substream *substream,
195 				struct timespec64 *curr_tstamp,
196 				struct timespec64 *audio_tstamp)
197 {
198 	struct snd_pcm_runtime *runtime = substream->runtime;
199 	u64 audio_frames, audio_nsecs;
200 	struct timespec64 driver_tstamp;
201 
202 	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
203 		return;
204 
205 	if (!(substream->ops->get_time_info) ||
206 		(runtime->audio_tstamp_report.actual_type ==
207 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
208 
209 		/*
210 		 * provide audio timestamp derived from pointer position
211 		 * add delay only if requested
212 		 */
213 
214 		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
215 
216 		if (runtime->audio_tstamp_config.report_delay) {
217 			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
218 				audio_frames -=  runtime->delay;
219 			else
220 				audio_frames +=  runtime->delay;
221 		}
222 		audio_nsecs = div_u64(audio_frames * 1000000000LL,
223 				runtime->rate);
224 		*audio_tstamp = ns_to_timespec64(audio_nsecs);
225 	}
226 
227 	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
228 	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
229 		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
230 		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
231 		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
232 		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
233 	}
234 
235 
236 	/*
237 	 * re-take a driver timestamp to let apps detect if the reference tstamp
238 	 * read by low-level hardware was provided with a delay
239 	 */
240 	snd_pcm_gettime(substream->runtime, &driver_tstamp);
241 	runtime->driver_tstamp = driver_tstamp;
242 }
243 
244 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
245 				  unsigned int in_interrupt)
246 {
247 	struct snd_pcm_runtime *runtime = substream->runtime;
248 	snd_pcm_uframes_t pos;
249 	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
250 	snd_pcm_sframes_t hdelta, delta;
251 	unsigned long jdelta;
252 	unsigned long curr_jiffies;
253 	struct timespec64 curr_tstamp;
254 	struct timespec64 audio_tstamp;
255 	int crossed_boundary = 0;
256 
257 	old_hw_ptr = runtime->status->hw_ptr;
258 
259 	/*
260 	 * group pointer, time and jiffies reads to allow for more
261 	 * accurate correlations/corrections.
262 	 * The values are stored at the end of this routine after
263 	 * corrections for hw_ptr position
264 	 */
265 	pos = substream->ops->pointer(substream);
266 	curr_jiffies = jiffies;
267 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
268 		if ((substream->ops->get_time_info) &&
269 			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
270 			substream->ops->get_time_info(substream, &curr_tstamp,
271 						&audio_tstamp,
272 						&runtime->audio_tstamp_config,
273 						&runtime->audio_tstamp_report);
274 
275 			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
276 			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
277 				snd_pcm_gettime(runtime, &curr_tstamp);
278 		} else
279 			snd_pcm_gettime(runtime, &curr_tstamp);
280 	}
281 
282 	if (pos == SNDRV_PCM_POS_XRUN) {
283 		__snd_pcm_xrun(substream);
284 		return -EPIPE;
285 	}
286 	if (pos >= runtime->buffer_size) {
287 		if (printk_ratelimit()) {
288 			char name[16];
289 			snd_pcm_debug_name(substream, name, sizeof(name));
290 			pcm_err(substream->pcm,
291 				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
292 				name, pos, runtime->buffer_size,
293 				runtime->period_size);
294 		}
295 		pos = 0;
296 	}
297 	pos -= pos % runtime->min_align;
298 	trace_hwptr(substream, pos, in_interrupt);
299 	hw_base = runtime->hw_ptr_base;
300 	new_hw_ptr = hw_base + pos;
301 	if (in_interrupt) {
302 		/* we know that one period was processed */
303 		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
304 		delta = runtime->hw_ptr_interrupt + runtime->period_size;
305 		if (delta > new_hw_ptr) {
306 			/* check for double acknowledged interrupts */
307 			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
308 			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
309 				hw_base += runtime->buffer_size;
310 				if (hw_base >= runtime->boundary) {
311 					hw_base = 0;
312 					crossed_boundary++;
313 				}
314 				new_hw_ptr = hw_base + pos;
315 				goto __delta;
316 			}
317 		}
318 	}
319 	/* new_hw_ptr might be lower than old_hw_ptr when the */
320 	/* pointer crosses the end of the ring buffer */
321 	if (new_hw_ptr < old_hw_ptr) {
322 		hw_base += runtime->buffer_size;
323 		if (hw_base >= runtime->boundary) {
324 			hw_base = 0;
325 			crossed_boundary++;
326 		}
327 		new_hw_ptr = hw_base + pos;
328 	}
329       __delta:
330 	delta = new_hw_ptr - old_hw_ptr;
331 	if (delta < 0)
332 		delta += runtime->boundary;
333 
334 	if (runtime->no_period_wakeup) {
335 		snd_pcm_sframes_t xrun_threshold;
336 		/*
337 		 * Without regular period interrupts, we have to check
338 		 * the elapsed time to detect xruns.
339 		 */
340 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
341 		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
342 			goto no_delta_check;
343 		hdelta = jdelta - delta * HZ / runtime->rate;
344 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
345 		while (hdelta > xrun_threshold) {
346 			delta += runtime->buffer_size;
347 			hw_base += runtime->buffer_size;
348 			if (hw_base >= runtime->boundary) {
349 				hw_base = 0;
350 				crossed_boundary++;
351 			}
352 			new_hw_ptr = hw_base + pos;
353 			hdelta -= runtime->hw_ptr_buffer_jiffies;
354 		}
355 		goto no_delta_check;
356 	}
357 
358 	/* something must be really wrong */
359 	if (delta >= runtime->buffer_size + runtime->period_size) {
360 		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
361 			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
362 			     substream->stream, (long)pos,
363 			     (long)new_hw_ptr, (long)old_hw_ptr);
364 		return 0;
365 	}
366 
367 	/* Do jiffies check only in xrun_debug mode */
368 	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
369 		goto no_jiffies_check;
370 
371 	/* Skip the jiffies check for hardware with the BATCH flag.
372 	 * Such hardware usually just increases the position at each IRQ,
373 	 * thus it can't give any strange position.
374 	 */
375 	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
376 		goto no_jiffies_check;
377 	hdelta = delta;
378 	if (hdelta < runtime->delay)
379 		goto no_jiffies_check;
380 	hdelta -= runtime->delay;
381 	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
382 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
383 		delta = jdelta /
384 			(((runtime->period_size * HZ) / runtime->rate)
385 								+ HZ/100);
386 		/* move new_hw_ptr according to jiffies, not the pos variable */
387 		new_hw_ptr = old_hw_ptr;
388 		hw_base = delta;
389 		/* use loop to avoid checks for delta overflows */
390 		/* the delta value is small or zero in most cases */
391 		while (delta > 0) {
392 			new_hw_ptr += runtime->period_size;
393 			if (new_hw_ptr >= runtime->boundary) {
394 				new_hw_ptr -= runtime->boundary;
395 				crossed_boundary--;
396 			}
397 			delta--;
398 		}
399 		/* align hw_base to buffer_size */
400 		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
401 			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
402 			     (long)pos, (long)hdelta,
403 			     (long)runtime->period_size, jdelta,
404 			     ((hdelta * HZ) / runtime->rate), hw_base,
405 			     (unsigned long)old_hw_ptr,
406 			     (unsigned long)new_hw_ptr);
407 		/* reset values to proper state */
408 		delta = 0;
409 		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
410 	}
411  no_jiffies_check:
412 	if (delta > runtime->period_size + runtime->period_size / 2) {
413 		hw_ptr_error(substream, in_interrupt,
414 			     "Lost interrupts?",
415 			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
416 			     substream->stream, (long)delta,
417 			     (long)new_hw_ptr,
418 			     (long)old_hw_ptr);
419 	}
420 
421  no_delta_check:
422 	if (runtime->status->hw_ptr == new_hw_ptr) {
423 		runtime->hw_ptr_jiffies = curr_jiffies;
424 		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
425 		return 0;
426 	}
427 
428 	if (in_interrupt) {
429 		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
430 		if (delta < 0)
431 			delta += runtime->boundary;
432 		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
433 		runtime->hw_ptr_interrupt += delta;
434 		if (runtime->hw_ptr_interrupt >= runtime->boundary)
435 			runtime->hw_ptr_interrupt -= runtime->boundary;
436 	}
437 	runtime->hw_ptr_base = hw_base;
438 	runtime->status->hw_ptr = new_hw_ptr;
439 	runtime->hw_ptr_jiffies = curr_jiffies;
440 	if (crossed_boundary) {
441 		snd_BUG_ON(crossed_boundary != 1);
442 		runtime->hw_ptr_wrap += runtime->boundary;
443 	}
444 
445 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
446 	    runtime->silence_size > 0)
447 		snd_pcm_playback_silence(substream);
448 
449 	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
450 
451 	return snd_pcm_update_state(substream, runtime);
452 }
453 
454 /* CAUTION: call it with irq disabled */
455 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
456 {
457 	return snd_pcm_update_hw_ptr0(substream, 0);
458 }
459 
460 /**
461  * snd_pcm_set_ops - set the PCM operators
462  * @pcm: the pcm instance
463  * @direction: stream direction, SNDRV_PCM_STREAM_XXX
464  * @ops: the operator table
465  *
466  * Sets the given PCM operators to the pcm instance.
467  */
468 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
469 		     const struct snd_pcm_ops *ops)
470 {
471 	struct snd_pcm_str *stream = &pcm->streams[direction];
472 	struct snd_pcm_substream *substream;
473 
474 	for (substream = stream->substream; substream != NULL; substream = substream->next)
475 		substream->ops = ops;
476 }
477 EXPORT_SYMBOL(snd_pcm_set_ops);
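
/*
 * Illustrative sketch (not part of this file): a hypothetical driver would
 * typically attach its operator tables right after creating the PCM device;
 * my_playback_ops, my_capture_ops and the snd_pcm_new() arguments below are
 * placeholders.
 *
 *	struct snd_pcm *pcm;
 *	int err = snd_pcm_new(card, "My Chip", 0, 1, 1, &pcm);
 *	if (err < 0)
 *		return err;
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 */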
478 
479 /**
480  * snd_pcm_set_sync - set the PCM sync id
481  * @substream: the pcm substream
482  *
483  * Sets the PCM sync identifier for the card.
484  */
485 void snd_pcm_set_sync(struct snd_pcm_substream *substream)
486 {
487 	struct snd_pcm_runtime *runtime = substream->runtime;
488 
489 	runtime->sync.id32[0] = substream->pcm->card->number;
490 	runtime->sync.id32[1] = -1;
491 	runtime->sync.id32[2] = -1;
492 	runtime->sync.id32[3] = -1;
493 }
494 EXPORT_SYMBOL(snd_pcm_set_sync);
495 
496 /*
497  *  Standard ioctl routine
498  */
499 
500 static inline unsigned int div32(unsigned int a, unsigned int b,
501 				 unsigned int *r)
502 {
503 	if (b == 0) {
504 		*r = 0;
505 		return UINT_MAX;
506 	}
507 	*r = a % b;
508 	return a / b;
509 }
510 
511 static inline unsigned int div_down(unsigned int a, unsigned int b)
512 {
513 	if (b == 0)
514 		return UINT_MAX;
515 	return a / b;
516 }
517 
518 static inline unsigned int div_up(unsigned int a, unsigned int b)
519 {
520 	unsigned int r;
521 	unsigned int q;
522 	if (b == 0)
523 		return UINT_MAX;
524 	q = div32(a, b, &r);
525 	if (r)
526 		++q;
527 	return q;
528 }
529 
530 static inline unsigned int mul(unsigned int a, unsigned int b)
531 {
532 	if (a == 0)
533 		return 0;
534 	if (div_down(UINT_MAX, a) < b)
535 		return UINT_MAX;
536 	return a * b;
537 }
538 
539 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
540 				    unsigned int c, unsigned int *r)
541 {
542 	u_int64_t n = (u_int64_t) a * b;
543 	if (c == 0) {
544 		*r = 0;
545 		return UINT_MAX;
546 	}
547 	n = div_u64_rem(n, c, r);
548 	if (n >= UINT_MAX) {
549 		*r = 0;
550 		return UINT_MAX;
551 	}
552 	return n;
553 }
554 
555 /**
556  * snd_interval_refine - refine the interval value of configurator
557  * @i: the interval value to refine
558  * @v: the interval value to refer to
559  *
560  * Refines the interval value with the reference value.
561  * The interval is changed to the range satisfying both intervals.
562  * The interval status (min, max, integer, etc.) is evaluated.
563  *
564  * Return: Positive if the value is changed, zero if it's not changed, or a
565  * negative error code.
566  */
567 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
568 {
569 	int changed = 0;
570 	if (snd_BUG_ON(snd_interval_empty(i)))
571 		return -EINVAL;
572 	if (i->min < v->min) {
573 		i->min = v->min;
574 		i->openmin = v->openmin;
575 		changed = 1;
576 	} else if (i->min == v->min && !i->openmin && v->openmin) {
577 		i->openmin = 1;
578 		changed = 1;
579 	}
580 	if (i->max > v->max) {
581 		i->max = v->max;
582 		i->openmax = v->openmax;
583 		changed = 1;
584 	} else if (i->max == v->max && !i->openmax && v->openmax) {
585 		i->openmax = 1;
586 		changed = 1;
587 	}
588 	if (!i->integer && v->integer) {
589 		i->integer = 1;
590 		changed = 1;
591 	}
592 	if (i->integer) {
593 		if (i->openmin) {
594 			i->min++;
595 			i->openmin = 0;
596 		}
597 		if (i->openmax) {
598 			i->max--;
599 			i->openmax = 0;
600 		}
601 	} else if (!i->openmin && !i->openmax && i->min == i->max)
602 		i->integer = 1;
603 	if (snd_interval_checkempty(i)) {
604 		snd_interval_none(i);
605 		return -EINVAL;
606 	}
607 	return changed;
608 }
609 EXPORT_SYMBOL(snd_interval_refine);
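
/*
 * Worked example of the refinement above: refining the interval
 * [8000, 48000] with the reference [44100, 192000] tightens only the
 * minimum, giving [44100, 48000] and a positive return value; refining it
 * with a disjoint reference such as [96000, 192000] empties the interval
 * and returns -EINVAL.
 */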
610 
611 static int snd_interval_refine_first(struct snd_interval *i)
612 {
613 	const unsigned int last_max = i->max;
614 
615 	if (snd_BUG_ON(snd_interval_empty(i)))
616 		return -EINVAL;
617 	if (snd_interval_single(i))
618 		return 0;
619 	i->max = i->min;
620 	if (i->openmin)
621 		i->max++;
622 	/* only exclude max value if also excluded before refine */
623 	i->openmax = (i->openmax && i->max >= last_max);
624 	return 1;
625 }
626 
627 static int snd_interval_refine_last(struct snd_interval *i)
628 {
629 	const unsigned int last_min = i->min;
630 
631 	if (snd_BUG_ON(snd_interval_empty(i)))
632 		return -EINVAL;
633 	if (snd_interval_single(i))
634 		return 0;
635 	i->min = i->max;
636 	if (i->openmax)
637 		i->min--;
638 	/* only exclude min value if also excluded before refine */
639 	i->openmin = (i->openmin && i->min <= last_min);
640 	return 1;
641 }
642 
643 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
644 {
645 	if (a->empty || b->empty) {
646 		snd_interval_none(c);
647 		return;
648 	}
649 	c->empty = 0;
650 	c->min = mul(a->min, b->min);
651 	c->openmin = (a->openmin || b->openmin);
652 	c->max = mul(a->max,  b->max);
653 	c->openmax = (a->openmax || b->openmax);
654 	c->integer = (a->integer && b->integer);
655 }
656 
657 /**
658  * snd_interval_div - refine the interval value with division
659  * @a: dividend
660  * @b: divisor
661  * @c: quotient
662  *
663  * c = a / b
664  *
665  * The result is stored in @c.
666  */
667 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
668 {
669 	unsigned int r;
670 	if (a->empty || b->empty) {
671 		snd_interval_none(c);
672 		return;
673 	}
674 	c->empty = 0;
675 	c->min = div32(a->min, b->max, &r);
676 	c->openmin = (r || a->openmin || b->openmax);
677 	if (b->min > 0) {
678 		c->max = div32(a->max, b->min, &r);
679 		if (r) {
680 			c->max++;
681 			c->openmax = 1;
682 		} else
683 			c->openmax = (a->openmax || b->openmin);
684 	} else {
685 		c->max = UINT_MAX;
686 		c->openmax = 0;
687 	}
688 	c->integer = 0;
689 }
690 
691 /**
692  * snd_interval_muldivk - refine the interval value
693  * @a: dividend 1
694  * @b: dividend 2
695  * @k: divisor (as integer)
696  * @c: result
697  *
698  * c = a * b / k
699  *
700  * The result is stored in @c.
701  */
702 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
703 		      unsigned int k, struct snd_interval *c)
704 {
705 	unsigned int r;
706 	if (a->empty || b->empty) {
707 		snd_interval_none(c);
708 		return;
709 	}
710 	c->empty = 0;
711 	c->min = muldiv32(a->min, b->min, k, &r);
712 	c->openmin = (r || a->openmin || b->openmin);
713 	c->max = muldiv32(a->max, b->max, k, &r);
714 	if (r) {
715 		c->max++;
716 		c->openmax = 1;
717 	} else
718 		c->openmax = (a->openmax || b->openmax);
719 	c->integer = 0;
720 }
721 
722 /**
723  * snd_interval_mulkdiv - refine the interval value
724  * @a: dividend 1
725  * @k: dividend 2 (as integer)
726  * @b: divisor
727  * @c: result
728  *
729  * c = a * k / b
730  *
731  * The result is stored in @c.
732  */
733 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
734 		      const struct snd_interval *b, struct snd_interval *c)
735 {
736 	unsigned int r;
737 	if (a->empty || b->empty) {
738 		snd_interval_none(c);
739 		return;
740 	}
741 	c->empty = 0;
742 	c->min = muldiv32(a->min, k, b->max, &r);
743 	c->openmin = (r || a->openmin || b->openmax);
744 	if (b->min > 0) {
745 		c->max = muldiv32(a->max, k, b->min, &r);
746 		if (r) {
747 			c->max++;
748 			c->openmax = 1;
749 		} else
750 			c->openmax = (a->openmax || b->openmin);
751 	} else {
752 		c->max = UINT_MAX;
753 		c->openmax = 0;
754 	}
755 	c->integer = 0;
756 }
757 
758 /* ---- */
759 
760 
761 /**
762  * snd_interval_ratnum - refine the interval value
763  * @i: interval to refine
764  * @rats_count: number of ratnum_t
765  * @rats: ratnum_t array
766  * @nump: pointer to store the resultant numerator
767  * @denp: pointer to store the resultant denominator
768  *
769  * Return: Positive if the value is changed, zero if it's not changed, or a
770  * negative error code.
771  */
772 int snd_interval_ratnum(struct snd_interval *i,
773 			unsigned int rats_count, const struct snd_ratnum *rats,
774 			unsigned int *nump, unsigned int *denp)
775 {
776 	unsigned int best_num, best_den;
777 	int best_diff;
778 	unsigned int k;
779 	struct snd_interval t;
780 	int err;
781 	unsigned int result_num, result_den;
782 	int result_diff;
783 
784 	best_num = best_den = best_diff = 0;
785 	for (k = 0; k < rats_count; ++k) {
786 		unsigned int num = rats[k].num;
787 		unsigned int den;
788 		unsigned int q = i->min;
789 		int diff;
790 		if (q == 0)
791 			q = 1;
792 		den = div_up(num, q);
793 		if (den < rats[k].den_min)
794 			continue;
795 		if (den > rats[k].den_max)
796 			den = rats[k].den_max;
797 		else {
798 			unsigned int r;
799 			r = (den - rats[k].den_min) % rats[k].den_step;
800 			if (r != 0)
801 				den -= r;
802 		}
803 		diff = num - q * den;
804 		if (diff < 0)
805 			diff = -diff;
806 		if (best_num == 0 ||
807 		    diff * best_den < best_diff * den) {
808 			best_diff = diff;
809 			best_den = den;
810 			best_num = num;
811 		}
812 	}
813 	if (best_den == 0) {
814 		i->empty = 1;
815 		return -EINVAL;
816 	}
817 	t.min = div_down(best_num, best_den);
818 	t.openmin = !!(best_num % best_den);
819 
820 	result_num = best_num;
821 	result_diff = best_diff;
822 	result_den = best_den;
823 	best_num = best_den = best_diff = 0;
824 	for (k = 0; k < rats_count; ++k) {
825 		unsigned int num = rats[k].num;
826 		unsigned int den;
827 		unsigned int q = i->max;
828 		int diff;
829 		if (q == 0) {
830 			i->empty = 1;
831 			return -EINVAL;
832 		}
833 		den = div_down(num, q);
834 		if (den > rats[k].den_max)
835 			continue;
836 		if (den < rats[k].den_min)
837 			den = rats[k].den_min;
838 		else {
839 			unsigned int r;
840 			r = (den - rats[k].den_min) % rats[k].den_step;
841 			if (r != 0)
842 				den += rats[k].den_step - r;
843 		}
844 		diff = q * den - num;
845 		if (diff < 0)
846 			diff = -diff;
847 		if (best_num == 0 ||
848 		    diff * best_den < best_diff * den) {
849 			best_diff = diff;
850 			best_den = den;
851 			best_num = num;
852 		}
853 	}
854 	if (best_den == 0) {
855 		i->empty = 1;
856 		return -EINVAL;
857 	}
858 	t.max = div_up(best_num, best_den);
859 	t.openmax = !!(best_num % best_den);
860 	t.integer = 0;
861 	err = snd_interval_refine(i, &t);
862 	if (err < 0)
863 		return err;
864 
865 	if (snd_interval_single(i)) {
866 		if (best_diff * result_den < result_diff * best_den) {
867 			result_num = best_num;
868 			result_den = best_den;
869 		}
870 		if (nump)
871 			*nump = result_num;
872 		if (denp)
873 			*denp = result_den;
874 	}
875 	return err;
876 }
877 EXPORT_SYMBOL(snd_interval_ratnum);
878 
879 /**
880  * snd_interval_ratden - refine the interval value
881  * @i: interval to refine
882  * @rats_count: number of struct ratden
883  * @rats: struct ratden array
884  * @nump: pointer to store the resultant numerator
885  * @denp: pointer to store the resultant denominator
886  *
887  * Return: Positive if the value is changed, zero if it's not changed, or a
888  * negative error code.
889  */
890 static int snd_interval_ratden(struct snd_interval *i,
891 			       unsigned int rats_count,
892 			       const struct snd_ratden *rats,
893 			       unsigned int *nump, unsigned int *denp)
894 {
895 	unsigned int best_num, best_diff, best_den;
896 	unsigned int k;
897 	struct snd_interval t;
898 	int err;
899 
900 	best_num = best_den = best_diff = 0;
901 	for (k = 0; k < rats_count; ++k) {
902 		unsigned int num;
903 		unsigned int den = rats[k].den;
904 		unsigned int q = i->min;
905 		int diff;
906 		num = mul(q, den);
907 		if (num > rats[k].num_max)
908 			continue;
909 		if (num < rats[k].num_min)
910 			num = rats[k].num_min;
911 		else {
912 			unsigned int r;
913 			r = (num - rats[k].num_min) % rats[k].num_step;
914 			if (r != 0)
915 				num += rats[k].num_step - r;
916 		}
917 		diff = num - q * den;
918 		if (best_num == 0 ||
919 		    diff * best_den < best_diff * den) {
920 			best_diff = diff;
921 			best_den = den;
922 			best_num = num;
923 		}
924 	}
925 	if (best_den == 0) {
926 		i->empty = 1;
927 		return -EINVAL;
928 	}
929 	t.min = div_down(best_num, best_den);
930 	t.openmin = !!(best_num % best_den);
931 
932 	best_num = best_den = best_diff = 0;
933 	for (k = 0; k < rats_count; ++k) {
934 		unsigned int num;
935 		unsigned int den = rats[k].den;
936 		unsigned int q = i->max;
937 		int diff;
938 		num = mul(q, den);
939 		if (num < rats[k].num_min)
940 			continue;
941 		if (num > rats[k].num_max)
942 			num = rats[k].num_max;
943 		else {
944 			unsigned int r;
945 			r = (num - rats[k].num_min) % rats[k].num_step;
946 			if (r != 0)
947 				num -= r;
948 		}
949 		diff = q * den - num;
950 		if (best_num == 0 ||
951 		    diff * best_den < best_diff * den) {
952 			best_diff = diff;
953 			best_den = den;
954 			best_num = num;
955 		}
956 	}
957 	if (best_den == 0) {
958 		i->empty = 1;
959 		return -EINVAL;
960 	}
961 	t.max = div_up(best_num, best_den);
962 	t.openmax = !!(best_num % best_den);
963 	t.integer = 0;
964 	err = snd_interval_refine(i, &t);
965 	if (err < 0)
966 		return err;
967 
968 	if (snd_interval_single(i)) {
969 		if (nump)
970 			*nump = best_num;
971 		if (denp)
972 			*denp = best_den;
973 	}
974 	return err;
975 }
976 
977 /**
978  * snd_interval_list - refine the interval value from the list
979  * @i: the interval value to refine
980  * @count: the number of elements in the list
981  * @list: the value list
982  * @mask: the bit-mask to evaluate
983  *
984  * Refines the interval value from the list.
985  * When mask is non-zero, only the elements corresponding to bit 1 are
986  * evaluated.
987  *
988  * Return: Positive if the value is changed, zero if it's not changed, or a
989  * negative error code.
990  */
991 int snd_interval_list(struct snd_interval *i, unsigned int count,
992 		      const unsigned int *list, unsigned int mask)
993 {
994         unsigned int k;
995 	struct snd_interval list_range;
996 
997 	if (!count) {
998 		i->empty = 1;
999 		return -EINVAL;
1000 	}
1001 	snd_interval_any(&list_range);
1002 	list_range.min = UINT_MAX;
1003 	list_range.max = 0;
1004         for (k = 0; k < count; k++) {
1005 		if (mask && !(mask & (1 << k)))
1006 			continue;
1007 		if (!snd_interval_test(i, list[k]))
1008 			continue;
1009 		list_range.min = min(list_range.min, list[k]);
1010 		list_range.max = max(list_range.max, list[k]);
1011         }
1012 	return snd_interval_refine(i, &list_range);
1013 }
1014 EXPORT_SYMBOL(snd_interval_list);
1015 
1016 /**
1017  * snd_interval_ranges - refine the interval value from the list of ranges
1018  * @i: the interval value to refine
1019  * @count: the number of elements in the list of ranges
1020  * @ranges: the ranges list
1021  * @mask: the bit-mask to evaluate
1022  *
1023  * Refines the interval value from the list of ranges.
1024  * When mask is non-zero, only the elements corresponding to bit 1 are
1025  * evaluated.
1026  *
1027  * Return: Positive if the value is changed, zero if it's not changed, or a
1028  * negative error code.
1029  */
1030 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1031 			const struct snd_interval *ranges, unsigned int mask)
1032 {
1033 	unsigned int k;
1034 	struct snd_interval range_union;
1035 	struct snd_interval range;
1036 
1037 	if (!count) {
1038 		snd_interval_none(i);
1039 		return -EINVAL;
1040 	}
1041 	snd_interval_any(&range_union);
1042 	range_union.min = UINT_MAX;
1043 	range_union.max = 0;
1044 	for (k = 0; k < count; k++) {
1045 		if (mask && !(mask & (1 << k)))
1046 			continue;
1047 		snd_interval_copy(&range, &ranges[k]);
1048 		if (snd_interval_refine(&range, i) < 0)
1049 			continue;
1050 		if (snd_interval_empty(&range))
1051 			continue;
1052 
1053 		if (range.min < range_union.min) {
1054 			range_union.min = range.min;
1055 			range_union.openmin = 1;
1056 		}
1057 		if (range.min == range_union.min && !range.openmin)
1058 			range_union.openmin = 0;
1059 		if (range.max > range_union.max) {
1060 			range_union.max = range.max;
1061 			range_union.openmax = 1;
1062 		}
1063 		if (range.max == range_union.max && !range.openmax)
1064 			range_union.openmax = 0;
1065 	}
1066 	return snd_interval_refine(i, &range_union);
1067 }
1068 EXPORT_SYMBOL(snd_interval_ranges);
1069 
1070 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1071 {
1072 	unsigned int n;
1073 	int changed = 0;
1074 	n = i->min % step;
1075 	if (n != 0 || i->openmin) {
1076 		i->min += step - n;
1077 		i->openmin = 0;
1078 		changed = 1;
1079 	}
1080 	n = i->max % step;
1081 	if (n != 0 || i->openmax) {
1082 		i->max -= n;
1083 		i->openmax = 0;
1084 		changed = 1;
1085 	}
1086 	if (snd_interval_checkempty(i)) {
1087 		i->empty = 1;
1088 		return -EINVAL;
1089 	}
1090 	return changed;
1091 }
1092 
1093 /* Info constraints helpers */
1094 
1095 /**
1096  * snd_pcm_hw_rule_add - add the hw-constraint rule
1097  * @runtime: the pcm runtime instance
1098  * @cond: condition bits
1099  * @var: the variable to evaluate
1100  * @func: the evaluation function
1101  * @private: the private data pointer passed to function
1102  * @dep: the dependent variables
1103  *
1104  * Return: Zero if successful, or a negative error code on failure.
1105  */
1106 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1107 			int var,
1108 			snd_pcm_hw_rule_func_t func, void *private,
1109 			int dep, ...)
1110 {
1111 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1112 	struct snd_pcm_hw_rule *c;
1113 	unsigned int k;
1114 	va_list args;
1115 	va_start(args, dep);
1116 	if (constrs->rules_num >= constrs->rules_all) {
1117 		struct snd_pcm_hw_rule *new;
1118 		unsigned int new_rules = constrs->rules_all + 16;
1119 		new = krealloc_array(constrs->rules, new_rules,
1120 				     sizeof(*c), GFP_KERNEL);
1121 		if (!new) {
1122 			va_end(args);
1123 			return -ENOMEM;
1124 		}
1125 		constrs->rules = new;
1126 		constrs->rules_all = new_rules;
1127 	}
1128 	c = &constrs->rules[constrs->rules_num];
1129 	c->cond = cond;
1130 	c->func = func;
1131 	c->var = var;
1132 	c->private = private;
1133 	k = 0;
1134 	while (1) {
1135 		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1136 			va_end(args);
1137 			return -EINVAL;
1138 		}
1139 		c->deps[k++] = dep;
1140 		if (dep < 0)
1141 			break;
1142 		dep = va_arg(args, int);
1143 	}
1144 	constrs->rules_num++;
1145 	va_end(args);
1146 	return 0;
1147 }
1148 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
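
/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * cannot do more than two channels above 96 kHz could register a custom
 * rule from its .open callback; the names below are placeholders. A complete
 * driver would usually add the inverse rule on RATE as well.
 *
 *	static int hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					    struct snd_pcm_hw_rule *rule)
 *	{
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval ch;
 *
 *		if (r->min <= 96000)
 *			return 0;
 *		snd_interval_any(&ch);
 *		ch.max = 2;
 *		ch.integer = 1;
 *		return snd_interval_refine(hw_param_interval(params,
 *				SNDRV_PCM_HW_PARAM_CHANNELS), &ch);
 *	}
 *
 *	// in the .open callback:
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  hw_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */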
1149 
1150 /**
1151  * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1152  * @runtime: PCM runtime instance
1153  * @var: hw_params variable to apply the mask
1154  * @mask: the bitmap mask
1155  *
1156  * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1157  *
1158  * Return: Zero if successful, or a negative error code on failure.
1159  */
1160 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1161 			       u_int32_t mask)
1162 {
1163 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1164 	struct snd_mask *maskp = constrs_mask(constrs, var);
1165 	*maskp->bits &= mask;
1166 	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1167 	if (*maskp->bits == 0)
1168 		return -EINVAL;
1169 	return 0;
1170 }
1171 
1172 /**
1173  * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1174  * @runtime: PCM runtime instance
1175  * @var: hw_params variable to apply the mask
1176  * @mask: the 64bit bitmap mask
1177  *
1178  * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1179  *
1180  * Return: Zero if successful, or a negative error code on failure.
1181  */
1182 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1183 				 u_int64_t mask)
1184 {
1185 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1186 	struct snd_mask *maskp = constrs_mask(constrs, var);
1187 	maskp->bits[0] &= (u_int32_t)mask;
1188 	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1189 	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1190 	if (! maskp->bits[0] && ! maskp->bits[1])
1191 		return -EINVAL;
1192 	return 0;
1193 }
1194 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
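
/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * hardware only accepts 16- and 32-bit little-endian samples could narrow
 * the format mask from its .open callback:
 *
 *	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT,
 *					   SNDRV_PCM_FMTBIT_S16_LE |
 *					   SNDRV_PCM_FMTBIT_S32_LE);
 *
 * Most drivers simply set runtime->hw.formats instead; the explicit
 * constraint is useful when the allowed formats depend on runtime state.
 */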
1195 
1196 /**
1197  * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1198  * @runtime: PCM runtime instance
1199  * @var: hw_params variable to apply the integer constraint
1200  *
1201  * Apply the constraint of integer to an interval parameter.
1202  *
1203  * Return: Positive if the value is changed, zero if it's not changed, or a
1204  * negative error code.
1205  */
1206 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1207 {
1208 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1209 	return snd_interval_setinteger(constrs_interval(constrs, var));
1210 }
1211 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
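
/*
 * Illustrative sketch (not part of this file): the most common use is to
 * force a whole number of periods per buffer in a driver's .open callback:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 */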
1212 
1213 /**
1214  * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1215  * @runtime: PCM runtime instance
1216  * @var: hw_params variable to apply the range
1217  * @min: the minimal value
1218  * @max: the maximal value
1219  *
1220  * Apply the min/max range constraint to an interval parameter.
1221  *
1222  * Return: Positive if the value is changed, zero if it's not changed, or a
1223  * negative error code.
1224  */
1225 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1226 				 unsigned int min, unsigned int max)
1227 {
1228 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1229 	struct snd_interval t;
1230 	t.min = min;
1231 	t.max = max;
1232 	t.openmin = t.openmax = 0;
1233 	t.integer = 0;
1234 	return snd_interval_refine(constrs_interval(constrs, var), &t);
1235 }
1236 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
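
/*
 * Illustrative sketch (not part of this file): a hypothetical driver with a
 * 64 kB DMA limit per period could clamp the period size in its .open
 * callback:
 *
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
 *					   256, 64 * 1024);
 *	if (err < 0)
 *		return err;
 */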
1237 
1238 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1239 				struct snd_pcm_hw_rule *rule)
1240 {
1241 	struct snd_pcm_hw_constraint_list *list = rule->private;
1242 	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1243 }
1244 
1245 
1246 /**
1247  * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1248  * @runtime: PCM runtime instance
1249  * @cond: condition bits
1250  * @var: hw_params variable to apply the list constraint
1251  * @l: list
1252  *
1253  * Apply the list of constraints to an interval parameter.
1254  *
1255  * Return: Zero if successful, or a negative error code on failure.
1256  */
1257 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1258 			       unsigned int cond,
1259 			       snd_pcm_hw_param_t var,
1260 			       const struct snd_pcm_hw_constraint_list *l)
1261 {
1262 	return snd_pcm_hw_rule_add(runtime, cond, var,
1263 				   snd_pcm_hw_rule_list, (void *)l,
1264 				   var, -1);
1265 }
1266 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
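
/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * clock supports only a few discrete rates could restrict the rate
 * parameter with a list constraint from its .open callback:
 *
 *	static const unsigned int my_rates[] = { 44100, 48000, 96000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_list = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *		.mask = 0,
 *	};
 *	...
 *	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_list);
 */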
1267 
1268 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1269 				  struct snd_pcm_hw_rule *rule)
1270 {
1271 	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1272 	return snd_interval_ranges(hw_param_interval(params, rule->var),
1273 				   r->count, r->ranges, r->mask);
1274 }
1275 
1276 
1277 /**
1278  * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1279  * @runtime: PCM runtime instance
1280  * @cond: condition bits
1281  * @var: hw_params variable to apply the list of range constraints
1282  * @r: ranges
1283  *
1284  * Apply the list of range constraints to an interval parameter.
1285  *
1286  * Return: Zero if successful, or a negative error code on failure.
1287  */
1288 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1289 				 unsigned int cond,
1290 				 snd_pcm_hw_param_t var,
1291 				 const struct snd_pcm_hw_constraint_ranges *r)
1292 {
1293 	return snd_pcm_hw_rule_add(runtime, cond, var,
1294 				   snd_pcm_hw_rule_ranges, (void *)r,
1295 				   var, -1);
1296 }
1297 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1298 
1299 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1300 				   struct snd_pcm_hw_rule *rule)
1301 {
1302 	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1303 	unsigned int num = 0, den = 0;
1304 	int err;
1305 	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1306 				  r->nrats, r->rats, &num, &den);
1307 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1308 		params->rate_num = num;
1309 		params->rate_den = den;
1310 	}
1311 	return err;
1312 }
1313 
1314 /**
1315  * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1316  * @runtime: PCM runtime instance
1317  * @cond: condition bits
1318  * @var: hw_params variable to apply the ratnums constraint
1319  * @r: struct snd_ratnums constraints
1320  *
1321  * Return: Zero if successful, or a negative error code on failure.
1322  */
1323 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1324 				  unsigned int cond,
1325 				  snd_pcm_hw_param_t var,
1326 				  const struct snd_pcm_hw_constraint_ratnums *r)
1327 {
1328 	return snd_pcm_hw_rule_add(runtime, cond, var,
1329 				   snd_pcm_hw_rule_ratnums, (void *)r,
1330 				   var, -1);
1331 }
1332 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
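
/*
 * Illustrative sketch (not part of this file): a hypothetical device that
 * derives its sample rate as 24576000 / n from a programmable divider
 * n = 256..512 could describe that with a ratnum constraint:
 *
 *	static const struct snd_ratnum my_clock = {
 *		.num = 24576000,
 *		.den_min = 256,
 *		.den_max = 512,
 *		.den_step = 1,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums my_rates = {
 *		.nrats = 1,
 *		.rats = &my_clock,
 *	};
 *	...
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE, &my_rates);
 */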
1333 
1334 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1335 				   struct snd_pcm_hw_rule *rule)
1336 {
1337 	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1338 	unsigned int num = 0, den = 0;
1339 	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1340 				  r->nrats, r->rats, &num, &den);
1341 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1342 		params->rate_num = num;
1343 		params->rate_den = den;
1344 	}
1345 	return err;
1346 }
1347 
1348 /**
1349  * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1350  * @runtime: PCM runtime instance
1351  * @cond: condition bits
1352  * @var: hw_params variable to apply the ratdens constraint
1353  * @r: struct snd_ratdens constraints
1354  *
1355  * Return: Zero if successful, or a negative error code on failure.
1356  */
1357 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1358 				  unsigned int cond,
1359 				  snd_pcm_hw_param_t var,
1360 				  const struct snd_pcm_hw_constraint_ratdens *r)
1361 {
1362 	return snd_pcm_hw_rule_add(runtime, cond, var,
1363 				   snd_pcm_hw_rule_ratdens, (void *)r,
1364 				   var, -1);
1365 }
1366 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1367 
1368 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1369 				  struct snd_pcm_hw_rule *rule)
1370 {
1371 	unsigned int l = (unsigned long) rule->private;
1372 	int width = l & 0xffff;
1373 	unsigned int msbits = l >> 16;
1374 	const struct snd_interval *i =
1375 		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1376 
1377 	if (!snd_interval_single(i))
1378 		return 0;
1379 
1380 	if ((snd_interval_value(i) == width) ||
1381 	    (width == 0 && snd_interval_value(i) > msbits))
1382 		params->msbits = min_not_zero(params->msbits, msbits);
1383 
1384 	return 0;
1385 }
1386 
1387 /**
1388  * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1389  * @runtime: PCM runtime instance
1390  * @cond: condition bits
1391  * @width: sample bits width
1392  * @msbits: msbits width
1393  *
1394  * This constraint will set the number of most significant bits (msbits) if a
1395  * sample format with the specified width has been selected. If width is set to 0,
1396  * the msbits will be set for any sample format with a width larger than the
1397  * specified msbits.
1398  *
1399  * Return: Zero if successful, or a negative error code on failure.
1400  */
1401 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1402 				 unsigned int cond,
1403 				 unsigned int width,
1404 				 unsigned int msbits)
1405 {
1406 	unsigned long l = (msbits << 16) | width;
1407 	return snd_pcm_hw_rule_add(runtime, cond, -1,
1408 				    snd_pcm_hw_rule_msbits,
1409 				    (void*) l,
1410 				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1411 }
1412 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
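
/*
 * Illustrative sketch (not part of this file): a hypothetical codec that
 * carries 24 valid bits in a 32-bit sample could report that from .open:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */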
1413 
1414 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1415 				struct snd_pcm_hw_rule *rule)
1416 {
1417 	unsigned long step = (unsigned long) rule->private;
1418 	return snd_interval_step(hw_param_interval(params, rule->var), step);
1419 }
1420 
1421 /**
1422  * snd_pcm_hw_constraint_step - add a hw constraint step rule
1423  * @runtime: PCM runtime instance
1424  * @cond: condition bits
1425  * @var: hw_params variable to apply the step constraint
1426  * @step: step size
1427  *
1428  * Return: Zero if successful, or a negative error code on failure.
1429  */
1430 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1431 			       unsigned int cond,
1432 			       snd_pcm_hw_param_t var,
1433 			       unsigned long step)
1434 {
1435 	return snd_pcm_hw_rule_add(runtime, cond, var,
1436 				   snd_pcm_hw_rule_step, (void *) step,
1437 				   var, -1);
1438 }
1439 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
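
/*
 * Illustrative sketch (not part of this file): a hypothetical DMA engine
 * that transfers in 128-byte bursts could require the period and buffer
 * sizes to be multiples of that:
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
 */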
1440 
1441 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1442 {
1443 	static const unsigned int pow2_sizes[] = {
1444 		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1445 		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1446 		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1447 		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1448 	};
1449 	return snd_interval_list(hw_param_interval(params, rule->var),
1450 				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1451 }
1452 
1453 /**
1454  * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1455  * @runtime: PCM runtime instance
1456  * @cond: condition bits
1457  * @var: hw_params variable to apply the power-of-2 constraint
1458  *
1459  * Return: Zero if successful, or a negative error code on failure.
1460  */
1461 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1462 			       unsigned int cond,
1463 			       snd_pcm_hw_param_t var)
1464 {
1465 	return snd_pcm_hw_rule_add(runtime, cond, var,
1466 				   snd_pcm_hw_rule_pow2, NULL,
1467 				   var, -1);
1468 }
1469 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
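
/*
 * Illustrative sketch (not part of this file): hardware with a power-of-two
 * ring-buffer address counter could require:
 *
 *	err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 */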
1470 
1471 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1472 					   struct snd_pcm_hw_rule *rule)
1473 {
1474 	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1475 	struct snd_interval *rate;
1476 
1477 	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1478 	return snd_interval_list(rate, 1, &base_rate, 0);
1479 }
1480 
1481 /**
1482  * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1483  * @runtime: PCM runtime instance
1484  * @base_rate: the rate at which the hardware does not resample
1485  *
1486  * Return: Zero if successful, or a negative error code on failure.
1487  */
1488 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1489 			       unsigned int base_rate)
1490 {
1491 	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1492 				   SNDRV_PCM_HW_PARAM_RATE,
1493 				   snd_pcm_hw_rule_noresample_func,
1494 				   (void *)(uintptr_t)base_rate,
1495 				   SNDRV_PCM_HW_PARAM_RATE, -1);
1496 }
1497 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
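
/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * converter natively runs at 48 kHz could let applications opt out of its
 * hardware resampler:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *
 * The rule only takes effect when the application sets
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE in its hw_params request.
 */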
1498 
1499 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1500 				  snd_pcm_hw_param_t var)
1501 {
1502 	if (hw_is_mask(var)) {
1503 		snd_mask_any(hw_param_mask(params, var));
1504 		params->cmask |= 1 << var;
1505 		params->rmask |= 1 << var;
1506 		return;
1507 	}
1508 	if (hw_is_interval(var)) {
1509 		snd_interval_any(hw_param_interval(params, var));
1510 		params->cmask |= 1 << var;
1511 		params->rmask |= 1 << var;
1512 		return;
1513 	}
1514 	snd_BUG();
1515 }
1516 
1517 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1518 {
1519 	unsigned int k;
1520 	memset(params, 0, sizeof(*params));
1521 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1522 		_snd_pcm_hw_param_any(params, k);
1523 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1524 		_snd_pcm_hw_param_any(params, k);
1525 	params->info = ~0U;
1526 }
1527 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1528 
1529 /**
1530  * snd_pcm_hw_param_value - return @params field @var value
1531  * @params: the hw_params instance
1532  * @var: parameter to retrieve
1533  * @dir: pointer to the direction (-1,0,1) or %NULL
1534  *
1535  * Return: The value for field @var if it's fixed in configuration space
1536  * defined by @params. -%EINVAL otherwise.
1537  */
1538 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1539 			   snd_pcm_hw_param_t var, int *dir)
1540 {
1541 	if (hw_is_mask(var)) {
1542 		const struct snd_mask *mask = hw_param_mask_c(params, var);
1543 		if (!snd_mask_single(mask))
1544 			return -EINVAL;
1545 		if (dir)
1546 			*dir = 0;
1547 		return snd_mask_value(mask);
1548 	}
1549 	if (hw_is_interval(var)) {
1550 		const struct snd_interval *i = hw_param_interval_c(params, var);
1551 		if (!snd_interval_single(i))
1552 			return -EINVAL;
1553 		if (dir)
1554 			*dir = i->openmin;
1555 		return snd_interval_value(i);
1556 	}
1557 	return -EINVAL;
1558 }
1559 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1560 
1561 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1562 				snd_pcm_hw_param_t var)
1563 {
1564 	if (hw_is_mask(var)) {
1565 		snd_mask_none(hw_param_mask(params, var));
1566 		params->cmask |= 1 << var;
1567 		params->rmask |= 1 << var;
1568 	} else if (hw_is_interval(var)) {
1569 		snd_interval_none(hw_param_interval(params, var));
1570 		params->cmask |= 1 << var;
1571 		params->rmask |= 1 << var;
1572 	} else {
1573 		snd_BUG();
1574 	}
1575 }
1576 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1577 
1578 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1579 				   snd_pcm_hw_param_t var)
1580 {
1581 	int changed;
1582 	if (hw_is_mask(var))
1583 		changed = snd_mask_refine_first(hw_param_mask(params, var));
1584 	else if (hw_is_interval(var))
1585 		changed = snd_interval_refine_first(hw_param_interval(params, var));
1586 	else
1587 		return -EINVAL;
1588 	if (changed > 0) {
1589 		params->cmask |= 1 << var;
1590 		params->rmask |= 1 << var;
1591 	}
1592 	return changed;
1593 }
1594 
1595 
1596 /**
1597  * snd_pcm_hw_param_first - refine config space and return minimum value
1598  * @pcm: PCM instance
1599  * @params: the hw_params instance
1600  * @var: parameter to retrieve
1601  * @dir: pointer to the direction (-1,0,1) or %NULL
1602  *
1603  * Inside configuration space defined by @params remove from @var all
1604  * values > minimum. Reduce configuration space accordingly.
1605  *
1606  * Return: The minimum, or a negative error code on failure.
1607  */
1608 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1609 			   struct snd_pcm_hw_params *params,
1610 			   snd_pcm_hw_param_t var, int *dir)
1611 {
1612 	int changed = _snd_pcm_hw_param_first(params, var);
1613 	if (changed < 0)
1614 		return changed;
1615 	if (params->rmask) {
1616 		int err = snd_pcm_hw_refine(pcm, params);
1617 		if (err < 0)
1618 			return err;
1619 	}
1620 	return snd_pcm_hw_param_value(params, var, dir);
1621 }
1622 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1623 
1624 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1625 				  snd_pcm_hw_param_t var)
1626 {
1627 	int changed;
1628 	if (hw_is_mask(var))
1629 		changed = snd_mask_refine_last(hw_param_mask(params, var));
1630 	else if (hw_is_interval(var))
1631 		changed = snd_interval_refine_last(hw_param_interval(params, var));
1632 	else
1633 		return -EINVAL;
1634 	if (changed > 0) {
1635 		params->cmask |= 1 << var;
1636 		params->rmask |= 1 << var;
1637 	}
1638 	return changed;
1639 }
1640 
1641 
1642 /**
1643  * snd_pcm_hw_param_last - refine config space and return maximum value
1644  * @pcm: PCM instance
1645  * @params: the hw_params instance
1646  * @var: parameter to retrieve
1647  * @dir: pointer to the direction (-1,0,1) or %NULL
1648  *
1649  * Inside configuration space defined by @params remove from @var all
1650  * values < maximum. Reduce configuration space accordingly.
1651  *
1652  * Return: The maximum, or a negative error code on failure.
1653  */
1654 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1655 			  struct snd_pcm_hw_params *params,
1656 			  snd_pcm_hw_param_t var, int *dir)
1657 {
1658 	int changed = _snd_pcm_hw_param_last(params, var);
1659 	if (changed < 0)
1660 		return changed;
1661 	if (params->rmask) {
1662 		int err = snd_pcm_hw_refine(pcm, params);
1663 		if (err < 0)
1664 			return err;
1665 	}
1666 	return snd_pcm_hw_param_value(params, var, dir);
1667 }
1668 EXPORT_SYMBOL(snd_pcm_hw_param_last);
1669 
1670 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1671 				   void *arg)
1672 {
1673 	struct snd_pcm_runtime *runtime = substream->runtime;
1674 	unsigned long flags;
1675 	snd_pcm_stream_lock_irqsave(substream, flags);
1676 	if (snd_pcm_running(substream) &&
1677 	    snd_pcm_update_hw_ptr(substream) >= 0)
1678 		runtime->status->hw_ptr %= runtime->buffer_size;
1679 	else {
1680 		runtime->status->hw_ptr = 0;
1681 		runtime->hw_ptr_wrap = 0;
1682 	}
1683 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1684 	return 0;
1685 }
1686 
1687 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1688 					  void *arg)
1689 {
1690 	struct snd_pcm_channel_info *info = arg;
1691 	struct snd_pcm_runtime *runtime = substream->runtime;
1692 	int width;
1693 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1694 		info->offset = -1;
1695 		return 0;
1696 	}
1697 	width = snd_pcm_format_physical_width(runtime->format);
1698 	if (width < 0)
1699 		return width;
1700 	info->offset = 0;
1701 	switch (runtime->access) {
1702 	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1703 	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1704 		info->first = info->channel * width;
1705 		info->step = runtime->channels * width;
1706 		break;
1707 	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1708 	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1709 	{
1710 		size_t size = runtime->dma_bytes / runtime->channels;
1711 		info->first = info->channel * size * 8;
1712 		info->step = width;
1713 		break;
1714 	}
1715 	default:
1716 		snd_BUG();
1717 		break;
1718 	}
1719 	return 0;
1720 }
1721 
1722 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1723 				       void *arg)
1724 {
1725 	struct snd_pcm_hw_params *params = arg;
1726 	snd_pcm_format_t format;
1727 	int channels;
1728 	ssize_t frame_size;
1729 
1730 	params->fifo_size = substream->runtime->hw.fifo_size;
1731 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1732 		format = params_format(params);
1733 		channels = params_channels(params);
1734 		frame_size = snd_pcm_format_size(format, channels);
1735 		if (frame_size > 0)
1736 			params->fifo_size /= frame_size;
1737 	}
1738 	return 0;
1739 }
1740 
1741 /**
1742  * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1743  * @substream: the pcm substream instance
1744  * @cmd: ioctl command
1745  * @arg: ioctl argument
1746  *
1747  * Processes the generic ioctl commands for PCM.
1748  * Can be passed as the ioctl callback for PCM ops.
1749  *
1750  * Return: Zero if successful, or a negative error code on failure.
1751  */
1752 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1753 		      unsigned int cmd, void *arg)
1754 {
1755 	switch (cmd) {
1756 	case SNDRV_PCM_IOCTL1_RESET:
1757 		return snd_pcm_lib_ioctl_reset(substream, arg);
1758 	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1759 		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1760 	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1761 		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1762 	}
1763 	return -ENXIO;
1764 }
1765 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
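
/*
 * Illustrative sketch (not part of this file): drivers without special ioctl
 * needs can simply plug this helper into their operator table; the other
 * callbacks shown are placeholders.
 *
 *	static const struct snd_pcm_ops my_playback_ops = {
 *		.open =		my_pcm_open,
 *		.close =	my_pcm_close,
 *		.ioctl =	snd_pcm_lib_ioctl,
 *		.hw_params =	my_pcm_hw_params,
 *		.prepare =	my_pcm_prepare,
 *		.trigger =	my_pcm_trigger,
 *		.pointer =	my_pcm_pointer,
 *	};
 */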
1766 
1767 /**
1768  * snd_pcm_period_elapsed_under_stream_lock() - update the runtime status for the next period
1769  *						with the PCM substream lock already held.
1770  * @substream: the instance of pcm substream.
1771  *
1772  * This function is called when a batch of audio data frames of the same size as the period of
1773  * the buffer has been processed in the audio data transmission.
1774  *
1775  * The call updates the runtime status with the latest position of the audio data transmission,
1776  * checks for buffer overruns and underruns, wakes up user processes waiting for available audio
1777  * data frames, samples the audio timestamp, and stops or drains the PCM substream according to
1778  * the configured thresholds.
1779  *
1780  * The function is intended for the case that the PCM driver handles audio data frames under an
1781  * already acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops
1782  * in process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1783  * instead, since the lock of the PCM substream has to be acquired in advance for this function.
1784  *
1785  * Developers should keep in mind that some callbacks in &snd_pcm_ops are invoked by the call of
1786  * this function:
1787  *
1788  * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
1789  * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1790  * - .get_time_info - to retrieve audio time stamp if needed.
1791  *
1792  * Even if more than one period has elapsed since the last call, you have to call this only once.
1793  */
1794 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1795 {
1796 	struct snd_pcm_runtime *runtime;
1797 
1798 	if (PCM_RUNTIME_CHECK(substream))
1799 		return;
1800 	runtime = substream->runtime;
1801 
1802 	if (!snd_pcm_running(substream) ||
1803 	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1804 		goto _end;
1805 
1806 #ifdef CONFIG_SND_PCM_TIMER
1807 	if (substream->timer_running)
1808 		snd_timer_interrupt(substream->timer, 1);
1809 #endif
1810  _end:
1811 	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1812 }
1813 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
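
/*
 * Usage sketch (hypothetical, all foo_* names are placeholders): calling the
 * helper from a process-context callback that is already invoked with the PCM
 * stream lock held, e.g. an .ack operation that also completes a period:
 *
 *	static int foo_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		struct foo_chip *chip = snd_pcm_substream_chip(substream);
 *
 *		foo_kick_transfer(chip);
 *		if (foo_period_done(chip))
 *			snd_pcm_period_elapsed_under_stream_lock(substream);
 *		return 0;
 *	}
 */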
1814 
1815 /**
1816  * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1817  *			      PCM substream.
1818  * @substream: the instance of PCM substream.
1819  *
1820  * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1821  * it acquires the lock of the PCM substream by itself.
1822  *
1823  * It's typically called from an IRQ handler when a hardware IRQ occurs to notify that a batch of
1824  * audio data frames of the same size as the buffer period has been processed in the audio data
1825  * transmission.
1826  */
1827 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1828 {
1829 	unsigned long flags;
1830 
1831 	if (snd_BUG_ON(!substream))
1832 		return;
1833 
1834 	snd_pcm_stream_lock_irqsave(substream, flags);
1835 	snd_pcm_period_elapsed_under_stream_lock(substream);
1836 	snd_pcm_stream_unlock_irqrestore(substream, flags);
1837 }
1838 EXPORT_SYMBOL(snd_pcm_period_elapsed);
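
/*
 * Usage sketch (hypothetical, all foo_* names are placeholders): the typical
 * call site is a hardware interrupt handler; the stream lock is taken
 * internally by snd_pcm_period_elapsed():
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (!foo_irq_pending(chip))
 *			return IRQ_NONE;
 *		foo_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */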
1839 
1840 /*
1841  * Wait until avail_min data becomes available
1842  * Returns a negative error code if any error occurs during operation.
1843  * The available space is stored in *availp.  When err = 0 and avail = 0
1844  * on a capture stream, it indicates that the stream is in DRAINING state.
1845  */
1846 static int wait_for_avail(struct snd_pcm_substream *substream,
1847 			      snd_pcm_uframes_t *availp)
1848 {
1849 	struct snd_pcm_runtime *runtime = substream->runtime;
1850 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1851 	wait_queue_entry_t wait;
1852 	int err = 0;
1853 	snd_pcm_uframes_t avail = 0;
1854 	long wait_time, tout;
1855 
1856 	init_waitqueue_entry(&wait, current);
1857 	set_current_state(TASK_INTERRUPTIBLE);
1858 	add_wait_queue(&runtime->tsleep, &wait);
1859 
1860 	if (runtime->no_period_wakeup)
1861 		wait_time = MAX_SCHEDULE_TIMEOUT;
1862 	else {
1863 		/* use wait time from substream if available */
1864 		if (substream->wait_time) {
1865 			wait_time = substream->wait_time;
1866 		} else {
1867 			wait_time = 100;
1868 
1869 			if (runtime->rate) {
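				/* the time to play the whole buffer in msec, plus a 10% margin */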
1870 				long t = runtime->buffer_size * 1100 / runtime->rate;
1871 				wait_time = max(t, wait_time);
1872 			}
1873 		}
1874 		wait_time = msecs_to_jiffies(wait_time);
1875 	}
1876 
1877 	for (;;) {
1878 		if (signal_pending(current)) {
1879 			err = -ERESTARTSYS;
1880 			break;
1881 		}
1882 
1883 		/*
1884 		 * We need to check if space became available already
1885 		 * (and thus the wakeup happened already) first to close
1886 		 * the race of space already having become available.
1887 		 * This check must happen after being added to the waitqueue
1888 		 * and having the current state set to INTERRUPTIBLE.
1889 		 */
1890 		avail = snd_pcm_avail(substream);
1891 		if (avail >= runtime->twake)
1892 			break;
1893 		snd_pcm_stream_unlock_irq(substream);
1894 
1895 		tout = schedule_timeout(wait_time);
1896 
1897 		snd_pcm_stream_lock_irq(substream);
1898 		set_current_state(TASK_INTERRUPTIBLE);
1899 		switch (runtime->state) {
1900 		case SNDRV_PCM_STATE_SUSPENDED:
1901 			err = -ESTRPIPE;
1902 			goto _endloop;
1903 		case SNDRV_PCM_STATE_XRUN:
1904 			err = -EPIPE;
1905 			goto _endloop;
1906 		case SNDRV_PCM_STATE_DRAINING:
1907 			if (is_playback)
1908 				err = -EPIPE;
1909 			else
1910 				avail = 0; /* indicate draining */
1911 			goto _endloop;
1912 		case SNDRV_PCM_STATE_OPEN:
1913 		case SNDRV_PCM_STATE_SETUP:
1914 		case SNDRV_PCM_STATE_DISCONNECTED:
1915 			err = -EBADFD;
1916 			goto _endloop;
1917 		case SNDRV_PCM_STATE_PAUSED:
1918 			continue;
1919 		}
1920 		if (!tout) {
1921 			pcm_dbg(substream->pcm,
1922 				"%s timeout (DMA or IRQ trouble?)\n",
1923 				is_playback ? "playback write" : "capture read");
1924 			err = -EIO;
1925 			break;
1926 		}
1927 	}
1928  _endloop:
1929 	set_current_state(TASK_RUNNING);
1930 	remove_wait_queue(&runtime->tsleep, &wait);
1931 	*availp = avail;
1932 	return err;
1933 }
1934 
1935 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1936 			      int channel, unsigned long hwoff,
1937 			      void *buf, unsigned long bytes);
1938 
1939 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1940 			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1941 
1942 /* calculate the target DMA-buffer position to be written/read */
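/* for non-interleaved access, channel N is assumed to occupy the N-th
 * dma_bytes / channels slice of the DMA area; interleaved callers pass
 * channel 0 and a byte offset covering all channels
 */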
1943 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1944 			   int channel, unsigned long hwoff)
1945 {
1946 	return runtime->dma_area + hwoff +
1947 		channel * (runtime->dma_bytes / runtime->channels);
1948 }
1949 
1950 /* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1951 static int default_write_copy(struct snd_pcm_substream *substream,
1952 			      int channel, unsigned long hwoff,
1953 			      void *buf, unsigned long bytes)
1954 {
1955 	if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1956 			   (void __user *)buf, bytes))
1957 		return -EFAULT;
1958 	return 0;
1959 }
1960 
1961 /* default copy_kernel ops for write */
1962 static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1963 				     int channel, unsigned long hwoff,
1964 				     void *buf, unsigned long bytes)
1965 {
1966 	memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1967 	return 0;
1968 }
1969 
1970 /* fill silence instead of copying data; called as a transfer helper
1971  * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
1972  * a NULL buffer is passed
1973  */
1974 static int fill_silence(struct snd_pcm_substream *substream, int channel,
1975 			unsigned long hwoff, void *buf, unsigned long bytes)
1976 {
1977 	struct snd_pcm_runtime *runtime = substream->runtime;
1978 
1979 	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1980 		return 0;
1981 	if (substream->ops->fill_silence)
1982 		return substream->ops->fill_silence(substream, channel,
1983 						    hwoff, bytes);
1984 
1985 	snd_pcm_format_set_silence(runtime->format,
1986 				   get_dma_ptr(runtime, channel, hwoff),
1987 				   bytes_to_samples(runtime, bytes));
1988 	return 0;
1989 }
1990 
1991 /* default copy_user ops for read; used for both interleaved and non-interleaved modes */
1992 static int default_read_copy(struct snd_pcm_substream *substream,
1993 			     int channel, unsigned long hwoff,
1994 			     void *buf, unsigned long bytes)
1995 {
1996 	if (copy_to_user((void __user *)buf,
1997 			 get_dma_ptr(substream->runtime, channel, hwoff),
1998 			 bytes))
1999 		return -EFAULT;
2000 	return 0;
2001 }
2002 
2003 /* default copy_kernel ops for read */
2004 static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2005 				    int channel, unsigned long hwoff,
2006 				    void *buf, unsigned long bytes)
2007 {
2008 	memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2009 	return 0;
2010 }
2011 
2012 /* call the transfer function with the converted pointers and sizes;
2013  * in interleaved mode, a single call covers all channels and frames
2014  */
2015 static int interleaved_copy(struct snd_pcm_substream *substream,
2016 			    snd_pcm_uframes_t hwoff, void *data,
2017 			    snd_pcm_uframes_t off,
2018 			    snd_pcm_uframes_t frames,
2019 			    pcm_transfer_f transfer)
2020 {
2021 	struct snd_pcm_runtime *runtime = substream->runtime;
2022 
2023 	/* convert to bytes */
2024 	hwoff = frames_to_bytes(runtime, hwoff);
2025 	off = frames_to_bytes(runtime, off);
2026 	frames = frames_to_bytes(runtime, frames);
2027 	return transfer(substream, 0, hwoff, data + off, frames);
2028 }
2029 
2030 /* call the transfer function with the converted pointers and sizes for each
2031  * non-interleaved channel; when the buffer is NULL, fill silence instead of copying
2032  */
2033 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2034 			       snd_pcm_uframes_t hwoff, void *data,
2035 			       snd_pcm_uframes_t off,
2036 			       snd_pcm_uframes_t frames,
2037 			       pcm_transfer_f transfer)
2038 {
2039 	struct snd_pcm_runtime *runtime = substream->runtime;
2040 	int channels = runtime->channels;
2041 	void **bufs = data;
2042 	int c, err;
2043 
2044 	/* convert to bytes; note that it's not frames_to_bytes() here.
2045 	 * in non-interleaved mode we copy each channel separately, so each copy
2046 	 * covers samples of one channel; over all channels they add up to whole frames.
2047 	 */
2048 	off = samples_to_bytes(runtime, off);
2049 	frames = samples_to_bytes(runtime, frames);
2050 	hwoff = samples_to_bytes(runtime, hwoff);
2051 	for (c = 0; c < channels; ++c, ++bufs) {
2052 		if (!data || !*bufs)
2053 			err = fill_silence(substream, c, hwoff, NULL, frames);
2054 		else
2055 			err = transfer(substream, c, hwoff, *bufs + off,
2056 				       frames);
2057 		if (err < 0)
2058 			return err;
2059 	}
2060 	return 0;
2061 }
2062 
2063 /* fill silence on the given buffer position;
2064  * called from snd_pcm_playback_silence()
2065  */
2066 static int fill_silence_frames(struct snd_pcm_substream *substream,
2067 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2068 {
2069 	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2070 	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2071 		return interleaved_copy(substream, off, NULL, 0, frames,
2072 					fill_silence);
2073 	else
2074 		return noninterleaved_copy(substream, off, NULL, 0, frames,
2075 					   fill_silence);
2076 }
2077 
2078 /* sanity-check for read/write methods */
2079 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2080 {
2081 	struct snd_pcm_runtime *runtime;
2082 	if (PCM_RUNTIME_CHECK(substream))
2083 		return -ENXIO;
2084 	runtime = substream->runtime;
2085 	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2086 		return -EINVAL;
2087 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2088 		return -EBADFD;
2089 	return 0;
2090 }
2091 
2092 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2093 {
2094 	switch (runtime->state) {
2095 	case SNDRV_PCM_STATE_PREPARED:
2096 	case SNDRV_PCM_STATE_RUNNING:
2097 	case SNDRV_PCM_STATE_PAUSED:
2098 		return 0;
2099 	case SNDRV_PCM_STATE_XRUN:
2100 		return -EPIPE;
2101 	case SNDRV_PCM_STATE_SUSPENDED:
2102 		return -ESTRPIPE;
2103 	default:
2104 		return -EBADFD;
2105 	}
2106 }
2107 
2108 /* update to the given appl_ptr and call the ack callback if needed;
2109  * when an error is returned, roll back to the original value
2110  */
2111 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2112 			   snd_pcm_uframes_t appl_ptr)
2113 {
2114 	struct snd_pcm_runtime *runtime = substream->runtime;
2115 	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2116 	snd_pcm_sframes_t diff;
2117 	int ret;
2118 
2119 	if (old_appl_ptr == appl_ptr)
2120 		return 0;
2121 
2122 	if (appl_ptr >= runtime->boundary)
2123 		return -EINVAL;
2124 	/*
2125 	 * check if a rewind is requested by the application
2126 	 */
2127 	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2128 		diff = appl_ptr - old_appl_ptr;
2129 		if (diff >= 0) {
2130 			if (diff > runtime->buffer_size)
2131 				return -EINVAL;
2132 		} else {
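			/* a negative diff is acceptable only when it is in fact
			 * a small forward move across the boundary wrap-around
			 */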
2133 			if (runtime->boundary + diff > runtime->buffer_size)
2134 				return -EINVAL;
2135 		}
2136 	}
2137 
2138 	runtime->control->appl_ptr = appl_ptr;
2139 	if (substream->ops->ack) {
2140 		ret = substream->ops->ack(substream);
2141 		if (ret < 0) {
2142 			runtime->control->appl_ptr = old_appl_ptr;
2143 			if (ret == -EPIPE)
2144 				__snd_pcm_xrun(substream);
2145 			return ret;
2146 		}
2147 	}
2148 
2149 	trace_applptr(substream, old_appl_ptr, appl_ptr);
2150 
2151 	return 0;
2152 }
2153 
2154 /* the common loop for read/write data */
2155 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2156 				     void *data, bool interleaved,
2157 				     snd_pcm_uframes_t size, bool in_kernel)
2158 {
2159 	struct snd_pcm_runtime *runtime = substream->runtime;
2160 	snd_pcm_uframes_t xfer = 0;
2161 	snd_pcm_uframes_t offset = 0;
2162 	snd_pcm_uframes_t avail;
2163 	pcm_copy_f writer;
2164 	pcm_transfer_f transfer;
2165 	bool nonblock;
2166 	bool is_playback;
2167 	int err;
2168 
2169 	err = pcm_sanity_check(substream);
2170 	if (err < 0)
2171 		return err;
2172 
2173 	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2174 	if (interleaved) {
2175 		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2176 		    runtime->channels > 1)
2177 			return -EINVAL;
2178 		writer = interleaved_copy;
2179 	} else {
2180 		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2181 			return -EINVAL;
2182 		writer = noninterleaved_copy;
2183 	}
2184 
2185 	if (!data) {
2186 		if (is_playback)
2187 			transfer = fill_silence;
2188 		else
2189 			return -EINVAL;
2190 	} else if (in_kernel) {
2191 		if (substream->ops->copy_kernel)
2192 			transfer = substream->ops->copy_kernel;
2193 		else
2194 			transfer = is_playback ?
2195 				default_write_copy_kernel : default_read_copy_kernel;
2196 	} else {
2197 		if (substream->ops->copy_user)
2198 			transfer = (pcm_transfer_f)substream->ops->copy_user;
2199 		else
2200 			transfer = is_playback ?
2201 				default_write_copy : default_read_copy;
2202 	}
2203 
2204 	if (size == 0)
2205 		return 0;
2206 
2207 	nonblock = !!(substream->f_flags & O_NONBLOCK);
2208 
2209 	snd_pcm_stream_lock_irq(substream);
2210 	err = pcm_accessible_state(runtime);
2211 	if (err < 0)
2212 		goto _end_unlock;
2213 
2214 	runtime->twake = runtime->control->avail_min ? : 1;
2215 	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2216 		snd_pcm_update_hw_ptr(substream);
2217 
2218 	/*
2219 	 * If size < start_threshold, don't start the stream here but wait
2220 	 * (possibly indefinitely); another thread may start the capture.
2221 	 */
2222 	if (!is_playback &&
2223 	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2224 	    size >= runtime->start_threshold) {
2225 		err = snd_pcm_start(substream);
2226 		if (err < 0)
2227 			goto _end_unlock;
2228 	}
2229 
2230 	avail = snd_pcm_avail(substream);
2231 
2232 	while (size > 0) {
2233 		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2234 		snd_pcm_uframes_t cont;
2235 		if (!avail) {
2236 			if (!is_playback &&
2237 			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2238 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2239 				goto _end_unlock;
2240 			}
2241 			if (nonblock) {
2242 				err = -EAGAIN;
2243 				goto _end_unlock;
2244 			}
2245 			runtime->twake = min_t(snd_pcm_uframes_t, size,
2246 					runtime->control->avail_min ? : 1);
2247 			err = wait_for_avail(substream, &avail);
2248 			if (err < 0)
2249 				goto _end_unlock;
2250 			if (!avail)
2251 				continue; /* draining */
2252 		}
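		/* copy at most up to the end of the ring buffer; any remainder
		 * beyond the wrap-around is handled by the next loop iteration
		 */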
2253 		frames = size > avail ? avail : size;
2254 		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2255 		appl_ofs = appl_ptr % runtime->buffer_size;
2256 		cont = runtime->buffer_size - appl_ofs;
2257 		if (frames > cont)
2258 			frames = cont;
2259 		if (snd_BUG_ON(!frames)) {
2260 			err = -EINVAL;
2261 			goto _end_unlock;
2262 		}
2263 		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2264 			err = -EBUSY;
2265 			goto _end_unlock;
2266 		}
2267 		snd_pcm_stream_unlock_irq(substream);
2268 		if (!is_playback)
2269 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2270 		err = writer(substream, appl_ofs, data, offset, frames,
2271 			     transfer);
2272 		if (is_playback)
2273 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2274 		snd_pcm_stream_lock_irq(substream);
2275 		atomic_dec(&runtime->buffer_accessing);
2276 		if (err < 0)
2277 			goto _end_unlock;
2278 		err = pcm_accessible_state(runtime);
2279 		if (err < 0)
2280 			goto _end_unlock;
2281 		appl_ptr += frames;
2282 		if (appl_ptr >= runtime->boundary)
2283 			appl_ptr -= runtime->boundary;
2284 		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2285 		if (err < 0)
2286 			goto _end_unlock;
2287 
2288 		offset += frames;
2289 		size -= frames;
2290 		xfer += frames;
2291 		avail -= frames;
2292 		if (is_playback &&
2293 		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2294 		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2295 			err = snd_pcm_start(substream);
2296 			if (err < 0)
2297 				goto _end_unlock;
2298 		}
2299 	}
2300  _end_unlock:
2301 	runtime->twake = 0;
2302 	if (xfer > 0 && err >= 0)
2303 		snd_pcm_update_state(substream, runtime);
2304 	snd_pcm_stream_unlock_irq(substream);
2305 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2306 }
2307 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2308 
2309 /*
2310  * standard channel mapping helpers
2311  */
2312 
2313 /* default channel maps for multi-channel playbacks, up to 8 channels */
2314 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2315 	{ .channels = 1,
2316 	  .map = { SNDRV_CHMAP_MONO } },
2317 	{ .channels = 2,
2318 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2319 	{ .channels = 4,
2320 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2321 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2322 	{ .channels = 6,
2323 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2324 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2325 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2326 	{ .channels = 8,
2327 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2328 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2329 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2330 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2331 	{ }
2332 };
2333 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2334 
2335 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2336 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2337 	{ .channels = 1,
2338 	  .map = { SNDRV_CHMAP_MONO } },
2339 	{ .channels = 2,
2340 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2341 	{ .channels = 4,
2342 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2343 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2344 	{ .channels = 6,
2345 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2346 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2347 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2348 	{ .channels = 8,
2349 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2350 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2351 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2352 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2353 	{ }
2354 };
2355 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2356 
2357 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2358 {
2359 	if (ch > info->max_channels)
2360 		return false;
2361 	return !info->channel_mask || (info->channel_mask & (1U << ch));
2362 }
2363 
2364 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2365 			      struct snd_ctl_elem_info *uinfo)
2366 {
2367 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2368 
2369 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2370 	uinfo->count = info->max_channels;
2371 	uinfo->value.integer.min = 0;
2372 	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2373 	return 0;
2374 }
2375 
2376 /* get callback for channel map ctl element
2377  * stores the channel positions of the first map matching the current channel count
2378  */
2379 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2380 			     struct snd_ctl_elem_value *ucontrol)
2381 {
2382 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2383 	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2384 	struct snd_pcm_substream *substream;
2385 	const struct snd_pcm_chmap_elem *map;
2386 
2387 	if (!info->chmap)
2388 		return -EINVAL;
2389 	substream = snd_pcm_chmap_substream(info, idx);
2390 	if (!substream)
2391 		return -ENODEV;
2392 	memset(ucontrol->value.integer.value, 0,
2393 	       sizeof(long) * info->max_channels);
2394 	if (!substream->runtime)
2395 		return 0; /* no channels set */
2396 	for (map = info->chmap; map->channels; map++) {
2397 		int i;
2398 		if (map->channels == substream->runtime->channels &&
2399 		    valid_chmap_channels(info, map->channels)) {
2400 			for (i = 0; i < map->channels; i++)
2401 				ucontrol->value.integer.value[i] = map->map[i];
2402 			return 0;
2403 		}
2404 	}
2405 	return -EINVAL;
2406 }
2407 
2408 /* tlv callback for channel map ctl element
2409  * expands the pre-defined channel maps in the form of a TLV
2410  */
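/*
 * The resulting TLV layout (as derived from the code below) is:
 *	SNDRV_CTL_TLVT_CONTAINER, <total payload bytes>,
 *	then, for each map with a valid channel count:
 *	SNDRV_CTL_TLVT_CHMAP_FIXED, <channels * 4>, <position> x channels
 */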
2411 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2412 			     unsigned int size, unsigned int __user *tlv)
2413 {
2414 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2415 	const struct snd_pcm_chmap_elem *map;
2416 	unsigned int __user *dst;
2417 	int c, count = 0;
2418 
2419 	if (!info->chmap)
2420 		return -EINVAL;
2421 	if (size < 8)
2422 		return -ENOMEM;
2423 	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2424 		return -EFAULT;
2425 	size -= 8;
2426 	dst = tlv + 2;
2427 	for (map = info->chmap; map->channels; map++) {
2428 		int chs_bytes = map->channels * 4;
2429 		if (!valid_chmap_channels(info, map->channels))
2430 			continue;
2431 		if (size < 8)
2432 			return -ENOMEM;
2433 		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2434 		    put_user(chs_bytes, dst + 1))
2435 			return -EFAULT;
2436 		dst += 2;
2437 		size -= 8;
2438 		count += 8;
2439 		if (size < chs_bytes)
2440 			return -ENOMEM;
2441 		size -= chs_bytes;
2442 		count += chs_bytes;
2443 		for (c = 0; c < map->channels; c++) {
2444 			if (put_user(map->map[c], dst))
2445 				return -EFAULT;
2446 			dst++;
2447 		}
2448 	}
2449 	if (put_user(count, tlv + 1))
2450 		return -EFAULT;
2451 	return 0;
2452 }
2453 
2454 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2455 {
2456 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2457 	info->pcm->streams[info->stream].chmap_kctl = NULL;
2458 	kfree(info);
2459 }
2460 
2461 /**
2462  * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2463  * @pcm: the assigned PCM instance
2464  * @stream: stream direction
2465  * @chmap: channel map elements (for query)
2466  * @max_channels: the max number of channels for the stream
2467  * @private_value: the value passed to each kcontrol's private_value field
2468  * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2469  *
2470  * Create channel-mapping control elements assigned to the given PCM stream(s).
2471  * Return: Zero if successful, or a negative error value.
2472  */
2473 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2474 			   const struct snd_pcm_chmap_elem *chmap,
2475 			   int max_channels,
2476 			   unsigned long private_value,
2477 			   struct snd_pcm_chmap **info_ret)
2478 {
2479 	struct snd_pcm_chmap *info;
2480 	struct snd_kcontrol_new knew = {
2481 		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2482 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2483 			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2484 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2485 		.info = pcm_chmap_ctl_info,
2486 		.get = pcm_chmap_ctl_get,
2487 		.tlv.c = pcm_chmap_ctl_tlv,
2488 	};
2489 	int err;
2490 
2491 	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2492 		return -EBUSY;
2493 	info = kzalloc(sizeof(*info), GFP_KERNEL);
2494 	if (!info)
2495 		return -ENOMEM;
2496 	info->pcm = pcm;
2497 	info->stream = stream;
2498 	info->chmap = chmap;
2499 	info->max_channels = max_channels;
2500 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2501 		knew.name = "Playback Channel Map";
2502 	else
2503 		knew.name = "Capture Channel Map";
2504 	knew.device = pcm->device;
2505 	knew.count = pcm->streams[stream].substream_count;
2506 	knew.private_value = private_value;
2507 	info->kctl = snd_ctl_new1(&knew, info);
2508 	if (!info->kctl) {
2509 		kfree(info);
2510 		return -ENOMEM;
2511 	}
2512 	info->kctl->private_free = pcm_chmap_ctl_private_free;
2513 	err = snd_ctl_add(pcm->card, info->kctl);
2514 	if (err < 0)
2515 		return err;
2516 	pcm->streams[stream].chmap_kctl = info->kctl;
2517 	if (info_ret)
2518 		*info_ret = info;
2519 	return 0;
2520 }
2521 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
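
/*
 * Usage sketch (hypothetical driver): registering the standard channel maps
 * defined above for a playback PCM with up to 8 channels:
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */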
2522