xref: /linux/sound/core/pcm_lib.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  *                   Abramo Bagnara <abramo@alsa-project.org>
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
20 
21 #include "pcm_local.h"
22 
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
26 #else
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
31 #endif
32 
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35 
36 
37 static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38 				       snd_pcm_uframes_t ptr,
39 				       snd_pcm_uframes_t new_ptr)
40 {
41 	snd_pcm_sframes_t delta;
42 
43 	delta = new_ptr - ptr;
44 	if (delta == 0)
45 		return;
46 	if (delta < 0)
47 		delta += runtime->boundary;
48 	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49 		runtime->silence_filled -= delta;
50 	else
51 		runtime->silence_filled = 0;
52 	runtime->silence_start = new_ptr;
53 }
54 
55 /*
56  * fill ring buffer with silence
57  * runtime->silence_start: starting pointer to silence area
58  * runtime->silence_filled: number of frames already filled with silence
59  * runtime->silence_threshold: threshold from the application
60  * runtime->silence_size: maximal silence size from the application
61  *
62  * when runtime->silence_size >= runtime->boundary, fill the just processed area with silence immediately
63  */
64 void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
65 {
66 	struct snd_pcm_runtime *runtime = substream->runtime;
67 	snd_pcm_uframes_t frames, ofs, transfer;
68 	int err;
69 
70 	if (runtime->silence_size < runtime->boundary) {
71 		snd_pcm_sframes_t noise_dist;
72 		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73 		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74 		/* initialization outside pointer updates */
75 		if (new_hw_ptr == ULONG_MAX)
76 			new_hw_ptr = runtime->status->hw_ptr;
77 		/* get hw_avail with the boundary crossing */
78 		noise_dist = appl_ptr - new_hw_ptr;
79 		if (noise_dist < 0)
80 			noise_dist += runtime->boundary;
81 		/* total noise distance */
82 		noise_dist += runtime->silence_filled;
83 		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
84 			return;
85 		frames = runtime->silence_threshold - noise_dist;
86 		if (frames > runtime->silence_size)
87 			frames = runtime->silence_size;
88 	} else {
89 		/*
90 		 * This filling mode aims at free-running mode (used for example by dmix),
91 		 * which doesn't update the application pointer.
92 		 */
93 		snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94 		if (new_hw_ptr == ULONG_MAX) {
95 			/*
96 			 * Initialization, fill the whole unused buffer with silence.
97 			 *
98 			 * Usually, this is entered while stopped, before data is queued,
99 			 * so both pointers are expected to be zero.
100 			 */
101 			snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
102 			if (avail < 0)
103 				avail += runtime->boundary;
104 			/*
105 			 * In free-running mode, appl_ptr will be zero even while running,
106 			 * so we end up with a huge number. There is no useful way to
107 			 * handle this, so we just clear the whole buffer.
108 			 */
109 			runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110 			runtime->silence_start = hw_ptr;
111 		} else {
112 			/* Silence the just played area immediately */
113 			update_silence_vars(runtime, hw_ptr, new_hw_ptr);
114 		}
115 		/*
116 		 * In this mode, silence_filled actually includes the valid
117 		 * sample data from the user.
118 		 */
119 		frames = runtime->buffer_size - runtime->silence_filled;
120 	}
121 	if (snd_BUG_ON(frames > runtime->buffer_size))
122 		return;
123 	if (frames == 0)
124 		return;
125 	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
126 	do {
127 		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128 		err = fill_silence_frames(substream, ofs, transfer);
129 		snd_BUG_ON(err < 0);
130 		runtime->silence_filled += transfer;
131 		frames -= transfer;
132 		ofs = 0;
133 	} while (frames > 0);
134 	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
135 }
136 
137 #ifdef CONFIG_SND_DEBUG
138 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139 			   char *name, size_t len)
140 {
141 	snprintf(name, len, "pcmC%dD%d%c:%d",
142 		 substream->pcm->card->number,
143 		 substream->pcm->device,
144 		 substream->stream ? 'c' : 'p',
145 		 substream->number);
146 }
147 EXPORT_SYMBOL(snd_pcm_debug_name);
148 #endif
149 
150 #define XRUN_DEBUG_BASIC	(1<<0)
151 #define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
152 #define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
153 
154 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
155 
156 #define xrun_debug(substream, mask) \
157 			((substream)->pstr->xrun_debug & (mask))
158 #else
159 #define xrun_debug(substream, mask)	0
160 #endif
161 
162 #define dump_stack_on_xrun(substream) do {			\
163 		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
164 			dump_stack();				\
165 	} while (0)
166 
167 /* call with stream lock held */
168 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
169 {
170 	struct snd_pcm_runtime *runtime = substream->runtime;
171 
172 	trace_xrun(substream);
173 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174 		struct timespec64 tstamp;
175 
176 		snd_pcm_gettime(runtime, &tstamp);
177 		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178 		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
179 	}
180 	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181 	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
182 		char name[16];
183 		snd_pcm_debug_name(substream, name, sizeof(name));
184 		pcm_warn(substream->pcm, "XRUN: %s\n", name);
185 		dump_stack_on_xrun(substream);
186 	}
187 }
188 
189 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
190 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
191 	do {								\
192 		trace_hw_ptr_error(substream, reason);	\
193 		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
194 			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
195 					   (in_interrupt) ? 'Q' : 'P', ##args);	\
196 			dump_stack_on_xrun(substream);			\
197 		}							\
198 	} while (0)
199 
200 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
201 
202 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
203 
204 #endif
205 
206 int snd_pcm_update_state(struct snd_pcm_substream *substream,
207 			 struct snd_pcm_runtime *runtime)
208 {
209 	snd_pcm_uframes_t avail;
210 
211 	avail = snd_pcm_avail(substream);
212 	if (avail > runtime->avail_max)
213 		runtime->avail_max = avail;
214 	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
215 		if (avail >= runtime->buffer_size) {
216 			snd_pcm_drain_done(substream);
217 			return -EPIPE;
218 		}
219 	} else {
220 		if (avail >= runtime->stop_threshold) {
221 			__snd_pcm_xrun(substream);
222 			return -EPIPE;
223 		}
224 	}
225 	if (runtime->twake) {
226 		if (avail >= runtime->twake)
227 			wake_up(&runtime->tsleep);
228 	} else if (avail >= runtime->control->avail_min)
229 		wake_up(&runtime->sleep);
230 	return 0;
231 }
232 
233 static void update_audio_tstamp(struct snd_pcm_substream *substream,
234 				struct timespec64 *curr_tstamp,
235 				struct timespec64 *audio_tstamp)
236 {
237 	struct snd_pcm_runtime *runtime = substream->runtime;
238 	u64 audio_frames, audio_nsecs;
239 	struct timespec64 driver_tstamp;
240 
241 	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
242 		return;
243 
244 	if (!(substream->ops->get_time_info) ||
245 		(runtime->audio_tstamp_report.actual_type ==
246 			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
247 
248 		/*
249 		 * provide audio timestamp derived from pointer position
250 		 * add delay only if requested
251 		 */
252 
253 		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
254 
255 		if (runtime->audio_tstamp_config.report_delay) {
256 			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
257 				audio_frames -=  runtime->delay;
258 			else
259 				audio_frames +=  runtime->delay;
260 		}
261 		audio_nsecs = div_u64(audio_frames * 1000000000LL,
262 				runtime->rate);
263 		*audio_tstamp = ns_to_timespec64(audio_nsecs);
264 	}
265 
266 	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
267 	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
268 		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
269 		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
270 		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
271 		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
272 	}
273 
274 
275 	/*
276 	 * re-take a driver timestamp to let apps detect if the reference tstamp
277 	 * read by low-level hardware was provided with a delay
278 	 */
279 	snd_pcm_gettime(substream->runtime, &driver_tstamp);
280 	runtime->driver_tstamp = driver_tstamp;
281 }
282 
283 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
284 				  unsigned int in_interrupt)
285 {
286 	struct snd_pcm_runtime *runtime = substream->runtime;
287 	snd_pcm_uframes_t pos;
288 	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
289 	snd_pcm_sframes_t hdelta, delta;
290 	unsigned long jdelta;
291 	unsigned long curr_jiffies;
292 	struct timespec64 curr_tstamp;
293 	struct timespec64 audio_tstamp;
294 	int crossed_boundary = 0;
295 
296 	old_hw_ptr = runtime->status->hw_ptr;
297 
298 	/*
299 	 * group pointer, time and jiffies reads to allow for more
300 	 * accurate correlations/corrections.
301 	 * The values are stored at the end of this routine after
302 	 * corrections for hw_ptr position
303 	 */
304 	pos = substream->ops->pointer(substream);
305 	curr_jiffies = jiffies;
306 	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
307 		if ((substream->ops->get_time_info) &&
308 			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
309 			substream->ops->get_time_info(substream, &curr_tstamp,
310 						&audio_tstamp,
311 						&runtime->audio_tstamp_config,
312 						&runtime->audio_tstamp_report);
313 
314 			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
315 			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
316 				snd_pcm_gettime(runtime, &curr_tstamp);
317 		} else
318 			snd_pcm_gettime(runtime, &curr_tstamp);
319 	}
320 
321 	if (pos == SNDRV_PCM_POS_XRUN) {
322 		__snd_pcm_xrun(substream);
323 		return -EPIPE;
324 	}
325 	if (pos >= runtime->buffer_size) {
326 		if (printk_ratelimit()) {
327 			char name[16];
328 			snd_pcm_debug_name(substream, name, sizeof(name));
329 			pcm_err(substream->pcm,
330 				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
331 				name, pos, runtime->buffer_size,
332 				runtime->period_size);
333 		}
334 		pos = 0;
335 	}
336 	pos -= pos % runtime->min_align;
337 	trace_hwptr(substream, pos, in_interrupt);
338 	hw_base = runtime->hw_ptr_base;
339 	new_hw_ptr = hw_base + pos;
340 	if (in_interrupt) {
341 		/* we know that one period was processed */
342 		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
343 		delta = runtime->hw_ptr_interrupt + runtime->period_size;
344 		if (delta > new_hw_ptr) {
345 			/* check for double acknowledged interrupts */
346 			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
347 			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
348 				hw_base += runtime->buffer_size;
349 				if (hw_base >= runtime->boundary) {
350 					hw_base = 0;
351 					crossed_boundary++;
352 				}
353 				new_hw_ptr = hw_base + pos;
354 				goto __delta;
355 			}
356 		}
357 	}
358 	/* new_hw_ptr might be lower than old_hw_ptr when the */
359 	/* pointer crosses the end of the ring buffer */
360 	if (new_hw_ptr < old_hw_ptr) {
361 		hw_base += runtime->buffer_size;
362 		if (hw_base >= runtime->boundary) {
363 			hw_base = 0;
364 			crossed_boundary++;
365 		}
366 		new_hw_ptr = hw_base + pos;
367 	}
368       __delta:
369 	delta = new_hw_ptr - old_hw_ptr;
370 	if (delta < 0)
371 		delta += runtime->boundary;
372 
373 	if (runtime->no_period_wakeup) {
374 		snd_pcm_sframes_t xrun_threshold;
375 		/*
376 		 * Without regular period interrupts, we have to check
377 		 * the elapsed time to detect xruns.
378 		 */
379 		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
380 		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
381 			goto no_delta_check;
382 		hdelta = jdelta - delta * HZ / runtime->rate;
383 		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
384 		while (hdelta > xrun_threshold) {
385 			delta += runtime->buffer_size;
386 			hw_base += runtime->buffer_size;
387 			if (hw_base >= runtime->boundary) {
388 				hw_base = 0;
389 				crossed_boundary++;
390 			}
391 			new_hw_ptr = hw_base + pos;
392 			hdelta -= runtime->hw_ptr_buffer_jiffies;
393 		}
394 		goto no_delta_check;
395 	}
396 
397 	/* something must be really wrong */
398 	if (delta >= runtime->buffer_size + runtime->period_size) {
399 		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
400 			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
401 			     substream->stream, (long)pos,
402 			     (long)new_hw_ptr, (long)old_hw_ptr);
403 		return 0;
404 	}
405 
406 	/* Do jiffies check only in xrun_debug mode */
407 	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
408 		goto no_jiffies_check;
409 
410 	/* Skip the jiffies check for hardware with the BATCH flag.
411 	 * Such hardware usually just increases the position at each IRQ,
412 	 * thus it can't give any strange position.
413 	 */
414 	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
415 		goto no_jiffies_check;
416 	hdelta = delta;
417 	if (hdelta < runtime->delay)
418 		goto no_jiffies_check;
419 	hdelta -= runtime->delay;
420 	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
421 	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
422 		delta = jdelta /
423 			(((runtime->period_size * HZ) / runtime->rate)
424 								+ HZ/100);
425 		/* move new_hw_ptr according to jiffies, not the pos variable */
426 		new_hw_ptr = old_hw_ptr;
427 		hw_base = delta;
428 		/* use loop to avoid checks for delta overflows */
429 		/* the delta value is small or zero in most cases */
430 		while (delta > 0) {
431 			new_hw_ptr += runtime->period_size;
432 			if (new_hw_ptr >= runtime->boundary) {
433 				new_hw_ptr -= runtime->boundary;
434 				crossed_boundary--;
435 			}
436 			delta--;
437 		}
438 		/* align hw_base to buffer_size */
439 		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
440 			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
441 			     (long)pos, (long)hdelta,
442 			     (long)runtime->period_size, jdelta,
443 			     ((hdelta * HZ) / runtime->rate), hw_base,
444 			     (unsigned long)old_hw_ptr,
445 			     (unsigned long)new_hw_ptr);
446 		/* reset values to proper state */
447 		delta = 0;
448 		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
449 	}
450  no_jiffies_check:
451 	if (delta > runtime->period_size + runtime->period_size / 2) {
452 		hw_ptr_error(substream, in_interrupt,
453 			     "Lost interrupts?",
454 			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
455 			     substream->stream, (long)delta,
456 			     (long)new_hw_ptr,
457 			     (long)old_hw_ptr);
458 	}
459 
460  no_delta_check:
461 	if (runtime->status->hw_ptr == new_hw_ptr) {
462 		runtime->hw_ptr_jiffies = curr_jiffies;
463 		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
464 		return 0;
465 	}
466 
467 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
468 	    runtime->silence_size > 0)
469 		snd_pcm_playback_silence(substream, new_hw_ptr);
470 
471 	if (in_interrupt) {
472 		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
473 		if (delta < 0)
474 			delta += runtime->boundary;
475 		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
476 		runtime->hw_ptr_interrupt += delta;
477 		if (runtime->hw_ptr_interrupt >= runtime->boundary)
478 			runtime->hw_ptr_interrupt -= runtime->boundary;
479 	}
480 	runtime->hw_ptr_base = hw_base;
481 	runtime->status->hw_ptr = new_hw_ptr;
482 	runtime->hw_ptr_jiffies = curr_jiffies;
483 	if (crossed_boundary) {
484 		snd_BUG_ON(crossed_boundary != 1);
485 		runtime->hw_ptr_wrap += runtime->boundary;
486 	}
487 
488 	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
489 
490 	return snd_pcm_update_state(substream, runtime);
491 }
492 
493 /* CAUTION: call it with irq disabled */
494 int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
495 {
496 	return snd_pcm_update_hw_ptr0(substream, 0);
497 }
498 
499 /**
500  * snd_pcm_set_ops - set the PCM operators
501  * @pcm: the pcm instance
502  * @direction: stream direction, SNDRV_PCM_STREAM_XXX
503  * @ops: the operator table
504  *
505  * Sets the given PCM operators to the pcm instance.
506  */
507 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
508 		     const struct snd_pcm_ops *ops)
509 {
510 	struct snd_pcm_str *stream = &pcm->streams[direction];
511 	struct snd_pcm_substream *substream;
512 
513 	for (substream = stream->substream; substream != NULL; substream = substream->next)
514 		substream->ops = ops;
515 }
516 EXPORT_SYMBOL(snd_pcm_set_ops);
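
/*
 * Example usage (a minimal sketch; the mychip_* names are hypothetical):
 * a driver typically assigns one ops table per stream direction right after
 * creating the PCM instance in its probe routine.
 *
 *	static const struct snd_pcm_ops mychip_playback_ops = {
 *		.open		= mychip_playback_open,
 *		.close		= mychip_playback_close,
 *		.hw_params	= mychip_hw_params,
 *		.hw_free	= mychip_hw_free,
 *		.prepare	= mychip_prepare,
 *		.trigger	= mychip_trigger,
 *		.pointer	= mychip_pointer,
 *	};
 *
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &mychip_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &mychip_capture_ops);
 */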
517 
518 /**
519  * snd_pcm_set_sync_per_card - set the PCM sync id with card number
520  * @substream: the pcm substream
521  * @params: modified hardware parameters
522  * @id: identifier (max 12 bytes)
523  * @len: identifier length (max 12 bytes)
524  *
525  * Sets the PCM sync identifier for the card with zero padding.
526  *
527  * User space (or any other user) should treat this 16-byte identifier as
528  * opaque and use it only for comparison, to check whether two IDs are
529  * identical or different. The special case is an identifier containing only
530  * zeros, which means empty (not set). The contents of the identifier should
531  * not be interpreted in any other way.
532  *
533  * The synchronization ID must be unique per clock source (usually one sound
534  * card, but multiple sound cards may share one PCM word clock source, which
535  * means that they are fully synchronized).
536  *
537  * This routine composes the ID from the card number in the first four bytes
538  * and a 12-byte additional ID. When another ID composition is used (e.g. for
539  * multiple sound cards), make sure that it does not clash with this scheme.
540  */
541 void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
542 			       struct snd_pcm_hw_params *params,
543 			       const unsigned char *id, unsigned int len)
544 {
545 	*(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
546 	len = min(12, len);
547 	memcpy(params->sync + 4, id, len);
548 	memset(params->sync + 4 + len, 0, 12 - len);
549 }
550 EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);
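
/*
 * Example usage (a minimal sketch; the callback and the clock ID bytes are
 * hypothetical): a driver can fill the sync ID from its .ioctl callback when
 * the SNDRV_PCM_IOCTL1_SYNC_ID command hands it the hw_params container, e.g.
 * with a short identifier describing the shared word clock source.
 *
 *	static int mychip_pcm_ioctl(struct snd_pcm_substream *substream,
 *				    unsigned int cmd, void *arg)
 *	{
 *		static const unsigned char clk_id[4] = { 'W', 'C', 'L', 'K' };
 *
 *		if (cmd == SNDRV_PCM_IOCTL1_SYNC_ID) {
 *			snd_pcm_set_sync_per_card(substream, arg,
 *						  clk_id, sizeof(clk_id));
 *			return 0;
 *		}
 *		return snd_pcm_lib_ioctl(substream, cmd, arg);
 *	}
 */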
551 
552 /*
553  *  Standard ioctl routine
554  */
555 
556 static inline unsigned int div32(unsigned int a, unsigned int b,
557 				 unsigned int *r)
558 {
559 	if (b == 0) {
560 		*r = 0;
561 		return UINT_MAX;
562 	}
563 	*r = a % b;
564 	return a / b;
565 }
566 
567 static inline unsigned int div_down(unsigned int a, unsigned int b)
568 {
569 	if (b == 0)
570 		return UINT_MAX;
571 	return a / b;
572 }
573 
574 static inline unsigned int div_up(unsigned int a, unsigned int b)
575 {
576 	unsigned int r;
577 	unsigned int q;
578 	if (b == 0)
579 		return UINT_MAX;
580 	q = div32(a, b, &r);
581 	if (r)
582 		++q;
583 	return q;
584 }
585 
586 static inline unsigned int mul(unsigned int a, unsigned int b)
587 {
588 	if (a == 0)
589 		return 0;
590 	if (div_down(UINT_MAX, a) < b)
591 		return UINT_MAX;
592 	return a * b;
593 }
594 
595 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
596 				    unsigned int c, unsigned int *r)
597 {
598 	u_int64_t n = (u_int64_t) a * b;
599 	if (c == 0) {
600 		*r = 0;
601 		return UINT_MAX;
602 	}
603 	n = div_u64_rem(n, c, r);
604 	if (n >= UINT_MAX) {
605 		*r = 0;
606 		return UINT_MAX;
607 	}
608 	return n;
609 }
610 
611 /**
612  * snd_interval_refine - refine the interval value of configurator
613  * @i: the interval value to refine
614  * @v: the interval value to refer to
615  *
616  * Refines the interval value with the reference value.
617  * The interval is changed to the range satisfying both intervals.
618  * The interval status (min, max, integer, etc.) are evaluated.
619  *
620  * Return: Positive if the value is changed, zero if it's not changed, or a
621  * negative error code.
622  */
623 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
624 {
625 	int changed = 0;
626 	if (snd_BUG_ON(snd_interval_empty(i)))
627 		return -EINVAL;
628 	if (i->min < v->min) {
629 		i->min = v->min;
630 		i->openmin = v->openmin;
631 		changed = 1;
632 	} else if (i->min == v->min && !i->openmin && v->openmin) {
633 		i->openmin = 1;
634 		changed = 1;
635 	}
636 	if (i->max > v->max) {
637 		i->max = v->max;
638 		i->openmax = v->openmax;
639 		changed = 1;
640 	} else if (i->max == v->max && !i->openmax && v->openmax) {
641 		i->openmax = 1;
642 		changed = 1;
643 	}
644 	if (!i->integer && v->integer) {
645 		i->integer = 1;
646 		changed = 1;
647 	}
648 	if (i->integer) {
649 		if (i->openmin) {
650 			i->min++;
651 			i->openmin = 0;
652 		}
653 		if (i->openmax) {
654 			i->max--;
655 			i->openmax = 0;
656 		}
657 	} else if (!i->openmin && !i->openmax && i->min == i->max)
658 		i->integer = 1;
659 	if (snd_interval_checkempty(i)) {
660 		snd_interval_none(i);
661 		return -EINVAL;
662 	}
663 	return changed;
664 }
665 EXPORT_SYMBOL(snd_interval_refine);
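
/*
 * For illustration of the return convention: refining the interval
 * [8000, 48000] against the reference (11025, 44100] narrows it to
 * (11025, 44100] and returns a positive value (changed), while refining
 * [32, 32] against [16, 64] changes nothing and returns 0.
 */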
666 
667 static int snd_interval_refine_first(struct snd_interval *i)
668 {
669 	const unsigned int last_max = i->max;
670 
671 	if (snd_BUG_ON(snd_interval_empty(i)))
672 		return -EINVAL;
673 	if (snd_interval_single(i))
674 		return 0;
675 	i->max = i->min;
676 	if (i->openmin)
677 		i->max++;
678 	/* only exclude max value if also excluded before refine */
679 	i->openmax = (i->openmax && i->max >= last_max);
680 	return 1;
681 }
682 
683 static int snd_interval_refine_last(struct snd_interval *i)
684 {
685 	const unsigned int last_min = i->min;
686 
687 	if (snd_BUG_ON(snd_interval_empty(i)))
688 		return -EINVAL;
689 	if (snd_interval_single(i))
690 		return 0;
691 	i->min = i->max;
692 	if (i->openmax)
693 		i->min--;
694 	/* only exclude min value if also excluded before refine */
695 	i->openmin = (i->openmin && i->min <= last_min);
696 	return 1;
697 }
698 
699 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
700 {
701 	if (a->empty || b->empty) {
702 		snd_interval_none(c);
703 		return;
704 	}
705 	c->empty = 0;
706 	c->min = mul(a->min, b->min);
707 	c->openmin = (a->openmin || b->openmin);
708 	c->max = mul(a->max,  b->max);
709 	c->openmax = (a->openmax || b->openmax);
710 	c->integer = (a->integer && b->integer);
711 }
712 
713 /**
714  * snd_interval_div - refine the interval value with division
715  * @a: dividend
716  * @b: divisor
717  * @c: quotient
718  *
719  * c = a / b
720  *
721  * The result is stored in @c.
722  */
723 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
724 {
725 	unsigned int r;
726 	if (a->empty || b->empty) {
727 		snd_interval_none(c);
728 		return;
729 	}
730 	c->empty = 0;
731 	c->min = div32(a->min, b->max, &r);
732 	c->openmin = (r || a->openmin || b->openmax);
733 	if (b->min > 0) {
734 		c->max = div32(a->max, b->min, &r);
735 		if (r) {
736 			c->max++;
737 			c->openmax = 1;
738 		} else
739 			c->openmax = (a->openmax || b->openmin);
740 	} else {
741 		c->max = UINT_MAX;
742 		c->openmax = 0;
743 	}
744 	c->integer = 0;
745 }
746 
747 /**
748  * snd_interval_muldivk - refine the interval value
749  * @a: dividend 1
750  * @b: dividend 2
751  * @k: divisor (as integer)
752  * @c: result
753  *
754  * c = a * b / k
755  *
756  * The result is stored in @c.
757  */
758 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
759 		      unsigned int k, struct snd_interval *c)
760 {
761 	unsigned int r;
762 	if (a->empty || b->empty) {
763 		snd_interval_none(c);
764 		return;
765 	}
766 	c->empty = 0;
767 	c->min = muldiv32(a->min, b->min, k, &r);
768 	c->openmin = (r || a->openmin || b->openmin);
769 	c->max = muldiv32(a->max, b->max, k, &r);
770 	if (r) {
771 		c->max++;
772 		c->openmax = 1;
773 	} else
774 		c->openmax = (a->openmax || b->openmax);
775 	c->integer = 0;
776 }
777 
778 /**
779  * snd_interval_mulkdiv - refine the interval value
780  * @a: dividend 1
781  * @k: dividend 2 (as integer)
782  * @b: divisor
783  * @c: result
784  *
785  * c = a * k / b
786  *
787  * The result is stored in @c.
788  */
789 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
790 		      const struct snd_interval *b, struct snd_interval *c)
791 {
792 	unsigned int r;
793 	if (a->empty || b->empty) {
794 		snd_interval_none(c);
795 		return;
796 	}
797 	c->empty = 0;
798 	c->min = muldiv32(a->min, k, b->max, &r);
799 	c->openmin = (r || a->openmin || b->openmax);
800 	if (b->min > 0) {
801 		c->max = muldiv32(a->max, k, b->min, &r);
802 		if (r) {
803 			c->max++;
804 			c->openmax = 1;
805 		} else
806 			c->openmax = (a->openmax || b->openmin);
807 	} else {
808 		c->max = UINT_MAX;
809 		c->openmax = 0;
810 	}
811 	c->integer = 0;
812 }
813 
814 /* ---- */
815 
816 
817 /**
818  * snd_interval_ratnum - refine the interval value
819  * @i: interval to refine
820  * @rats_count: number of struct snd_ratnum entries
821  * @rats: struct snd_ratnum array
822  * @nump: pointer to store the resultant numerator
823  * @denp: pointer to store the resultant denominator
824  *
825  * Return: Positive if the value is changed, zero if it's not changed, or a
826  * negative error code.
827  */
828 int snd_interval_ratnum(struct snd_interval *i,
829 			unsigned int rats_count, const struct snd_ratnum *rats,
830 			unsigned int *nump, unsigned int *denp)
831 {
832 	unsigned int best_num, best_den;
833 	int best_diff;
834 	unsigned int k;
835 	struct snd_interval t;
836 	int err;
837 	unsigned int result_num, result_den;
838 	int result_diff;
839 
840 	best_num = best_den = best_diff = 0;
841 	for (k = 0; k < rats_count; ++k) {
842 		unsigned int num = rats[k].num;
843 		unsigned int den;
844 		unsigned int q = i->min;
845 		int diff;
846 		if (q == 0)
847 			q = 1;
848 		den = div_up(num, q);
849 		if (den < rats[k].den_min)
850 			continue;
851 		if (den > rats[k].den_max)
852 			den = rats[k].den_max;
853 		else {
854 			unsigned int r;
855 			r = (den - rats[k].den_min) % rats[k].den_step;
856 			if (r != 0)
857 				den -= r;
858 		}
859 		diff = num - q * den;
860 		if (diff < 0)
861 			diff = -diff;
862 		if (best_num == 0 ||
863 		    diff * best_den < best_diff * den) {
864 			best_diff = diff;
865 			best_den = den;
866 			best_num = num;
867 		}
868 	}
869 	if (best_den == 0) {
870 		i->empty = 1;
871 		return -EINVAL;
872 	}
873 	t.min = div_down(best_num, best_den);
874 	t.openmin = !!(best_num % best_den);
875 
876 	result_num = best_num;
877 	result_diff = best_diff;
878 	result_den = best_den;
879 	best_num = best_den = best_diff = 0;
880 	for (k = 0; k < rats_count; ++k) {
881 		unsigned int num = rats[k].num;
882 		unsigned int den;
883 		unsigned int q = i->max;
884 		int diff;
885 		if (q == 0) {
886 			i->empty = 1;
887 			return -EINVAL;
888 		}
889 		den = div_down(num, q);
890 		if (den > rats[k].den_max)
891 			continue;
892 		if (den < rats[k].den_min)
893 			den = rats[k].den_min;
894 		else {
895 			unsigned int r;
896 			r = (den - rats[k].den_min) % rats[k].den_step;
897 			if (r != 0)
898 				den += rats[k].den_step - r;
899 		}
900 		diff = q * den - num;
901 		if (diff < 0)
902 			diff = -diff;
903 		if (best_num == 0 ||
904 		    diff * best_den < best_diff * den) {
905 			best_diff = diff;
906 			best_den = den;
907 			best_num = num;
908 		}
909 	}
910 	if (best_den == 0) {
911 		i->empty = 1;
912 		return -EINVAL;
913 	}
914 	t.max = div_up(best_num, best_den);
915 	t.openmax = !!(best_num % best_den);
916 	t.integer = 0;
917 	err = snd_interval_refine(i, &t);
918 	if (err < 0)
919 		return err;
920 
921 	if (snd_interval_single(i)) {
922 		if (best_diff * result_den < result_diff * best_den) {
923 			result_num = best_num;
924 			result_den = best_den;
925 		}
926 		if (nump)
927 			*nump = result_num;
928 		if (denp)
929 			*denp = result_den;
930 	}
931 	return err;
932 }
933 EXPORT_SYMBOL(snd_interval_ratnum);
934 
935 /**
936  * snd_interval_ratden - refine the interval value
937  * @i: interval to refine
938  * @rats_count: number of struct snd_ratden entries
939  * @rats: struct snd_ratden array
940  * @nump: pointer to store the resultant numerator
941  * @denp: pointer to store the resultant denominator
942  *
943  * Return: Positive if the value is changed, zero if it's not changed, or a
944  * negative error code.
945  */
946 static int snd_interval_ratden(struct snd_interval *i,
947 			       unsigned int rats_count,
948 			       const struct snd_ratden *rats,
949 			       unsigned int *nump, unsigned int *denp)
950 {
951 	unsigned int best_num, best_diff, best_den;
952 	unsigned int k;
953 	struct snd_interval t;
954 	int err;
955 
956 	best_num = best_den = best_diff = 0;
957 	for (k = 0; k < rats_count; ++k) {
958 		unsigned int num;
959 		unsigned int den = rats[k].den;
960 		unsigned int q = i->min;
961 		int diff;
962 		num = mul(q, den);
963 		if (num > rats[k].num_max)
964 			continue;
965 		if (num < rats[k].num_min)
966 			num = rats[k].num_max;
967 		else {
968 			unsigned int r;
969 			r = (num - rats[k].num_min) % rats[k].num_step;
970 			if (r != 0)
971 				num += rats[k].num_step - r;
972 		}
973 		diff = num - q * den;
974 		if (best_num == 0 ||
975 		    diff * best_den < best_diff * den) {
976 			best_diff = diff;
977 			best_den = den;
978 			best_num = num;
979 		}
980 	}
981 	if (best_den == 0) {
982 		i->empty = 1;
983 		return -EINVAL;
984 	}
985 	t.min = div_down(best_num, best_den);
986 	t.openmin = !!(best_num % best_den);
987 
988 	best_num = best_den = best_diff = 0;
989 	for (k = 0; k < rats_count; ++k) {
990 		unsigned int num;
991 		unsigned int den = rats[k].den;
992 		unsigned int q = i->max;
993 		int diff;
994 		num = mul(q, den);
995 		if (num < rats[k].num_min)
996 			continue;
997 		if (num > rats[k].num_max)
998 			num = rats[k].num_max;
999 		else {
1000 			unsigned int r;
1001 			r = (num - rats[k].num_min) % rats[k].num_step;
1002 			if (r != 0)
1003 				num -= r;
1004 		}
1005 		diff = q * den - num;
1006 		if (best_num == 0 ||
1007 		    diff * best_den < best_diff * den) {
1008 			best_diff = diff;
1009 			best_den = den;
1010 			best_num = num;
1011 		}
1012 	}
1013 	if (best_den == 0) {
1014 		i->empty = 1;
1015 		return -EINVAL;
1016 	}
1017 	t.max = div_up(best_num, best_den);
1018 	t.openmax = !!(best_num % best_den);
1019 	t.integer = 0;
1020 	err = snd_interval_refine(i, &t);
1021 	if (err < 0)
1022 		return err;
1023 
1024 	if (snd_interval_single(i)) {
1025 		if (nump)
1026 			*nump = best_num;
1027 		if (denp)
1028 			*denp = best_den;
1029 	}
1030 	return err;
1031 }
1032 
1033 /**
1034  * snd_interval_list - refine the interval value from the list
1035  * @i: the interval value to refine
1036  * @count: the number of elements in the list
1037  * @list: the value list
1038  * @mask: the bit-mask to evaluate
1039  *
1040  * Refines the interval value from the list.
1041  * When @mask is non-zero, only the elements whose corresponding bits in
1042  * @mask are set are evaluated.
1043  *
1044  * Return: Positive if the value is changed, zero if it's not changed, or a
1045  * negative error code.
1046  */
1047 int snd_interval_list(struct snd_interval *i, unsigned int count,
1048 		      const unsigned int *list, unsigned int mask)
1049 {
1050 	unsigned int k;
1051 	struct snd_interval list_range;
1052 
1053 	if (!count) {
1054 		i->empty = 1;
1055 		return -EINVAL;
1056 	}
1057 	snd_interval_any(&list_range);
1058 	list_range.min = UINT_MAX;
1059 	list_range.max = 0;
1060 	for (k = 0; k < count; k++) {
1061 		if (mask && !(mask & (1 << k)))
1062 			continue;
1063 		if (!snd_interval_test(i, list[k]))
1064 			continue;
1065 		list_range.min = min(list_range.min, list[k]);
1066 		list_range.max = max(list_range.max, list[k]);
1067 	}
1068 	return snd_interval_refine(i, &list_range);
1069 }
1070 EXPORT_SYMBOL(snd_interval_list);
1071 
1072 /**
1073  * snd_interval_ranges - refine the interval value from the list of ranges
1074  * @i: the interval value to refine
1075  * @count: the number of elements in the list of ranges
1076  * @ranges: the ranges list
1077  * @mask: the bit-mask to evaluate
1078  *
1079  * Refines the interval value from the list of ranges.
1080  * When @mask is non-zero, only the elements whose corresponding bits in
1081  * @mask are set are evaluated.
1082  *
1083  * Return: Positive if the value is changed, zero if it's not changed, or a
1084  * negative error code.
1085  */
1086 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1087 			const struct snd_interval *ranges, unsigned int mask)
1088 {
1089 	unsigned int k;
1090 	struct snd_interval range_union;
1091 	struct snd_interval range;
1092 
1093 	if (!count) {
1094 		snd_interval_none(i);
1095 		return -EINVAL;
1096 	}
1097 	snd_interval_any(&range_union);
1098 	range_union.min = UINT_MAX;
1099 	range_union.max = 0;
1100 	for (k = 0; k < count; k++) {
1101 		if (mask && !(mask & (1 << k)))
1102 			continue;
1103 		snd_interval_copy(&range, &ranges[k]);
1104 		if (snd_interval_refine(&range, i) < 0)
1105 			continue;
1106 		if (snd_interval_empty(&range))
1107 			continue;
1108 
1109 		if (range.min < range_union.min) {
1110 			range_union.min = range.min;
1111 			range_union.openmin = 1;
1112 		}
1113 		if (range.min == range_union.min && !range.openmin)
1114 			range_union.openmin = 0;
1115 		if (range.max > range_union.max) {
1116 			range_union.max = range.max;
1117 			range_union.openmax = 1;
1118 		}
1119 		if (range.max == range_union.max && !range.openmax)
1120 			range_union.openmax = 0;
1121 	}
1122 	return snd_interval_refine(i, &range_union);
1123 }
1124 EXPORT_SYMBOL(snd_interval_ranges);
1125 
1126 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1127 {
1128 	unsigned int n;
1129 	int changed = 0;
1130 	n = i->min % step;
1131 	if (n != 0 || i->openmin) {
1132 		i->min += step - n;
1133 		i->openmin = 0;
1134 		changed = 1;
1135 	}
1136 	n = i->max % step;
1137 	if (n != 0 || i->openmax) {
1138 		i->max -= n;
1139 		i->openmax = 0;
1140 		changed = 1;
1141 	}
1142 	if (snd_interval_checkempty(i)) {
1143 		i->empty = 1;
1144 		return -EINVAL;
1145 	}
1146 	return changed;
1147 }
1148 
1149 /* Info constraints helpers */
1150 
1151 /**
1152  * snd_pcm_hw_rule_add - add the hw-constraint rule
1153  * @runtime: the pcm runtime instance
1154  * @cond: condition bits
1155  * @var: the variable to evaluate
1156  * @func: the evaluation function
1157  * @private: the private data pointer passed to the function
1158  * @dep: the dependent variables
1159  *
1160  * Return: Zero if successful, or a negative error code on failure.
1161  */
1162 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1163 			int var,
1164 			snd_pcm_hw_rule_func_t func, void *private,
1165 			int dep, ...)
1166 {
1167 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1168 	struct snd_pcm_hw_rule *c;
1169 	unsigned int k;
1170 	va_list args;
1171 	va_start(args, dep);
1172 	if (constrs->rules_num >= constrs->rules_all) {
1173 		struct snd_pcm_hw_rule *new;
1174 		unsigned int new_rules = constrs->rules_all + 16;
1175 		new = krealloc_array(constrs->rules, new_rules,
1176 				     sizeof(*c), GFP_KERNEL);
1177 		if (!new) {
1178 			va_end(args);
1179 			return -ENOMEM;
1180 		}
1181 		constrs->rules = new;
1182 		constrs->rules_all = new_rules;
1183 	}
1184 	c = &constrs->rules[constrs->rules_num];
1185 	c->cond = cond;
1186 	c->func = func;
1187 	c->var = var;
1188 	c->private = private;
1189 	k = 0;
1190 	while (1) {
1191 		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1192 			va_end(args);
1193 			return -EINVAL;
1194 		}
1195 		c->deps[k++] = dep;
1196 		if (dep < 0)
1197 			break;
1198 		dep = va_arg(args, int);
1199 	}
1200 	constrs->rules_num++;
1201 	va_end(args);
1202 	return 0;
1203 }
1204 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
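
/*
 * Example usage (a minimal sketch; the rule function and the 48 kHz limit are
 * hypothetical): a driver that supports more than two channels only at low
 * rates can install a rule from its open callback that makes CHANNELS depend
 * on RATE.
 *
 *	static int mychip_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *						struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t;
 *
 *		if (r->min <= 48000)
 *			return 0;
 *		snd_interval_any(&t);
 *		t.max = 2;
 *		t.integer = 1;
 *		return snd_interval_refine(c, &t);
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  mychip_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */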
1205 
1206 /**
1207  * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1208  * @runtime: PCM runtime instance
1209  * @var: hw_params variable to apply the mask
1210  * @mask: the bitmap mask
1211  *
1212  * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1213  *
1214  * Return: Zero if successful, or a negative error code on failure.
1215  */
1216 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1217 			       u_int32_t mask)
1218 {
1219 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1220 	struct snd_mask *maskp = constrs_mask(constrs, var);
1221 	*maskp->bits &= mask;
1222 	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1223 	if (*maskp->bits == 0)
1224 		return -EINVAL;
1225 	return 0;
1226 }
1227 
1228 /**
1229  * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1230  * @runtime: PCM runtime instance
1231  * @var: hw_params variable to apply the mask
1232  * @mask: the 64bit bitmap mask
1233  *
1234  * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1235  *
1236  * Return: Zero if successful, or a negative error code on failure.
1237  */
1238 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1239 				 u_int64_t mask)
1240 {
1241 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1242 	struct snd_mask *maskp = constrs_mask(constrs, var);
1243 	maskp->bits[0] &= (u_int32_t)mask;
1244 	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1245 	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1246 	if (! maskp->bits[0] && ! maskp->bits[1])
1247 		return -EINVAL;
1248 	return 0;
1249 }
1250 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1251 
1252 /**
1253  * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1254  * @runtime: PCM runtime instance
1255  * @var: hw_params variable to apply the integer constraint
1256  *
1257  * Apply the constraint of integer to an interval parameter.
1258  *
1259  * Return: Positive if the value is changed, zero if it's not changed, or a
1260  * negative error code.
1261  */
1262 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1263 {
1264 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1265 	return snd_interval_setinteger(constrs_interval(constrs, var));
1266 }
1267 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
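
/*
 * Example usage (a minimal sketch): drivers commonly call this from their
 * open callback so that the buffer size ends up being a whole number of
 * periods.
 *
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 */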
1268 
1269 /**
1270  * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1271  * @runtime: PCM runtime instance
1272  * @var: hw_params variable to apply the range
1273  * @min: the minimal value
1274  * @max: the maximal value
1275  *
1276  * Apply the min/max range constraint to an interval parameter.
1277  *
1278  * Return: Positive if the value is changed, zero if it's not changed, or a
1279  * negative error code.
1280  */
1281 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1282 				 unsigned int min, unsigned int max)
1283 {
1284 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1285 	struct snd_interval t;
1286 	t.min = min;
1287 	t.max = max;
1288 	t.openmin = t.openmax = 0;
1289 	t.integer = 0;
1290 	return snd_interval_refine(constrs_interval(constrs, var), &t);
1291 }
1292 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
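
/*
 * Example usage (a minimal sketch; the 64 KiB limit is an assumption): cap
 * the buffer size for hardware that cannot address a larger DMA area.
 *
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   0, 64 * 1024);
 *	if (err < 0)
 *		return err;
 */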
1293 
1294 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1295 				struct snd_pcm_hw_rule *rule)
1296 {
1297 	struct snd_pcm_hw_constraint_list *list = rule->private;
1298 	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1299 }
1300 
1301 
1302 /**
1303  * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1304  * @runtime: PCM runtime instance
1305  * @cond: condition bits
1306  * @var: hw_params variable to apply the list constraint
1307  * @l: list
1308  *
1309  * Apply the list of constraints to an interval parameter.
1310  *
1311  * Return: Zero if successful, or a negative error code on failure.
1312  */
1313 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1314 			       unsigned int cond,
1315 			       snd_pcm_hw_param_t var,
1316 			       const struct snd_pcm_hw_constraint_list *l)
1317 {
1318 	return snd_pcm_hw_rule_add(runtime, cond, var,
1319 				   snd_pcm_hw_rule_list, (void *)l,
1320 				   var, -1);
1321 }
1322 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
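
/*
 * Example usage (a minimal sketch; the rate set is an assumption): restrict
 * the rate to the discrete values a codec supports. The list object must
 * stay valid while it is in use, hence it is usually static.
 *
 *	static const unsigned int mychip_rates[] = { 32000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list mychip_rate_constraints = {
 *		.count	= ARRAY_SIZE(mychip_rates),
 *		.list	= mychip_rates,
 *		.mask	= 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					 &mychip_rate_constraints);
 */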
1323 
1324 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1325 				  struct snd_pcm_hw_rule *rule)
1326 {
1327 	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1328 	return snd_interval_ranges(hw_param_interval(params, rule->var),
1329 				   r->count, r->ranges, r->mask);
1330 }
1331 
1332 
1333 /**
1334  * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1335  * @runtime: PCM runtime instance
1336  * @cond: condition bits
1337  * @var: hw_params variable to apply the list of range constraints
1338  * @r: ranges
1339  *
1340  * Apply the list of range constraints to an interval parameter.
1341  *
1342  * Return: Zero if successful, or a negative error code on failure.
1343  */
1344 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1345 				 unsigned int cond,
1346 				 snd_pcm_hw_param_t var,
1347 				 const struct snd_pcm_hw_constraint_ranges *r)
1348 {
1349 	return snd_pcm_hw_rule_add(runtime, cond, var,
1350 				   snd_pcm_hw_rule_ranges, (void *)r,
1351 				   var, -1);
1352 }
1353 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1354 
1355 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1356 				   struct snd_pcm_hw_rule *rule)
1357 {
1358 	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1359 	unsigned int num = 0, den = 0;
1360 	int err;
1361 	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1362 				  r->nrats, r->rats, &num, &den);
1363 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1364 		params->rate_num = num;
1365 		params->rate_den = den;
1366 	}
1367 	return err;
1368 }
1369 
1370 /**
1371  * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1372  * @runtime: PCM runtime instance
1373  * @cond: condition bits
1374  * @var: hw_params variable to apply the ratnums constraint
1375  * @r: struct snd_pcm_hw_constraint_ratnums constraints
1376  *
1377  * Return: Zero if successful, or a negative error code on failure.
1378  */
1379 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1380 				  unsigned int cond,
1381 				  snd_pcm_hw_param_t var,
1382 				  const struct snd_pcm_hw_constraint_ratnums *r)
1383 {
1384 	return snd_pcm_hw_rule_add(runtime, cond, var,
1385 				   snd_pcm_hw_rule_ratnums, (void *)r,
1386 				   var, -1);
1387 }
1388 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
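
/*
 * Example usage (a minimal sketch; the master clock and divider values are
 * assumptions): describe rates derived from a fixed master clock divided by
 * a programmable divider, and let the constraint solver pick valid rates
 * (here 24576000/256 = 96000 and 24576000/512 = 48000).
 *
 *	static const struct snd_ratnum mychip_clock = {
 *		.num		= 24576000,
 *		.den_min	= 256,
 *		.den_max	= 512,
 *		.den_step	= 256,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums mychip_ratnums = {
 *		.nrats	= 1,
 *		.rats	= &mychip_clock,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					    &mychip_ratnums);
 */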
1389 
1390 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1391 				   struct snd_pcm_hw_rule *rule)
1392 {
1393 	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1394 	unsigned int num = 0, den = 0;
1395 	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1396 				  r->nrats, r->rats, &num, &den);
1397 	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1398 		params->rate_num = num;
1399 		params->rate_den = den;
1400 	}
1401 	return err;
1402 }
1403 
1404 /**
1405  * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1406  * @runtime: PCM runtime instance
1407  * @cond: condition bits
1408  * @var: hw_params variable to apply the ratdens constraint
1409  * @r: struct snd_pcm_hw_constraint_ratdens constraints
1410  *
1411  * Return: Zero if successful, or a negative error code on failure.
1412  */
1413 int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1414 				  unsigned int cond,
1415 				  snd_pcm_hw_param_t var,
1416 				  const struct snd_pcm_hw_constraint_ratdens *r)
1417 {
1418 	return snd_pcm_hw_rule_add(runtime, cond, var,
1419 				   snd_pcm_hw_rule_ratdens, (void *)r,
1420 				   var, -1);
1421 }
1422 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1423 
1424 static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1425 				  struct snd_pcm_hw_rule *rule)
1426 {
1427 	unsigned int l = (unsigned long) rule->private;
1428 	int width = l & 0xffff;
1429 	unsigned int msbits = l >> 16;
1430 	const struct snd_interval *i =
1431 		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1432 
1433 	if (!snd_interval_single(i))
1434 		return 0;
1435 
1436 	if ((snd_interval_value(i) == width) ||
1437 	    (width == 0 && snd_interval_value(i) > msbits))
1438 		params->msbits = min_not_zero(params->msbits, msbits);
1439 
1440 	return 0;
1441 }
1442 
1443 /**
1444  * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1445  * @runtime: PCM runtime instance
1446  * @cond: condition bits
1447  * @width: sample bits width
1448  * @msbits: msbits width
1449  *
1450  * This constraint will set the number of most significant bits (msbits) if a
1451  * sample format with the specified width has been selected. If width is set to 0,
1452  * the msbits will be set for any sample format with a width larger than the
1453  * specified msbits.
1454  *
1455  * Return: Zero if successful, or a negative error code on failure.
1456  */
1457 int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1458 				 unsigned int cond,
1459 				 unsigned int width,
1460 				 unsigned int msbits)
1461 {
1462 	unsigned long l = (msbits << 16) | width;
1463 	return snd_pcm_hw_rule_add(runtime, cond, -1,
1464 				    snd_pcm_hw_rule_msbits,
1465 				    (void*) l,
1466 				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1467 }
1468 EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
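
/*
 * Example usage (a minimal sketch): declare that a 32-bit sample container
 * carries only 24 valid most significant bits on this hardware.
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */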
1469 
1470 static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1471 				struct snd_pcm_hw_rule *rule)
1472 {
1473 	unsigned long step = (unsigned long) rule->private;
1474 	return snd_interval_step(hw_param_interval(params, rule->var), step);
1475 }
1476 
1477 /**
1478  * snd_pcm_hw_constraint_step - add a hw constraint step rule
1479  * @runtime: PCM runtime instance
1480  * @cond: condition bits
1481  * @var: hw_params variable to apply the step constraint
1482  * @step: step size
1483  *
1484  * Return: Zero if successful, or a negative error code on failure.
1485  */
1486 int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1487 			       unsigned int cond,
1488 			       snd_pcm_hw_param_t var,
1489 			       unsigned long step)
1490 {
1491 	return snd_pcm_hw_rule_add(runtime, cond, var,
1492 				   snd_pcm_hw_rule_step, (void *) step,
1493 				   var, -1);
1494 }
1495 EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
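
/*
 * Example usage (a minimal sketch; the granularity is an assumption): force
 * the period size to a multiple of 32 frames, e.g. to match a DMA burst size.
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
 */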
1496 
1497 static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1498 {
1499 	static const unsigned int pow2_sizes[] = {
1500 		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1501 		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1502 		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1503 		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1504 	};
1505 	return snd_interval_list(hw_param_interval(params, rule->var),
1506 				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1507 }
1508 
1509 /**
1510  * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1511  * @runtime: PCM runtime instance
1512  * @cond: condition bits
1513  * @var: hw_params variable to apply the power-of-2 constraint
1514  *
1515  * Return: Zero if successful, or a negative error code on failure.
1516  */
1517 int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1518 			       unsigned int cond,
1519 			       snd_pcm_hw_param_t var)
1520 {
1521 	return snd_pcm_hw_rule_add(runtime, cond, var,
1522 				   snd_pcm_hw_rule_pow2, NULL,
1523 				   var, -1);
1524 }
1525 EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
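
/*
 * Example usage (a minimal sketch): controllers that require a power-of-two
 * buffer size can express that directly.
 *
 *	err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 */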
1526 
1527 static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1528 					   struct snd_pcm_hw_rule *rule)
1529 {
1530 	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1531 	struct snd_interval *rate;
1532 
1533 	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1534 	return snd_interval_list(rate, 1, &base_rate, 0);
1535 }
1536 
1537 /**
1538  * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1539  * @runtime: PCM runtime instance
1540  * @base_rate: the rate at which the hardware does not resample
1541  *
1542  * Return: Zero if successful, or a negative error code on failure.
1543  */
1544 int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1545 			       unsigned int base_rate)
1546 {
1547 	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1548 				   SNDRV_PCM_HW_PARAM_RATE,
1549 				   snd_pcm_hw_rule_noresample_func,
1550 				   (void *)(uintptr_t)base_rate,
1551 				   SNDRV_PCM_HW_PARAM_RATE, -1);
1552 }
1553 EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
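
/*
 * Example usage (a minimal sketch; the 48 kHz base rate is an assumption):
 * a driver whose hardware natively runs at a single rate can let applications
 * opt out of rate conversion via the no-resample hw_params flag.
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *	if (err < 0)
 *		return err;
 */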
1554 
1555 static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1556 				  snd_pcm_hw_param_t var)
1557 {
1558 	if (hw_is_mask(var)) {
1559 		snd_mask_any(hw_param_mask(params, var));
1560 		params->cmask |= 1 << var;
1561 		params->rmask |= 1 << var;
1562 		return;
1563 	}
1564 	if (hw_is_interval(var)) {
1565 		snd_interval_any(hw_param_interval(params, var));
1566 		params->cmask |= 1 << var;
1567 		params->rmask |= 1 << var;
1568 		return;
1569 	}
1570 	snd_BUG();
1571 }
1572 
1573 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1574 {
1575 	unsigned int k;
1576 	memset(params, 0, sizeof(*params));
1577 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1578 		_snd_pcm_hw_param_any(params, k);
1579 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1580 		_snd_pcm_hw_param_any(params, k);
1581 	params->info = ~0U;
1582 }
1583 EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1584 
1585 /**
1586  * snd_pcm_hw_param_value - return @params field @var value
1587  * @params: the hw_params instance
1588  * @var: parameter to retrieve
1589  * @dir: pointer to the direction (-1,0,1) or %NULL
1590  *
1591  * Return: The value for field @var if it's fixed in configuration space
1592  * defined by @params. -%EINVAL otherwise.
1593  */
1594 int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1595 			   snd_pcm_hw_param_t var, int *dir)
1596 {
1597 	if (hw_is_mask(var)) {
1598 		const struct snd_mask *mask = hw_param_mask_c(params, var);
1599 		if (!snd_mask_single(mask))
1600 			return -EINVAL;
1601 		if (dir)
1602 			*dir = 0;
1603 		return snd_mask_value(mask);
1604 	}
1605 	if (hw_is_interval(var)) {
1606 		const struct snd_interval *i = hw_param_interval_c(params, var);
1607 		if (!snd_interval_single(i))
1608 			return -EINVAL;
1609 		if (dir)
1610 			*dir = i->openmin;
1611 		return snd_interval_value(i);
1612 	}
1613 	return -EINVAL;
1614 }
1615 EXPORT_SYMBOL(snd_pcm_hw_param_value);
1616 
1617 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1618 				snd_pcm_hw_param_t var)
1619 {
1620 	if (hw_is_mask(var)) {
1621 		snd_mask_none(hw_param_mask(params, var));
1622 		params->cmask |= 1 << var;
1623 		params->rmask |= 1 << var;
1624 	} else if (hw_is_interval(var)) {
1625 		snd_interval_none(hw_param_interval(params, var));
1626 		params->cmask |= 1 << var;
1627 		params->rmask |= 1 << var;
1628 	} else {
1629 		snd_BUG();
1630 	}
1631 }
1632 EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1633 
1634 static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1635 				   snd_pcm_hw_param_t var)
1636 {
1637 	int changed;
1638 	if (hw_is_mask(var))
1639 		changed = snd_mask_refine_first(hw_param_mask(params, var));
1640 	else if (hw_is_interval(var))
1641 		changed = snd_interval_refine_first(hw_param_interval(params, var));
1642 	else
1643 		return -EINVAL;
1644 	if (changed > 0) {
1645 		params->cmask |= 1 << var;
1646 		params->rmask |= 1 << var;
1647 	}
1648 	return changed;
1649 }
1650 
1651 
1652 /**
1653  * snd_pcm_hw_param_first - refine config space and return minimum value
1654  * @pcm: PCM instance
1655  * @params: the hw_params instance
1656  * @var: parameter to retrieve
1657  * @dir: pointer to the direction (-1,0,1) or %NULL
1658  *
1659  * Inside configuration space defined by @params remove from @var all
1660  * values > minimum. Reduce configuration space accordingly.
1661  *
1662  * Return: The minimum, or a negative error code on failure.
1663  */
1664 int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1665 			   struct snd_pcm_hw_params *params,
1666 			   snd_pcm_hw_param_t var, int *dir)
1667 {
1668 	int changed = _snd_pcm_hw_param_first(params, var);
1669 	if (changed < 0)
1670 		return changed;
1671 	if (params->rmask) {
1672 		int err = snd_pcm_hw_refine(pcm, params);
1673 		if (err < 0)
1674 			return err;
1675 	}
1676 	return snd_pcm_hw_param_value(params, var, dir);
1677 }
1678 EXPORT_SYMBOL(snd_pcm_hw_param_first);
1679 
1680 static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1681 				  snd_pcm_hw_param_t var)
1682 {
1683 	int changed;
1684 	if (hw_is_mask(var))
1685 		changed = snd_mask_refine_last(hw_param_mask(params, var));
1686 	else if (hw_is_interval(var))
1687 		changed = snd_interval_refine_last(hw_param_interval(params, var));
1688 	else
1689 		return -EINVAL;
1690 	if (changed > 0) {
1691 		params->cmask |= 1 << var;
1692 		params->rmask |= 1 << var;
1693 	}
1694 	return changed;
1695 }
1696 
1697 
1698 /**
1699  * snd_pcm_hw_param_last - refine config space and return maximum value
1700  * @pcm: PCM instance
1701  * @params: the hw_params instance
1702  * @var: parameter to retrieve
1703  * @dir: pointer to the direction (-1,0,1) or %NULL
1704  *
1705  * Inside configuration space defined by @params remove from @var all
1706  * values < maximum. Reduce configuration space accordingly.
1707  *
1708  * Return: The maximum, or a negative error code on failure.
1709  */
1710 int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1711 			  struct snd_pcm_hw_params *params,
1712 			  snd_pcm_hw_param_t var, int *dir)
1713 {
1714 	int changed = _snd_pcm_hw_param_last(params, var);
1715 	if (changed < 0)
1716 		return changed;
1717 	if (params->rmask) {
1718 		int err = snd_pcm_hw_refine(pcm, params);
1719 		if (err < 0)
1720 			return err;
1721 	}
1722 	return snd_pcm_hw_param_value(params, var, dir);
1723 }
1724 EXPORT_SYMBOL(snd_pcm_hw_param_last);
1725 
1726 /**
1727  * snd_pcm_hw_params_bits - Get the number of bits per sample.
1728  * @p: hardware parameters
1729  *
1730  * Return: The number of bits per sample based on the format,
1731  * subformat and msbits of the specified hw params.
1732  */
1733 int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1734 {
1735 	snd_pcm_subformat_t subformat = params_subformat(p);
1736 	snd_pcm_format_t format = params_format(p);
1737 
1738 	switch (format) {
1739 	case SNDRV_PCM_FORMAT_S32_LE:
1740 	case SNDRV_PCM_FORMAT_U32_LE:
1741 	case SNDRV_PCM_FORMAT_S32_BE:
1742 	case SNDRV_PCM_FORMAT_U32_BE:
1743 		switch (subformat) {
1744 		case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1745 			return 20;
1746 		case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1747 			return 24;
1748 		case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1749 		case SNDRV_PCM_SUBFORMAT_STD:
1750 		default:
1751 			break;
1752 		}
1753 		fallthrough;
1754 	default:
1755 		return snd_pcm_format_width(format);
1756 	}
1757 }
1758 EXPORT_SYMBOL(snd_pcm_hw_params_bits);
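
/*
 * Illustrative sketch (assumed caller context): a driver that needs the
 * effective sample resolution after hw_params refinement might do something
 * like the following.  For example, S32_LE with SNDRV_PCM_SUBFORMAT_MSBITS_24
 * yields 24, while SNDRV_PCM_SUBFORMAT_STD falls back to
 * snd_pcm_format_width(), i.e. 32.  foo_configure_sample_width() and chip are
 * hypothetical driver names.
 *
 *	int bits = snd_pcm_hw_params_bits(params);
 *
 *	if (bits < 0)
 *		return bits;
 *	foo_configure_sample_width(chip, bits);
 */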
1759 
1760 static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1761 				   void *arg)
1762 {
1763 	struct snd_pcm_runtime *runtime = substream->runtime;
1764 
1765 	guard(pcm_stream_lock_irqsave)(substream);
1766 	if (snd_pcm_running(substream) &&
1767 	    snd_pcm_update_hw_ptr(substream) >= 0)
1768 		runtime->status->hw_ptr %= runtime->buffer_size;
1769 	else {
1770 		runtime->status->hw_ptr = 0;
1771 		runtime->hw_ptr_wrap = 0;
1772 	}
1773 	return 0;
1774 }
1775 
1776 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1777 					  void *arg)
1778 {
1779 	struct snd_pcm_channel_info *info = arg;
1780 	struct snd_pcm_runtime *runtime = substream->runtime;
1781 	int width;
1782 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1783 		info->offset = -1;
1784 		return 0;
1785 	}
1786 	width = snd_pcm_format_physical_width(runtime->format);
1787 	if (width < 0)
1788 		return width;
1789 	info->offset = 0;
1790 	switch (runtime->access) {
1791 	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1792 	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1793 		info->first = info->channel * width;
1794 		info->step = runtime->channels * width;
1795 		break;
1796 	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1797 	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1798 	{
1799 		size_t size = runtime->dma_bytes / runtime->channels;
1800 		info->first = info->channel * size * 8;
1801 		info->step = width;
1802 		break;
1803 	}
1804 	default:
1805 		snd_BUG();
1806 		break;
1807 	}
1808 	return 0;
1809 }
1810 
1811 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1812 				       void *arg)
1813 {
1814 	struct snd_pcm_hw_params *params = arg;
1815 	snd_pcm_format_t format;
1816 	int channels;
1817 	ssize_t frame_size;
1818 
1819 	params->fifo_size = substream->runtime->hw.fifo_size;
1820 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1821 		format = params_format(params);
1822 		channels = params_channels(params);
1823 		frame_size = snd_pcm_format_size(format, channels);
1824 		if (frame_size > 0)
1825 			params->fifo_size /= frame_size;
1826 	}
1827 	return 0;
1828 }
1829 
1830 static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream,
1831 				     void *arg)
1832 {
1833 	static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff,
1834 					      0xff, 0xff, 0xff, 0xff,
1835 					      0xff, 0xff, 0xff, 0xff };
1836 
1837 	if (substream->runtime->std_sync_id)
1838 		snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id));
1839 	return 0;
1840 }
1841 
1842 /**
1843  * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1844  * @substream: the pcm substream instance
1845  * @cmd: ioctl command
1846  * @arg: ioctl argument
1847  *
1848  * Processes the generic ioctl commands for PCM.
1849  * Can be passed as the ioctl callback for PCM ops.
1850  *
1851  * Return: Zero if successful, or a negative error code on failure.
1852  */
1853 int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1854 		      unsigned int cmd, void *arg)
1855 {
1856 	switch (cmd) {
1857 	case SNDRV_PCM_IOCTL1_RESET:
1858 		return snd_pcm_lib_ioctl_reset(substream, arg);
1859 	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1860 		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1861 	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1862 		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1863 	case SNDRV_PCM_IOCTL1_SYNC_ID:
1864 		return snd_pcm_lib_ioctl_sync_id(substream, arg);
1865 	}
1866 	return -ENXIO;
1867 }
1868 EXPORT_SYMBOL(snd_pcm_lib_ioctl);
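
/*
 * Illustrative sketch (hypothetical driver): when no device-specific ioctl
 * handling is needed, snd_pcm_lib_ioctl() can be plugged directly into the
 * driver's PCM ops.  The foo_* callbacks are assumptions for the example.
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.close		= foo_pcm_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= foo_pcm_hw_params,
 *		.prepare	= foo_pcm_prepare,
 *		.trigger	= foo_pcm_trigger,
 *		.pointer	= foo_pcm_pointer,
 *	};
 */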
1869 
1870 /**
1871  * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1872  *						under the acquired lock of the PCM substream.
1873  * @substream: the instance of PCM substream.
1874  *
1875  * This function is called when a batch of audio data frames of the same size as the buffer period
1876  * has been processed in the audio data transmission.
1877  *
1878  * The call updates the status of runtime with the latest position of the audio data transmission,
1879  * checks for overrun and underrun of the buffer, wakes up user processes waiting for available
1880  * audio data frames, samples the audio timestamp, and stops or drains the PCM substream
1881  * according to the configured thresholds.
1882  *
1883  * The function is intended for the case that the PCM driver operates on audio data frames under
1884  * the acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in
1885  * process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1886  * instead, since this function requires the lock of the PCM substream to be acquired in advance.
1887  *
1888  * Developers should note that some callbacks in &snd_pcm_ops may be invoked by the call of this
1889  * function:
1890  *
1891  * - .pointer - to retrieve the current position of the audio data transmission as a frame count or XRUN state.
1892  * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1893  * - .get_time_info - to retrieve the audio timestamp if needed.
1894  *
1895  * Even if more than one period has elapsed since the last call, call this only once.
1896  */
1897 void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1898 {
1899 	struct snd_pcm_runtime *runtime;
1900 
1901 	if (PCM_RUNTIME_CHECK(substream))
1902 		return;
1903 	runtime = substream->runtime;
1904 
1905 	if (!snd_pcm_running(substream) ||
1906 	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1907 		goto _end;
1908 
1909 #ifdef CONFIG_SND_PCM_TIMER
1910 	if (substream->timer_running)
1911 		snd_timer_interrupt(substream->timer, 1);
1912 #endif
1913  _end:
1914 	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1915 }
1916 EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1917 
1918 /**
1919  * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring the
1920  *			      lock of the PCM substream.
1921  * @substream: the instance of PCM substream.
1922  *
1923  * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1924  * it acquires the lock of the PCM substream by itself.
1925  *
1926  * It's typically called from an IRQ handler when a hardware IRQ occurs to notify that a batch of
1927  * audio data frames of the same size as the buffer period has been processed in the audio data
1928  * transmission.
1929  */
1930 void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1931 {
1932 	if (snd_BUG_ON(!substream))
1933 		return;
1934 
1935 	guard(pcm_stream_lock_irqsave)(substream);
1936 	snd_pcm_period_elapsed_under_stream_lock(substream);
1937 }
1938 EXPORT_SYMBOL(snd_pcm_period_elapsed);
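
/*
 * Illustrative sketch (hypothetical driver): a typical interrupt handler
 * acknowledges the hardware and then reports the elapsed period.  The foo_*
 * helpers and struct foo_chip are assumptions; only snd_pcm_period_elapsed()
 * is taken from this file.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (!foo_irq_pending(chip))
 *			return IRQ_NONE;
 *		foo_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */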
1939 
1940 /*
1941  * Wait until avail_min data becomes available
1942  * Returns a negative error code if any error occurs during operation.
1943  * The available space is stored in *availp.  When err = 0 and avail = 0
1944  * on a capture stream, it indicates that the stream is in the DRAINING state.
1945  */
1946 static int wait_for_avail(struct snd_pcm_substream *substream,
1947 			      snd_pcm_uframes_t *availp)
1948 {
1949 	struct snd_pcm_runtime *runtime = substream->runtime;
1950 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1951 	wait_queue_entry_t wait;
1952 	int err = 0;
1953 	snd_pcm_uframes_t avail = 0;
1954 	long wait_time, tout;
1955 
1956 	init_waitqueue_entry(&wait, current);
1957 	set_current_state(TASK_INTERRUPTIBLE);
1958 	add_wait_queue(&runtime->tsleep, &wait);
1959 
1960 	if (runtime->no_period_wakeup)
1961 		wait_time = MAX_SCHEDULE_TIMEOUT;
1962 	else {
1963 		/* use wait time from substream if available */
1964 		if (substream->wait_time) {
1965 			wait_time = substream->wait_time;
1966 		} else {
1967 			wait_time = 100;
1968 
1969 			if (runtime->rate) {
1970 				long t = runtime->buffer_size * 1100 / runtime->rate;
1971 				wait_time = max(t, wait_time);
1972 			}
1973 		}
1974 		wait_time = msecs_to_jiffies(wait_time);
1975 	}
1976 
1977 	for (;;) {
1978 		if (signal_pending(current)) {
1979 			err = -ERESTARTSYS;
1980 			break;
1981 		}
1982 
1983 		/*
1984 		 * We need to check first whether space has already become
1985 		 * available (and thus the wakeup has already happened) to
1986 		 * close the race of space already having become available.
1987 		 * This check must happen after being added to the waitqueue
1988 		 * and after setting the current state to INTERRUPTIBLE.
1989 		 */
1990 		avail = snd_pcm_avail(substream);
1991 		if (avail >= runtime->twake)
1992 			break;
1993 		snd_pcm_stream_unlock_irq(substream);
1994 
1995 		tout = schedule_timeout(wait_time);
1996 
1997 		snd_pcm_stream_lock_irq(substream);
1998 		set_current_state(TASK_INTERRUPTIBLE);
1999 		switch (runtime->state) {
2000 		case SNDRV_PCM_STATE_SUSPENDED:
2001 			err = -ESTRPIPE;
2002 			goto _endloop;
2003 		case SNDRV_PCM_STATE_XRUN:
2004 			err = -EPIPE;
2005 			goto _endloop;
2006 		case SNDRV_PCM_STATE_DRAINING:
2007 			if (is_playback)
2008 				err = -EPIPE;
2009 			else
2010 				avail = 0; /* indicate draining */
2011 			goto _endloop;
2012 		case SNDRV_PCM_STATE_OPEN:
2013 		case SNDRV_PCM_STATE_SETUP:
2014 		case SNDRV_PCM_STATE_DISCONNECTED:
2015 			err = -EBADFD;
2016 			goto _endloop;
2017 		case SNDRV_PCM_STATE_PAUSED:
2018 			continue;
2019 		}
2020 		if (!tout) {
2021 			pcm_dbg(substream->pcm,
2022 				"%s timeout (DMA or IRQ trouble?)\n",
2023 				is_playback ? "playback write" : "capture read");
2024 			err = -EIO;
2025 			break;
2026 		}
2027 	}
2028  _endloop:
2029 	set_current_state(TASK_RUNNING);
2030 	remove_wait_queue(&runtime->tsleep, &wait);
2031 	*availp = avail;
2032 	return err;
2033 }
2034 
2035 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2036 			      int channel, unsigned long hwoff,
2037 			      struct iov_iter *iter, unsigned long bytes);
2038 
2039 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2040 			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2041 			  bool);
2042 
2043 /* calculate the target DMA-buffer position to be written/read */
2044 static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2045 			   int channel, unsigned long hwoff)
2046 {
2047 	return runtime->dma_area + hwoff +
2048 		channel * (runtime->dma_bytes / runtime->channels);
2049 }
2050 
2051 /* default copy ops for write; used for both interleaved and non-interleaved modes */
2052 static int default_write_copy(struct snd_pcm_substream *substream,
2053 			      int channel, unsigned long hwoff,
2054 			      struct iov_iter *iter, unsigned long bytes)
2055 {
2056 	if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2057 			   bytes, iter) != bytes)
2058 		return -EFAULT;
2059 	return 0;
2060 }
2061 
2062 /* fill silence instead of copying data; called as a transfer helper
2063  * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2064  * a NULL buffer is passed
2065  */
2066 static int fill_silence(struct snd_pcm_substream *substream, int channel,
2067 			unsigned long hwoff, struct iov_iter *iter,
2068 			unsigned long bytes)
2069 {
2070 	struct snd_pcm_runtime *runtime = substream->runtime;
2071 
2072 	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2073 		return 0;
2074 	if (substream->ops->fill_silence)
2075 		return substream->ops->fill_silence(substream, channel,
2076 						    hwoff, bytes);
2077 
2078 	snd_pcm_format_set_silence(runtime->format,
2079 				   get_dma_ptr(runtime, channel, hwoff),
2080 				   bytes_to_samples(runtime, bytes));
2081 	return 0;
2082 }
2083 
2084 /* default copy ops for read; used for both interleaved and non-interleaved modes */
2085 static int default_read_copy(struct snd_pcm_substream *substream,
2086 			     int channel, unsigned long hwoff,
2087 			     struct iov_iter *iter, unsigned long bytes)
2088 {
2089 	if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2090 			 bytes, iter) != bytes)
2091 		return -EFAULT;
2092 	return 0;
2093 }
2094 
2095 /* call transfer with the filled iov_iter */
2096 static int do_transfer(struct snd_pcm_substream *substream, int c,
2097 		       unsigned long hwoff, void *data, unsigned long bytes,
2098 		       pcm_transfer_f transfer, bool in_kernel)
2099 {
2100 	struct iov_iter iter;
2101 	int err, type;
2102 
2103 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2104 		type = ITER_SOURCE;
2105 	else
2106 		type = ITER_DEST;
2107 
2108 	if (in_kernel) {
2109 		struct kvec kvec = { data, bytes };
2110 
2111 		iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2112 		return transfer(substream, c, hwoff, &iter, bytes);
2113 	}
2114 
2115 	err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2116 	if (err)
2117 		return err;
2118 	return transfer(substream, c, hwoff, &iter, bytes);
2119 }
2120 
2121 /* call transfer function with the converted pointers and sizes;
2122  * for interleaved mode, it's one shot for all samples
2123  */
2124 static int interleaved_copy(struct snd_pcm_substream *substream,
2125 			    snd_pcm_uframes_t hwoff, void *data,
2126 			    snd_pcm_uframes_t off,
2127 			    snd_pcm_uframes_t frames,
2128 			    pcm_transfer_f transfer,
2129 			    bool in_kernel)
2130 {
2131 	struct snd_pcm_runtime *runtime = substream->runtime;
2132 
2133 	/* convert to bytes */
2134 	hwoff = frames_to_bytes(runtime, hwoff);
2135 	off = frames_to_bytes(runtime, off);
2136 	frames = frames_to_bytes(runtime, frames);
2137 
2138 	return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2139 			   in_kernel);
2140 }
2141 
2142 /* call transfer function with the converted pointers and sizes for each
2143  * non-interleaved channel; when the buffer is NULL, fill silence instead of copying
2144  */
2145 static int noninterleaved_copy(struct snd_pcm_substream *substream,
2146 			       snd_pcm_uframes_t hwoff, void *data,
2147 			       snd_pcm_uframes_t off,
2148 			       snd_pcm_uframes_t frames,
2149 			       pcm_transfer_f transfer,
2150 			       bool in_kernel)
2151 {
2152 	struct snd_pcm_runtime *runtime = substream->runtime;
2153 	int channels = runtime->channels;
2154 	void **bufs = data;
2155 	int c, err;
2156 
2157 	/* convert to bytes; note that it's not frames_to_bytes() here.
2158 	 * in non-interleaved mode, we copy for each channel, thus
2159 	 * each copy is n_samples bytes x channels = whole frames.
2160 	 */
2161 	off = samples_to_bytes(runtime, off);
2162 	frames = samples_to_bytes(runtime, frames);
2163 	hwoff = samples_to_bytes(runtime, hwoff);
2164 	for (c = 0; c < channels; ++c, ++bufs) {
2165 		if (!data || !*bufs)
2166 			err = fill_silence(substream, c, hwoff, NULL, frames);
2167 		else
2168 			err = do_transfer(substream, c, hwoff, *bufs + off,
2169 					  frames, transfer, in_kernel);
2170 		if (err < 0)
2171 			return err;
2172 	}
2173 	return 0;
2174 }
2175 
2176 /* fill silence on the given buffer position;
2177  * called from snd_pcm_playback_silence()
2178  */
2179 static int fill_silence_frames(struct snd_pcm_substream *substream,
2180 			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2181 {
2182 	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2183 	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2184 		return interleaved_copy(substream, off, NULL, 0, frames,
2185 					fill_silence, true);
2186 	else
2187 		return noninterleaved_copy(substream, off, NULL, 0, frames,
2188 					   fill_silence, true);
2189 }
2190 
2191 /* sanity-check for read/write methods */
2192 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2193 {
2194 	struct snd_pcm_runtime *runtime;
2195 	if (PCM_RUNTIME_CHECK(substream))
2196 		return -ENXIO;
2197 	runtime = substream->runtime;
2198 	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2199 		return -EINVAL;
2200 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2201 		return -EBADFD;
2202 	return 0;
2203 }
2204 
2205 static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2206 {
2207 	switch (runtime->state) {
2208 	case SNDRV_PCM_STATE_PREPARED:
2209 	case SNDRV_PCM_STATE_RUNNING:
2210 	case SNDRV_PCM_STATE_PAUSED:
2211 		return 0;
2212 	case SNDRV_PCM_STATE_XRUN:
2213 		return -EPIPE;
2214 	case SNDRV_PCM_STATE_SUSPENDED:
2215 		return -ESTRPIPE;
2216 	default:
2217 		return -EBADFD;
2218 	}
2219 }
2220 
2221 /* update to the given appl_ptr and call the ack callback if needed;
2222  * when an error is returned, roll back to the original value
2223  */
2224 int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2225 			   snd_pcm_uframes_t appl_ptr)
2226 {
2227 	struct snd_pcm_runtime *runtime = substream->runtime;
2228 	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2229 	snd_pcm_sframes_t diff;
2230 	int ret;
2231 
2232 	if (old_appl_ptr == appl_ptr)
2233 		return 0;
2234 
2235 	if (appl_ptr >= runtime->boundary)
2236 		return -EINVAL;
2237 	/*
2238 	 * check if a rewind is requested by the application
2239 	 */
2240 	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2241 		diff = appl_ptr - old_appl_ptr;
2242 		if (diff >= 0) {
2243 			if (diff > runtime->buffer_size)
2244 				return -EINVAL;
2245 		} else {
2246 			if (runtime->boundary + diff > runtime->buffer_size)
2247 				return -EINVAL;
2248 		}
2249 	}
2250 
2251 	runtime->control->appl_ptr = appl_ptr;
2252 	if (substream->ops->ack) {
2253 		ret = substream->ops->ack(substream);
2254 		if (ret < 0) {
2255 			runtime->control->appl_ptr = old_appl_ptr;
2256 			if (ret == -EPIPE)
2257 				__snd_pcm_xrun(substream);
2258 			return ret;
2259 		}
2260 	}
2261 
2262 	trace_applptr(substream, old_appl_ptr, appl_ptr);
2263 
2264 	return 0;
2265 }
2266 
2267 /* the common loop for read/write data */
2268 snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2269 				     void *data, bool interleaved,
2270 				     snd_pcm_uframes_t size, bool in_kernel)
2271 {
2272 	struct snd_pcm_runtime *runtime = substream->runtime;
2273 	snd_pcm_uframes_t xfer = 0;
2274 	snd_pcm_uframes_t offset = 0;
2275 	snd_pcm_uframes_t avail;
2276 	pcm_copy_f writer;
2277 	pcm_transfer_f transfer;
2278 	bool nonblock;
2279 	bool is_playback;
2280 	int err;
2281 
2282 	err = pcm_sanity_check(substream);
2283 	if (err < 0)
2284 		return err;
2285 
2286 	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2287 	if (interleaved) {
2288 		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2289 		    runtime->channels > 1)
2290 			return -EINVAL;
2291 		writer = interleaved_copy;
2292 	} else {
2293 		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2294 			return -EINVAL;
2295 		writer = noninterleaved_copy;
2296 	}
2297 
2298 	if (!data) {
2299 		if (is_playback)
2300 			transfer = fill_silence;
2301 		else
2302 			return -EINVAL;
2303 	} else {
2304 		if (substream->ops->copy)
2305 			transfer = substream->ops->copy;
2306 		else
2307 			transfer = is_playback ?
2308 				default_write_copy : default_read_copy;
2309 	}
2310 
2311 	if (size == 0)
2312 		return 0;
2313 
2314 	nonblock = !!(substream->f_flags & O_NONBLOCK);
2315 
2316 	snd_pcm_stream_lock_irq(substream);
2317 	err = pcm_accessible_state(runtime);
2318 	if (err < 0)
2319 		goto _end_unlock;
2320 
2321 	runtime->twake = runtime->control->avail_min ? : 1;
2322 	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2323 		snd_pcm_update_hw_ptr(substream);
2324 
2325 	/*
2326 	 * If size < start_threshold, the capture stream is not started here
2327 	 * and we may wait indefinitely; another thread may start it.
2328 	 */
2329 	if (!is_playback &&
2330 	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2331 	    size >= runtime->start_threshold) {
2332 		err = snd_pcm_start(substream);
2333 		if (err < 0)
2334 			goto _end_unlock;
2335 	}
2336 
2337 	avail = snd_pcm_avail(substream);
2338 
2339 	while (size > 0) {
2340 		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2341 		snd_pcm_uframes_t cont;
2342 		if (!avail) {
2343 			if (!is_playback &&
2344 			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2345 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2346 				goto _end_unlock;
2347 			}
2348 			if (nonblock) {
2349 				err = -EAGAIN;
2350 				goto _end_unlock;
2351 			}
2352 			runtime->twake = min_t(snd_pcm_uframes_t, size,
2353 					runtime->control->avail_min ? : 1);
2354 			err = wait_for_avail(substream, &avail);
2355 			if (err < 0)
2356 				goto _end_unlock;
2357 			if (!avail)
2358 				continue; /* draining */
2359 		}
2360 		frames = size > avail ? avail : size;
2361 		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2362 		appl_ofs = appl_ptr % runtime->buffer_size;
2363 		cont = runtime->buffer_size - appl_ofs;
2364 		if (frames > cont)
2365 			frames = cont;
2366 		if (snd_BUG_ON(!frames)) {
2367 			err = -EINVAL;
2368 			goto _end_unlock;
2369 		}
2370 		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2371 			err = -EBUSY;
2372 			goto _end_unlock;
2373 		}
2374 		snd_pcm_stream_unlock_irq(substream);
2375 		if (!is_playback)
2376 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2377 		err = writer(substream, appl_ofs, data, offset, frames,
2378 			     transfer, in_kernel);
2379 		if (is_playback)
2380 			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2381 		snd_pcm_stream_lock_irq(substream);
2382 		atomic_dec(&runtime->buffer_accessing);
2383 		if (err < 0)
2384 			goto _end_unlock;
2385 		err = pcm_accessible_state(runtime);
2386 		if (err < 0)
2387 			goto _end_unlock;
2388 		appl_ptr += frames;
2389 		if (appl_ptr >= runtime->boundary)
2390 			appl_ptr -= runtime->boundary;
2391 		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2392 		if (err < 0)
2393 			goto _end_unlock;
2394 
2395 		offset += frames;
2396 		size -= frames;
2397 		xfer += frames;
2398 		avail -= frames;
2399 		if (is_playback &&
2400 		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2401 		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2402 			err = snd_pcm_start(substream);
2403 			if (err < 0)
2404 				goto _end_unlock;
2405 		}
2406 	}
2407  _end_unlock:
2408 	runtime->twake = 0;
2409 	if (xfer > 0 && err >= 0)
2410 		snd_pcm_update_state(substream, runtime);
2411 	snd_pcm_stream_unlock_irq(substream);
2412 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2413 }
2414 EXPORT_SYMBOL(__snd_pcm_lib_xfer);
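
/*
 * Illustrative sketch (in-kernel caller, assumed context): pushing a block of
 * interleaved frames from a kernel buffer through the common transfer loop.
 * The substream, buf and frames variables are assumed to be provided by the
 * caller; in-kernel users normally reach this through the wrapper helpers in
 * <sound/pcm.h> rather than calling it directly.
 *
 *	snd_pcm_sframes_t written;
 *
 *	written = __snd_pcm_lib_xfer(substream, buf, true, frames, true);
 *	if (written < 0)
 *		return written;
 */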
2415 
2416 /*
2417  * standard channel mapping helpers
2418  */
2419 
2420 /* default channel maps for multi-channel playback, up to 8 channels */
2421 const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2422 	{ .channels = 1,
2423 	  .map = { SNDRV_CHMAP_MONO } },
2424 	{ .channels = 2,
2425 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2426 	{ .channels = 4,
2427 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2428 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2429 	{ .channels = 6,
2430 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2431 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2432 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2433 	{ .channels = 8,
2434 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2435 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2436 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2437 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2438 	{ }
2439 };
2440 EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2441 
2442 /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2443 const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2444 	{ .channels = 1,
2445 	  .map = { SNDRV_CHMAP_MONO } },
2446 	{ .channels = 2,
2447 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2448 	{ .channels = 4,
2449 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2450 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2451 	{ .channels = 6,
2452 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2453 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2454 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2455 	{ .channels = 8,
2456 	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2457 		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2458 		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2459 		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2460 	{ }
2461 };
2462 EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2463 
2464 static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2465 {
2466 	if (ch > info->max_channels)
2467 		return false;
2468 	return !info->channel_mask || (info->channel_mask & (1U << ch));
2469 }
2470 
2471 static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2472 			      struct snd_ctl_elem_info *uinfo)
2473 {
2474 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2475 
2476 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2477 	uinfo->count = info->max_channels;
2478 	uinfo->value.integer.min = 0;
2479 	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2480 	return 0;
2481 }
2482 
2483 /* get callback for channel map ctl element
2484  * stores the channel positions of the first map entry matching the current channel count
2485  */
2486 static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2487 			     struct snd_ctl_elem_value *ucontrol)
2488 {
2489 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2490 	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2491 	struct snd_pcm_substream *substream;
2492 	const struct snd_pcm_chmap_elem *map;
2493 
2494 	if (!info->chmap)
2495 		return -EINVAL;
2496 	substream = snd_pcm_chmap_substream(info, idx);
2497 	if (!substream)
2498 		return -ENODEV;
2499 	memset(ucontrol->value.integer.value, 0,
2500 	       sizeof(long) * info->max_channels);
2501 	if (!substream->runtime)
2502 		return 0; /* no channels set */
2503 	for (map = info->chmap; map->channels; map++) {
2504 		int i;
2505 		if (map->channels == substream->runtime->channels &&
2506 		    valid_chmap_channels(info, map->channels)) {
2507 			for (i = 0; i < map->channels; i++)
2508 				ucontrol->value.integer.value[i] = map->map[i];
2509 			return 0;
2510 		}
2511 	}
2512 	return -EINVAL;
2513 }
2514 
2515 /* tlv callback for channel map ctl element
2516  * expands the pre-defined channel maps in the form of a TLV container
2517  */
2518 static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2519 			     unsigned int size, unsigned int __user *tlv)
2520 {
2521 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2522 	const struct snd_pcm_chmap_elem *map;
2523 	unsigned int __user *dst;
2524 	int c, count = 0;
2525 
2526 	if (!info->chmap)
2527 		return -EINVAL;
2528 	if (size < 8)
2529 		return -ENOMEM;
2530 	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2531 		return -EFAULT;
2532 	size -= 8;
2533 	dst = tlv + 2;
2534 	for (map = info->chmap; map->channels; map++) {
2535 		int chs_bytes = map->channels * 4;
2536 		if (!valid_chmap_channels(info, map->channels))
2537 			continue;
2538 		if (size < 8)
2539 			return -ENOMEM;
2540 		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2541 		    put_user(chs_bytes, dst + 1))
2542 			return -EFAULT;
2543 		dst += 2;
2544 		size -= 8;
2545 		count += 8;
2546 		if (size < chs_bytes)
2547 			return -ENOMEM;
2548 		size -= chs_bytes;
2549 		count += chs_bytes;
2550 		for (c = 0; c < map->channels; c++) {
2551 			if (put_user(map->map[c], dst))
2552 				return -EFAULT;
2553 			dst++;
2554 		}
2555 	}
2556 	if (put_user(count, tlv + 1))
2557 		return -EFAULT;
2558 	return 0;
2559 }
2560 
2561 static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2562 {
2563 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2564 	info->pcm->streams[info->stream].chmap_kctl = NULL;
2565 	kfree(info);
2566 }
2567 
2568 /**
2569  * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2570  * @pcm: the assigned PCM instance
2571  * @stream: stream direction
2572  * @chmap: channel map elements (for query)
2573  * @max_channels: the max number of channels for the stream
2574  * @private_value: the value passed to each kcontrol's private_value field
2575  * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2576  *
2577  * Create channel-mapping control elements assigned to the given PCM stream(s).
2578  * Return: Zero if successful, or a negative error value.
2579  */
2580 int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2581 			   const struct snd_pcm_chmap_elem *chmap,
2582 			   int max_channels,
2583 			   unsigned long private_value,
2584 			   struct snd_pcm_chmap **info_ret)
2585 {
2586 	struct snd_pcm_chmap *info;
2587 	struct snd_kcontrol_new knew = {
2588 		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2589 		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2590 			SNDRV_CTL_ELEM_ACCESS_VOLATILE |
2591 			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2592 			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2593 		.info = pcm_chmap_ctl_info,
2594 		.get = pcm_chmap_ctl_get,
2595 		.tlv.c = pcm_chmap_ctl_tlv,
2596 	};
2597 	int err;
2598 
2599 	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2600 		return -EBUSY;
2601 	info = kzalloc(sizeof(*info), GFP_KERNEL);
2602 	if (!info)
2603 		return -ENOMEM;
2604 	info->pcm = pcm;
2605 	info->stream = stream;
2606 	info->chmap = chmap;
2607 	info->max_channels = max_channels;
2608 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2609 		knew.name = "Playback Channel Map";
2610 	else
2611 		knew.name = "Capture Channel Map";
2612 	knew.device = pcm->device;
2613 	knew.count = pcm->streams[stream].substream_count;
2614 	knew.private_value = private_value;
2615 	info->kctl = snd_ctl_new1(&knew, info);
2616 	if (!info->kctl) {
2617 		kfree(info);
2618 		return -ENOMEM;
2619 	}
2620 	info->kctl->private_free = pcm_chmap_ctl_private_free;
2621 	err = snd_ctl_add(pcm->card, info->kctl);
2622 	if (err < 0)
2623 		return err;
2624 	pcm->streams[stream].chmap_kctl = info->kctl;
2625 	if (info_ret)
2626 		*info_ret = info;
2627 	return 0;
2628 }
2629 EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
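
/*
 * Illustrative sketch (hypothetical driver): registering the standard channel
 * maps above for a playback stream with up to 8 channels.  The pcm variable
 * and the surrounding probe code are assumptions for the example.
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */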
2630