xref: /linux/sound/core/pcm_native.c (revision 05a54fa773284d1a7923cdfdd8f0c8dabb98bd26)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Digital Audio (PCM) abstract layer
4  *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5  */
6 
7 #include <linux/compat.h>
8 #include <linux/mm.h>
9 #include <linux/module.h>
10 #include <linux/file.h>
11 #include <linux/slab.h>
12 #include <linux/sched/signal.h>
13 #include <linux/time.h>
14 #include <linux/pm_qos.h>
15 #include <linux/io.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/vmalloc.h>
18 #include <sound/core.h>
19 #include <sound/control.h>
20 #include <sound/info.h>
21 #include <sound/pcm.h>
22 #include <sound/pcm_params.h>
23 #include <sound/timer.h>
24 #include <sound/minors.h>
25 #include <linux/uio.h>
26 #include <linux/delay.h>
27 #include <linux/bitops.h>
28 
29 #include "pcm_local.h"
30 
31 #ifdef CONFIG_SND_DEBUG
32 #define CREATE_TRACE_POINTS
33 #include "pcm_param_trace.h"
34 #else
35 #define trace_hw_mask_param_enabled()		0
36 #define trace_hw_interval_param_enabled()	0
37 #define trace_hw_mask_param(substream, type, index, prev, curr)
38 #define trace_hw_interval_param(substream, type, index, prev, curr)
39 #endif
40 
41 /*
42  *  Compatibility
43  */
44 
45 struct snd_pcm_hw_params_old {
46 	unsigned int flags;
47 	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
48 			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
49 	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
50 					SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
51 	unsigned int rmask;
52 	unsigned int cmask;
53 	unsigned int info;
54 	unsigned int msbits;
55 	unsigned int rate_num;
56 	unsigned int rate_den;
57 	snd_pcm_uframes_t fifo_size;
58 	unsigned char reserved[64];
59 };
60 
61 #ifdef CONFIG_SND_SUPPORT_OLD_API
62 #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
63 #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
64 
65 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
66 				      struct snd_pcm_hw_params_old __user * _oparams);
67 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
68 				      struct snd_pcm_hw_params_old __user * _oparams);
69 #endif
70 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
71 
72 /*
73  *
74  */
75 
76 static DECLARE_RWSEM(snd_pcm_link_rwsem);
77 
78 void snd_pcm_group_init(struct snd_pcm_group *group)
79 {
80 	spin_lock_init(&group->lock);
81 	mutex_init(&group->mutex);
82 	INIT_LIST_HEAD(&group->substreams);
83 	refcount_set(&group->refs, 1);
84 }
85 
86 /* define group lock helpers */
87 #define DEFINE_PCM_GROUP_LOCK(action, bh_lock, bh_unlock, mutex_action) \
88 static void snd_pcm_group_ ## action(struct snd_pcm_group *group, bool nonatomic) \
89 { \
90 	if (nonatomic) { \
91 		mutex_ ## mutex_action(&group->mutex); \
92 	} else { \
93 		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_lock)   \
94 			local_bh_disable();			\
95 		spin_ ## action(&group->lock);			\
96 		if (IS_ENABLED(CONFIG_PREEMPT_RT) && bh_unlock) \
97 			local_bh_enable();                      \
98 	}							\
99 }
100 
101 DEFINE_PCM_GROUP_LOCK(lock, false, false, lock);
102 DEFINE_PCM_GROUP_LOCK(unlock, false, false, unlock);
103 DEFINE_PCM_GROUP_LOCK(lock_irq, true, false, lock);
104 DEFINE_PCM_GROUP_LOCK(unlock_irq, false, true, unlock);
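/*
 * The four expansions above generate snd_pcm_group_lock(),
 * snd_pcm_group_unlock(), snd_pcm_group_lock_irq() and
 * snd_pcm_group_unlock_irq().  Each takes the group mutex for nonatomic
 * PCMs and the group spinlock otherwise; the _irq variants use the
 * irq-disabling spinlock forms, and on PREEMPT_RT they additionally keep
 * bottom halves disabled while the lock is held.
 */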
105 
106 /**
107  * snd_pcm_stream_lock - Lock the PCM stream
108  * @substream: PCM substream
109  *
110  * This locks the PCM stream's spinlock or mutex depending on the nonatomic
111  * flag of the given substream.  It also takes the global link rw lock
112  * (or rw sem) to avoid races with linked streams.
113  */
114 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
115 {
116 	snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
117 }
118 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
119 
120 /**
121  * snd_pcm_stream_unlock - Unlock the PCM stream
122  * @substream: PCM substream
123  *
124  * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
125  */
126 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
127 {
128 	snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
129 }
130 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
131 
132 /**
133  * snd_pcm_stream_lock_irq - Lock the PCM stream
134  * @substream: PCM substream
135  *
136  * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
137  * IRQ (only when nonatomic is false).  In the nonatomic case, this is
138  * identical to snd_pcm_stream_lock().
139  */
140 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
141 {
142 	snd_pcm_group_lock_irq(&substream->self_group,
143 			       substream->pcm->nonatomic);
144 }
145 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
146 
147 static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
148 {
149 	struct snd_pcm_group *group = &substream->self_group;
150 
151 	if (substream->pcm->nonatomic)
152 		mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
153 	else
154 		spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
155 }
156 
157 /**
158  * snd_pcm_stream_unlock_irq - Unlock the PCM stream
159  * @substream: PCM substream
160  *
161  * This is the counterpart of snd_pcm_stream_lock_irq().
162  */
163 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
164 {
165 	snd_pcm_group_unlock_irq(&substream->self_group,
166 				 substream->pcm->nonatomic);
167 }
168 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
169 
170 unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
171 {
172 	unsigned long flags = 0;
173 	if (substream->pcm->nonatomic)
174 		mutex_lock(&substream->self_group.mutex);
175 	else
176 		spin_lock_irqsave(&substream->self_group.lock, flags);
177 	return flags;
178 }
179 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
180 
181 unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream)
182 {
183 	unsigned long flags = 0;
184 	if (substream->pcm->nonatomic)
185 		mutex_lock_nested(&substream->self_group.mutex,
186 				  SINGLE_DEPTH_NESTING);
187 	else
188 		spin_lock_irqsave_nested(&substream->self_group.lock, flags,
189 					 SINGLE_DEPTH_NESTING);
190 	return flags;
191 }
192 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested);
193 
194 /**
195  * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
196  * @substream: PCM substream
197  * @flags: irq flags
198  *
199  * This is the counterpart of snd_pcm_stream_lock_irqsave().
200  */
201 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
202 				      unsigned long flags)
203 {
204 	if (substream->pcm->nonatomic)
205 		mutex_unlock(&substream->self_group.mutex);
206 	else
207 		spin_unlock_irqrestore(&substream->self_group.lock, flags);
208 }
209 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
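/*
 * Usage sketch (illustrative only, not part of the original code): a caller
 * that may run in any context typically brackets runtime accesses with the
 * irqsave pair declared in <sound/pcm.h>:
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	... access substream->runtime fields ...
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 *
 * For nonatomic PCMs the irqsave variant falls back to the substream's own
 * group mutex, as implemented in _snd_pcm_stream_lock_irqsave() above.
 */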
210 
211 /* Run PCM ioctl ops */
212 static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
213 			     unsigned cmd, void *arg)
214 {
215 	if (substream->ops->ioctl)
216 		return substream->ops->ioctl(substream, cmd, arg);
217 	else
218 		return snd_pcm_lib_ioctl(substream, cmd, arg);
219 }
220 
221 int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
222 {
223 	struct snd_pcm *pcm = substream->pcm;
224 	struct snd_pcm_str *pstr = substream->pstr;
225 
226 	memset(info, 0, sizeof(*info));
227 	info->card = pcm->card->number;
228 	info->device = pcm->device;
229 	info->stream = substream->stream;
230 	info->subdevice = substream->number;
231 	strscpy(info->id, pcm->id, sizeof(info->id));
232 	strscpy(info->name, pcm->name, sizeof(info->name));
233 	info->dev_class = pcm->dev_class;
234 	info->dev_subclass = pcm->dev_subclass;
235 	info->subdevices_count = pstr->substream_count;
236 	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
237 	strscpy(info->subname, substream->name, sizeof(info->subname));
238 
239 	return 0;
240 }
241 
242 int snd_pcm_info_user(struct snd_pcm_substream *substream,
243 		      struct snd_pcm_info __user * _info)
244 {
245 	struct snd_pcm_info *info __free(kfree) = NULL;
246 	int err;
247 
248 	info = kmalloc(sizeof(*info), GFP_KERNEL);
249 	if (! info)
250 		return -ENOMEM;
251 	err = snd_pcm_info(substream, info);
252 	if (err >= 0) {
253 		if (copy_to_user(_info, info, sizeof(*info)))
254 			err = -EFAULT;
255 	}
256 	return err;
257 }
258 
259 /* macro for simplified cast */
260 #define PARAM_MASK_BIT(b)	(1U << (__force int)(b))
261 
262 static bool hw_support_mmap(struct snd_pcm_substream *substream)
263 {
264 	struct snd_dma_buffer *dmabuf;
265 
266 	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
267 		return false;
268 
269 	if (substream->ops->mmap || substream->ops->page)
270 		return true;
271 
272 	dmabuf = snd_pcm_get_dma_buf(substream);
273 	if (!dmabuf)
274 		dmabuf = &substream->dma_buffer;
275 	switch (dmabuf->dev.type) {
276 	case SNDRV_DMA_TYPE_UNKNOWN:
277 		/* we can't know the device, so just assume that the driver does
278 		 * everything right
279 		 */
280 		return true;
281 	case SNDRV_DMA_TYPE_CONTINUOUS:
282 	case SNDRV_DMA_TYPE_VMALLOC:
283 		return true;
284 	default:
285 		return dma_can_mmap(dmabuf->dev.dev);
286 	}
287 }
288 
289 static int constrain_mask_params(struct snd_pcm_substream *substream,
290 				 struct snd_pcm_hw_params *params)
291 {
292 	struct snd_pcm_hw_constraints *constrs =
293 					&substream->runtime->hw_constraints;
294 	struct snd_mask *m;
295 	unsigned int k;
296 	struct snd_mask old_mask __maybe_unused;
297 	int changed;
298 
299 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
300 		m = hw_param_mask(params, k);
301 		if (snd_mask_empty(m))
302 			return -EINVAL;
303 
304 		/* The caller did not request a change of this parameter. */
305 		if (!(params->rmask & PARAM_MASK_BIT(k)))
306 			continue;
307 
308 		if (trace_hw_mask_param_enabled())
309 			old_mask = *m;
310 
311 		changed = snd_mask_refine(m, constrs_mask(constrs, k));
312 		if (changed < 0)
313 			return changed;
314 		if (changed == 0)
315 			continue;
316 
317 		/* Set the corresponding flag so that the caller sees the change. */
318 		trace_hw_mask_param(substream, k, 0, &old_mask, m);
319 		params->cmask |= PARAM_MASK_BIT(k);
320 	}
321 
322 	return 0;
323 }
324 
325 static int constrain_interval_params(struct snd_pcm_substream *substream,
326 				     struct snd_pcm_hw_params *params)
327 {
328 	struct snd_pcm_hw_constraints *constrs =
329 					&substream->runtime->hw_constraints;
330 	struct snd_interval *i;
331 	unsigned int k;
332 	struct snd_interval old_interval __maybe_unused;
333 	int changed;
334 
335 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
336 		i = hw_param_interval(params, k);
337 		if (snd_interval_empty(i))
338 			return -EINVAL;
339 
340 		/* The caller did not request a change of this parameter. */
341 		if (!(params->rmask & PARAM_MASK_BIT(k)))
342 			continue;
343 
344 		if (trace_hw_interval_param_enabled())
345 			old_interval = *i;
346 
347 		changed = snd_interval_refine(i, constrs_interval(constrs, k));
348 		if (changed < 0)
349 			return changed;
350 		if (changed == 0)
351 			continue;
352 
353 		/* Set the corresponding flag so that the caller sees the change. */
354 		trace_hw_interval_param(substream, k, 0, &old_interval, i);
355 		params->cmask |= PARAM_MASK_BIT(k);
356 	}
357 
358 	return 0;
359 }
360 
361 static int constrain_params_by_rules(struct snd_pcm_substream *substream,
362 				     struct snd_pcm_hw_params *params)
363 {
364 	struct snd_pcm_hw_constraints *constrs =
365 					&substream->runtime->hw_constraints;
366 	unsigned int k;
367 	unsigned int *rstamps __free(kfree) = NULL;
368 	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
369 	unsigned int stamp;
370 	struct snd_pcm_hw_rule *r;
371 	unsigned int d;
372 	struct snd_mask old_mask __maybe_unused;
373 	struct snd_interval old_interval __maybe_unused;
374 	bool again;
375 	int changed, err = 0;
376 
377 	/*
378 	 * Each application of a rule has its own sequence number.
379 	 *
380 	 * Each member of the 'rstamps' array holds the sequence number of the
381 	 * most recent application of the corresponding rule.
382 	 */
383 	rstamps = kcalloc(constrs->rules_num, sizeof(unsigned int), GFP_KERNEL);
384 	if (!rstamps)
385 		return -ENOMEM;
386 
387 	/*
388 	 * Each member of the 'vstamps' array holds the sequence number of the
389 	 * most recent rule application in which the corresponding parameter
390 	 * was changed.
391 	 *
392 	 * Initially, elements corresponding to parameters requested by the
393 	 * caller are set to 1.  For unrequested parameters, the corresponding
394 	 * members are 0 so that those parameters are never changed.
395 	 */
396 	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
397 		vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
398 
399 	/* Due to the above design, the actual sequence numbers start at 2. */
400 	stamp = 2;
401 retry:
402 	/* Apply all rules in order. */
403 	again = false;
404 	for (k = 0; k < constrs->rules_num; k++) {
405 		r = &constrs->rules[k];
406 
407 		/*
408 		 * Check the condition bits of this rule.  When the rule has
409 		 * condition bits, it is applied only when at least one of them
410 		 * is also set in params->flags.  SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP
411 		 * is an example of such a condition bit.
412 		 */
413 		if (r->cond && !(r->cond & params->flags))
414 			continue;
415 
416 		/*
417 		 * The 'deps' array holds at most four dependencies on
418 		 * SNDRV_PCM_HW_PARAM_XXXs for this rule.  The fifth
419 		 * member of the array is a sentinel and must be a
420 		 * negative value.
421 		 *
422 		 * The rule is processed in this pass only when a dependent
423 		 * parameter was changed by an earlier application of another
424 		 * rule.
425 		 */
426 		for (d = 0; r->deps[d] >= 0; d++) {
427 			if (vstamps[r->deps[d]] > rstamps[k])
428 				break;
429 		}
430 		if (r->deps[d] < 0)
431 			continue;
432 
433 		if (trace_hw_mask_param_enabled()) {
434 			if (hw_is_mask(r->var))
435 				old_mask = *hw_param_mask(params, r->var);
436 		}
437 		if (trace_hw_interval_param_enabled()) {
438 			if (hw_is_interval(r->var))
439 				old_interval = *hw_param_interval(params, r->var);
440 		}
441 
442 		changed = r->func(params, r);
443 		if (changed < 0)
444 			return changed;
445 
446 		/*
447 		 * When the parameter is changed, notify the caller via the
448 		 * corresponding bit in cmask, then prepare for the next
449 		 * iteration.
450 		 */
451 		if (changed && r->var >= 0) {
452 			if (hw_is_mask(r->var)) {
453 				trace_hw_mask_param(substream, r->var,
454 					k + 1, &old_mask,
455 					hw_param_mask(params, r->var));
456 			}
457 			if (hw_is_interval(r->var)) {
458 				trace_hw_interval_param(substream, r->var,
459 					k + 1, &old_interval,
460 					hw_param_interval(params, r->var));
461 			}
462 
463 			params->cmask |= PARAM_MASK_BIT(r->var);
464 			vstamps[r->var] = stamp;
465 			again = true;
466 		}
467 
468 		rstamps[k] = stamp++;
469 	}
470 
471 	/* Iterate to evaluate all rules until no parameters are changed. */
472 	if (again)
473 		goto retry;
474 
475 	return err;
476 }
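/*
 * Illustrative sketch (not part of the original code): the rules evaluated
 * above are registered by drivers, usually at open time, via
 * snd_pcm_hw_rule_add().  A hypothetical rule that restricts the rate based
 * on the channel count would look roughly like:
 *
 *	snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *			    my_rate_rule, my_chip,
 *			    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 *
 * where my_rate_rule() and my_chip are placeholders for a driver-specific
 * snd_pcm_hw_rule_func_t callback and its private data.  The CHANNELS
 * dependency makes the loop above re-run the rule whenever the channel
 * interval is refined by another rule or constraint.
 */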
477 
478 static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
479 				     struct snd_pcm_hw_params *params)
480 {
481 	const struct snd_interval *i;
482 	const struct snd_mask *m;
483 	struct snd_mask *m_rw;
484 	int err;
485 
486 	if (!params->msbits) {
487 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
488 		if (snd_interval_single(i))
489 			params->msbits = snd_interval_value(i);
490 		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
491 		if (snd_mask_single(m)) {
492 			snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
493 			params->msbits = snd_pcm_format_width(format);
494 		}
495 	}
496 
497 	if (params->msbits) {
498 		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
499 		if (snd_mask_single(m)) {
500 			snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
501 
502 			if (snd_pcm_format_linear(format) &&
503 			    snd_pcm_format_width(format) != params->msbits) {
504 				m_rw = hw_param_mask(params, SNDRV_PCM_HW_PARAM_SUBFORMAT);
505 				snd_mask_reset(m_rw,
506 					       (__force unsigned)SNDRV_PCM_SUBFORMAT_MSBITS_MAX);
507 				if (snd_mask_empty(m_rw))
508 					return -EINVAL;
509 			}
510 		}
511 	}
512 
513 	if (!params->rate_den) {
514 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
515 		if (snd_interval_single(i)) {
516 			params->rate_num = snd_interval_value(i);
517 			params->rate_den = 1;
518 		}
519 	}
520 
521 	if (!params->fifo_size) {
522 		m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
523 		i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
524 		if (snd_mask_single(m) && snd_interval_single(i)) {
525 			err = snd_pcm_ops_ioctl(substream,
526 						SNDRV_PCM_IOCTL1_FIFO_SIZE,
527 						params);
528 			if (err < 0)
529 				return err;
530 		}
531 	}
532 
533 	if (!params->info) {
534 		params->info = substream->runtime->hw.info;
535 		params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
536 				  SNDRV_PCM_INFO_DRAIN_TRIGGER);
537 		if (!hw_support_mmap(substream))
538 			params->info &= ~(SNDRV_PCM_INFO_MMAP |
539 					  SNDRV_PCM_INFO_MMAP_VALID);
540 	}
541 
542 	err = snd_pcm_ops_ioctl(substream,
543 				SNDRV_PCM_IOCTL1_SYNC_ID,
544 				params);
545 	if (err < 0)
546 		return err;
547 
548 	return 0;
549 }
550 
551 int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
552 		      struct snd_pcm_hw_params *params)
553 {
554 	int err;
555 
556 	params->info = 0;
557 	params->fifo_size = 0;
558 	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
559 		params->msbits = 0;
560 	if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
561 		params->rate_num = 0;
562 		params->rate_den = 0;
563 	}
564 
565 	err = constrain_mask_params(substream, params);
566 	if (err < 0)
567 		return err;
568 
569 	err = constrain_interval_params(substream, params);
570 	if (err < 0)
571 		return err;
572 
573 	err = constrain_params_by_rules(substream, params);
574 	if (err < 0)
575 		return err;
576 
577 	params->rmask = 0;
578 
579 	return 0;
580 }
581 EXPORT_SYMBOL(snd_pcm_hw_refine);
582 
583 static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
584 				  struct snd_pcm_hw_params __user * _params)
585 {
586 	struct snd_pcm_hw_params *params __free(kfree) = NULL;
587 	int err;
588 
589 	params = memdup_user(_params, sizeof(*params));
590 	if (IS_ERR(params))
591 		return PTR_ERR(params);
592 
593 	err = snd_pcm_hw_refine(substream, params);
594 	if (err < 0)
595 		return err;
596 
597 	err = fixup_unreferenced_params(substream, params);
598 	if (err < 0)
599 		return err;
600 
601 	if (copy_to_user(_params, params, sizeof(*params)))
602 		return -EFAULT;
603 	return 0;
604 }
605 
606 static int period_to_usecs(struct snd_pcm_runtime *runtime)
607 {
608 	int usecs;
609 
610 	if (! runtime->rate)
611 		return -1; /* invalid */
612 
613 	/* take 75% of period time as the deadline */
614 	usecs = (750000 / runtime->rate) * runtime->period_size;
615 	usecs += ((750000 % runtime->rate) * runtime->period_size) /
616 		runtime->rate;
617 
618 	return usecs;
619 }
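/*
 * Worked example: for rate = 48000 and period_size = 1024 frames, the period
 * time is about 21333 us and the code above yields
 * (750000 / 48000) * 1024 + ((750000 % 48000) * 1024) / 48000
 * = 15 * 1024 + (30000 * 1024) / 48000 = 15360 + 640 = 16000 us,
 * i.e. 75% of the period time, used for the CPU latency QoS request below.
 */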
620 
621 static void snd_pcm_set_state(struct snd_pcm_substream *substream,
622 			      snd_pcm_state_t state)
623 {
624 	guard(pcm_stream_lock_irq)(substream);
625 	if (substream->runtime->state != SNDRV_PCM_STATE_DISCONNECTED)
626 		__snd_pcm_set_state(substream->runtime, state);
627 }
628 
629 static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
630 					int event)
631 {
632 #ifdef CONFIG_SND_PCM_TIMER
633 	if (substream->timer)
634 		snd_timer_notify(substream->timer, event,
635 					&substream->runtime->trigger_tstamp);
636 #endif
637 }
638 
639 void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
640 {
641 	if (substream->runtime && substream->runtime->stop_operating) {
642 		substream->runtime->stop_operating = false;
643 		if (substream->ops && substream->ops->sync_stop)
644 			substream->ops->sync_stop(substream);
645 		else if (sync_irq && substream->pcm->card->sync_irq > 0)
646 			synchronize_irq(substream->pcm->card->sync_irq);
647 	}
648 }
649 
650 /**
651  * snd_pcm_hw_params_choose - choose a configuration defined by @params
652  * @pcm: PCM instance
653  * @params: the hw_params instance
654  *
655  * Choose one configuration from the configuration space defined by @params.
656  * The chosen configuration is obtained by fixing, in this order:
657  * first access, first format, first subformat, min channels,
658  * min rate, min period time, max buffer size, min tick time
659  *
660  * Return: Zero if successful, or a negative error code on failure.
661  */
662 static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
663 				    struct snd_pcm_hw_params *params)
664 {
665 	static const int vars[] = {
666 		SNDRV_PCM_HW_PARAM_ACCESS,
667 		SNDRV_PCM_HW_PARAM_FORMAT,
668 		SNDRV_PCM_HW_PARAM_SUBFORMAT,
669 		SNDRV_PCM_HW_PARAM_CHANNELS,
670 		SNDRV_PCM_HW_PARAM_RATE,
671 		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
672 		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
673 		SNDRV_PCM_HW_PARAM_TICK_TIME,
674 		-1
675 	};
676 	const int *v;
677 	struct snd_mask old_mask __maybe_unused;
678 	struct snd_interval old_interval __maybe_unused;
679 	int changed;
680 
681 	for (v = vars; *v != -1; v++) {
682 		/* Keep old parameter to trace. */
683 		if (trace_hw_mask_param_enabled()) {
684 			if (hw_is_mask(*v))
685 				old_mask = *hw_param_mask(params, *v);
686 		}
687 		if (trace_hw_interval_param_enabled()) {
688 			if (hw_is_interval(*v))
689 				old_interval = *hw_param_interval(params, *v);
690 		}
691 		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
692 			changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
693 		else
694 			changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
695 		if (changed < 0)
696 			return changed;
697 		if (changed == 0)
698 			continue;
699 
700 		/* Trace the changed parameter. */
701 		if (hw_is_mask(*v)) {
702 			trace_hw_mask_param(pcm, *v, 0, &old_mask,
703 					    hw_param_mask(params, *v));
704 		}
705 		if (hw_is_interval(*v)) {
706 			trace_hw_interval_param(pcm, *v, 0, &old_interval,
707 						hw_param_interval(params, *v));
708 		}
709 	}
710 
711 	return 0;
712 }
713 
714 /* acquire buffer_mutex; if a read/write operation is in progress, return
715  * -EBUSY, otherwise block further read/write operations
716  */
717 static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
718 {
719 	if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
720 		return -EBUSY;
721 	mutex_lock(&runtime->buffer_mutex);
722 	return 0; /* keep buffer_mutex, unlocked by below */
723 }
724 
725 /* release buffer_mutex and clear r/w access flag */
726 static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
727 {
728 	mutex_unlock(&runtime->buffer_mutex);
729 	atomic_inc(&runtime->buffer_accessing);
730 }
731 
732 /* fill the PCM buffer with silence in the current format; called from pcm_oss.c */
733 void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
734 {
735 	snd_pcm_buffer_access_lock(runtime);
736 	if (runtime->dma_area)
737 		snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
738 					   bytes_to_samples(runtime, runtime->dma_bytes));
739 	snd_pcm_buffer_access_unlock(runtime);
740 }
741 EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence);
742 
743 #if IS_ENABLED(CONFIG_SND_PCM_OSS)
744 #define is_oss_stream(substream)	((substream)->oss.oss)
745 #else
746 #define is_oss_stream(substream)	false
747 #endif
748 
749 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
750 			     struct snd_pcm_hw_params *params)
751 {
752 	struct snd_pcm_runtime *runtime;
753 	int err, usecs;
754 	unsigned int bits;
755 	snd_pcm_uframes_t frames;
756 
757 	if (PCM_RUNTIME_CHECK(substream))
758 		return -ENXIO;
759 	runtime = substream->runtime;
760 	err = snd_pcm_buffer_access_lock(runtime);
761 	if (err < 0)
762 		return err;
763 	scoped_guard(pcm_stream_lock_irq, substream) {
764 		switch (runtime->state) {
765 		case SNDRV_PCM_STATE_OPEN:
766 		case SNDRV_PCM_STATE_SETUP:
767 		case SNDRV_PCM_STATE_PREPARED:
768 			if (!is_oss_stream(substream) &&
769 			    atomic_read(&substream->mmap_count))
770 				err = -EBADFD;
771 			break;
772 		default:
773 			err = -EBADFD;
774 			break;
775 		}
776 	}
777 	if (err)
778 		goto unlock;
779 
780 	snd_pcm_sync_stop(substream, true);
781 
782 	params->rmask = ~0U;
783 	err = snd_pcm_hw_refine(substream, params);
784 	if (err < 0)
785 		goto _error;
786 
787 	err = snd_pcm_hw_params_choose(substream, params);
788 	if (err < 0)
789 		goto _error;
790 
791 	err = fixup_unreferenced_params(substream, params);
792 	if (err < 0)
793 		goto _error;
794 
795 	if (substream->managed_buffer_alloc) {
796 		err = snd_pcm_lib_malloc_pages(substream,
797 					       params_buffer_bytes(params));
798 		if (err < 0)
799 			goto _error;
800 		runtime->buffer_changed = err > 0;
801 	}
802 
803 	if (substream->ops->hw_params != NULL) {
804 		err = substream->ops->hw_params(substream, params);
805 		if (err < 0)
806 			goto _error;
807 	}
808 
809 	runtime->access = params_access(params);
810 	runtime->format = params_format(params);
811 	runtime->subformat = params_subformat(params);
812 	runtime->channels = params_channels(params);
813 	runtime->rate = params_rate(params);
814 	runtime->period_size = params_period_size(params);
815 	runtime->periods = params_periods(params);
816 	runtime->buffer_size = params_buffer_size(params);
817 	runtime->info = params->info;
818 	runtime->rate_num = params->rate_num;
819 	runtime->rate_den = params->rate_den;
820 	runtime->no_period_wakeup =
821 			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
822 			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
823 
824 	bits = snd_pcm_format_physical_width(runtime->format);
825 	runtime->sample_bits = bits;
826 	bits *= runtime->channels;
827 	runtime->frame_bits = bits;
828 	frames = 1;
829 	while (bits % 8 != 0) {
830 		bits *= 2;
831 		frames *= 2;
832 	}
833 	runtime->byte_align = bits / 8;
834 	runtime->min_align = frames;
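	/*
	 * Examples of the alignment computed above: a 4-bit mono format gives
	 * frame_bits = 4, hence byte_align = 1 byte and min_align = 2 frames
	 * (two frames share one byte), while 16-bit stereo gives
	 * frame_bits = 32, byte_align = 4 and min_align = 1.
	 */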
835 
836 	/* Default sw params */
837 	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
838 	runtime->period_step = 1;
839 	runtime->control->avail_min = runtime->period_size;
840 	runtime->start_threshold = 1;
841 	runtime->stop_threshold = runtime->buffer_size;
842 	runtime->silence_threshold = 0;
843 	runtime->silence_size = 0;
844 	runtime->boundary = runtime->buffer_size;
845 	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
846 		runtime->boundary *= 2;
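	/*
	 * runtime->boundary is now the largest power-of-two multiple of the
	 * buffer size that still fits in a long together with one extra
	 * buffer; the application and hardware pointers wrap at this value.
	 */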
847 
848 	/* clear the buffer to avoid possible kernel info leaks */
849 	if (runtime->dma_area && !substream->ops->copy) {
850 		size_t size = runtime->dma_bytes;
851 
852 		if (runtime->info & SNDRV_PCM_INFO_MMAP)
853 			size = PAGE_ALIGN(size);
854 		memset(runtime->dma_area, 0, size);
855 	}
856 
857 	snd_pcm_timer_resolution_change(substream);
858 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
859 
860 	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
861 		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
862 	usecs = period_to_usecs(runtime);
863 	if (usecs >= 0)
864 		cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
865 					    usecs);
866 	err = 0;
867  _error:
868 	if (err) {
869 		/* the hardware might be unusable from this point on,
870 		 * so force the application to retry with correct
871 		 * hardware parameter settings
872 		 */
873 		snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
874 		if (substream->ops->hw_free != NULL)
875 			substream->ops->hw_free(substream);
876 		if (substream->managed_buffer_alloc)
877 			snd_pcm_lib_free_pages(substream);
878 	}
879  unlock:
880 	snd_pcm_buffer_access_unlock(runtime);
881 	return err;
882 }
883 
884 static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
885 				  struct snd_pcm_hw_params __user * _params)
886 {
887 	struct snd_pcm_hw_params *params __free(kfree) = NULL;
888 	int err;
889 
890 	params = memdup_user(_params, sizeof(*params));
891 	if (IS_ERR(params))
892 		return PTR_ERR(params);
893 
894 	err = snd_pcm_hw_params(substream, params);
895 	if (err < 0)
896 		return err;
897 
898 	if (copy_to_user(_params, params, sizeof(*params)))
899 		return -EFAULT;
900 	return err;
901 }
902 
903 static int do_hw_free(struct snd_pcm_substream *substream)
904 {
905 	int result = 0;
906 
907 	snd_pcm_sync_stop(substream, true);
908 	if (substream->ops->hw_free)
909 		result = substream->ops->hw_free(substream);
910 	if (substream->managed_buffer_alloc)
911 		snd_pcm_lib_free_pages(substream);
912 	return result;
913 }
914 
915 static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
916 {
917 	struct snd_pcm_runtime *runtime;
918 	int result = 0;
919 
920 	if (PCM_RUNTIME_CHECK(substream))
921 		return -ENXIO;
922 	runtime = substream->runtime;
923 	result = snd_pcm_buffer_access_lock(runtime);
924 	if (result < 0)
925 		return result;
926 	scoped_guard(pcm_stream_lock_irq, substream) {
927 		switch (runtime->state) {
928 		case SNDRV_PCM_STATE_SETUP:
929 		case SNDRV_PCM_STATE_PREPARED:
930 			if (atomic_read(&substream->mmap_count))
931 				result = -EBADFD;
932 			break;
933 		default:
934 			result = -EBADFD;
935 			break;
936 		}
937 	}
938 	if (result)
939 		goto unlock;
940 	result = do_hw_free(substream);
941 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
942 	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
943  unlock:
944 	snd_pcm_buffer_access_unlock(runtime);
945 	return result;
946 }
947 
948 static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
949 			     struct snd_pcm_sw_params *params)
950 {
951 	struct snd_pcm_runtime *runtime;
952 	int err;
953 
954 	if (PCM_RUNTIME_CHECK(substream))
955 		return -ENXIO;
956 	runtime = substream->runtime;
957 	scoped_guard(pcm_stream_lock_irq, substream) {
958 		if (runtime->state == SNDRV_PCM_STATE_OPEN)
959 			return -EBADFD;
960 	}
961 
962 	if (params->tstamp_mode < 0 ||
963 	    params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
964 		return -EINVAL;
965 	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
966 	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
967 		return -EINVAL;
968 	if (params->avail_min == 0)
969 		return -EINVAL;
970 	if (params->silence_size >= runtime->boundary) {
971 		if (params->silence_threshold != 0)
972 			return -EINVAL;
973 	} else {
974 		if (params->silence_size > params->silence_threshold)
975 			return -EINVAL;
976 		if (params->silence_threshold > runtime->buffer_size)
977 			return -EINVAL;
978 	}
979 	err = 0;
980 	scoped_guard(pcm_stream_lock_irq, substream) {
981 		runtime->tstamp_mode = params->tstamp_mode;
982 		if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
983 			runtime->tstamp_type = params->tstamp_type;
984 		runtime->period_step = params->period_step;
985 		runtime->control->avail_min = params->avail_min;
986 		runtime->start_threshold = params->start_threshold;
987 		runtime->stop_threshold = params->stop_threshold;
988 		runtime->silence_threshold = params->silence_threshold;
989 		runtime->silence_size = params->silence_size;
990 		params->boundary = runtime->boundary;
991 		if (snd_pcm_running(substream)) {
992 			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
993 			    runtime->silence_size > 0)
994 				snd_pcm_playback_silence(substream, ULONG_MAX);
995 			err = snd_pcm_update_state(substream, runtime);
996 		}
997 	}
998 	return err;
999 }
1000 
1001 static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
1002 				  struct snd_pcm_sw_params __user * _params)
1003 {
1004 	struct snd_pcm_sw_params params;
1005 	int err;
1006 	if (copy_from_user(&params, _params, sizeof(params)))
1007 		return -EFAULT;
1008 	err = snd_pcm_sw_params(substream, &params);
1009 	if (copy_to_user(_params, &params, sizeof(params)))
1010 		return -EFAULT;
1011 	return err;
1012 }
1013 
1014 static inline snd_pcm_uframes_t
1015 snd_pcm_calc_delay(struct snd_pcm_substream *substream)
1016 {
1017 	snd_pcm_uframes_t delay;
1018 
1019 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
1020 		delay = snd_pcm_playback_hw_avail(substream->runtime);
1021 	else
1022 		delay = snd_pcm_capture_avail(substream->runtime);
1023 	return delay + substream->runtime->delay;
1024 }
1025 
1026 int snd_pcm_status64(struct snd_pcm_substream *substream,
1027 		     struct snd_pcm_status64 *status)
1028 {
1029 	struct snd_pcm_runtime *runtime = substream->runtime;
1030 
1031 	guard(pcm_stream_lock_irq)(substream);
1032 
1033 	snd_pcm_unpack_audio_tstamp_config(status->audio_tstamp_data,
1034 					&runtime->audio_tstamp_config);
1035 
1036 	/* backwards compatible behavior */
1037 	if (runtime->audio_tstamp_config.type_requested ==
1038 		SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT) {
1039 		if (runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)
1040 			runtime->audio_tstamp_config.type_requested =
1041 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
1042 		else
1043 			runtime->audio_tstamp_config.type_requested =
1044 				SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
1045 		runtime->audio_tstamp_report.valid = 0;
1046 	} else
1047 		runtime->audio_tstamp_report.valid = 1;
1048 
1049 	status->state = runtime->state;
1050 	status->suspended_state = runtime->suspended_state;
1051 	if (status->state == SNDRV_PCM_STATE_OPEN)
1052 		return 0;
1053 	status->trigger_tstamp_sec = runtime->trigger_tstamp.tv_sec;
1054 	status->trigger_tstamp_nsec = runtime->trigger_tstamp.tv_nsec;
1055 	if (snd_pcm_running(substream)) {
1056 		snd_pcm_update_hw_ptr(substream);
1057 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
1058 			status->tstamp_sec = runtime->status->tstamp.tv_sec;
1059 			status->tstamp_nsec =
1060 				runtime->status->tstamp.tv_nsec;
1061 			status->driver_tstamp_sec =
1062 				runtime->driver_tstamp.tv_sec;
1063 			status->driver_tstamp_nsec =
1064 				runtime->driver_tstamp.tv_nsec;
1065 			status->audio_tstamp_sec =
1066 				runtime->status->audio_tstamp.tv_sec;
1067 			status->audio_tstamp_nsec =
1068 				runtime->status->audio_tstamp.tv_nsec;
1069 			if (runtime->audio_tstamp_report.valid == 1)
1070 				/* backwards compatibility, no report provided in COMPAT mode */
1071 				snd_pcm_pack_audio_tstamp_report(&status->audio_tstamp_data,
1072 								&status->audio_tstamp_accuracy,
1073 								&runtime->audio_tstamp_report);
1074 
1075 			goto _tstamp_end;
1076 		}
1077 	} else {
1078 		/* get tstamp only in fallback mode and only if enabled */
1079 		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
1080 			struct timespec64 tstamp;
1081 
1082 			snd_pcm_gettime(runtime, &tstamp);
1083 			status->tstamp_sec = tstamp.tv_sec;
1084 			status->tstamp_nsec = tstamp.tv_nsec;
1085 		}
1086 	}
1087  _tstamp_end:
1088 	status->appl_ptr = runtime->control->appl_ptr;
1089 	status->hw_ptr = runtime->status->hw_ptr;
1090 	status->avail = snd_pcm_avail(substream);
1091 	status->delay = snd_pcm_running(substream) ?
1092 		snd_pcm_calc_delay(substream) : 0;
1093 	status->avail_max = runtime->avail_max;
1094 	status->overrange = runtime->overrange;
1095 	runtime->avail_max = 0;
1096 	runtime->overrange = 0;
1097 	return 0;
1098 }
1099 
1100 static int snd_pcm_status_user64(struct snd_pcm_substream *substream,
1101 				 struct snd_pcm_status64 __user * _status,
1102 				 bool ext)
1103 {
1104 	struct snd_pcm_status64 status;
1105 	int res;
1106 
1107 	memset(&status, 0, sizeof(status));
1108 	/*
1109 	 * with extension, parameters are read/write,
1110 	 * get audio_tstamp_data from user,
1111 	 * ignore rest of status structure
1112 	 */
1113 	if (ext && get_user(status.audio_tstamp_data,
1114 				(u32 __user *)(&_status->audio_tstamp_data)))
1115 		return -EFAULT;
1116 	res = snd_pcm_status64(substream, &status);
1117 	if (res < 0)
1118 		return res;
1119 	if (copy_to_user(_status, &status, sizeof(status)))
1120 		return -EFAULT;
1121 	return 0;
1122 }
1123 
1124 static int snd_pcm_status_user32(struct snd_pcm_substream *substream,
1125 				 struct snd_pcm_status32 __user * _status,
1126 				 bool ext)
1127 {
1128 	struct snd_pcm_status64 status64;
1129 	struct snd_pcm_status32 status32;
1130 	int res;
1131 
1132 	memset(&status64, 0, sizeof(status64));
1133 	memset(&status32, 0, sizeof(status32));
1134 	/*
1135 	 * with extension, parameters are read/write,
1136 	 * get audio_tstamp_data from user,
1137 	 * ignore rest of status structure
1138 	 */
1139 	if (ext && get_user(status64.audio_tstamp_data,
1140 			    (u32 __user *)(&_status->audio_tstamp_data)))
1141 		return -EFAULT;
1142 	res = snd_pcm_status64(substream, &status64);
1143 	if (res < 0)
1144 		return res;
1145 
1146 	status32 = (struct snd_pcm_status32) {
1147 		.state = status64.state,
1148 		.trigger_tstamp_sec = status64.trigger_tstamp_sec,
1149 		.trigger_tstamp_nsec = status64.trigger_tstamp_nsec,
1150 		.tstamp_sec = status64.tstamp_sec,
1151 		.tstamp_nsec = status64.tstamp_nsec,
1152 		.appl_ptr = status64.appl_ptr,
1153 		.hw_ptr = status64.hw_ptr,
1154 		.delay = status64.delay,
1155 		.avail = status64.avail,
1156 		.avail_max = status64.avail_max,
1157 		.overrange = status64.overrange,
1158 		.suspended_state = status64.suspended_state,
1159 		.audio_tstamp_data = status64.audio_tstamp_data,
1160 		.audio_tstamp_sec = status64.audio_tstamp_sec,
1161 		.audio_tstamp_nsec = status64.audio_tstamp_nsec,
1162 		.driver_tstamp_sec = status64.driver_tstamp_sec,
1163 		.driver_tstamp_nsec = status64.driver_tstamp_nsec,
1164 		.audio_tstamp_accuracy = status64.audio_tstamp_accuracy,
1165 	};
1166 
1167 	if (copy_to_user(_status, &status32, sizeof(status32)))
1168 		return -EFAULT;
1169 
1170 	return 0;
1171 }
1172 
1173 static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
1174 				struct snd_pcm_channel_info * info)
1175 {
1176 	struct snd_pcm_runtime *runtime;
1177 	unsigned int channel;
1178 
1179 	channel = info->channel;
1180 	runtime = substream->runtime;
1181 	scoped_guard(pcm_stream_lock_irq, substream) {
1182 		if (runtime->state == SNDRV_PCM_STATE_OPEN)
1183 			return -EBADFD;
1184 	}
1185 	if (channel >= runtime->channels)
1186 		return -EINVAL;
1187 	memset(info, 0, sizeof(*info));
1188 	info->channel = channel;
1189 	return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
1190 }
1191 
1192 static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
1193 				     struct snd_pcm_channel_info __user * _info)
1194 {
1195 	struct snd_pcm_channel_info info;
1196 	int res;
1197 
1198 	if (copy_from_user(&info, _info, sizeof(info)))
1199 		return -EFAULT;
1200 	res = snd_pcm_channel_info(substream, &info);
1201 	if (res < 0)
1202 		return res;
1203 	if (copy_to_user(_info, &info, sizeof(info)))
1204 		return -EFAULT;
1205 	return 0;
1206 }
1207 
1208 static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
1209 {
1210 	struct snd_pcm_runtime *runtime = substream->runtime;
1211 	if (runtime->trigger_master == NULL)
1212 		return;
1213 	if (runtime->trigger_master == substream) {
1214 		if (!runtime->trigger_tstamp_latched)
1215 			snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
1216 	} else {
1217 		snd_pcm_trigger_tstamp(runtime->trigger_master);
1218 		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
1219 	}
1220 	runtime->trigger_master = NULL;
1221 }
1222 
1223 #define ACTION_ARG_IGNORE	(__force snd_pcm_state_t)0
1224 
1225 struct action_ops {
1226 	int (*pre_action)(struct snd_pcm_substream *substream,
1227 			  snd_pcm_state_t state);
1228 	int (*do_action)(struct snd_pcm_substream *substream,
1229 			 snd_pcm_state_t state);
1230 	void (*undo_action)(struct snd_pcm_substream *substream,
1231 			    snd_pcm_state_t state);
1232 	void (*post_action)(struct snd_pcm_substream *substream,
1233 			    snd_pcm_state_t state);
1234 };
1235 
1236 /*
1237  *  this function is the core for handling linked streams
1238  *  Note: the stream state might change even on failure
1239  *  Note2: call with the calling stream's lock and the link lock held
1240  */
1241 static int snd_pcm_action_group(const struct action_ops *ops,
1242 				struct snd_pcm_substream *substream,
1243 				snd_pcm_state_t state,
1244 				bool stream_lock)
1245 {
1246 	struct snd_pcm_substream *s = NULL;
1247 	struct snd_pcm_substream *s1;
1248 	int res = 0, depth = 1;
1249 
1250 	snd_pcm_group_for_each_entry(s, substream) {
1251 		if (s != substream) {
1252 			if (!stream_lock)
1253 				mutex_lock_nested(&s->runtime->buffer_mutex, depth);
1254 			else if (s->pcm->nonatomic)
1255 				mutex_lock_nested(&s->self_group.mutex, depth);
1256 			else
1257 				spin_lock_nested(&s->self_group.lock, depth);
1258 			depth++;
1259 		}
1260 		res = ops->pre_action(s, state);
1261 		if (res < 0)
1262 			goto _unlock;
1263 	}
1264 	snd_pcm_group_for_each_entry(s, substream) {
1265 		res = ops->do_action(s, state);
1266 		if (res < 0) {
1267 			if (ops->undo_action) {
1268 				snd_pcm_group_for_each_entry(s1, substream) {
1269 					if (s1 == s) /* failed stream */
1270 						break;
1271 					ops->undo_action(s1, state);
1272 				}
1273 			}
1274 			s = NULL; /* unlock all */
1275 			goto _unlock;
1276 		}
1277 	}
1278 	snd_pcm_group_for_each_entry(s, substream) {
1279 		ops->post_action(s, state);
1280 	}
1281  _unlock:
1282 	/* unlock streams */
1283 	snd_pcm_group_for_each_entry(s1, substream) {
1284 		if (s1 != substream) {
1285 			if (!stream_lock)
1286 				mutex_unlock(&s1->runtime->buffer_mutex);
1287 			else if (s1->pcm->nonatomic)
1288 				mutex_unlock(&s1->self_group.mutex);
1289 			else
1290 				spin_unlock(&s1->self_group.lock);
1291 		}
1292 		if (s1 == s)	/* end */
1293 			break;
1294 	}
1295 	return res;
1296 }
1297 
1298 /*
1299  *  Note: call with stream lock
1300  */
1301 static int snd_pcm_action_single(const struct action_ops *ops,
1302 				 struct snd_pcm_substream *substream,
1303 				 snd_pcm_state_t state)
1304 {
1305 	int res;
1306 
1307 	res = ops->pre_action(substream, state);
1308 	if (res < 0)
1309 		return res;
1310 	res = ops->do_action(substream, state);
1311 	if (res == 0)
1312 		ops->post_action(substream, state);
1313 	else if (ops->undo_action)
1314 		ops->undo_action(substream, state);
1315 	return res;
1316 }
1317 
1318 static void snd_pcm_group_assign(struct snd_pcm_substream *substream,
1319 				 struct snd_pcm_group *new_group)
1320 {
1321 	substream->group = new_group;
1322 	list_move(&substream->link_list, &new_group->substreams);
1323 }
1324 
1325 /*
1326  * Unref and unlock the group, but keep the stream lock;
1327  * when the group becomes empty and is no longer referenced, free it
1328  */
1329 static void snd_pcm_group_unref(struct snd_pcm_group *group,
1330 				struct snd_pcm_substream *substream)
1331 {
1332 	bool do_free;
1333 
1334 	if (!group)
1335 		return;
1336 	do_free = refcount_dec_and_test(&group->refs);
1337 	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
1338 	if (do_free)
1339 		kfree(group);
1340 }
1341 
1342 /*
1343  * Lock the group inside a stream lock and reference it;
1344  * return the locked group object, or NULL if not linked
1345  */
1346 static struct snd_pcm_group *
1347 snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
1348 {
1349 	bool nonatomic = substream->pcm->nonatomic;
1350 	struct snd_pcm_group *group;
1351 	bool trylock;
1352 
1353 	for (;;) {
1354 		if (!snd_pcm_stream_linked(substream))
1355 			return NULL;
1356 		group = substream->group;
1357 		/* block freeing the group object */
1358 		refcount_inc(&group->refs);
1359 
1360 		trylock = nonatomic ? mutex_trylock(&group->mutex) :
1361 			spin_trylock(&group->lock);
1362 		if (trylock)
1363 			break; /* OK */
1364 
1365 		/* re-lock to avoid an ABBA deadlock */
1366 		snd_pcm_stream_unlock(substream);
1367 		snd_pcm_group_lock(group, nonatomic);
1368 		snd_pcm_stream_lock(substream);
1369 
1370 		/* check the group again; the above opens a small race window */
1371 		if (substream->group == group)
1372 			break; /* OK */
1373 		/* group changed, try again */
1374 		snd_pcm_group_unref(group, substream);
1375 	}
1376 	return group;
1377 }
1378 
1379 /*
1380  *  Note: call with stream lock
1381  */
1382 static int snd_pcm_action(const struct action_ops *ops,
1383 			  struct snd_pcm_substream *substream,
1384 			  snd_pcm_state_t state)
1385 {
1386 	struct snd_pcm_group *group;
1387 	int res;
1388 
1389 	group = snd_pcm_stream_group_ref(substream);
1390 	if (group)
1391 		res = snd_pcm_action_group(ops, substream, state, true);
1392 	else
1393 		res = snd_pcm_action_single(ops, substream, state);
1394 	snd_pcm_group_unref(group, substream);
1395 	return res;
1396 }
1397 
1398 /*
1399  *  Note: call this without holding any locks
1400  */
1401 static int snd_pcm_action_lock_irq(const struct action_ops *ops,
1402 				   struct snd_pcm_substream *substream,
1403 				   snd_pcm_state_t state)
1404 {
1405 	guard(pcm_stream_lock_irq)(substream);
1406 	return snd_pcm_action(ops, substream, state);
1407 }
1408 
1409 /*
1410  */
1411 static int snd_pcm_action_nonatomic(const struct action_ops *ops,
1412 				    struct snd_pcm_substream *substream,
1413 				    snd_pcm_state_t state)
1414 {
1415 	int res;
1416 
1417 	/* Guarantee the group members won't change during non-atomic action */
1418 	guard(rwsem_read)(&snd_pcm_link_rwsem);
1419 	res = snd_pcm_buffer_access_lock(substream->runtime);
1420 	if (res < 0)
1421 		return res;
1422 	if (snd_pcm_stream_linked(substream))
1423 		res = snd_pcm_action_group(ops, substream, state, false);
1424 	else
1425 		res = snd_pcm_action_single(ops, substream, state);
1426 	snd_pcm_buffer_access_unlock(substream->runtime);
1427 	return res;
1428 }
1429 
1430 /*
1431  * start callbacks
1432  */
1433 static int snd_pcm_pre_start(struct snd_pcm_substream *substream,
1434 			     snd_pcm_state_t state)
1435 {
1436 	struct snd_pcm_runtime *runtime = substream->runtime;
1437 	if (runtime->state != SNDRV_PCM_STATE_PREPARED)
1438 		return -EBADFD;
1439 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1440 	    !snd_pcm_playback_data(substream))
1441 		return -EPIPE;
1442 	runtime->trigger_tstamp_latched = false;
1443 	runtime->trigger_master = substream;
1444 	return 0;
1445 }
1446 
1447 static int snd_pcm_do_start(struct snd_pcm_substream *substream,
1448 			    snd_pcm_state_t state)
1449 {
1450 	int err;
1451 
1452 	if (substream->runtime->trigger_master != substream)
1453 		return 0;
1454 	err = substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
1455 	/* XRUN happened during the start */
1456 	if (err == -EPIPE)
1457 		__snd_pcm_set_state(substream->runtime, SNDRV_PCM_STATE_XRUN);
1458 	return err;
1459 }
1460 
1461 static void snd_pcm_undo_start(struct snd_pcm_substream *substream,
1462 			       snd_pcm_state_t state)
1463 {
1464 	if (substream->runtime->trigger_master == substream) {
1465 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1466 		substream->runtime->stop_operating = true;
1467 	}
1468 }
1469 
1470 static void snd_pcm_post_start(struct snd_pcm_substream *substream,
1471 			       snd_pcm_state_t state)
1472 {
1473 	struct snd_pcm_runtime *runtime = substream->runtime;
1474 	snd_pcm_trigger_tstamp(substream);
1475 	runtime->hw_ptr_jiffies = jiffies;
1476 	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
1477 							    runtime->rate;
1478 	__snd_pcm_set_state(runtime, state);
1479 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1480 	    runtime->silence_size > 0)
1481 		snd_pcm_playback_silence(substream, ULONG_MAX);
1482 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTART);
1483 }
1484 
1485 static const struct action_ops snd_pcm_action_start = {
1486 	.pre_action = snd_pcm_pre_start,
1487 	.do_action = snd_pcm_do_start,
1488 	.undo_action = snd_pcm_undo_start,
1489 	.post_action = snd_pcm_post_start
1490 };
1491 
1492 /**
1493  * snd_pcm_start - start all linked streams
1494  * @substream: the PCM substream instance
1495  *
1496  * Return: Zero if successful, or a negative error code.
1497  * The stream lock must be acquired before calling this function.
1498  */
1499 int snd_pcm_start(struct snd_pcm_substream *substream)
1500 {
1501 	return snd_pcm_action(&snd_pcm_action_start, substream,
1502 			      SNDRV_PCM_STATE_RUNNING);
1503 }
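/*
 * Usage sketch (illustrative only): the caller must already hold the stream
 * lock, e.g.
 *
 *	snd_pcm_stream_lock_irq(substream);
 *	err = snd_pcm_start(substream);
 *	snd_pcm_stream_unlock_irq(substream);
 *
 * snd_pcm_start_lock_irq() just below wraps this same locked sequence.
 */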
1504 
1505 /* take the stream lock and start the streams */
1506 static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
1507 {
1508 	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
1509 				       SNDRV_PCM_STATE_RUNNING);
1510 }
1511 
1512 /*
1513  * stop callbacks
1514  */
1515 static int snd_pcm_pre_stop(struct snd_pcm_substream *substream,
1516 			    snd_pcm_state_t state)
1517 {
1518 	struct snd_pcm_runtime *runtime = substream->runtime;
1519 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
1520 		return -EBADFD;
1521 	runtime->trigger_master = substream;
1522 	return 0;
1523 }
1524 
1525 static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
1526 			   snd_pcm_state_t state)
1527 {
1528 	if (substream->runtime->trigger_master == substream &&
1529 	    snd_pcm_running(substream)) {
1530 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
1531 		substream->runtime->stop_operating = true;
1532 	}
1533 	return 0; /* unconditionally stop all substreams */
1534 }
1535 
1536 static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
1537 			      snd_pcm_state_t state)
1538 {
1539 	struct snd_pcm_runtime *runtime = substream->runtime;
1540 	if (runtime->state != state) {
1541 		snd_pcm_trigger_tstamp(substream);
1542 		__snd_pcm_set_state(runtime, state);
1543 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
1544 	}
1545 	wake_up(&runtime->sleep);
1546 	wake_up(&runtime->tsleep);
1547 }
1548 
1549 static const struct action_ops snd_pcm_action_stop = {
1550 	.pre_action = snd_pcm_pre_stop,
1551 	.do_action = snd_pcm_do_stop,
1552 	.post_action = snd_pcm_post_stop
1553 };
1554 
1555 /**
1556  * snd_pcm_stop - try to stop all running streams in the substream group
1557  * @substream: the PCM substream instance
1558  * @state: PCM state after stopping the stream
1559  *
1560  * The state of each stream is then changed to the given state unconditionally.
1561  *
1562  * Return: Zero if successful, or a negative error code.
1563  */
1564 int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
1565 {
1566 	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
1567 }
1568 EXPORT_SYMBOL(snd_pcm_stop);
1569 
1570 /**
1571  * snd_pcm_drain_done - stop the DMA only when the given stream is playback
1572  * @substream: the PCM substream
1573  *
1574  * After stopping, the state is changed to SETUP.
1575  * Unlike snd_pcm_stop(), this affects only the given stream.
1576  *
1577  * Return: Zero if successful, or a negative error code.
1578  */
1579 int snd_pcm_drain_done(struct snd_pcm_substream *substream)
1580 {
1581 	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
1582 				     SNDRV_PCM_STATE_SETUP);
1583 }
1584 
1585 /**
1586  * snd_pcm_stop_xrun - stop the running streams as XRUN
1587  * @substream: the PCM substream instance
1588  *
1589  * This stops the given running substream (and all linked substreams) as XRUN.
1590  * Unlike snd_pcm_stop(), this function takes the substream lock by itself.
1591  *
1592  * Return: Zero if successful, or a negative error code.
1593  */
1594 int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
1595 {
1596 	guard(pcm_stream_lock_irqsave)(substream);
1597 	if (substream->runtime && snd_pcm_running(substream))
1598 		__snd_pcm_xrun(substream);
1599 	return 0;
1600 }
1601 EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
1602 
1603 /*
1604  * pause callbacks: pass boolean (to start pause or resume) as state argument
1605  */
1606 #define pause_pushed(state)	(__force bool)(state)
1607 
1608 static int snd_pcm_pre_pause(struct snd_pcm_substream *substream,
1609 			     snd_pcm_state_t state)
1610 {
1611 	struct snd_pcm_runtime *runtime = substream->runtime;
1612 	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
1613 		return -ENOSYS;
1614 	if (pause_pushed(state)) {
1615 		if (runtime->state != SNDRV_PCM_STATE_RUNNING)
1616 			return -EBADFD;
1617 	} else if (runtime->state != SNDRV_PCM_STATE_PAUSED)
1618 		return -EBADFD;
1619 	runtime->trigger_master = substream;
1620 	return 0;
1621 }
1622 
1623 static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
1624 			    snd_pcm_state_t state)
1625 {
1626 	if (substream->runtime->trigger_master != substream)
1627 		return 0;
1628 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done using a delta
1629 	 * from the current jiffies; setting hw_ptr_jiffies far in the past
1630 	 * gives a large enough delta to effectively skip the check once.
1631 	 */
1632 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
1633 	return substream->ops->trigger(substream,
1634 				       pause_pushed(state) ?
1635 				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
1636 				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
1637 }
1638 
1639 static void snd_pcm_undo_pause(struct snd_pcm_substream *substream,
1640 			       snd_pcm_state_t state)
1641 {
1642 	if (substream->runtime->trigger_master == substream)
1643 		substream->ops->trigger(substream,
1644 					pause_pushed(state) ?
1645 					SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
1646 					SNDRV_PCM_TRIGGER_PAUSE_PUSH);
1647 }
1648 
1649 static void snd_pcm_post_pause(struct snd_pcm_substream *substream,
1650 			       snd_pcm_state_t state)
1651 {
1652 	struct snd_pcm_runtime *runtime = substream->runtime;
1653 	snd_pcm_trigger_tstamp(substream);
1654 	if (pause_pushed(state)) {
1655 		__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_PAUSED);
1656 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MPAUSE);
1657 		wake_up(&runtime->sleep);
1658 		wake_up(&runtime->tsleep);
1659 	} else {
1660 		__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_RUNNING);
1661 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MCONTINUE);
1662 	}
1663 }
1664 
1665 static const struct action_ops snd_pcm_action_pause = {
1666 	.pre_action = snd_pcm_pre_pause,
1667 	.do_action = snd_pcm_do_pause,
1668 	.undo_action = snd_pcm_undo_pause,
1669 	.post_action = snd_pcm_post_pause
1670 };
1671 
1672 /*
1673  * Push/release the pause for all linked streams.
1674  */
1675 static int snd_pcm_pause(struct snd_pcm_substream *substream, bool push)
1676 {
1677 	return snd_pcm_action(&snd_pcm_action_pause, substream,
1678 			      (__force snd_pcm_state_t)push);
1679 }
1680 
1681 static int snd_pcm_pause_lock_irq(struct snd_pcm_substream *substream,
1682 				  bool push)
1683 {
1684 	return snd_pcm_action_lock_irq(&snd_pcm_action_pause, substream,
1685 				       (__force snd_pcm_state_t)push);
1686 }
1687 
1688 #ifdef CONFIG_PM
1689 /* suspend callback: state argument ignored */
1690 
1691 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream,
1692 			       snd_pcm_state_t state)
1693 {
1694 	struct snd_pcm_runtime *runtime = substream->runtime;
1695 	switch (runtime->state) {
1696 	case SNDRV_PCM_STATE_SUSPENDED:
1697 		return -EBUSY;
1698 	/* unresumable PCM state; return -EBUSY to skip the suspend */
1699 	case SNDRV_PCM_STATE_OPEN:
1700 	case SNDRV_PCM_STATE_SETUP:
1701 	case SNDRV_PCM_STATE_DISCONNECTED:
1702 		return -EBUSY;
1703 	}
1704 	runtime->trigger_master = substream;
1705 	return 0;
1706 }
1707 
1708 static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
1709 			      snd_pcm_state_t state)
1710 {
1711 	struct snd_pcm_runtime *runtime = substream->runtime;
1712 	if (runtime->trigger_master != substream)
1713 		return 0;
1714 	if (! snd_pcm_running(substream))
1715 		return 0;
1716 	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1717 	runtime->stop_operating = true;
1718 	return 0; /* suspend unconditionally */
1719 }
1720 
1721 static void snd_pcm_post_suspend(struct snd_pcm_substream *substream,
1722 				 snd_pcm_state_t state)
1723 {
1724 	struct snd_pcm_runtime *runtime = substream->runtime;
1725 	snd_pcm_trigger_tstamp(substream);
1726 	runtime->suspended_state = runtime->state;
1727 	runtime->status->suspended_state = runtime->suspended_state;
1728 	__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SUSPENDED);
1729 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSUSPEND);
1730 	wake_up(&runtime->sleep);
1731 	wake_up(&runtime->tsleep);
1732 }
1733 
1734 static const struct action_ops snd_pcm_action_suspend = {
1735 	.pre_action = snd_pcm_pre_suspend,
1736 	.do_action = snd_pcm_do_suspend,
1737 	.post_action = snd_pcm_post_suspend
1738 };
1739 
1740 /*
1741  * snd_pcm_suspend - trigger SUSPEND to all linked streams
1742  * @substream: the PCM substream
1743  *
1744  * After this call, all streams are changed to SUSPENDED state.
1745  *
1746  * Return: Zero if successful, or a negative error code.
1747  */
1748 static int snd_pcm_suspend(struct snd_pcm_substream *substream)
1749 {
1750 	guard(pcm_stream_lock_irqsave)(substream);
1751 	return snd_pcm_action(&snd_pcm_action_suspend, substream,
1752 			      ACTION_ARG_IGNORE);
1753 }
1754 
1755 /**
1756  * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
1757  * @pcm: the PCM instance
1758  *
1759  * After this call, all streams are changed to SUSPENDED state.
1760  *
1761  * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
1762  */
1763 int snd_pcm_suspend_all(struct snd_pcm *pcm)
1764 {
1765 	struct snd_pcm_substream *substream;
1766 	int stream, err = 0;
1767 
1768 	if (!pcm)
1769 		return 0;
1770 
1771 	for_each_pcm_substream(pcm, stream, substream) {
1772 		/* FIXME: the open/close code should lock this as well */
1773 		if (!substream->runtime)
1774 			continue;
1775 
1776 		/*
1777 		 * Skip BE DAI link PCMs that are internal and may
1778 		 * not have their substream ops set.
1779 		 */
1780 		if (!substream->ops)
1781 			continue;
1782 
1783 		err = snd_pcm_suspend(substream);
1784 		if (err < 0 && err != -EBUSY)
1785 			return err;
1786 	}
1787 
1788 	for_each_pcm_substream(pcm, stream, substream)
1789 		snd_pcm_sync_stop(substream, false);
1790 
1791 	return 0;
1792 }
1793 EXPORT_SYMBOL(snd_pcm_suspend_all);
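
/*
 * Usage sketch (illustrative only): a typical driver calls
 * snd_pcm_suspend_all() from its PM suspend callback before powering down
 * the hardware; "my_chip" and its fields are hypothetical:
 *
 *	static int my_driver_suspend(struct device *dev)
 *	{
 *		struct my_chip *chip = dev_get_drvdata(dev);
 *
 *		snd_pcm_suspend_all(chip->pcm);
 *		// save registers, power down the codec, etc.
 *		return 0;
 *	}
 */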
1794 
1795 /* resume callbacks: state argument ignored */
1796 
1797 static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
1798 			      snd_pcm_state_t state)
1799 {
1800 	struct snd_pcm_runtime *runtime = substream->runtime;
1801 	if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
1802 		return -EBADFD;
1803 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
1804 		return -ENOSYS;
1805 	runtime->trigger_master = substream;
1806 	return 0;
1807 }
1808 
1809 static int snd_pcm_do_resume(struct snd_pcm_substream *substream,
1810 			     snd_pcm_state_t state)
1811 {
1812 	struct snd_pcm_runtime *runtime = substream->runtime;
1813 	if (runtime->trigger_master != substream)
1814 		return 0;
1815 	/* DMA not running previously? */
1816 	if (runtime->suspended_state != SNDRV_PCM_STATE_RUNNING &&
1817 	    (runtime->suspended_state != SNDRV_PCM_STATE_DRAINING ||
1818 	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
1819 		return 0;
1820 	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
1821 }
1822 
1823 static void snd_pcm_undo_resume(struct snd_pcm_substream *substream,
1824 				snd_pcm_state_t state)
1825 {
1826 	if (substream->runtime->trigger_master == substream &&
1827 	    snd_pcm_running(substream))
1828 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
1829 }
1830 
1831 static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
1832 				snd_pcm_state_t state)
1833 {
1834 	struct snd_pcm_runtime *runtime = substream->runtime;
1835 	snd_pcm_trigger_tstamp(substream);
1836 	__snd_pcm_set_state(runtime, runtime->suspended_state);
1837 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
1838 }
1839 
1840 static const struct action_ops snd_pcm_action_resume = {
1841 	.pre_action = snd_pcm_pre_resume,
1842 	.do_action = snd_pcm_do_resume,
1843 	.undo_action = snd_pcm_undo_resume,
1844 	.post_action = snd_pcm_post_resume
1845 };
1846 
1847 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1848 {
1849 	return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream,
1850 				       ACTION_ARG_IGNORE);
1851 }
1852 
1853 #else
1854 
1855 static int snd_pcm_resume(struct snd_pcm_substream *substream)
1856 {
1857 	return -ENOSYS;
1858 }
1859 
1860 #endif /* CONFIG_PM */
1861 
1862 /*
1863  * xrun ioctl
1864  *
1865  * Change the RUNNING stream(s) to XRUN state.
1866  */
1867 static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1868 {
1869 	struct snd_pcm_runtime *runtime = substream->runtime;
1870 
1871 	guard(pcm_stream_lock_irq)(substream);
1872 	switch (runtime->state) {
1873 	case SNDRV_PCM_STATE_XRUN:
1874 		return 0;	/* already there */
1875 	case SNDRV_PCM_STATE_RUNNING:
1876 		__snd_pcm_xrun(substream);
1877 		return 0;
1878 	default:
1879 		return -EBADFD;
1880 	}
1881 }
1882 
1883 /*
1884  * reset ioctl
1885  */
1886 /* reset callbacks: state argument ignored */
1887 static int snd_pcm_pre_reset(struct snd_pcm_substream *substream,
1888 			     snd_pcm_state_t state)
1889 {
1890 	struct snd_pcm_runtime *runtime = substream->runtime;
1891 	switch (runtime->state) {
1892 	case SNDRV_PCM_STATE_RUNNING:
1893 	case SNDRV_PCM_STATE_PREPARED:
1894 	case SNDRV_PCM_STATE_PAUSED:
1895 	case SNDRV_PCM_STATE_SUSPENDED:
1896 		return 0;
1897 	default:
1898 		return -EBADFD;
1899 	}
1900 }
1901 
1902 static int snd_pcm_do_reset(struct snd_pcm_substream *substream,
1903 			    snd_pcm_state_t state)
1904 {
1905 	struct snd_pcm_runtime *runtime = substream->runtime;
1906 	int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
1907 	if (err < 0)
1908 		return err;
1909 	guard(pcm_stream_lock_irq)(substream);
1910 	runtime->hw_ptr_base = 0;
1911 	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
1912 		runtime->status->hw_ptr % runtime->period_size;
1913 	runtime->silence_start = runtime->status->hw_ptr;
1914 	runtime->silence_filled = 0;
1915 	return 0;
1916 }
1917 
1918 static void snd_pcm_post_reset(struct snd_pcm_substream *substream,
1919 			       snd_pcm_state_t state)
1920 {
1921 	struct snd_pcm_runtime *runtime = substream->runtime;
1922 	guard(pcm_stream_lock_irq)(substream);
1923 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1924 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
1925 	    runtime->silence_size > 0)
1926 		snd_pcm_playback_silence(substream, ULONG_MAX);
1927 }
1928 
1929 static const struct action_ops snd_pcm_action_reset = {
1930 	.pre_action = snd_pcm_pre_reset,
1931 	.do_action = snd_pcm_do_reset,
1932 	.post_action = snd_pcm_post_reset
1933 };
1934 
1935 static int snd_pcm_reset(struct snd_pcm_substream *substream)
1936 {
1937 	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream,
1938 					ACTION_ARG_IGNORE);
1939 }
1940 
1941 /*
1942  * prepare ioctl
1943  */
1944 /* pass f_flags as state argument */
1945 static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
1946 			       snd_pcm_state_t state)
1947 {
1948 	struct snd_pcm_runtime *runtime = substream->runtime;
1949 	int f_flags = (__force int)state;
1950 
1951 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
1952 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
1953 		return -EBADFD;
1954 	if (snd_pcm_running(substream))
1955 		return -EBUSY;
1956 	substream->f_flags = f_flags;
1957 	return 0;
1958 }
1959 
1960 static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
1961 			      snd_pcm_state_t state)
1962 {
1963 	int err;
1964 	snd_pcm_sync_stop(substream, true);
1965 	err = substream->ops->prepare(substream);
1966 	if (err < 0)
1967 		return err;
1968 	return snd_pcm_do_reset(substream, state);
1969 }
1970 
1971 static void snd_pcm_post_prepare(struct snd_pcm_substream *substream,
1972 				 snd_pcm_state_t state)
1973 {
1974 	struct snd_pcm_runtime *runtime = substream->runtime;
1975 	runtime->control->appl_ptr = runtime->status->hw_ptr;
1976 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
1977 }
1978 
1979 static const struct action_ops snd_pcm_action_prepare = {
1980 	.pre_action = snd_pcm_pre_prepare,
1981 	.do_action = snd_pcm_do_prepare,
1982 	.post_action = snd_pcm_post_prepare
1983 };
1984 
1985 /**
1986  * snd_pcm_prepare - prepare the PCM substream to be triggerable
1987  * @substream: the PCM substream instance
1988  * @file: file to refer to for f_flags
1989  *
1990  * Return: Zero if successful, or a negative error code.
1991  */
1992 static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1993 			   struct file *file)
1994 {
1995 	int f_flags;
1996 
1997 	if (file)
1998 		f_flags = file->f_flags;
1999 	else
2000 		f_flags = substream->f_flags;
2001 
2002 	scoped_guard(pcm_stream_lock_irq, substream) {
2003 		switch (substream->runtime->state) {
2004 		case SNDRV_PCM_STATE_PAUSED:
2005 			snd_pcm_pause(substream, false);
2006 			fallthrough;
2007 		case SNDRV_PCM_STATE_SUSPENDED:
2008 			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2009 			break;
2010 		}
2011 	}
2012 
2013 	return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
2014 					substream,
2015 					(__force snd_pcm_state_t)f_flags);
2016 }
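
/*
 * Recovery sketch (illustrative only): after an xrun, user space typically
 * re-issues SNDRV_PCM_IOCTL_PREPARE (which takes no argument) and then
 * restarts I/O; "fd" and the EPIPE check are hypothetical application code:
 *
 *	if (write_failed && errno == EPIPE) {		// underrun/overrun
 *		ioctl(fd, SNDRV_PCM_IOCTL_PREPARE);	// back to PREPARED
 *		// refill the buffer and restart the stream
 *	}
 */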
2017 
2018 /*
2019  * drain ioctl
2020  */
2021 
2022 /* drain init callbacks: state argument ignored */
2023 static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream,
2024 				  snd_pcm_state_t state)
2025 {
2026 	struct snd_pcm_runtime *runtime = substream->runtime;
2027 	switch (runtime->state) {
2028 	case SNDRV_PCM_STATE_OPEN:
2029 	case SNDRV_PCM_STATE_DISCONNECTED:
2030 	case SNDRV_PCM_STATE_SUSPENDED:
2031 		return -EBADFD;
2032 	}
2033 	runtime->trigger_master = substream;
2034 	return 0;
2035 }
2036 
2037 static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream,
2038 				 snd_pcm_state_t state)
2039 {
2040 	struct snd_pcm_runtime *runtime = substream->runtime;
2041 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
2042 		switch (runtime->state) {
2043 		case SNDRV_PCM_STATE_PREPARED:
2044 			/* start playback stream if possible */
2045 			if (!snd_pcm_playback_empty(substream)) {
2046 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
2047 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
2048 			} else {
2049 				__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SETUP);
2050 			}
2051 			break;
2052 		case SNDRV_PCM_STATE_RUNNING:
2053 			__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_DRAINING);
2054 			break;
2055 		case SNDRV_PCM_STATE_XRUN:
2056 			__snd_pcm_set_state(runtime, SNDRV_PCM_STATE_SETUP);
2057 			break;
2058 		default:
2059 			break;
2060 		}
2061 	} else {
2062 		/* stop running stream */
2063 		if (runtime->state == SNDRV_PCM_STATE_RUNNING) {
2064 			snd_pcm_state_t new_state;
2065 
2066 			new_state = snd_pcm_capture_avail(runtime) > 0 ?
2067 				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
2068 			snd_pcm_do_stop(substream, new_state);
2069 			snd_pcm_post_stop(substream, new_state);
2070 		}
2071 	}
2072 
2073 	if (runtime->state == SNDRV_PCM_STATE_DRAINING &&
2074 	    runtime->trigger_master == substream &&
2075 	    (runtime->hw.info & SNDRV_PCM_INFO_DRAIN_TRIGGER))
2076 		return substream->ops->trigger(substream,
2077 					       SNDRV_PCM_TRIGGER_DRAIN);
2078 
2079 	return 0;
2080 }
2081 
2082 static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream,
2083 				    snd_pcm_state_t state)
2084 {
2085 }
2086 
2087 static const struct action_ops snd_pcm_action_drain_init = {
2088 	.pre_action = snd_pcm_pre_drain_init,
2089 	.do_action = snd_pcm_do_drain_init,
2090 	.post_action = snd_pcm_post_drain_init
2091 };
2092 
2093 /*
2094  * Drain the stream(s).
2095  * When the substream is linked, sync until the draining of all playback streams
2096  * is finished.
2097  * After this call, all streams are supposed to be in either the SETUP or the
2098  * DRAINING (capture only) state.
2099  */
2100 static int snd_pcm_drain(struct snd_pcm_substream *substream,
2101 			 struct file *file)
2102 {
2103 	struct snd_card *card;
2104 	struct snd_pcm_runtime *runtime;
2105 	struct snd_pcm_substream *s;
2106 	struct snd_pcm_group *group;
2107 	wait_queue_entry_t wait;
2108 	int result = 0;
2109 	int nonblock = 0;
2110 
2111 	card = substream->pcm->card;
2112 	runtime = substream->runtime;
2113 
2114 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2115 		return -EBADFD;
2116 
2117 	if (file) {
2118 		if (file->f_flags & O_NONBLOCK)
2119 			nonblock = 1;
2120 	} else if (substream->f_flags & O_NONBLOCK)
2121 		nonblock = 1;
2122 
2123 	snd_pcm_stream_lock_irq(substream);
2124 	/* resume pause */
2125 	if (runtime->state == SNDRV_PCM_STATE_PAUSED)
2126 		snd_pcm_pause(substream, false);
2127 
2128 	/* pre-start/stop - all running streams are changed to DRAINING state */
2129 	result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
2130 				ACTION_ARG_IGNORE);
2131 	if (result < 0)
2132 		goto unlock;
2133 	/* in non-blocking mode, don't wait in the ioctl but let the caller poll */
2134 	if (nonblock) {
2135 		result = -EAGAIN;
2136 		goto unlock;
2137 	}
2138 
2139 	for (;;) {
2140 		long tout;
2141 		struct snd_pcm_runtime *to_check;
2142 		if (signal_pending(current)) {
2143 			result = -ERESTARTSYS;
2144 			break;
2145 		}
2146 		/* find a substream to drain */
2147 		to_check = NULL;
2148 		group = snd_pcm_stream_group_ref(substream);
2149 		snd_pcm_group_for_each_entry(s, substream) {
2150 			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
2151 				continue;
2152 			runtime = s->runtime;
2153 			if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
2154 				to_check = runtime;
2155 				break;
2156 			}
2157 		}
2158 		snd_pcm_group_unref(group, substream);
2159 		if (!to_check)
2160 			break; /* all drained */
2161 		init_waitqueue_entry(&wait, current);
2162 		set_current_state(TASK_INTERRUPTIBLE);
2163 		add_wait_queue(&to_check->sleep, &wait);
2164 		snd_pcm_stream_unlock_irq(substream);
2165 		if (runtime->no_period_wakeup)
2166 			tout = MAX_SCHEDULE_TIMEOUT;
2167 		else {
2168 			tout = 100;
2169 			if (runtime->rate) {
2170 				long t = runtime->buffer_size * 1100 / runtime->rate;
2171 				tout = max(t, tout);
2172 			}
2173 			tout = msecs_to_jiffies(tout);
2174 		}
2175 		tout = schedule_timeout(tout);
2176 
2177 		snd_pcm_stream_lock_irq(substream);
2178 		group = snd_pcm_stream_group_ref(substream);
2179 		snd_pcm_group_for_each_entry(s, substream) {
2180 			if (s->runtime == to_check) {
2181 				remove_wait_queue(&to_check->sleep, &wait);
2182 				break;
2183 			}
2184 		}
2185 		snd_pcm_group_unref(group, substream);
2186 
2187 		if (card->shutdown) {
2188 			result = -ENODEV;
2189 			break;
2190 		}
2191 		if (tout == 0) {
2192 			if (substream->runtime->state == SNDRV_PCM_STATE_SUSPENDED)
2193 				result = -ESTRPIPE;
2194 			else {
2195 				dev_dbg(substream->pcm->card->dev,
2196 					"playback drain timeout (DMA or IRQ trouble?)\n");
2197 				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2198 				result = -EIO;
2199 			}
2200 			break;
2201 		}
2202 	}
2203 
2204  unlock:
2205 	snd_pcm_stream_unlock_irq(substream);
2206 
2207 	return result;
2208 }
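
/*
 * Usage sketch (illustrative only): a blocking drain from user space is a
 * single ioctl; when the fd was opened with O_NONBLOCK the call returns
 * immediately with EAGAIN and the application is expected to wait (e.g. via
 * poll()) and check the stream state itself:
 *
 *	if (ioctl(fd, SNDRV_PCM_IOCTL_DRAIN) < 0 && errno == EAGAIN) {
 *		// non-blocking mode: poll/sleep until the state leaves DRAINING
 *	}
 */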
2209 
2210 /*
2211  * drop ioctl
2212  *
2213  * Immediately put all linked substreams into SETUP state.
2214  */
2215 static int snd_pcm_drop(struct snd_pcm_substream *substream)
2216 {
2217 	struct snd_pcm_runtime *runtime;
2218 	int result = 0;
2219 
2220 	if (PCM_RUNTIME_CHECK(substream))
2221 		return -ENXIO;
2222 	runtime = substream->runtime;
2223 
2224 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
2225 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
2226 		return -EBADFD;
2227 
2228 	guard(pcm_stream_lock_irq)(substream);
2229 	/* resume pause */
2230 	if (runtime->state == SNDRV_PCM_STATE_PAUSED)
2231 		snd_pcm_pause(substream, false);
2232 
2233 	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2234 	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2235 
2236 	return result;
2237 }
2238 
2239 
2240 static bool is_pcm_file(struct file *file)
2241 {
2242 	struct inode *inode = file_inode(file);
2243 	struct snd_pcm *pcm;
2244 	unsigned int minor;
2245 
2246 	if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
2247 		return false;
2248 	minor = iminor(inode);
2249 	pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2250 	if (!pcm)
2251 		pcm = snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2252 	if (!pcm)
2253 		return false;
2254 	snd_card_unref(pcm->card);
2255 	return true;
2256 }
2257 
2258 /*
2259  * PCM link handling
2260  */
2261 static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
2262 {
2263 	struct snd_pcm_file *pcm_file;
2264 	struct snd_pcm_substream *substream1;
2265 	struct snd_pcm_group *group __free(kfree) = NULL;
2266 	struct snd_pcm_group *target_group;
2267 	bool nonatomic = substream->pcm->nonatomic;
2268 	CLASS(fd, f)(fd);
2269 
2270 	if (fd_empty(f))
2271 		return -EBADFD;
2272 	if (!is_pcm_file(fd_file(f)))
2273 		return -EBADFD;
2274 
2275 	pcm_file = fd_file(f)->private_data;
2276 	substream1 = pcm_file->substream;
2277 
2278 	if (substream == substream1)
2279 		return -EINVAL;
2280 
2281 	group = kzalloc(sizeof(*group), GFP_KERNEL);
2282 	if (!group)
2283 		return -ENOMEM;
2284 	snd_pcm_group_init(group);
2285 
2286 	guard(rwsem_write)(&snd_pcm_link_rwsem);
2287 	if (substream->runtime->state == SNDRV_PCM_STATE_OPEN ||
2288 	    substream->runtime->state != substream1->runtime->state ||
2289 	    substream->pcm->nonatomic != substream1->pcm->nonatomic)
2290 		return -EBADFD;
2291 	if (snd_pcm_stream_linked(substream1))
2292 		return -EALREADY;
2293 
2294 	scoped_guard(pcm_stream_lock_irq, substream) {
2295 		if (!snd_pcm_stream_linked(substream)) {
2296 			snd_pcm_group_assign(substream, group);
2297 			group = NULL; /* assigned, don't free this one below */
2298 		}
2299 		target_group = substream->group;
2300 	}
2301 
2302 	snd_pcm_group_lock_irq(target_group, nonatomic);
2303 	snd_pcm_stream_lock_nested(substream1);
2304 	snd_pcm_group_assign(substream1, target_group);
2305 	refcount_inc(&target_group->refs);
2306 	snd_pcm_stream_unlock(substream1);
2307 	snd_pcm_group_unlock_irq(target_group, nonatomic);
2308 	return 0;
2309 }
2310 
2311 static void relink_to_local(struct snd_pcm_substream *substream)
2312 {
2313 	snd_pcm_stream_lock_nested(substream);
2314 	snd_pcm_group_assign(substream, &substream->self_group);
2315 	snd_pcm_stream_unlock(substream);
2316 }
2317 
2318 static int snd_pcm_unlink(struct snd_pcm_substream *substream)
2319 {
2320 	struct snd_pcm_group *group;
2321 	bool nonatomic = substream->pcm->nonatomic;
2322 	bool do_free = false;
2323 
2324 	guard(rwsem_write)(&snd_pcm_link_rwsem);
2325 
2326 	if (!snd_pcm_stream_linked(substream))
2327 		return -EALREADY;
2328 
2329 	group = substream->group;
2330 	snd_pcm_group_lock_irq(group, nonatomic);
2331 
2332 	relink_to_local(substream);
2333 	refcount_dec(&group->refs);
2334 
2335 	/* detach the last stream, too */
2336 	if (list_is_singular(&group->substreams)) {
2337 		relink_to_local(list_first_entry(&group->substreams,
2338 						 struct snd_pcm_substream,
2339 						 link_list));
2340 		do_free = refcount_dec_and_test(&group->refs);
2341 	}
2342 
2343 	snd_pcm_group_unlock_irq(group, nonatomic);
2344 	if (do_free)
2345 		kfree(group);
2346 	return 0;
2347 }
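
/*
 * Usage sketch (illustrative only): linking is requested from user space by
 * passing the other PCM file descriptor as the ioctl argument; unlinking
 * takes no argument.  Both substreams must be in the same (non-OPEN) state:
 *
 *	ioctl(fd_playback, SNDRV_PCM_IOCTL_LINK, fd_capture);
 *	// triggers issued on either fd now act on both substreams
 *	ioctl(fd_playback, SNDRV_PCM_IOCTL_UNLINK);
 */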
2348 
2349 /*
2350  * hw configurator
2351  */
2352 static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
2353 			       struct snd_pcm_hw_rule *rule)
2354 {
2355 	struct snd_interval t;
2356 	snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
2357 		     hw_param_interval_c(params, rule->deps[1]), &t);
2358 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2359 }
2360 
2361 static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
2362 			       struct snd_pcm_hw_rule *rule)
2363 {
2364 	struct snd_interval t;
2365 	snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
2366 		     hw_param_interval_c(params, rule->deps[1]), &t);
2367 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2368 }
2369 
2370 static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
2371 				   struct snd_pcm_hw_rule *rule)
2372 {
2373 	struct snd_interval t;
2374 	snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
2375 			 hw_param_interval_c(params, rule->deps[1]),
2376 			 (unsigned long) rule->private, &t);
2377 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2378 }
2379 
2380 static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
2381 				   struct snd_pcm_hw_rule *rule)
2382 {
2383 	struct snd_interval t;
2384 	snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
2385 			 (unsigned long) rule->private,
2386 			 hw_param_interval_c(params, rule->deps[1]), &t);
2387 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2388 }
2389 
2390 static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
2391 				  struct snd_pcm_hw_rule *rule)
2392 {
2393 	snd_pcm_format_t k;
2394 	const struct snd_interval *i =
2395 				hw_param_interval_c(params, rule->deps[0]);
2396 	struct snd_mask m;
2397 	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2398 	snd_mask_any(&m);
2399 	pcm_for_each_format(k) {
2400 		int bits;
2401 		if (!snd_mask_test_format(mask, k))
2402 			continue;
2403 		bits = snd_pcm_format_physical_width(k);
2404 		if (bits <= 0)
2405 			continue; /* ignore invalid formats */
2406 		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
2407 			snd_mask_reset(&m, (__force unsigned)k);
2408 	}
2409 	return snd_mask_refine(mask, &m);
2410 }
2411 
2412 static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
2413 				       struct snd_pcm_hw_rule *rule)
2414 {
2415 	struct snd_interval t;
2416 	snd_pcm_format_t k;
2417 
2418 	t.min = UINT_MAX;
2419 	t.max = 0;
2420 	t.openmin = 0;
2421 	t.openmax = 0;
2422 	pcm_for_each_format(k) {
2423 		int bits;
2424 		if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
2425 			continue;
2426 		bits = snd_pcm_format_physical_width(k);
2427 		if (bits <= 0)
2428 			continue; /* ignore invalid formats */
2429 		if (t.min > (unsigned)bits)
2430 			t.min = bits;
2431 		if (t.max < (unsigned)bits)
2432 			t.max = bits;
2433 	}
2434 	t.integer = 1;
2435 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2436 }
2437 
2438 #if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12 ||\
2439 	SNDRV_PCM_RATE_128000 != 1 << 19
2440 #error "Change this table"
2441 #endif
2442 
2443 /* NOTE: the list is unsorted! */
2444 static const unsigned int rates[] = {
2445 	5512, 8000, 11025, 16000, 22050, 32000, 44100,
2446 	48000, 64000, 88200, 96000, 176400, 192000, 352800, 384000, 705600, 768000,
2447 	/* extended */
2448 	12000, 24000, 128000
2449 };
2450 
2451 const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
2452 	.count = ARRAY_SIZE(rates),
2453 	.list = rates,
2454 };
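
/*
 * Driver-side sketch (illustrative only): a driver that supports only a few
 * discrete rates typically installs its own list constraint from its .open
 * callback with snd_pcm_hw_constraint_list(); the rates and names below are
 * hypothetical:
 *
 *	static const unsigned int my_rates[] = { 44100, 48000, 96000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_list = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *	};
 *
 *	// in the driver's .open callback:
 *	err = snd_pcm_hw_constraint_list(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_list);
 */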
2455 
2456 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
2457 				struct snd_pcm_hw_rule *rule)
2458 {
2459 	struct snd_pcm_hardware *hw = rule->private;
2460 	return snd_interval_list(hw_param_interval(params, rule->var),
2461 				 snd_pcm_known_rates.count,
2462 				 snd_pcm_known_rates.list, hw->rates);
2463 }
2464 
2465 static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
2466 					    struct snd_pcm_hw_rule *rule)
2467 {
2468 	struct snd_interval t;
2469 	struct snd_pcm_substream *substream = rule->private;
2470 	t.min = 0;
2471 	t.max = substream->buffer_bytes_max;
2472 	t.openmin = 0;
2473 	t.openmax = 0;
2474 	t.integer = 1;
2475 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
2476 }
2477 
2478 static int snd_pcm_hw_rule_subformats(struct snd_pcm_hw_params *params,
2479 				      struct snd_pcm_hw_rule *rule)
2480 {
2481 	struct snd_mask *sfmask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_SUBFORMAT);
2482 	struct snd_mask *fmask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2483 	u32 *subformats = rule->private;
2484 	snd_pcm_format_t f;
2485 	struct snd_mask m;
2486 
2487 	snd_mask_none(&m);
2488 	/* All PCMs support at least the default STD subformat. */
2489 	snd_mask_set(&m, (__force unsigned)SNDRV_PCM_SUBFORMAT_STD);
2490 
2491 	pcm_for_each_format(f) {
2492 		if (!snd_mask_test(fmask, (__force unsigned)f))
2493 			continue;
2494 
2495 		if (f == SNDRV_PCM_FORMAT_S32_LE && *subformats)
2496 			m.bits[0] |= *subformats;
2497 		else if (snd_pcm_format_linear(f))
2498 			snd_mask_set(&m, (__force unsigned)SNDRV_PCM_SUBFORMAT_MSBITS_MAX);
2499 	}
2500 
2501 	return snd_mask_refine(sfmask, &m);
2502 }
2503 
2504 static int snd_pcm_hw_constraint_subformats(struct snd_pcm_runtime *runtime,
2505 					   unsigned int cond, u32 *subformats)
2506 {
2507 	return snd_pcm_hw_rule_add(runtime, cond, -1,
2508 				   snd_pcm_hw_rule_subformats, (void *)subformats,
2509 				   SNDRV_PCM_HW_PARAM_SUBFORMAT,
2510 				   SNDRV_PCM_HW_PARAM_FORMAT, -1);
2511 }
2512 
2513 static int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
2514 {
2515 	struct snd_pcm_runtime *runtime = substream->runtime;
2516 	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
2517 	int k, err;
2518 
2519 	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
2520 		snd_mask_any(constrs_mask(constrs, k));
2521 	}
2522 
2523 	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
2524 		snd_interval_any(constrs_interval(constrs, k));
2525 	}
2526 
2527 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
2528 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
2529 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
2530 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
2531 	snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
2532 
2533 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
2534 				   snd_pcm_hw_rule_format, NULL,
2535 				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2536 	if (err < 0)
2537 		return err;
2538 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2539 				  snd_pcm_hw_rule_sample_bits, NULL,
2540 				  SNDRV_PCM_HW_PARAM_FORMAT,
2541 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2542 	if (err < 0)
2543 		return err;
2544 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2545 				  snd_pcm_hw_rule_div, NULL,
2546 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2547 	if (err < 0)
2548 		return err;
2549 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2550 				  snd_pcm_hw_rule_mul, NULL,
2551 				  SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
2552 	if (err < 0)
2553 		return err;
2554 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2555 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2556 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2557 	if (err < 0)
2558 		return err;
2559 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
2560 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2561 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
2562 	if (err < 0)
2563 		return err;
2564 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2565 				  snd_pcm_hw_rule_div, NULL,
2566 				  SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
2567 	if (err < 0)
2568 		return err;
2569 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2570 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2571 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
2572 	if (err < 0)
2573 		return err;
2574 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2575 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2576 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
2577 	if (err < 0)
2578 		return err;
2579 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
2580 				  snd_pcm_hw_rule_div, NULL,
2581 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
2582 	if (err < 0)
2583 		return err;
2584 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2585 				  snd_pcm_hw_rule_div, NULL,
2586 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2587 	if (err < 0)
2588 		return err;
2589 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2590 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2591 				  SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2592 	if (err < 0)
2593 		return err;
2594 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
2595 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2596 				  SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2597 	if (err < 0)
2598 		return err;
2599 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2600 				  snd_pcm_hw_rule_mul, NULL,
2601 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
2602 	if (err < 0)
2603 		return err;
2604 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2605 				  snd_pcm_hw_rule_mulkdiv, (void*) 8,
2606 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2607 	if (err < 0)
2608 		return err;
2609 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
2610 				  snd_pcm_hw_rule_muldivk, (void*) 1000000,
2611 				  SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
2612 	if (err < 0)
2613 		return err;
2614 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2615 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2616 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2617 	if (err < 0)
2618 		return err;
2619 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2620 				  snd_pcm_hw_rule_muldivk, (void*) 8,
2621 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
2622 	if (err < 0)
2623 		return err;
2624 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
2625 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2626 				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2627 	if (err < 0)
2628 		return err;
2629 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
2630 				  snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
2631 				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
2632 	if (err < 0)
2633 		return err;
2634 	return 0;
2635 }
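
/*
 * Driver-side sketch (illustrative only): in addition to the generic rules
 * installed above, a driver may register its own dependency rule with
 * snd_pcm_hw_rule_add(); the rule function and the limits are hypothetical:
 *
 *	static int my_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					    struct snd_pcm_hw_rule *rule)
 *	{
 *		const struct snd_interval *rate =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 8, .integer = 1 };
 *
 *		if (rate->min > 96000)
 *			t.max = 2;	// e.g. only stereo above 96 kHz
 *		return snd_interval_refine(hw_param_interval(params, rule->var),
 *					   &t);
 *	}
 *
 *	// in the driver's .open callback:
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  my_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */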
2636 
2637 static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
2638 {
2639 	struct snd_pcm_runtime *runtime = substream->runtime;
2640 	struct snd_pcm_hardware *hw = &runtime->hw;
2641 	int err;
2642 	unsigned int mask = 0;
2643 
2644 	if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2645 		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
2646 	if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2647 		mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
2648 	if (hw_support_mmap(substream)) {
2649 		if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
2650 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
2651 		if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
2652 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
2653 		if (hw->info & SNDRV_PCM_INFO_COMPLEX)
2654 			mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
2655 	}
2656 	err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
2657 	if (err < 0)
2658 		return err;
2659 
2660 	err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
2661 	if (err < 0)
2662 		return err;
2663 
2664 	err = snd_pcm_hw_constraint_subformats(runtime, 0, &hw->subformats);
2665 	if (err < 0)
2666 		return err;
2667 
2668 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
2669 					   hw->channels_min, hw->channels_max);
2670 	if (err < 0)
2671 		return err;
2672 
2673 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
2674 					   hw->rate_min, hw->rate_max);
2675 	if (err < 0)
2676 		return err;
2677 
2678 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
2679 					   hw->period_bytes_min, hw->period_bytes_max);
2680 	if (err < 0)
2681 		return err;
2682 
2683 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
2684 					   hw->periods_min, hw->periods_max);
2685 	if (err < 0)
2686 		return err;
2687 
2688 	err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2689 					   hw->period_bytes_min, hw->buffer_bytes_max);
2690 	if (err < 0)
2691 		return err;
2692 
2693 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
2694 				  snd_pcm_hw_rule_buffer_bytes_max, substream,
2695 				  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
2696 	if (err < 0)
2697 		return err;
2698 
2699 	/* FIXME: remove */
2700 	if (runtime->dma_bytes) {
2701 		err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
2702 		if (err < 0)
2703 			return err;
2704 	}
2705 
2706 	if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
2707 		err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
2708 					  snd_pcm_hw_rule_rate, hw,
2709 					  SNDRV_PCM_HW_PARAM_RATE, -1);
2710 		if (err < 0)
2711 			return err;
2712 	}
2713 
2714 	/* FIXME: this belongs to the lowlevel driver */
2715 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
2716 
2717 	return 0;
2718 }
2719 
2720 static void pcm_release_private(struct snd_pcm_substream *substream)
2721 {
2722 	if (snd_pcm_stream_linked(substream))
2723 		snd_pcm_unlink(substream);
2724 }
2725 
2726 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2727 {
2728 	substream->ref_count--;
2729 	if (substream->ref_count > 0)
2730 		return;
2731 
2732 	snd_pcm_drop(substream);
2733 	if (substream->hw_opened) {
2734 		if (substream->runtime->state != SNDRV_PCM_STATE_OPEN)
2735 			do_hw_free(substream);
2736 		substream->ops->close(substream);
2737 		substream->hw_opened = 0;
2738 	}
2739 	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
2740 		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
2741 	if (substream->pcm_release) {
2742 		substream->pcm_release(substream);
2743 		substream->pcm_release = NULL;
2744 	}
2745 	snd_pcm_detach_substream(substream);
2746 }
2747 EXPORT_SYMBOL(snd_pcm_release_substream);
2748 
2749 int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2750 			   struct file *file,
2751 			   struct snd_pcm_substream **rsubstream)
2752 {
2753 	struct snd_pcm_substream *substream;
2754 	int err;
2755 
2756 	err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2757 	if (err < 0)
2758 		return err;
2759 	if (substream->ref_count > 1) {
2760 		*rsubstream = substream;
2761 		return 0;
2762 	}
2763 
2764 	err = snd_pcm_hw_constraints_init(substream);
2765 	if (err < 0) {
2766 		pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
2767 		goto error;
2768 	}
2769 
2770 	err = substream->ops->open(substream);
2771 	if (err < 0)
2772 		goto error;
2773 
2774 	substream->hw_opened = 1;
2775 
2776 	err = snd_pcm_hw_constraints_complete(substream);
2777 	if (err < 0) {
2778 		pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
2779 		goto error;
2780 	}
2781 
2782 	/* automatically set EXPLICIT_SYNC flag in the managed mode whenever
2783 	 * the DMA buffer requires it
2784 	 */
2785 	if (substream->managed_buffer_alloc &&
2786 	    substream->dma_buffer.dev.need_sync)
2787 		substream->runtime->hw.info |= SNDRV_PCM_INFO_EXPLICIT_SYNC;
2788 
2789 	*rsubstream = substream;
2790 	return 0;
2791 
2792  error:
2793 	snd_pcm_release_substream(substream);
2794 	return err;
2795 }
2796 EXPORT_SYMBOL(snd_pcm_open_substream);
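
/*
 * Kernel-side sketch (illustrative only): code that opens a substream
 * directly (e.g. an emulation layer) pairs this call with
 * snd_pcm_release_substream() on its error and close paths; the error
 * condition below is hypothetical:
 *
 *	struct snd_pcm_substream *substream;
 *	int err = snd_pcm_open_substream(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *					 NULL, &substream);
 *	if (err < 0)
 *		return err;
 *	if (further_setup_failed) {
 *		snd_pcm_release_substream(substream);
 *		return -EIO;
 *	}
 */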
2797 
2798 static int snd_pcm_open_file(struct file *file,
2799 			     struct snd_pcm *pcm,
2800 			     int stream)
2801 {
2802 	struct snd_pcm_file *pcm_file;
2803 	struct snd_pcm_substream *substream;
2804 	int err;
2805 
2806 	err = snd_pcm_open_substream(pcm, stream, file, &substream);
2807 	if (err < 0)
2808 		return err;
2809 
2810 	pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2811 	if (pcm_file == NULL) {
2812 		snd_pcm_release_substream(substream);
2813 		return -ENOMEM;
2814 	}
2815 	pcm_file->substream = substream;
2816 	if (substream->ref_count == 1)
2817 		substream->pcm_release = pcm_release_private;
2818 	file->private_data = pcm_file;
2819 
2820 	return 0;
2821 }
2822 
2823 static int snd_pcm_playback_open(struct inode *inode, struct file *file)
2824 {
2825 	struct snd_pcm *pcm;
2826 	int err = nonseekable_open(inode, file);
2827 	if (err < 0)
2828 		return err;
2829 	pcm = snd_lookup_minor_data(iminor(inode),
2830 				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
2831 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
2832 	if (pcm)
2833 		snd_card_unref(pcm->card);
2834 	return err;
2835 }
2836 
2837 static int snd_pcm_capture_open(struct inode *inode, struct file *file)
2838 {
2839 	struct snd_pcm *pcm;
2840 	int err = nonseekable_open(inode, file);
2841 	if (err < 0)
2842 		return err;
2843 	pcm = snd_lookup_minor_data(iminor(inode),
2844 				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
2845 	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
2846 	if (pcm)
2847 		snd_card_unref(pcm->card);
2848 	return err;
2849 }
2850 
2851 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
2852 {
2853 	int err;
2854 	wait_queue_entry_t wait;
2855 
2856 	if (pcm == NULL) {
2857 		err = -ENODEV;
2858 		goto __error1;
2859 	}
2860 	err = snd_card_file_add(pcm->card, file);
2861 	if (err < 0)
2862 		goto __error1;
2863 	if (!try_module_get(pcm->card->module)) {
2864 		err = -EFAULT;
2865 		goto __error2;
2866 	}
2867 	init_waitqueue_entry(&wait, current);
2868 	add_wait_queue(&pcm->open_wait, &wait);
2869 	mutex_lock(&pcm->open_mutex);
2870 	while (1) {
2871 		err = snd_pcm_open_file(file, pcm, stream);
2872 		if (err >= 0)
2873 			break;
2874 		if (err == -EAGAIN) {
2875 			if (file->f_flags & O_NONBLOCK) {
2876 				err = -EBUSY;
2877 				break;
2878 			}
2879 		} else
2880 			break;
2881 		set_current_state(TASK_INTERRUPTIBLE);
2882 		mutex_unlock(&pcm->open_mutex);
2883 		schedule();
2884 		mutex_lock(&pcm->open_mutex);
2885 		if (pcm->card->shutdown) {
2886 			err = -ENODEV;
2887 			break;
2888 		}
2889 		if (signal_pending(current)) {
2890 			err = -ERESTARTSYS;
2891 			break;
2892 		}
2893 	}
2894 	remove_wait_queue(&pcm->open_wait, &wait);
2895 	mutex_unlock(&pcm->open_mutex);
2896 	if (err < 0)
2897 		goto __error;
2898 	return err;
2899 
2900  __error:
2901 	module_put(pcm->card->module);
2902  __error2:
2903 	snd_card_file_remove(pcm->card, file);
2904  __error1:
2905 	return err;
2906 }
2907 
2908 static int snd_pcm_release(struct inode *inode, struct file *file)
2909 {
2910 	struct snd_pcm *pcm;
2911 	struct snd_pcm_substream *substream;
2912 	struct snd_pcm_file *pcm_file;
2913 
2914 	pcm_file = file->private_data;
2915 	substream = pcm_file->substream;
2916 	if (snd_BUG_ON(!substream))
2917 		return -ENXIO;
2918 	pcm = substream->pcm;
2919 
2920 	/* block until the device gets woken up as it may touch the hardware */
2921 	snd_power_wait(pcm->card);
2922 
2923 	scoped_guard(mutex, &pcm->open_mutex) {
2924 		snd_pcm_release_substream(substream);
2925 		kfree(pcm_file);
2926 	}
2927 	wake_up(&pcm->open_wait);
2928 	module_put(pcm->card->module);
2929 	snd_card_file_remove(pcm->card, file);
2930 	return 0;
2931 }
2932 
2933 /* check and update the PCM state; return 0 or a negative error.
2934  * Call this inside the PCM stream lock.
2935  */
2936 static int do_pcm_hwsync(struct snd_pcm_substream *substream)
2937 {
2938 	switch (substream->runtime->state) {
2939 	case SNDRV_PCM_STATE_DRAINING:
2940 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
2941 			return -EBADFD;
2942 		fallthrough;
2943 	case SNDRV_PCM_STATE_RUNNING:
2944 		return snd_pcm_update_hw_ptr(substream);
2945 	case SNDRV_PCM_STATE_PREPARED:
2946 	case SNDRV_PCM_STATE_PAUSED:
2947 		return 0;
2948 	case SNDRV_PCM_STATE_SUSPENDED:
2949 		return -ESTRPIPE;
2950 	case SNDRV_PCM_STATE_XRUN:
2951 		return -EPIPE;
2952 	default:
2953 		return -EBADFD;
2954 	}
2955 }
2956 
2957 /* increase the appl_ptr; returns the processed frames or a negative error */
2958 static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
2959 					  snd_pcm_uframes_t frames,
2960 					   snd_pcm_sframes_t avail)
2961 {
2962 	struct snd_pcm_runtime *runtime = substream->runtime;
2963 	snd_pcm_sframes_t appl_ptr;
2964 	int ret;
2965 
2966 	if (avail <= 0)
2967 		return 0;
2968 	if (frames > (snd_pcm_uframes_t)avail)
2969 		frames = avail;
2970 	appl_ptr = runtime->control->appl_ptr + frames;
2971 	if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
2972 		appl_ptr -= runtime->boundary;
2973 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2974 	return ret < 0 ? ret : frames;
2975 }
2976 
2977 /* decrease the appl_ptr; returns the processed frames or zero for error */
2978 static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
2979 					 snd_pcm_uframes_t frames,
2980 					 snd_pcm_sframes_t avail)
2981 {
2982 	struct snd_pcm_runtime *runtime = substream->runtime;
2983 	snd_pcm_sframes_t appl_ptr;
2984 	int ret;
2985 
2986 	if (avail <= 0)
2987 		return 0;
2988 	if (frames > (snd_pcm_uframes_t)avail)
2989 		frames = avail;
2990 	appl_ptr = runtime->control->appl_ptr - frames;
2991 	if (appl_ptr < 0)
2992 		appl_ptr += runtime->boundary;
2993 	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2994 	/* NOTE: we return zero for errors because PulseAudio bails out
2995 	 * upon receiving an error from the rewind ioctl and stops
2996 	 * processing altogether.  Returning zero means that no rewind was
2997 	 * done, so it's not strictly wrong to answer like that.
2998 	 */
2999 	return ret < 0 ? 0 : frames;
3000 }
3001 
3002 static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
3003 					snd_pcm_uframes_t frames)
3004 {
3005 	snd_pcm_sframes_t ret;
3006 
3007 	if (frames == 0)
3008 		return 0;
3009 
3010 	scoped_guard(pcm_stream_lock_irq, substream) {
3011 		ret = do_pcm_hwsync(substream);
3012 		if (!ret)
3013 			ret = rewind_appl_ptr(substream, frames,
3014 					      snd_pcm_hw_avail(substream));
3015 	}
3016 	if (ret >= 0)
3017 		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3018 	return ret;
3019 }
3020 
3021 static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
3022 					 snd_pcm_uframes_t frames)
3023 {
3024 	snd_pcm_sframes_t ret;
3025 
3026 	if (frames == 0)
3027 		return 0;
3028 
3029 	scoped_guard(pcm_stream_lock_irq, substream) {
3030 		ret = do_pcm_hwsync(substream);
3031 		if (!ret)
3032 			ret = forward_appl_ptr(substream, frames,
3033 					       snd_pcm_avail(substream));
3034 	}
3035 	if (ret >= 0)
3036 		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3037 	return ret;
3038 }
3039 
3040 static int snd_pcm_delay(struct snd_pcm_substream *substream,
3041 			 snd_pcm_sframes_t *delay)
3042 {
3043 	int err;
3044 
3045 	scoped_guard(pcm_stream_lock_irq, substream) {
3046 		err = do_pcm_hwsync(substream);
3047 		if (delay && !err)
3048 			*delay = snd_pcm_calc_delay(substream);
3049 	}
3050 	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
3051 
3052 	return err;
3053 }
3054 
3055 static inline int snd_pcm_hwsync(struct snd_pcm_substream *substream)
3056 {
3057 	return snd_pcm_delay(substream, NULL);
3058 }
3059 
3060 #define snd_pcm_sync_ptr_get_user(__f, __c, __ptr) ({				\
3061 	__label__ failed, failed_begin;						\
3062 	int __err = -EFAULT;							\
3063 	typeof(*(__ptr)) __user *__src = (__ptr);				\
3064 										\
3065 	if (!user_read_access_begin(__src, sizeof(*__src)))			\
3066 		goto failed_begin;						\
3067 	unsafe_get_user(__f, &__src->flags, failed);				\
3068 	unsafe_get_user(__c.appl_ptr, &__src->c.control.appl_ptr, failed);	\
3069 	unsafe_get_user(__c.avail_min, &__src->c.control.avail_min, failed);	\
3070 	__err = 0;								\
3071 failed:										\
3072 	user_read_access_end();							\
3073 failed_begin:									\
3074 	__err;									\
3075 })
3076 
3077 #define snd_pcm_sync_ptr_put_user(__s, __c, __ptr) ({				\
3078 	__label__ failed, failed_begin;						\
3079 	int __err = -EFAULT;							\
3080 	typeof(*(__ptr)) __user *__src = (__ptr);				\
3081 										\
3082 	if (!user_write_access_begin(__src, sizeof(*__src)))			\
3083 		goto failed_begin;						\
3084 	unsafe_put_user(__s.state, &__src->s.status.state, failed);		\
3085 	unsafe_put_user(__s.hw_ptr, &__src->s.status.hw_ptr, failed);		\
3086 	unsafe_put_user(__s.tstamp.tv_sec, &__src->s.status.tstamp.tv_sec, failed);		\
3087 	unsafe_put_user(__s.tstamp.tv_nsec, &__src->s.status.tstamp.tv_nsec, failed);		\
3088 	unsafe_put_user(__s.suspended_state, &__src->s.status.suspended_state, failed);		\
3089 	unsafe_put_user(__s.audio_tstamp.tv_sec, &__src->s.status.audio_tstamp.tv_sec, failed);	\
3090 	unsafe_put_user(__s.audio_tstamp.tv_nsec, &__src->s.status.audio_tstamp.tv_nsec, failed);\
3091 	unsafe_put_user(__c.appl_ptr, &__src->c.control.appl_ptr, failed);	\
3092 	unsafe_put_user(__c.avail_min, &__src->c.control.avail_min, failed);	\
3093 	__err = 0;								\
3094 failed:										\
3095 	user_write_access_end();						\
3096 failed_begin:									\
3097 	__err;									\
3098 })
3099 
3100 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
3101 			    struct snd_pcm_sync_ptr __user *_sync_ptr)
3102 {
3103 	struct snd_pcm_runtime *runtime = substream->runtime;
3104 	volatile struct snd_pcm_mmap_status *status;
3105 	volatile struct snd_pcm_mmap_control *control;
3106 	u32 sflags;
3107 	struct snd_pcm_mmap_control scontrol;
3108 	struct snd_pcm_mmap_status sstatus;
3109 	int err;
3110 
3111 	if (snd_pcm_sync_ptr_get_user(sflags, scontrol, _sync_ptr))
3112 		return -EFAULT;
3113 	status = runtime->status;
3114 	control = runtime->control;
3115 	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3116 		err = snd_pcm_hwsync(substream);
3117 		if (err < 0)
3118 			return err;
3119 	}
3120 	scoped_guard(pcm_stream_lock_irq, substream) {
3121 		if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
3122 			err = pcm_lib_apply_appl_ptr(substream, scontrol.appl_ptr);
3123 			if (err < 0)
3124 				return err;
3125 		} else {
3126 			scontrol.appl_ptr = control->appl_ptr;
3127 		}
3128 		if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3129 			control->avail_min = scontrol.avail_min;
3130 		else
3131 			scontrol.avail_min = control->avail_min;
3132 		sstatus.state = status->state;
3133 		sstatus.hw_ptr = status->hw_ptr;
3134 		sstatus.tstamp = status->tstamp;
3135 		sstatus.suspended_state = status->suspended_state;
3136 		sstatus.audio_tstamp = status->audio_tstamp;
3137 	}
3138 	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
3139 		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3140 	if (snd_pcm_sync_ptr_put_user(sstatus, scontrol, _sync_ptr))
3141 		return -EFAULT;
3142 	return 0;
3143 }
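
/*
 * Usage sketch (illustrative only): note the inverted flag logic above -- a
 * flag bit that is *not* set means "apply this value from user space".  A
 * mmap-based application that publishes its appl_ptr/avail_min and reads
 * back the hardware status could do roughly:
 *
 *	struct snd_pcm_sync_ptr sync;
 *
 *	memset(&sync, 0, sizeof(sync));
 *	sync.flags = SNDRV_PCM_SYNC_PTR_HWSYNC;	// APPL/AVAIL_MIN bits clear
 *	sync.c.control.appl_ptr = my_appl_ptr;	// hypothetical values
 *	sync.c.control.avail_min = my_avail_min;
 *	ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sync);
 *	// sync.s.status now holds state, hw_ptr and the timestamps
 */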
3144 
3145 struct snd_pcm_mmap_status32 {
3146 	snd_pcm_state_t state;
3147 	s32 pad1;
3148 	u32 hw_ptr;
3149 	struct __snd_timespec tstamp;
3150 	snd_pcm_state_t suspended_state;
3151 	struct __snd_timespec audio_tstamp;
3152 } __packed;
3153 
3154 struct snd_pcm_mmap_control32 {
3155 	u32 appl_ptr;
3156 	u32 avail_min;
3157 };
3158 
3159 struct snd_pcm_sync_ptr32 {
3160 	u32 flags;
3161 	union {
3162 		struct snd_pcm_mmap_status32 status;
3163 		unsigned char reserved[64];
3164 	} s;
3165 	union {
3166 		struct snd_pcm_mmap_control32 control;
3167 		unsigned char reserved[64];
3168 	} c;
3169 } __packed;
3170 
3171 /* recalculate the boundary so that it fits within 32 bits */
3172 static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
3173 {
3174 	snd_pcm_uframes_t boundary;
3175 	snd_pcm_uframes_t border;
3176 	int order;
3177 
3178 	if (!runtime->buffer_size)
3179 		return 0;
3180 
3181 	border = 0x7fffffffUL - runtime->buffer_size;
3182 	if (runtime->buffer_size > border)
3183 		return runtime->buffer_size;
3184 
3185 	order = __fls(border) - __fls(runtime->buffer_size);
3186 	boundary = runtime->buffer_size << order;
3187 
3188 	if (boundary <= border)
3189 		return boundary;
3190 	else
3191 		return boundary / 2;
3192 }
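
/*
 * Worked example: for buffer_size = 4096 frames, border = 0x7fffffff - 4096,
 * __fls(border) = 30 and __fls(4096) = 12, so order = 18 and
 * boundary = 4096 << 18 = 0x40000000, i.e. the largest power-of-two multiple
 * of the buffer size that does not exceed the border.
 */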
3193 
3194 static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
3195 					 struct snd_pcm_sync_ptr32 __user *src)
3196 {
3197 	struct snd_pcm_runtime *runtime = substream->runtime;
3198 	volatile struct snd_pcm_mmap_status *status;
3199 	volatile struct snd_pcm_mmap_control *control;
3200 	u32 sflags;
3201 	struct snd_pcm_mmap_control scontrol;
3202 	struct snd_pcm_mmap_status sstatus;
3203 	snd_pcm_uframes_t boundary;
3204 	int err;
3205 
3206 	if (snd_BUG_ON(!runtime))
3207 		return -EINVAL;
3208 
3209 	if (snd_pcm_sync_ptr_get_user(sflags, scontrol, src))
3210 		return -EFAULT;
3211 	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3212 		err = snd_pcm_hwsync(substream);
3213 		if (err < 0)
3214 			return err;
3215 	}
3216 	status = runtime->status;
3217 	control = runtime->control;
3218 	boundary = recalculate_boundary(runtime);
3219 	if (!boundary)
3220 		boundary = 0x7fffffff;
3221 	scoped_guard(pcm_stream_lock_irq, substream) {
3222 		/* FIXME: we should consider the boundary for the sync from app */
3223 		if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
3224 			err = pcm_lib_apply_appl_ptr(substream,
3225 						     scontrol.appl_ptr);
3226 			if (err < 0)
3227 				return err;
3228 		} else
3229 			scontrol.appl_ptr = control->appl_ptr % boundary;
3230 		if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3231 			control->avail_min = scontrol.avail_min;
3232 		else
3233 			scontrol.avail_min = control->avail_min;
3234 		sstatus.state = status->state;
3235 		sstatus.hw_ptr = status->hw_ptr % boundary;
3236 		sstatus.tstamp = status->tstamp;
3237 		sstatus.suspended_state = status->suspended_state;
3238 		sstatus.audio_tstamp = status->audio_tstamp;
3239 	}
3240 	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
3241 		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
3242 	if (snd_pcm_sync_ptr_put_user(sstatus, scontrol, src))
3243 		return -EFAULT;
3244 
3245 	return 0;
3246 }
3247 #define __SNDRV_PCM_IOCTL_SYNC_PTR32 _IOWR('A', 0x23, struct snd_pcm_sync_ptr32)
3248 
3249 static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
3250 {
3251 	struct snd_pcm_runtime *runtime = substream->runtime;
3252 	int arg;
3253 
3254 	if (get_user(arg, _arg))
3255 		return -EFAULT;
3256 	if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
3257 		return -EINVAL;
3258 	runtime->tstamp_type = arg;
3259 	return 0;
3260 }
3261 
3262 static int snd_pcm_xferi_frames_ioctl(struct snd_pcm_substream *substream,
3263 				      struct snd_xferi __user *_xferi)
3264 {
3265 	struct snd_xferi xferi;
3266 	struct snd_pcm_runtime *runtime = substream->runtime;
3267 	snd_pcm_sframes_t result;
3268 
3269 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3270 		return -EBADFD;
3271 	if (put_user(0, &_xferi->result))
3272 		return -EFAULT;
3273 	if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
3274 		return -EFAULT;
3275 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3276 		result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
3277 	else
3278 		result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
3279 	if (put_user(result, &_xferi->result))
3280 		return -EFAULT;
3281 	return result < 0 ? result : 0;
3282 }
3283 
3284 static int snd_pcm_xfern_frames_ioctl(struct snd_pcm_substream *substream,
3285 				      struct snd_xfern __user *_xfern)
3286 {
3287 	struct snd_xfern xfern;
3288 	struct snd_pcm_runtime *runtime = substream->runtime;
3289 	void *bufs __free(kfree) = NULL;
3290 	snd_pcm_sframes_t result;
3291 
3292 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3293 		return -EBADFD;
3294 	if (runtime->channels > 128)
3295 		return -EINVAL;
3296 	if (put_user(0, &_xfern->result))
3297 		return -EFAULT;
3298 	if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
3299 		return -EFAULT;
3300 
3301 	bufs = memdup_array_user(xfern.bufs, runtime->channels, sizeof(void *));
3302 	if (IS_ERR(bufs))
3303 		return PTR_ERR(bufs);
3304 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3305 		result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
3306 	else
3307 		result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
3308 	if (put_user(result, &_xfern->result))
3309 		return -EFAULT;
3310 	return result < 0 ? result : 0;
3311 }
3312 
3313 static int snd_pcm_rewind_ioctl(struct snd_pcm_substream *substream,
3314 				snd_pcm_uframes_t __user *_frames)
3315 {
3316 	snd_pcm_uframes_t frames;
3317 	snd_pcm_sframes_t result;
3318 
3319 	if (get_user(frames, _frames))
3320 		return -EFAULT;
3321 	if (put_user(0, _frames))
3322 		return -EFAULT;
3323 	result = snd_pcm_rewind(substream, frames);
3324 	if (put_user(result, _frames))
3325 		return -EFAULT;
3326 	return result < 0 ? result : 0;
3327 }
3328 
3329 static int snd_pcm_forward_ioctl(struct snd_pcm_substream *substream,
3330 				 snd_pcm_uframes_t __user *_frames)
3331 {
3332 	snd_pcm_uframes_t frames;
3333 	snd_pcm_sframes_t result;
3334 
3335 	if (get_user(frames, _frames))
3336 		return -EFAULT;
3337 	if (put_user(0, _frames))
3338 		return -EFAULT;
3339 	result = snd_pcm_forward(substream, frames);
3340 	if (put_user(result, _frames))
3341 		return -EFAULT;
3342 	return result < 0 ? result : 0;
3343 }
3344 
3345 static int snd_pcm_common_ioctl(struct file *file,
3346 				 struct snd_pcm_substream *substream,
3347 				 unsigned int cmd, void __user *arg)
3348 {
3349 	struct snd_pcm_file *pcm_file = file->private_data;
3350 	int res;
3351 
3352 	if (PCM_RUNTIME_CHECK(substream))
3353 		return -ENXIO;
3354 
3355 	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3356 		return -EBADFD;
3357 
3358 	res = snd_power_wait(substream->pcm->card);
3359 	if (res < 0)
3360 		return res;
3361 
3362 	switch (cmd) {
3363 	case SNDRV_PCM_IOCTL_PVERSION:
3364 		return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
3365 	case SNDRV_PCM_IOCTL_INFO:
3366 		return snd_pcm_info_user(substream, arg);
3367 	case SNDRV_PCM_IOCTL_TSTAMP:	/* just for compatibility */
3368 		return 0;
3369 	case SNDRV_PCM_IOCTL_TTSTAMP:
3370 		return snd_pcm_tstamp(substream, arg);
3371 	case SNDRV_PCM_IOCTL_USER_PVERSION:
3372 		if (get_user(pcm_file->user_pversion,
3373 			     (unsigned int __user *)arg))
3374 			return -EFAULT;
3375 		return 0;
3376 	case SNDRV_PCM_IOCTL_HW_REFINE:
3377 		return snd_pcm_hw_refine_user(substream, arg);
3378 	case SNDRV_PCM_IOCTL_HW_PARAMS:
3379 		return snd_pcm_hw_params_user(substream, arg);
3380 	case SNDRV_PCM_IOCTL_HW_FREE:
3381 		return snd_pcm_hw_free(substream);
3382 	case SNDRV_PCM_IOCTL_SW_PARAMS:
3383 		return snd_pcm_sw_params_user(substream, arg);
3384 	case SNDRV_PCM_IOCTL_STATUS32:
3385 		return snd_pcm_status_user32(substream, arg, false);
3386 	case SNDRV_PCM_IOCTL_STATUS_EXT32:
3387 		return snd_pcm_status_user32(substream, arg, true);
3388 	case SNDRV_PCM_IOCTL_STATUS64:
3389 		return snd_pcm_status_user64(substream, arg, false);
3390 	case SNDRV_PCM_IOCTL_STATUS_EXT64:
3391 		return snd_pcm_status_user64(substream, arg, true);
3392 	case SNDRV_PCM_IOCTL_CHANNEL_INFO:
3393 		return snd_pcm_channel_info_user(substream, arg);
3394 	case SNDRV_PCM_IOCTL_PREPARE:
3395 		return snd_pcm_prepare(substream, file);
3396 	case SNDRV_PCM_IOCTL_RESET:
3397 		return snd_pcm_reset(substream);
3398 	case SNDRV_PCM_IOCTL_START:
3399 		return snd_pcm_start_lock_irq(substream);
3400 	case SNDRV_PCM_IOCTL_LINK:
3401 		return snd_pcm_link(substream, (int)(unsigned long) arg);
3402 	case SNDRV_PCM_IOCTL_UNLINK:
3403 		return snd_pcm_unlink(substream);
3404 	case SNDRV_PCM_IOCTL_RESUME:
3405 		return snd_pcm_resume(substream);
3406 	case SNDRV_PCM_IOCTL_XRUN:
3407 		return snd_pcm_xrun(substream);
3408 	case SNDRV_PCM_IOCTL_HWSYNC:
3409 		return snd_pcm_hwsync(substream);
3410 	case SNDRV_PCM_IOCTL_DELAY:
3411 	{
3412 		snd_pcm_sframes_t delay = 0;
3413 		snd_pcm_sframes_t __user *res = arg;
3414 		int err;
3415 
3416 		err = snd_pcm_delay(substream, &delay);
3417 		if (err)
3418 			return err;
3419 		if (put_user(delay, res))
3420 			return -EFAULT;
3421 		return 0;
3422 	}
3423 	case __SNDRV_PCM_IOCTL_SYNC_PTR32:
3424 		return snd_pcm_ioctl_sync_ptr_compat(substream, arg);
3425 	case __SNDRV_PCM_IOCTL_SYNC_PTR64:
3426 		return snd_pcm_sync_ptr(substream, arg);
3427 #ifdef CONFIG_SND_SUPPORT_OLD_API
3428 	case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
3429 		return snd_pcm_hw_refine_old_user(substream, arg);
3430 	case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
3431 		return snd_pcm_hw_params_old_user(substream, arg);
3432 #endif
3433 	case SNDRV_PCM_IOCTL_DRAIN:
3434 		return snd_pcm_drain(substream, file);
3435 	case SNDRV_PCM_IOCTL_DROP:
3436 		return snd_pcm_drop(substream);
3437 	case SNDRV_PCM_IOCTL_PAUSE:
3438 		return snd_pcm_pause_lock_irq(substream, (unsigned long)arg);
3439 	case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
3440 	case SNDRV_PCM_IOCTL_READI_FRAMES:
3441 		return snd_pcm_xferi_frames_ioctl(substream, arg);
3442 	case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
3443 	case SNDRV_PCM_IOCTL_READN_FRAMES:
3444 		return snd_pcm_xfern_frames_ioctl(substream, arg);
3445 	case SNDRV_PCM_IOCTL_REWIND:
3446 		return snd_pcm_rewind_ioctl(substream, arg);
3447 	case SNDRV_PCM_IOCTL_FORWARD:
3448 		return snd_pcm_forward_ioctl(substream, arg);
3449 	}
3450 	pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
3451 	return -ENOTTY;
3452 }
3453 
3454 static long snd_pcm_ioctl(struct file *file, unsigned int cmd,
3455 			  unsigned long arg)
3456 {
3457 	struct snd_pcm_file *pcm_file;
3458 
3459 	pcm_file = file->private_data;
3460 
3461 	if (((cmd >> 8) & 0xff) != 'A')
3462 		return -ENOTTY;
3463 
3464 	return snd_pcm_common_ioctl(file, pcm_file->substream, cmd,
3465 				     (void __user *)arg);
3466 }
3467 
3468 /**
3469  * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
3470  * @substream: PCM substream
3471  * @cmd: IOCTL cmd
3472  * @arg: IOCTL argument
3473  *
3474  * This function is provided primarily for the OSS layer and USB gadget drivers,
3475  * and it allows only a limited set of ioctls (hw_params, sw_params,
3476  * prepare, start, drain, drop, delay, forward).
3477  *
3478  * Return: zero if successful, or a negative error code
3479  */
3480 int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3481 			 unsigned int cmd, void *arg)
3482 {
3483 	snd_pcm_uframes_t *frames = arg;
3484 	snd_pcm_sframes_t result;
3485 
3486 	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3487 		return -EBADFD;
3488 
3489 	switch (cmd) {
3490 	case SNDRV_PCM_IOCTL_FORWARD:
3491 	{
3492 		/* provided only for OSS; capture-only and no value returned */
3493 		if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
3494 			return -EINVAL;
3495 		result = snd_pcm_forward(substream, *frames);
3496 		return result < 0 ? result : 0;
3497 	}
3498 	case SNDRV_PCM_IOCTL_HW_PARAMS:
3499 		return snd_pcm_hw_params(substream, arg);
3500 	case SNDRV_PCM_IOCTL_SW_PARAMS:
3501 		return snd_pcm_sw_params(substream, arg);
3502 	case SNDRV_PCM_IOCTL_PREPARE:
3503 		return snd_pcm_prepare(substream, NULL);
3504 	case SNDRV_PCM_IOCTL_START:
3505 		return snd_pcm_start_lock_irq(substream);
3506 	case SNDRV_PCM_IOCTL_DRAIN:
3507 		return snd_pcm_drain(substream, NULL);
3508 	case SNDRV_PCM_IOCTL_DROP:
3509 		return snd_pcm_drop(substream);
3510 	case SNDRV_PCM_IOCTL_DELAY:
3511 		return snd_pcm_delay(substream, frames);
3512 	default:
3513 		return -EINVAL;
3514 	}
3515 }
3516 EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
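
/*
 * A minimal usage sketch, assuming a kernel-side caller (such as an OSS-style
 * layer or a USB gadget driver) that already holds an opened substream and a
 * filled hw_params container; the example_* name below is purely illustrative.
 */
static int __maybe_unused example_kernel_ioctl_usage(struct snd_pcm_substream *substream,
						     struct snd_pcm_hw_params *params)
{
	int err;

	/* apply the hardware parameters prepared by the caller */
	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
	if (err < 0)
		return err;
	/* prepare and start without a user-space file context */
	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
	if (err < 0)
		return err;
	return snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
}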
3517 
3518 static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
3519 			    loff_t * offset)
3520 {
3521 	struct snd_pcm_file *pcm_file;
3522 	struct snd_pcm_substream *substream;
3523 	struct snd_pcm_runtime *runtime;
3524 	snd_pcm_sframes_t result;
3525 
3526 	pcm_file = file->private_data;
3527 	substream = pcm_file->substream;
3528 	if (PCM_RUNTIME_CHECK(substream))
3529 		return -ENXIO;
3530 	runtime = substream->runtime;
3531 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3532 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3533 		return -EBADFD;
3534 	if (!frame_aligned(runtime, count))
3535 		return -EINVAL;
3536 	count = bytes_to_frames(runtime, count);
3537 	result = snd_pcm_lib_read(substream, buf, count);
3538 	if (result > 0)
3539 		result = frames_to_bytes(runtime, result);
3540 	return result;
3541 }
3542 
3543 static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
3544 			     size_t count, loff_t * offset)
3545 {
3546 	struct snd_pcm_file *pcm_file;
3547 	struct snd_pcm_substream *substream;
3548 	struct snd_pcm_runtime *runtime;
3549 	snd_pcm_sframes_t result;
3550 
3551 	pcm_file = file->private_data;
3552 	substream = pcm_file->substream;
3553 	if (PCM_RUNTIME_CHECK(substream))
3554 		return -ENXIO;
3555 	runtime = substream->runtime;
3556 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3557 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3558 		return -EBADFD;
3559 	if (!frame_aligned(runtime, count))
3560 		return -EINVAL;
3561 	count = bytes_to_frames(runtime, count);
3562 	result = snd_pcm_lib_write(substream, buf, count);
3563 	if (result > 0)
3564 		result = frames_to_bytes(runtime, result);
3565 	return result;
3566 }
3567 
3568 static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
3569 {
3570 	struct snd_pcm_file *pcm_file;
3571 	struct snd_pcm_substream *substream;
3572 	struct snd_pcm_runtime *runtime;
3573 	snd_pcm_sframes_t result;
3574 	unsigned long i;
3575 	void __user **bufs __free(kfree) = NULL;
3576 	snd_pcm_uframes_t frames;
3577 	const struct iovec *iov = iter_iov(to);
3578 
3579 	pcm_file = iocb->ki_filp->private_data;
3580 	substream = pcm_file->substream;
3581 	if (PCM_RUNTIME_CHECK(substream))
3582 		return -ENXIO;
3583 	runtime = substream->runtime;
3584 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3585 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3586 		return -EBADFD;
3587 	if (!user_backed_iter(to))
3588 		return -EINVAL;
3589 	if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
3590 		return -EINVAL;
3591 	if (!frame_aligned(runtime, iov->iov_len))
3592 		return -EINVAL;
3593 	frames = bytes_to_samples(runtime, iov->iov_len);
3594 	bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
3595 	if (bufs == NULL)
3596 		return -ENOMEM;
3597 	for (i = 0; i < to->nr_segs; ++i) {
3598 		bufs[i] = iov->iov_base;
3599 		iov++;
3600 	}
3601 	result = snd_pcm_lib_readv(substream, bufs, frames);
3602 	if (result > 0)
3603 		result = frames_to_bytes(runtime, result);
3604 	return result;
3605 }
3606 
3607 static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
3608 {
3609 	struct snd_pcm_file *pcm_file;
3610 	struct snd_pcm_substream *substream;
3611 	struct snd_pcm_runtime *runtime;
3612 	snd_pcm_sframes_t result;
3613 	unsigned long i;
3614 	void __user **bufs __free(kfree) = NULL;
3615 	snd_pcm_uframes_t frames;
3616 	const struct iovec *iov = iter_iov(from);
3617 
3618 	pcm_file = iocb->ki_filp->private_data;
3619 	substream = pcm_file->substream;
3620 	if (PCM_RUNTIME_CHECK(substream))
3621 		return -ENXIO;
3622 	runtime = substream->runtime;
3623 	if (runtime->state == SNDRV_PCM_STATE_OPEN ||
3624 	    runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3625 		return -EBADFD;
3626 	if (!user_backed_iter(from))
3627 		return -EINVAL;
3628 	if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
3629 	    !frame_aligned(runtime, iov->iov_len))
3630 		return -EINVAL;
3631 	frames = bytes_to_samples(runtime, iov->iov_len);
3632 	bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
3633 	if (bufs == NULL)
3634 		return -ENOMEM;
3635 	for (i = 0; i < from->nr_segs; ++i) {
3636 		bufs[i] = iov->iov_base;
3637 		iov++;
3638 	}
3639 	result = snd_pcm_lib_writev(substream, bufs, frames);
3640 	if (result > 0)
3641 		result = frames_to_bytes(runtime, result);
3642 	return result;
3643 }
3644 
3645 static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
3646 {
3647 	struct snd_pcm_file *pcm_file;
3648 	struct snd_pcm_substream *substream;
3649 	struct snd_pcm_runtime *runtime;
3650 	__poll_t mask, ok;
3651 	snd_pcm_uframes_t avail;
3652 
3653 	pcm_file = file->private_data;
3654 
3655 	substream = pcm_file->substream;
3656 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3657 		ok = EPOLLOUT | EPOLLWRNORM;
3658 	else
3659 		ok = EPOLLIN | EPOLLRDNORM;
3660 	if (PCM_RUNTIME_CHECK(substream))
3661 		return ok | EPOLLERR;
3662 
3663 	runtime = substream->runtime;
3664 	if (runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
3665 		return ok | EPOLLERR;
3666 
3667 	poll_wait(file, &runtime->sleep, wait);
3668 
3669 	mask = 0;
3670 	guard(pcm_stream_lock_irq)(substream);
3671 	avail = snd_pcm_avail(substream);
3672 	switch (runtime->state) {
3673 	case SNDRV_PCM_STATE_RUNNING:
3674 	case SNDRV_PCM_STATE_PREPARED:
3675 	case SNDRV_PCM_STATE_PAUSED:
3676 		if (avail >= runtime->control->avail_min)
3677 			mask = ok;
3678 		break;
3679 	case SNDRV_PCM_STATE_DRAINING:
3680 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
3681 			mask = ok;
3682 			if (!avail)
3683 				mask |= EPOLLERR;
3684 		}
3685 		break;
3686 	default:
3687 		mask = ok | EPOLLERR;
3688 		break;
3689 	}
3690 	return mask;
3691 }
3692 
3693 /*
3694  * mmap support
3695  */
3696 
3697 /*
3698  * Only on coherent architectures can we mmap the status and the control records
3699  * for efficient data transfer.  On others, we have to use the HWSYNC ioctl...
3700  */
3701 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
3702 /*
3703  * mmap status record
3704  */
3705 static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3706 {
3707 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3708 	struct snd_pcm_runtime *runtime;
3709 
3710 	if (substream == NULL)
3711 		return VM_FAULT_SIGBUS;
3712 	runtime = substream->runtime;
3713 	vmf->page = virt_to_page(runtime->status);
3714 	get_page(vmf->page);
3715 	return 0;
3716 }
3717 
3718 static const struct vm_operations_struct snd_pcm_vm_ops_status =
3719 {
3720 	.fault =	snd_pcm_mmap_status_fault,
3721 };
3722 
3723 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3724 			       struct vm_area_struct *area)
3725 {
3726 	long size;
3727 	if (!(area->vm_flags & VM_READ))
3728 		return -EINVAL;
3729 	size = area->vm_end - area->vm_start;
3730 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
3731 		return -EINVAL;
3732 	area->vm_ops = &snd_pcm_vm_ops_status;
3733 	area->vm_private_data = substream;
3734 	vm_flags_mod(area, VM_DONTEXPAND | VM_DONTDUMP,
3735 		     VM_WRITE | VM_MAYWRITE);
3736 
3737 	return 0;
3738 }
3739 
3740 /*
3741  * mmap control record
3742  */
3743 static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3744 {
3745 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3746 	struct snd_pcm_runtime *runtime;
3747 
3748 	if (substream == NULL)
3749 		return VM_FAULT_SIGBUS;
3750 	runtime = substream->runtime;
3751 	vmf->page = virt_to_page(runtime->control);
3752 	get_page(vmf->page);
3753 	return 0;
3754 }
3755 
3756 static const struct vm_operations_struct snd_pcm_vm_ops_control =
3757 {
3758 	.fault =	snd_pcm_mmap_control_fault,
3759 };
3760 
3761 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3762 				struct vm_area_struct *area)
3763 {
3764 	long size;
3765 	if (!(area->vm_flags & VM_READ))
3766 		return -EINVAL;
3767 	size = area->vm_end - area->vm_start;
3768 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
3769 		return -EINVAL;
3770 	area->vm_ops = &snd_pcm_vm_ops_control;
3771 	area->vm_private_data = substream;
3772 	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
3773 	return 0;
3774 }
3775 
3776 static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
3777 {
3778 	/* If the driver requires explicit sync (typically for non-coherent
3779 	 * pages), we have to disable the mmap of the status and control data
3780 	 * to enforce the control via the SYNC_PTR ioctl.
3781 	 */
3782 	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
3783 		return false;
3784 	/* See pcm_control_mmap_allowed() below.
3785 	 * Since older alsa-lib requires both status and control mmaps to be
3786 	 * coupled, we have to disable the status mmap for old alsa-lib, too.
3787 	 */
3788 	if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
3789 	    (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
3790 		return false;
3791 	return true;
3792 }
3793 
3794 static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
3795 {
3796 	if (pcm_file->no_compat_mmap)
3797 		return false;
3798 	/* see above */
3799 	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
3800 		return false;
3801 	/* Disallow the control mmap when SYNC_APPLPTR flag is set;
3802 	 * this forces user-space to fall back to snd_pcm_sync_ptr(),
3803 	 * thus effectively assuring the manual update of appl_ptr.
3804 	 */
3805 	if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
3806 		return false;
3807 	return true;
3808 }
3809 
3810 #else /* ! coherent mmap */
3811 /*
3812  * mmap of the status and control records is not supported here.
3813  */
3814 #define pcm_status_mmap_allowed(pcm_file)	false
3815 #define pcm_control_mmap_allowed(pcm_file)	false
3816 
3817 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
3818 			       struct vm_area_struct *area)
3819 {
3820 	return -ENXIO;
3821 }
3822 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
3823 				struct vm_area_struct *area)
3824 {
3825 	return -ENXIO;
3826 }
3827 #endif /* coherent mmap */
3828 
3829 /*
3830  * snd_pcm_mmap_data_open - increase the mmap counter
3831  */
3832 static void snd_pcm_mmap_data_open(struct vm_area_struct *area)
3833 {
3834 	struct snd_pcm_substream *substream = area->vm_private_data;
3835 
3836 	atomic_inc(&substream->mmap_count);
3837 }
3838 
3839 /*
3840  * snd_pcm_mmap_data_close - decrease the mmap counter
3841  */
3842 static void snd_pcm_mmap_data_close(struct vm_area_struct *area)
3843 {
3844 	struct snd_pcm_substream *substream = area->vm_private_data;
3845 
3846 	atomic_dec(&substream->mmap_count);
3847 }
3848 
3849 /*
3850  * fault callback for mmapping a RAM page
3851  */
3852 static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3853 {
3854 	struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3855 	struct snd_pcm_runtime *runtime;
3856 	unsigned long offset;
3857 	struct page * page;
3858 	size_t dma_bytes;
3859 
3860 	if (substream == NULL)
3861 		return VM_FAULT_SIGBUS;
3862 	runtime = substream->runtime;
3863 	offset = vmf->pgoff << PAGE_SHIFT;
3864 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3865 	if (offset > dma_bytes - PAGE_SIZE)
3866 		return VM_FAULT_SIGBUS;
3867 	if (substream->ops->page)
3868 		page = substream->ops->page(substream, offset);
3869 	else if (!snd_pcm_get_dma_buf(substream)) {
3870 		if (WARN_ON_ONCE(!runtime->dma_area))
3871 			return VM_FAULT_SIGBUS;
3872 		page = virt_to_page(runtime->dma_area + offset);
3873 	} else
3874 		page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
3875 	if (!page)
3876 		return VM_FAULT_SIGBUS;
3877 	get_page(page);
3878 	vmf->page = page;
3879 	return 0;
3880 }
3881 
3882 static const struct vm_operations_struct snd_pcm_vm_ops_data = {
3883 	.open =		snd_pcm_mmap_data_open,
3884 	.close =	snd_pcm_mmap_data_close,
3885 };
3886 
3887 static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3888 	.open =		snd_pcm_mmap_data_open,
3889 	.close =	snd_pcm_mmap_data_close,
3890 	.fault =	snd_pcm_mmap_data_fault,
3891 };
3892 
3893 /*
3894  * mmap the DMA buffer on RAM
3895  */
3896 
3897 /**
3898  * snd_pcm_lib_default_mmap - Default PCM data mmap function
3899  * @substream: PCM substream
3900  * @area: VMA
3901  *
3902  * This is the default mmap handler for PCM data.  When mmap pcm_ops is NULL,
3903  * this function is invoked implicitly.
3904  *
3905  * Return: zero if successful, or a negative error code
3906  */
3907 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
3908 			     struct vm_area_struct *area)
3909 {
3910 	vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP);
3911 	if (!substream->ops->page &&
3912 	    !snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area))
3913 		return 0;
3914 	/* mmap with fault handler */
3915 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
3916 	return 0;
3917 }
3918 EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
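
/*
 * A minimal sketch, assuming a typical driver: leaving the mmap field of its
 * snd_pcm_ops NULL makes the core fall back to this handler, equivalent to
 * wiring it up explicitly as below (the other callbacks are omitted here and
 * the example_* name is illustrative only).
 */
static const struct snd_pcm_ops example_default_mmap_ops __maybe_unused = {
	/* ...the driver's open/close/hw_params/trigger/pointer callbacks... */
	.mmap =		snd_pcm_lib_default_mmap,	/* same effect as leaving .mmap NULL */
};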
3919 
3920 /*
3921  * mmap the DMA buffer on I/O memory area
3922  */
3923 #if SNDRV_PCM_INFO_MMAP_IOMEM
3924 /**
3925  * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
3926  * @substream: PCM substream
3927  * @area: VMA
3928  *
3929  * When your hardware uses iomapped pages as the hardware buffer and you
3930  * want to mmap them, pass this function as the mmap pcm_ops.  Note that
3931  * this is supposed to work only on limited architectures.
3932  *
3933  * Return: zero if successful, or a negative error code
3934  */
3935 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
3936 			   struct vm_area_struct *area)
3937 {
3938 	struct snd_pcm_runtime *runtime = substream->runtime;
3939 
3940 	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
3941 	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
3942 }
3943 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
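
/*
 * A minimal sketch, assuming a driver whose ring buffer lives in device I/O
 * memory (i.e. runtime->dma_addr points into iomem): the handler is wired in
 * as the mmap pcm_ops (other callbacks omitted; example_* is illustrative).
 */
static const struct snd_pcm_ops example_iomem_mmap_ops __maybe_unused = {
	/* ...the driver's other callbacks... */
	.mmap =		snd_pcm_lib_mmap_iomem,
};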
3944 #endif /* SNDRV_PCM_INFO_MMAP_IOMEM */
3945 
3946 /*
3947  * mmap DMA buffer
3948  */
3949 int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
3950 		      struct vm_area_struct *area)
3951 {
3952 	struct snd_pcm_runtime *runtime;
3953 	long size;
3954 	unsigned long offset;
3955 	size_t dma_bytes;
3956 	int err;
3957 
3958 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3959 		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
3960 			return -EINVAL;
3961 	} else {
3962 		if (!(area->vm_flags & VM_READ))
3963 			return -EINVAL;
3964 	}
3965 	runtime = substream->runtime;
3966 	if (runtime->state == SNDRV_PCM_STATE_OPEN)
3967 		return -EBADFD;
3968 	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
3969 		return -ENXIO;
3970 	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
3971 	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
3972 		return -EINVAL;
3973 	size = area->vm_end - area->vm_start;
3974 	offset = area->vm_pgoff << PAGE_SHIFT;
3975 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
3976 	if ((size_t)size > dma_bytes)
3977 		return -EINVAL;
3978 	if (offset > dma_bytes - size)
3979 		return -EINVAL;
3980 
3981 	area->vm_ops = &snd_pcm_vm_ops_data;
3982 	area->vm_private_data = substream;
3983 	if (substream->ops->mmap)
3984 		err = substream->ops->mmap(substream, area);
3985 	else
3986 		err = snd_pcm_lib_default_mmap(substream, area);
3987 	if (!err)
3988 		atomic_inc(&substream->mmap_count);
3989 	return err;
3990 }
3991 EXPORT_SYMBOL(snd_pcm_mmap_data);
3992 
3993 static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
3994 {
3995 	struct snd_pcm_file * pcm_file;
3996 	struct snd_pcm_substream *substream;
3997 	unsigned long offset;
3998 
3999 	pcm_file = file->private_data;
4000 	substream = pcm_file->substream;
4001 	if (PCM_RUNTIME_CHECK(substream))
4002 		return -ENXIO;
4003 	if (substream->runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
4004 		return -EBADFD;
4005 
4006 	offset = area->vm_pgoff << PAGE_SHIFT;
4007 	switch (offset) {
4008 	case SNDRV_PCM_MMAP_OFFSET_STATUS_OLD:
4009 		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
4010 			return -ENXIO;
4011 		fallthrough;
4012 	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
4013 		if (!pcm_status_mmap_allowed(pcm_file))
4014 			return -ENXIO;
4015 		return snd_pcm_mmap_status(substream, file, area);
4016 	case SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD:
4017 		if (pcm_file->no_compat_mmap || !IS_ENABLED(CONFIG_64BIT))
4018 			return -ENXIO;
4019 		fallthrough;
4020 	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
4021 		if (!pcm_control_mmap_allowed(pcm_file))
4022 			return -ENXIO;
4023 		return snd_pcm_mmap_control(substream, file, area);
4024 	default:
4025 		return snd_pcm_mmap_data(substream, file, area);
4026 	}
4027 	return 0;
4028 }
4029 
4030 static int snd_pcm_fasync(int fd, struct file * file, int on)
4031 {
4032 	struct snd_pcm_file * pcm_file;
4033 	struct snd_pcm_substream *substream;
4034 	struct snd_pcm_runtime *runtime;
4035 
4036 	pcm_file = file->private_data;
4037 	substream = pcm_file->substream;
4038 	if (PCM_RUNTIME_CHECK(substream))
4039 		return -ENXIO;
4040 	runtime = substream->runtime;
4041 	if (runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
4042 		return -EBADFD;
4043 	return snd_fasync_helper(fd, file, on, &runtime->fasync);
4044 }
4045 
4046 /*
4047  * ioctl32 compat
4048  */
4049 #ifdef CONFIG_COMPAT
4050 #include "pcm_compat.c"
4051 #else
4052 #define snd_pcm_ioctl_compat	NULL
4053 #endif
4054 
4055 /*
4056  *  Helpers kept only for binary compatibility with the old API; to be removed
4057  */
4058 
4059 #ifdef CONFIG_SND_SUPPORT_OLD_API
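/*
 * Note on the conversion macros below: the first three bits of rmask/cmask
 * (ACCESS, FORMAT, SUBFORMAT) keep their positions, while the remaining bits
 * are shifted by 5 to bridge the gap between the old parameter indices and
 * the current layout, where the first interval (SAMPLE_BITS) starts at index 8.
 */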
4060 #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
4061 #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
4062 
4063 static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
4064 					       struct snd_pcm_hw_params_old *oparams)
4065 {
4066 	unsigned int i;
4067 
4068 	memset(params, 0, sizeof(*params));
4069 	params->flags = oparams->flags;
4070 	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
4071 		params->masks[i].bits[0] = oparams->masks[i];
4072 	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
4073 	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
4074 	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
4075 	params->info = oparams->info;
4076 	params->msbits = oparams->msbits;
4077 	params->rate_num = oparams->rate_num;
4078 	params->rate_den = oparams->rate_den;
4079 	params->fifo_size = oparams->fifo_size;
4080 }
4081 
4082 static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
4083 					     struct snd_pcm_hw_params *params)
4084 {
4085 	unsigned int i;
4086 
4087 	memset(oparams, 0, sizeof(*oparams));
4088 	oparams->flags = params->flags;
4089 	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
4090 		oparams->masks[i] = params->masks[i].bits[0];
4091 	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
4092 	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
4093 	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
4094 	oparams->info = params->info;
4095 	oparams->msbits = params->msbits;
4096 	oparams->rate_num = params->rate_num;
4097 	oparams->rate_den = params->rate_den;
4098 	oparams->fifo_size = params->fifo_size;
4099 }
4100 
4101 static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
4102 				      struct snd_pcm_hw_params_old __user * _oparams)
4103 {
4104 	struct snd_pcm_hw_params *params __free(kfree) = NULL;
4105 	struct snd_pcm_hw_params_old *oparams __free(kfree) = NULL;
4106 	int err;
4107 
4108 	params = kmalloc(sizeof(*params), GFP_KERNEL);
4109 	if (!params)
4110 		return -ENOMEM;
4111 
4112 	oparams = memdup_user(_oparams, sizeof(*oparams));
4113 	if (IS_ERR(oparams))
4114 		return PTR_ERR(oparams);
4115 	snd_pcm_hw_convert_from_old_params(params, oparams);
4116 	err = snd_pcm_hw_refine(substream, params);
4117 	if (err < 0)
4118 		return err;
4119 
4120 	err = fixup_unreferenced_params(substream, params);
4121 	if (err < 0)
4122 		return err;
4123 
4124 	snd_pcm_hw_convert_to_old_params(oparams, params);
4125 	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
4126 		return -EFAULT;
4127 	return 0;
4128 }
4129 
4130 static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
4131 				      struct snd_pcm_hw_params_old __user * _oparams)
4132 {
4133 	struct snd_pcm_hw_params *params __free(kfree) = NULL;
4134 	struct snd_pcm_hw_params_old *oparams __free(kfree) = NULL;
4135 	int err;
4136 
4137 	params = kmalloc(sizeof(*params), GFP_KERNEL);
4138 	if (!params)
4139 		return -ENOMEM;
4140 
4141 	oparams = memdup_user(_oparams, sizeof(*oparams));
4142 	if (IS_ERR(oparams))
4143 		return PTR_ERR(oparams);
4144 
4145 	snd_pcm_hw_convert_from_old_params(params, oparams);
4146 	err = snd_pcm_hw_params(substream, params);
4147 	if (err < 0)
4148 		return err;
4149 
4150 	snd_pcm_hw_convert_to_old_params(oparams, params);
4151 	if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
4152 		return -EFAULT;
4153 	return 0;
4154 }
4155 #endif /* CONFIG_SND_SUPPORT_OLD_API */
4156 
4157 #ifndef CONFIG_MMU
4158 static unsigned long snd_pcm_get_unmapped_area(struct file *file,
4159 					       unsigned long addr,
4160 					       unsigned long len,
4161 					       unsigned long pgoff,
4162 					       unsigned long flags)
4163 {
4164 	struct snd_pcm_file *pcm_file = file->private_data;
4165 	struct snd_pcm_substream *substream = pcm_file->substream;
4166 	struct snd_pcm_runtime *runtime = substream->runtime;
4167 	unsigned long offset = pgoff << PAGE_SHIFT;
4168 
4169 	switch (offset) {
4170 	case SNDRV_PCM_MMAP_OFFSET_STATUS_NEW:
4171 		return (unsigned long)runtime->status;
4172 	case SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW:
4173 		return (unsigned long)runtime->control;
4174 	default:
4175 		return (unsigned long)runtime->dma_area + offset;
4176 	}
4177 }
4178 #else
4179 # define snd_pcm_get_unmapped_area NULL
4180 #endif
4181 
4182 /*
4183  *  Register section
4184  */
4185 
4186 const struct file_operations snd_pcm_f_ops[2] = {
4187 	{
4188 		.owner =		THIS_MODULE,
4189 		.write =		snd_pcm_write,
4190 		.write_iter =		snd_pcm_writev,
4191 		.open =			snd_pcm_playback_open,
4192 		.release =		snd_pcm_release,
4193 		.poll =			snd_pcm_poll,
4194 		.unlocked_ioctl =	snd_pcm_ioctl,
4195 		.compat_ioctl = 	snd_pcm_ioctl_compat,
4196 		.mmap =			snd_pcm_mmap,
4197 		.fasync =		snd_pcm_fasync,
4198 		.get_unmapped_area =	snd_pcm_get_unmapped_area,
4199 	},
4200 	{
4201 		.owner =		THIS_MODULE,
4202 		.read =			snd_pcm_read,
4203 		.read_iter =		snd_pcm_readv,
4204 		.open =			snd_pcm_capture_open,
4205 		.release =		snd_pcm_release,
4206 		.poll =			snd_pcm_poll,
4207 		.unlocked_ioctl =	snd_pcm_ioctl,
4208 		.compat_ioctl = 	snd_pcm_ioctl_compat,
4209 		.mmap =			snd_pcm_mmap,
4210 		.fasync =		snd_pcm_fasync,
4211 		.get_unmapped_area =	snd_pcm_get_unmapped_area,
4212 	}
4213 };
4214