1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * compress_core.c - compress offload core
4 *
5 * Copyright (C) 2011 Intel Corporation
6 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 */
12 #define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
14
15 #include <linux/file.h>
16 #include <linux/fs.h>
17 #include <linux/list.h>
18 #include <linux/math64.h>
19 #include <linux/mm.h>
20 #include <linux/mutex.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/types.h>
25 #include <linux/uio.h>
26 #include <linux/uaccess.h>
27 #include <linux/dma-buf.h>
28 #include <linux/module.h>
29 #include <linux/compat.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include <sound/info.h>
33 #include <sound/compress_params.h>
34 #include <sound/compress_offload.h>
35 #include <sound/compress_driver.h>
36
37 /* struct snd_compr_codec_caps overflows the ioctl bit size for some
38 * architectures, so we need to disable the relevant ioctls.
39 */
40 #if _IOC_SIZEBITS < 14
41 #define COMPR_CODEC_CAPS_OVERFLOW
42 #endif
43
44 /* TODO:
45 * - add substream support for multiple devices in case of
46 * SND_DYNAMIC_MINORS is not used
47 * - Multiple node representation
48 * driver should be able to register multiple nodes
49 */
50
/*
 * Per-open-file state for the compress device node: the cached
 * capability flags plus the embedded stream instance itself.
 */
struct snd_compr_file {
	unsigned long caps;
	struct snd_compr_stream stream;
};
55
/* shared error handler, scheduled from snd_compr_stop_error() */
static void error_delayed_work(struct work_struct *work);

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
static void snd_compr_task_free_all(struct snd_compr_stream *stream);
#else
/* without accel support there are no tasks to free: no-op stub */
static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
#endif
63
64 /*
65 * a note on stream states used:
66 * we use following states in the compressed core
67 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
68 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
69 * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
70 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
71 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
72 * playback only). User after setting up stream writes the data buffer
73 * before starting the stream.
74 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
75 * decoding/encoding and rendering/capturing data.
76 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
77 * by calling SNDRV_COMPRESS_DRAIN.
78 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
79 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
80 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
81 */
/*
 * Open callback for the compress device node.
 *
 * The file access mode selects the stream direction (write-only =
 * playback, read-only = capture, read-write = accel) and is matched
 * against the direction the device was registered with.  Per-open
 * state (snd_compr_file + runtime) is allocated here and handed to the
 * driver's open() callback under the device lock.
 */
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else if ((f->f_flags & O_ACCMODE) == O_RDWR)
		dirn = SND_COMPRESS_ACCEL;
	else
		return -EINVAL;

	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);

	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	INIT_LIST_HEAD(&runtime->tasks);
#endif
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	/* driver open() is serialized against other ops on this device */
	scoped_guard(mutex, &compr->lock)
		ret = compr->ops->open(&data->stream);
	if (ret) {
		kfree(runtime);
		kfree(data);
	}
	/* drop the reference taken by snd_lookup_minor_data() */
	snd_card_unref(compr->card);
	return ret;
}
151
/*
 * Release callback for the compress device node.  Stops a still-active
 * stream, frees any pending accel tasks, then tears down driver state
 * and all core allocations made at open time.
 */
static int snd_compr_free(struct inode *inode, struct file *f)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_runtime *runtime = data->stream.runtime;

	/* make sure a pending error-stop work item is not racing with us */
	cancel_delayed_work_sync(&data->stream.error_work);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		/* stream still active: ask the driver to stop it first */
		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;
	}

	snd_compr_task_free_all(&data->stream);

	data->stream.ops->free(&data->stream);
	/* a DMA buffer (dma_buffer_p) is owned by the driver, not by us */
	if (!data->stream.runtime->dma_buffer_p)
		kfree(data->stream.runtime->buffer);
	kfree(data->stream.runtime);
	kfree(data);
	return 0;
}
178
179 static void
snd_compr_tstamp32_from_64(struct snd_compr_tstamp * tstamp32,const struct snd_compr_tstamp64 * tstamp64)180 snd_compr_tstamp32_from_64(struct snd_compr_tstamp *tstamp32,
181 const struct snd_compr_tstamp64 *tstamp64)
182 {
183 tstamp32->byte_offset = tstamp64->byte_offset;
184 tstamp32->copied_total = (u32)tstamp64->copied_total;
185 tstamp32->pcm_frames = (u32)tstamp64->pcm_frames;
186 tstamp32->pcm_io_frames = (u32)tstamp64->pcm_io_frames;
187 tstamp32->sampling_rate = tstamp64->sampling_rate;
188 }
189
/*
 * Query the DSP position via the driver's pointer() callback and mirror
 * the moved-byte counter into the runtime bookkeeping.  Returns
 * -ENOTSUPP when the driver cannot report a position.
 */
static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
		struct snd_compr_tstamp64 *tstamp)
{
	if (!stream->ops->pointer)
		return -ENOTSUPP;
	stream->ops->pointer(stream, tstamp);
	pr_debug("dsp consumed till %u total %llu bytes\n", tstamp->byte_offset,
		tstamp->copied_total);
	/* playback: DSP consumes our bytes; capture: DSP produces them */
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		stream->runtime->total_bytes_transferred = tstamp->copied_total;
	else
		stream->runtime->total_bytes_available = tstamp->copied_total;
	return 0;
}
204
/*
 * Compute how many bytes userspace may move right now and fill in the
 * current DSP timestamp.  For playback this is the free space in the
 * ring buffer; for capture it is the amount of data ready to be read.
 * The avail value is returned even when the timestamp query fails.
 */
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
		struct snd_compr_avail64 *avail)
{
	memset(avail, 0, sizeof(*avail));
	snd_compr_update_tstamp(stream, &avail->tstamp);
	/* Still need to return avail even if tstamp can't be filled in */

	/* freshly set up playback stream: whole buffer is writable */
	if (stream->runtime->total_bytes_available == 0 &&
			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
			stream->direction == SND_COMPRESS_PLAYBACK) {
		pr_debug("detected init and someone forgot to do a write\n");
		return stream->runtime->buffer_size;
	}
	pr_debug("app wrote %llu, DSP consumed %llu\n",
			stream->runtime->total_bytes_available,
			stream->runtime->total_bytes_transferred);
	if (stream->runtime->total_bytes_available ==
				stream->runtime->total_bytes_transferred) {
		if (stream->direction == SND_COMPRESS_PLAYBACK) {
			pr_debug("both pointers are same, returning full avail\n");
			return stream->runtime->buffer_size;
		} else {
			pr_debug("both pointers are same, returning no avail\n");
			return 0;
		}
	}

	/* bytes queued but not yet consumed by the DSP */
	avail->avail = stream->runtime->total_bytes_available -
			stream->runtime->total_bytes_transferred;
	/* for playback convert pending bytes into free buffer space */
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		avail->avail = stream->runtime->buffer_size - avail->avail;

	pr_debug("ret avail as %zu\n", (size_t)avail->avail);
	return avail->avail;
}
240
snd_compr_get_avail(struct snd_compr_stream * stream)241 static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
242 {
243 struct snd_compr_avail64 avail;
244
245 return snd_compr_calc_avail(stream, &avail);
246 }
247
snd_compr_avail32_from_64(struct snd_compr_avail * avail32,const struct snd_compr_avail64 * avail64)248 static void snd_compr_avail32_from_64(struct snd_compr_avail *avail32,
249 const struct snd_compr_avail64 *avail64)
250 {
251 avail32->avail = avail64->avail;
252 snd_compr_tstamp32_from_64(&avail32->tstamp, &avail64->tstamp);
253 }
254
/*
 * SNDRV_COMPRESS_AVAIL handler, shared by the 32-bit and 64-bit ABI
 * variants.  Computes avail + tstamp, narrows to the 32-bit layout when
 * requested, and copies the result to userspace.  Not valid for accel
 * streams, which track work via tasks instead of a ring buffer.
 */
static int snd_compr_ioctl_avail(struct snd_compr_stream *stream,
			unsigned long arg, bool is_32bit)
{
	struct snd_compr_avail64 ioctl_avail64;
	struct snd_compr_avail ioctl_avail32;
	size_t avail;
	const void *copy_from = &ioctl_avail64;
	size_t copy_size = sizeof(ioctl_avail64);

	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;

	avail = snd_compr_calc_avail(stream, &ioctl_avail64);
	ioctl_avail64.avail = avail;
	if (is_32bit) {
		snd_compr_avail32_from_64(&ioctl_avail32, &ioctl_avail64);
		copy_from = &ioctl_avail32;
		copy_size = sizeof(ioctl_avail32);
	}

	/* report stream error states in preference to the computed avail */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	if (copy_to_user((__u64 __user *)arg, copy_from, copy_size))
		return -EFAULT;
	return 0;
}
288
/*
 * Copy userspace data into the stream ring buffer at the application
 * pointer, wrapping around the buffer end when needed, then notify the
 * driver via ack().  Returns the number of bytes copied or -EFAULT.
 */
static int snd_compr_write_data(struct snd_compr_stream *stream,
	       const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
				    runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
		      (app_pointer * runtime->buffer_size);

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %lu at %llu\n", (unsigned long)count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		/* fits without wrapping */
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		/* wraps: split the copy at the buffer end */
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}
318
/*
 * write() handler for playback streams.  Clamps the request to the
 * currently free buffer space, hands the data either to the driver's
 * copy() callback or into the core ring buffer, and transitions a
 * freshly set-up stream to PREPARED.  Not valid for accel streams.
 */
static ssize_t snd_compr_write(struct file *f, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);
	/* write is allowed when stream is running or has been setup */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
		break;
	default:
		return -EBADFD;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %lu\n", (unsigned long)avail);
	/* calculate how much we can write to buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		/* driver moves the data itself; cast away constness for
		 * the shared copy() signature */
		char __user* cbuf = (char __user*)buf;
		retval = stream->ops->copy(stream, cbuf, avail);
	} else {
		retval = snd_compr_write_data(stream, buf, avail);
	}
	if (retval > 0)
		stream->runtime->total_bytes_available += retval;

	/* while initiating the stream, write should be called before START
	 * call, so in setup move state */
	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
		pr_debug("stream prepared, Houston we are good to go\n");
	}

	return retval;
}
368
369
/*
 * read() handler for capture streams.  Clamps the request to the data
 * currently available and fetches it through the driver's copy()
 * callback (mandatory for capture).  Not valid for accel streams.
 */
static ssize_t snd_compr_read(struct file *f, char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);

	/* read is allowed when stream is running, paused, draining and setup
	 * (yes setup is state which we transition to after stop, so if user
	 * wants to read data after stop we allow that)
	 */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_SUSPENDED:
	case SNDRV_PCM_STATE_DISCONNECTED:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %lu\n", (unsigned long)avail);
	/* calculate how much we can read from buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy)
		retval = stream->ops->copy(stream, buf, avail);
	else
		return -ENXIO;
	if (retval > 0)
		stream->runtime->total_bytes_transferred += retval;

	return retval;
}
415
/* mmap of the compressed stream buffer is intentionally unsupported */
static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
	return -ENXIO;
}
420
snd_compr_get_poll(struct snd_compr_stream * stream)421 static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
422 {
423 if (stream->direction == SND_COMPRESS_PLAYBACK)
424 return EPOLLOUT | EPOLLWRNORM;
425 else
426 return EPOLLIN | EPOLLRDNORM;
427 }
428
/*
 * poll() handler.  For accel streams readiness is derived from the task
 * queue; otherwise from the amount of buffer space/data available.  A
 * wakeup during DRAINING means the drain has completed, so the stream
 * is also moved back to SETUP here.
 */
static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	struct snd_compr_runtime *runtime;
	size_t avail;
	__poll_t retval = 0;

	if (snd_BUG_ON(!data))
		return EPOLLERR;

	stream = &data->stream;
	runtime = stream->runtime;

	guard(mutex)(&stream->device->lock);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_XRUN:
		return snd_compr_get_poll(stream) | EPOLLERR;
	default:
		break;
	}

	poll_wait(f, &runtime->sleep, wait);

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	if (stream->direction == SND_COMPRESS_ACCEL) {
		struct snd_compr_task_runtime *task;
		/* writable while there are free task slots */
		if (runtime->fragments > runtime->active_tasks)
			retval |= EPOLLOUT | EPOLLWRNORM;
		/* readable when the oldest task has finished */
		task = list_first_entry_or_null(&runtime->tasks,
						struct snd_compr_task_runtime,
						list);
		if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
			retval |= EPOLLIN | EPOLLRDNORM;
		return retval;
	}
#endif

	avail = snd_compr_get_avail(stream);
	pr_debug("avail is %lu\n", (unsigned long)avail);
	/* check if we have at least one fragment to fill */
	switch (runtime->state) {
	case SNDRV_PCM_STATE_DRAINING:
		/* stream has been woken up after drain is complete
		 * draining done so set stream state to stopped
		 */
		retval = snd_compr_get_poll(stream);
		runtime->state = SNDRV_PCM_STATE_SETUP;
		break;
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->fragment_size)
			retval = snd_compr_get_poll(stream);
		break;
	default:
		return snd_compr_get_poll(stream) | EPOLLERR;
	}

	return retval;
}
492
493 static int
snd_compr_get_caps(struct snd_compr_stream * stream,unsigned long arg)494 snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
495 {
496 int retval;
497 struct snd_compr_caps caps;
498
499 if (!stream->ops->get_caps)
500 return -ENXIO;
501
502 memset(&caps, 0, sizeof(caps));
503 retval = stream->ops->get_caps(stream, &caps);
504 if (retval)
505 goto out;
506 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
507 retval = -EFAULT;
508 out:
509 return retval;
510 }
511
#ifndef COMPR_CODEC_CAPS_OVERFLOW
/*
 * SNDRV_COMPRESS_GET_CODEC_CAPS handler.  The caps structure is large,
 * so it is heap-allocated (auto-freed via __free) rather than placed on
 * the stack.  Compiled out where the ioctl size field cannot encode it.
 */
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;

	struct snd_compr_codec_caps *caps __free(kfree) =
		kzalloc_obj(*caps);
	if (!caps)
		return -ENOMEM;

	retval = stream->ops->get_codec_caps(stream, caps);
	if (retval)
		return retval;
	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		return -EFAULT;
	return retval;
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
534
/*
 * snd_compr_malloc_pages - allocate a DMA buffer for the stream
 *
 * Allocates @size bytes using the stream's preconfigured DMA device
 * descriptor and installs it as the runtime buffer.  Note the unusual
 * convention: returns 1 on success, or a negative error code.
 */
int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
{
	struct snd_dma_buffer *dmab;
	int ret;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	dmab = kzalloc_obj(*dmab);
	if (!dmab)
		return -ENOMEM;
	dmab->dev = stream->dma_buffer.dev;
	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
	if (ret < 0) {
		kfree(dmab);
		return ret;
	}

	snd_compr_set_runtime_buffer(stream, dmab);
	stream->runtime->dma_bytes = size;
	return 1;
}
EXPORT_SYMBOL(snd_compr_malloc_pages);
557
/*
 * snd_compr_free_pages - release the stream's runtime DMA buffer
 *
 * Frees the buffer only if it was allocated by the core (i.e. it is not
 * the driver's own preassigned stream->dma_buffer), then detaches it
 * from the runtime.  Returns 0, or -EINVAL on a bogus stream.
 */
int snd_compr_free_pages(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	runtime = stream->runtime;
	if (runtime->dma_area == NULL)
		return 0;
	if (runtime->dma_buffer_p != &stream->dma_buffer) {
		/* It's a newly allocated buffer. Release it now. */
		snd_dma_free_pages(runtime->dma_buffer_p);
		kfree(runtime->dma_buffer_p);
	}

	snd_compr_set_runtime_buffer(stream, NULL);
	return 0;
}
EXPORT_SYMBOL(snd_compr_free_pages);
577
578 /* revisit this with snd_pcm_preallocate_xxx */
snd_compr_allocate_buffer(struct snd_compr_stream * stream,struct snd_compr_params * params)579 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
580 struct snd_compr_params *params)
581 {
582 unsigned int buffer_size;
583 void *buffer = NULL;
584
585 if (stream->direction == SND_COMPRESS_ACCEL)
586 goto params;
587
588 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
589 if (stream->ops->copy) {
590 buffer = NULL;
591 /* if copy is defined the driver will be required to copy
592 * the data from core
593 */
594 } else {
595 if (stream->runtime->dma_buffer_p) {
596
597 if (buffer_size > stream->runtime->dma_buffer_p->bytes)
598 dev_err(stream->device->dev,
599 "Not enough DMA buffer");
600 else
601 buffer = stream->runtime->dma_buffer_p->area;
602
603 } else {
604 buffer = kmalloc(buffer_size, GFP_KERNEL);
605 }
606
607 if (!buffer)
608 return -ENOMEM;
609 }
610
611 stream->runtime->buffer = buffer;
612 stream->runtime->buffer_size = buffer_size;
613 params:
614 stream->runtime->fragment_size = params->buffer.fragment_size;
615 stream->runtime->fragments = params->buffer.fragments;
616 return 0;
617 }
618
619 static int
snd_compress_check_input(struct snd_compr_stream * stream,struct snd_compr_params * params)620 snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
621 {
622 u32 max_fragments;
623
624 /* first let's check the buffer parameter's */
625 if (params->buffer.fragment_size == 0)
626 return -EINVAL;
627
628 if (stream->direction == SND_COMPRESS_ACCEL)
629 max_fragments = 64; /* safe value */
630 else
631 max_fragments = U32_MAX / params->buffer.fragment_size;
632
633 if (params->buffer.fragments > max_fragments ||
634 params->buffer.fragments == 0)
635 return -EINVAL;
636
637 /* now codec parameters */
638 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
639 return -EINVAL;
640
641 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
642 return -EINVAL;
643
644 return 0;
645 }
646
647 static int
snd_compr_set_params(struct snd_compr_stream * stream,unsigned long arg)648 snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
649 {
650 int retval;
651
652 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
653 /*
654 * we should allow parameter change only when stream has been
655 * opened not in other cases
656 */
657 struct snd_compr_params *params __free(kfree) =
658 memdup_user((void __user *)arg, sizeof(*params));
659
660 if (IS_ERR(params))
661 return PTR_ERR(params);
662
663 retval = snd_compress_check_input(stream, params);
664 if (retval)
665 return retval;
666
667 retval = snd_compr_allocate_buffer(stream, params);
668 if (retval)
669 return -ENOMEM;
670
671 retval = stream->ops->set_params(stream, params);
672 if (retval)
673 return retval;
674
675 if (stream->next_track)
676 return retval;
677
678 stream->metadata_set = false;
679 stream->next_track = false;
680
681 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
682 } else {
683 return -EPERM;
684 }
685 return retval;
686 }
687
/*
 * SNDRV_COMPRESS_GET_PARAMS handler: ask the driver for the currently
 * configured codec parameters and copy them to userspace.  The codec
 * struct is heap-allocated (auto-freed via __free) due to its size.
 */
static int
snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;

	if (!stream->ops->get_params)
		return -EBADFD;

	struct snd_codec *params __free(kfree) =
		kzalloc_obj(*params);
	if (!params)
		return -ENOMEM;
	retval = stream->ops->get_params(stream, params);
	if (retval)
		return retval;
	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
		return -EFAULT;
	return retval;
}
707
708 static int
snd_compr_get_metadata(struct snd_compr_stream * stream,unsigned long arg)709 snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
710 {
711 struct snd_compr_metadata metadata;
712 int retval;
713
714 if (!stream->ops->get_metadata)
715 return -ENXIO;
716
717 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
718 return -EFAULT;
719
720 retval = stream->ops->get_metadata(stream, &metadata);
721 if (retval != 0)
722 return retval;
723
724 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
725 return -EFAULT;
726
727 return 0;
728 }
729
730 static int
snd_compr_set_metadata(struct snd_compr_stream * stream,unsigned long arg)731 snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
732 {
733 struct snd_compr_metadata metadata;
734 int retval;
735
736 if (!stream->ops->set_metadata)
737 return -ENXIO;
738 /*
739 * we should allow parameter change only when stream has been
740 * opened not in other cases
741 */
742 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
743 return -EFAULT;
744
745 retval = stream->ops->set_metadata(stream, &metadata);
746 stream->metadata_set = true;
747
748 return retval;
749 }
750
/*
 * SNDRV_COMPRESS_TSTAMP handler (32- and 64-bit ABI variants): query
 * the current DSP position and copy the appropriately sized timestamp
 * layout to userspace.
 */
static inline int snd_compr_tstamp(struct snd_compr_stream *stream,
		unsigned long arg, bool is_32bit)
{
	struct snd_compr_tstamp64 tstamp64 = { 0 };
	struct snd_compr_tstamp tstamp32 = { 0 };
	const void *copy_from = &tstamp64;
	size_t copy_size = sizeof(tstamp64);
	int ret;

	ret = snd_compr_update_tstamp(stream, &tstamp64);
	if (ret == 0) {
		if (is_32bit) {
			/* legacy userspace gets the narrowed layout */
			snd_compr_tstamp32_from_64(&tstamp32, &tstamp64);
			copy_from = &tstamp32;
			copy_size = sizeof(tstamp32);
		}
		ret = copy_to_user((void __user *)arg, copy_from, copy_size) ?
			      -EFAULT :
			      0;
	}
	return ret;
}
773
/*
 * SNDRV_COMPRESS_PAUSE handler.  A RUNNING stream moves to PAUSED; a
 * DRAINING stream may be paused only if the driver opted in via
 * use_pause_in_draining, in which case only the flag is set and the
 * state stays DRAINING.
 */
static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->device->use_pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->pause_in_draining = true;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
796
/*
 * SNDRV_COMPRESS_RESUME handler — the mirror of snd_compr_pause():
 * PAUSED goes back to RUNNING, and a stream paused while draining
 * (pause_in_draining) resumes its drain.
 */
static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_PAUSED:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->pause_in_draining = false;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
819
snd_compr_start(struct snd_compr_stream * stream)820 static int snd_compr_start(struct snd_compr_stream *stream)
821 {
822 int retval;
823
824 switch (stream->runtime->state) {
825 case SNDRV_PCM_STATE_SETUP:
826 if (stream->direction != SND_COMPRESS_CAPTURE)
827 return -EPERM;
828 break;
829 case SNDRV_PCM_STATE_PREPARED:
830 break;
831 default:
832 return -EPERM;
833 }
834
835 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
836 if (!retval)
837 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
838 return retval;
839 }
840
/*
 * SNDRV_COMPRESS_STOP handler.  Only a stream that has actually been
 * started (or is draining/paused) can be stopped.  On success all
 * gapless/drain flags are cleared, any drain waiter is woken, and the
 * byte counters are reset; snd_compr_drain_notify() moves the state.
 */
static int snd_compr_stop(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		return -EPERM;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	if (!retval) {
		/* clear flags and stop any drain wait */
		stream->partial_drain = false;
		stream->metadata_set = false;
		stream->pause_in_draining = false;
		snd_compr_drain_notify(stream);
		stream->runtime->total_bytes_available = 0;
		stream->runtime->total_bytes_transferred = 0;
	}
	return retval;
}
866
/*
 * Deferred fatal-error handler, queued by snd_compr_stop_error() so the
 * stop trigger runs in a context where the device lock may be taken.
 * Wakes any sleeper (e.g. a drain waiter) afterwards.
 */
static void error_delayed_work(struct work_struct *work)
{
	struct snd_compr_stream *stream;

	stream = container_of(work, struct snd_compr_stream, error_work.work);

	guard(mutex)(&stream->device->lock);

	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	wake_up(&stream->runtime->sleep);
}
878
/**
 * snd_compr_stop_error: Report a fatal error on a stream
 * @stream: pointer to stream
 * @state: state to transition the stream to
 *
 * Stop the stream and set its state.
 *
 * Should be called with compressed device lock held.
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compr_stop_error(struct snd_compr_stream *stream,
			 snd_pcm_state_t state)
{
	if (stream->runtime->state == state)
		return 0;

	stream->runtime->state = state;

	pr_debug("Changing state to: %d\n", state);

	/* the actual stop trigger is deferred to error_delayed_work(),
	 * since this may be called from a driver context where taking
	 * the device lock directly is not possible */
	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_compr_stop_error);
905
/*
 * Block until the driver signals drain completion (state leaves
 * DRAINING) or the wait is interrupted by a signal.  The device lock is
 * dropped for the duration of the wait and retaken before returning.
 */
static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
	int ret;

	/*
	 * We are called with lock held. So drop the lock while we wait for
	 * drain complete notification from the driver
	 *
	 * It is expected that driver will notify the drain completion and then
	 * stream will be moved to SETUP state, even if draining resulted in an
	 * error. We can trigger next track after this.
	 */
	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
	mutex_unlock(&stream->device->lock);

	/* we wait for drain to complete here, drain can return when
	 * interruption occurred, wait returned error or success.
	 * For the first two cases we don't do anything different here and
	 * return after waking up
	 */

	ret = wait_event_interruptible(stream->runtime->sleep,
			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
	if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
	else if (ret)
		pr_debug("wait for drain failed with %d\n", ret);


	wake_up(&stream->runtime->sleep);
	mutex_lock(&stream->device->lock);

	return ret;
}
940
/*
 * SNDRV_COMPRESS_DRAIN handler: ask the driver to play out all queued
 * data, then sleep until the drain completes.  Only valid on a stream
 * that is actually running or draining; XRUN is reported as -EPIPE.
 */
static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	if (retval) {
		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
		/* release anyone blocked in poll/drain on this stream */
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	return snd_compress_wait_for_drain(stream);
}
966
/*
 * SNDRV_COMPRESS_NEXT_TRACK handler for gapless playback: signal the
 * driver that subsequent writes belong to a new track.  Requires a
 * running playback stream whose current-track metadata has been set.
 */
static int snd_compr_next_track(struct snd_compr_stream *stream)
{
	int retval;

	/* only a running stream can transition to next track */
	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* next track doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* you can signal next track if this is intended to be a gapless stream
	 * and current track metadata is set
	 */
	if (stream->metadata_set == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
	if (retval != 0)
		return retval;
	/* metadata must be provided again for the following track */
	stream->metadata_set = false;
	stream->next_track = true;
	return 0;
}
992
/*
 * SNDRV_COMPRESS_PARTIAL_DRAIN handler: drain only the current track of
 * a gapless playback stream.  Requires that next-track has already been
 * signalled; waits for the driver's drain-complete notification.
 */
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		/* release anyone blocked in poll/drain on this stream */
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}
1028
1029 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1030
1031 static struct snd_compr_task_runtime *
snd_compr_find_task(struct snd_compr_stream * stream,__u64 seqno)1032 snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
1033 {
1034 struct snd_compr_task_runtime *task;
1035
1036 list_for_each_entry(task, &stream->runtime->tasks, list) {
1037 if (task->seqno == seqno)
1038 return task;
1039 }
1040 return NULL;
1041 }
1042
snd_compr_task_free(struct snd_compr_task_runtime * task)1043 static void snd_compr_task_free(struct snd_compr_task_runtime *task)
1044 {
1045 if (task->output)
1046 dma_buf_put(task->output);
1047 if (task->input)
1048 dma_buf_put(task->input);
1049 kfree(task);
1050 }
1051
snd_compr_seqno_next(struct snd_compr_stream * stream)1052 static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1053 {
1054 u64 seqno = ++stream->runtime->task_seqno;
1055 if (seqno == 0)
1056 seqno = ++stream->runtime->task_seqno;
1057 return seqno;
1058 }
1059
snd_compr_task_new(struct snd_compr_stream * stream,struct snd_compr_task * utask)1060 static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1061 {
1062 struct snd_compr_task_runtime *task;
1063 int retval, fd_i, fd_o;
1064
1065 if (stream->runtime->total_tasks >= stream->runtime->fragments)
1066 return -EBUSY;
1067 if (utask->origin_seqno != 0 || utask->input_size != 0)
1068 return -EINVAL;
1069 task = kzalloc_obj(*task);
1070 if (task == NULL)
1071 return -ENOMEM;
1072 task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1073 task->input_size = utask->input_size;
1074 retval = stream->ops->task_create(stream, task);
1075 if (retval < 0)
1076 goto cleanup;
1077 /* similar functionality as in dma_buf_fd(), but ensure that both
1078 file descriptors are allocated before fd_install() */
1079 if (!task->input || !task->input->file || !task->output || !task->output->file) {
1080 retval = -EINVAL;
1081 goto cleanup;
1082 }
1083 fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
1084 if (fd_i < 0)
1085 goto cleanup;
1086 fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
1087 if (fd_o < 0) {
1088 put_unused_fd(fd_i);
1089 goto cleanup;
1090 }
1091 /* keep dmabuf reference until freed with task free ioctl */
1092 get_dma_buf(task->input);
1093 get_dma_buf(task->output);
1094 fd_install(fd_i, task->input->file);
1095 fd_install(fd_o, task->output->file);
1096 utask->input_fd = fd_i;
1097 utask->output_fd = fd_o;
1098 list_add_tail(&task->list, &stream->runtime->tasks);
1099 stream->runtime->total_tasks++;
1100 return 0;
1101 cleanup:
1102 snd_compr_task_free(task);
1103 return retval;
1104 }
1105
snd_compr_task_create(struct snd_compr_stream * stream,unsigned long arg)1106 static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
1107 {
1108 int retval;
1109
1110 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1111 return -EPERM;
1112
1113 struct snd_compr_task *task __free(kfree) =
1114 memdup_user((void __user *)arg, sizeof(*task));
1115 if (IS_ERR(task))
1116 return PTR_ERR(task);
1117 retval = snd_compr_task_new(stream, task);
1118 if (retval >= 0)
1119 if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1120 retval = -EFAULT;
1121 return retval;
1122 }
1123
snd_compr_task_start_prepare(struct snd_compr_task_runtime * task,struct snd_compr_task * utask)1124 static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
1125 struct snd_compr_task *utask)
1126 {
1127 if (task == NULL)
1128 return -EINVAL;
1129 if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
1130 return -EBUSY;
1131 if (utask->input_size > task->input->size)
1132 return -EINVAL;
1133 task->flags = utask->flags;
1134 task->input_size = utask->input_size;
1135 task->state = SND_COMPRESS_TASK_STATE_IDLE;
1136 return 0;
1137 }
1138
snd_compr_task_start(struct snd_compr_stream * stream,struct snd_compr_task * utask)1139 static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1140 {
1141 struct snd_compr_task_runtime *task;
1142 int retval;
1143
1144 if (utask->origin_seqno > 0) {
1145 task = snd_compr_find_task(stream, utask->origin_seqno);
1146 retval = snd_compr_task_start_prepare(task, utask);
1147 if (retval < 0)
1148 return retval;
1149 task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1150 utask->origin_seqno = 0;
1151 list_move_tail(&task->list, &stream->runtime->tasks);
1152 } else {
1153 task = snd_compr_find_task(stream, utask->seqno);
1154 if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
1155 return -EBUSY;
1156 retval = snd_compr_task_start_prepare(task, utask);
1157 if (retval < 0)
1158 return retval;
1159 }
1160 retval = stream->ops->task_start(stream, task);
1161 if (retval >= 0) {
1162 task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
1163 stream->runtime->active_tasks++;
1164 }
1165 return retval;
1166 }
1167
snd_compr_task_start_ioctl(struct snd_compr_stream * stream,unsigned long arg)1168 static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1169 {
1170 int retval;
1171
1172 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1173 return -EPERM;
1174
1175 struct snd_compr_task *task __free(kfree) =
1176 memdup_user((void __user *)arg, sizeof(*task));
1177 if (IS_ERR(task))
1178 return PTR_ERR(task);
1179 retval = snd_compr_task_start(stream, task);
1180 if (retval >= 0)
1181 if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1182 retval = -EFAULT;
1183 return retval;
1184 }
1185
snd_compr_task_stop_one(struct snd_compr_stream * stream,struct snd_compr_task_runtime * task)1186 static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
1187 struct snd_compr_task_runtime *task)
1188 {
1189 if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
1190 return;
1191 stream->ops->task_stop(stream, task);
1192 if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1193 stream->runtime->active_tasks--;
1194 list_move_tail(&task->list, &stream->runtime->tasks);
1195 task->state = SND_COMPRESS_TASK_STATE_IDLE;
1196 }
1197
snd_compr_task_free_one(struct snd_compr_stream * stream,struct snd_compr_task_runtime * task)1198 static void snd_compr_task_free_one(struct snd_compr_stream *stream,
1199 struct snd_compr_task_runtime *task)
1200 {
1201 snd_compr_task_stop_one(stream, task);
1202 stream->ops->task_free(stream, task);
1203 list_del(&task->list);
1204 snd_compr_task_free(task);
1205 stream->runtime->total_tasks--;
1206 }
1207
snd_compr_task_free_all(struct snd_compr_stream * stream)1208 static void snd_compr_task_free_all(struct snd_compr_stream *stream)
1209 {
1210 struct snd_compr_task_runtime *task, *temp;
1211
1212 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1213 snd_compr_task_free_one(stream, task);
1214 }
1215
/* per-task operation (stop or free) applied by snd_compr_task_seq() */
typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
				     struct snd_compr_task_runtime *task);

/*
 * Apply @fcn to the task whose seqno is passed from user space via @arg,
 * or to all tasks when that seqno is 0.  Only valid in the SETUP state.
 */
static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
			      snd_compr_seq_func_t fcn)
{
	struct snd_compr_task_runtime *task, *temp;
	__u64 seqno;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
	if (retval)
		return -EFAULT;
	retval = 0;
	if (seqno == 0) {
		/* reverse safe walk: @fcn may move or delete list entries */
		list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
			fcn(stream, task);
	} else {
		task = snd_compr_find_task(stream, seqno);
		if (task == NULL) {
			retval = -EINVAL;
		} else {
			fcn(stream, task);
		}
	}
	return retval;
}
1245
snd_compr_task_status(struct snd_compr_stream * stream,struct snd_compr_task_status * status)1246 static int snd_compr_task_status(struct snd_compr_stream *stream,
1247 struct snd_compr_task_status *status)
1248 {
1249 struct snd_compr_task_runtime *task;
1250
1251 task = snd_compr_find_task(stream, status->seqno);
1252 if (task == NULL)
1253 return -EINVAL;
1254 status->input_size = task->input_size;
1255 status->output_size = task->output_size;
1256 status->state = task->state;
1257 return 0;
1258 }
1259
snd_compr_task_status_ioctl(struct snd_compr_stream * stream,unsigned long arg)1260 static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1261 {
1262 int retval;
1263
1264 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1265 return -EPERM;
1266
1267 struct snd_compr_task_status *status __free(kfree) =
1268 memdup_user((void __user *)arg, sizeof(*status));
1269 if (IS_ERR(status))
1270 return PTR_ERR(status);
1271 retval = snd_compr_task_status(stream, status);
1272 if (retval >= 0)
1273 if (copy_to_user((void __user *)arg, status, sizeof(*status)))
1274 retval = -EFAULT;
1275 return retval;
1276 }
1277
1278 /**
1279 * snd_compr_task_finished: Notify that the task was finished
1280 * @stream: pointer to stream
1281 * @task: runtime task structure
1282 *
1283 * Set the finished task state and notify waiters.
1284 */
snd_compr_task_finished(struct snd_compr_stream * stream,struct snd_compr_task_runtime * task)1285 void snd_compr_task_finished(struct snd_compr_stream *stream,
1286 struct snd_compr_task_runtime *task)
1287 {
1288 guard(mutex)(&stream->device->lock);
1289 if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1290 stream->runtime->active_tasks--;
1291 task->state = SND_COMPRESS_TASK_STATE_FINISHED;
1292 wake_up(&stream->runtime->sleep);
1293 }
1294 EXPORT_SYMBOL_GPL(snd_compr_task_finished);
1295
1296 MODULE_IMPORT_NS("DMA_BUF");
1297 #endif /* CONFIG_SND_COMPRESS_ACCEL */
1298
snd_compr_ioctl(struct file * f,unsigned int cmd,unsigned long arg)1299 static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1300 {
1301 struct snd_compr_file *data = f->private_data;
1302 struct snd_compr_stream *stream;
1303
1304 if (snd_BUG_ON(!data))
1305 return -EFAULT;
1306
1307 stream = &data->stream;
1308
1309 guard(mutex)(&stream->device->lock);
1310 switch (cmd) {
1311 case SNDRV_COMPRESS_IOCTL_VERSION:
1312 return put_user(SNDRV_COMPRESS_VERSION,
1313 (int __user *)arg) ? -EFAULT : 0;
1314 case SNDRV_COMPRESS_GET_CAPS:
1315 return snd_compr_get_caps(stream, arg);
1316 #ifndef COMPR_CODEC_CAPS_OVERFLOW
1317 case SNDRV_COMPRESS_GET_CODEC_CAPS:
1318 return snd_compr_get_codec_caps(stream, arg);
1319 #endif
1320 case SNDRV_COMPRESS_SET_PARAMS:
1321 return snd_compr_set_params(stream, arg);
1322 case SNDRV_COMPRESS_GET_PARAMS:
1323 return snd_compr_get_params(stream, arg);
1324 case SNDRV_COMPRESS_SET_METADATA:
1325 return snd_compr_set_metadata(stream, arg);
1326 case SNDRV_COMPRESS_GET_METADATA:
1327 return snd_compr_get_metadata(stream, arg);
1328 }
1329
1330 if (stream->direction == SND_COMPRESS_ACCEL) {
1331 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1332 switch (cmd) {
1333 case SNDRV_COMPRESS_TASK_CREATE:
1334 return snd_compr_task_create(stream, arg);
1335 case SNDRV_COMPRESS_TASK_FREE:
1336 return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
1337 case SNDRV_COMPRESS_TASK_START:
1338 return snd_compr_task_start_ioctl(stream, arg);
1339 case SNDRV_COMPRESS_TASK_STOP:
1340 return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
1341 case SNDRV_COMPRESS_TASK_STATUS:
1342 return snd_compr_task_status_ioctl(stream, arg);
1343 }
1344 #endif
1345 return -ENOTTY;
1346 }
1347
1348 switch (cmd) {
1349 case SNDRV_COMPRESS_TSTAMP:
1350 return snd_compr_tstamp(stream, arg, true);
1351 case SNDRV_COMPRESS_TSTAMP64:
1352 return snd_compr_tstamp(stream, arg, false);
1353 case SNDRV_COMPRESS_AVAIL:
1354 return snd_compr_ioctl_avail(stream, arg, true);
1355 case SNDRV_COMPRESS_AVAIL64:
1356 return snd_compr_ioctl_avail(stream, arg, false);
1357 case SNDRV_COMPRESS_PAUSE:
1358 return snd_compr_pause(stream);
1359 case SNDRV_COMPRESS_RESUME:
1360 return snd_compr_resume(stream);
1361 case SNDRV_COMPRESS_START:
1362 return snd_compr_start(stream);
1363 case SNDRV_COMPRESS_STOP:
1364 return snd_compr_stop(stream);
1365 case SNDRV_COMPRESS_DRAIN:
1366 return snd_compr_drain(stream);
1367 case SNDRV_COMPRESS_PARTIAL_DRAIN:
1368 return snd_compr_partial_drain(stream);
1369 case SNDRV_COMPRESS_NEXT_TRACK:
1370 return snd_compr_next_track(stream);
1371 }
1372
1373 return -ENOTTY;
1374 }
1375
1376 /* support of 32bit userspace on 64bit platforms */
1377 #ifdef CONFIG_COMPAT
snd_compr_ioctl_compat(struct file * file,unsigned int cmd,unsigned long arg)1378 static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1379 unsigned long arg)
1380 {
1381 return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1382 }
1383 #endif
1384
/* VFS entry points for the compress character device */
static const struct file_operations snd_compr_file_ops = {
		.owner =	THIS_MODULE,
		.open =		snd_compr_open,
		.release =	snd_compr_free,
		.write =	snd_compr_write,
		.read =		snd_compr_read,
		.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
		.compat_ioctl = snd_compr_ioctl_compat,
#endif
		.mmap =		snd_compr_mmap,
		.poll =		snd_compr_poll,
};
1398
snd_compress_dev_register(struct snd_device * device)1399 static int snd_compress_dev_register(struct snd_device *device)
1400 {
1401 int ret;
1402 struct snd_compr *compr;
1403
1404 if (snd_BUG_ON(!device || !device->device_data))
1405 return -EBADFD;
1406 compr = device->device_data;
1407
1408 pr_debug("reg device %s, direction %d\n", compr->name,
1409 compr->direction);
1410 /* register compressed device */
1411 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1412 compr->card, compr->device,
1413 &snd_compr_file_ops, compr, compr->dev);
1414 if (ret < 0) {
1415 pr_err("snd_register_device failed %d\n", ret);
1416 return ret;
1417 }
1418 return ret;
1419
1420 }
1421
snd_compress_dev_disconnect(struct snd_device * device)1422 static int snd_compress_dev_disconnect(struct snd_device *device)
1423 {
1424 struct snd_compr *compr;
1425
1426 compr = device->device_data;
1427 snd_unregister_device(compr->dev);
1428 return 0;
1429 }
1430
1431 #ifdef CONFIG_SND_VERBOSE_PROCFS
snd_compress_proc_info_read(struct snd_info_entry * entry,struct snd_info_buffer * buffer)1432 static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1433 struct snd_info_buffer *buffer)
1434 {
1435 struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1436
1437 snd_iprintf(buffer, "card: %d\n", compr->card->number);
1438 snd_iprintf(buffer, "device: %d\n", compr->device);
1439 snd_iprintf(buffer, "stream: %s\n",
1440 compr->direction == SND_COMPRESS_PLAYBACK
1441 ? "PLAYBACK" : "CAPTURE");
1442 snd_iprintf(buffer, "id: %s\n", compr->id);
1443 }
1444
snd_compress_proc_init(struct snd_compr * compr)1445 static int snd_compress_proc_init(struct snd_compr *compr)
1446 {
1447 struct snd_info_entry *entry;
1448 char name[16];
1449
1450 sprintf(name, "compr%i", compr->device);
1451 entry = snd_info_create_card_entry(compr->card, name,
1452 compr->card->proc_root);
1453 if (!entry)
1454 return -ENOMEM;
1455 entry->mode = S_IFDIR | 0555;
1456 compr->proc_root = entry;
1457
1458 entry = snd_info_create_card_entry(compr->card, "info",
1459 compr->proc_root);
1460 if (entry)
1461 snd_info_set_text_ops(entry, compr,
1462 snd_compress_proc_info_read);
1463 compr->proc_info_entry = entry;
1464
1465 return 0;
1466 }
1467
snd_compress_proc_done(struct snd_compr * compr)1468 static void snd_compress_proc_done(struct snd_compr *compr)
1469 {
1470 snd_info_free_entry(compr->proc_info_entry);
1471 compr->proc_info_entry = NULL;
1472 snd_info_free_entry(compr->proc_root);
1473 compr->proc_root = NULL;
1474 }
1475
snd_compress_set_id(struct snd_compr * compr,const char * id)1476 static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1477 {
1478 strscpy(compr->id, id, sizeof(compr->id));
1479 }
1480 #else
snd_compress_proc_init(struct snd_compr * compr)1481 static inline int snd_compress_proc_init(struct snd_compr *compr)
1482 {
1483 return 0;
1484 }
1485
snd_compress_proc_done(struct snd_compr * compr)1486 static inline void snd_compress_proc_done(struct snd_compr *compr)
1487 {
1488 }
1489
snd_compress_set_id(struct snd_compr * compr,const char * id)1490 static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1491 {
1492 }
1493 #endif
1494
snd_compress_dev_free(struct snd_device * device)1495 static int snd_compress_dev_free(struct snd_device *device)
1496 {
1497 struct snd_compr *compr;
1498
1499 compr = device->device_data;
1500 snd_compress_proc_done(compr);
1501 put_device(compr->dev);
1502 return 0;
1503 }
1504
1505 /**
1506 * snd_compress_new: create new compress device
1507 * @card: sound card pointer
1508 * @device: device number
1509 * @dirn: device direction, should be of type enum snd_compr_direction
1510 * @id: ID string
1511 * @compr: compress device pointer
1512 *
1513 * Return: zero if successful, or a negative error code
1514 */
snd_compress_new(struct snd_card * card,int device,int dirn,const char * id,struct snd_compr * compr)1515 int snd_compress_new(struct snd_card *card, int device,
1516 int dirn, const char *id, struct snd_compr *compr)
1517 {
1518 static const struct snd_device_ops ops = {
1519 .dev_free = snd_compress_dev_free,
1520 .dev_register = snd_compress_dev_register,
1521 .dev_disconnect = snd_compress_dev_disconnect,
1522 };
1523 int ret;
1524
1525 #if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1526 if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
1527 return -EINVAL;
1528 #endif
1529
1530 compr->card = card;
1531 compr->device = device;
1532 compr->direction = dirn;
1533 mutex_init(&compr->lock);
1534
1535 snd_compress_set_id(compr, id);
1536
1537 ret = snd_device_alloc(&compr->dev, card);
1538 if (ret)
1539 return ret;
1540 dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1541
1542 ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1543 if (ret == 0)
1544 snd_compress_proc_init(compr);
1545 else
1546 put_device(compr->dev);
1547
1548 return ret;
1549 }
1550 EXPORT_SYMBOL_GPL(snd_compress_new);
1551
1552 MODULE_DESCRIPTION("ALSA Compressed offload framework");
1553 MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1554 MODULE_LICENSE("GPL v2");
1555