1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * compress_core.c - compress offload core
4 *
5 * Copyright (C) 2011 Intel Corporation
6 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 */
12 #define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
14
15 #include <linux/file.h>
16 #include <linux/fs.h>
17 #include <linux/list.h>
18 #include <linux/math64.h>
19 #include <linux/mm.h>
20 #include <linux/mutex.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/types.h>
25 #include <linux/uio.h>
26 #include <linux/uaccess.h>
27 #include <linux/dma-buf.h>
28 #include <linux/module.h>
29 #include <linux/compat.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
32 #include <sound/info.h>
33 #include <sound/compress_params.h>
34 #include <sound/compress_offload.h>
35 #include <sound/compress_driver.h>
36
37 /* struct snd_compr_codec_caps overflows the ioctl bit size for some
38 * architectures, so we need to disable the relevant ioctls.
39 */
40 #if _IOC_SIZEBITS < 14
41 #define COMPR_CODEC_CAPS_OVERFLOW
42 #endif
43
44 /* TODO:
45 * - add substream support for multiple devices in case of
46 * SND_DYNAMIC_MINORS is not used
47 * - Multiple node representation
48 * driver should be able to register multiple nodes
49 */
50
/*
 * Per-open-file state for a compress device node.
 * @caps: capability flags cache -- not referenced in this file, TODO confirm use
 * @stream: the compressed stream instance bound to this file handle
 */
struct snd_compr_file {
	unsigned long caps;
	struct snd_compr_stream stream;
};
55
/* worker that performs the deferred stop after a fatal stream error */
static void error_delayed_work(struct work_struct *work);

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
static void snd_compr_task_free_all(struct snd_compr_stream *stream);
#else
/* without accel support there are no tasks to free */
static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
#endif
63
64 /*
65 * a note on stream states used:
66 * we use following states in the compressed core
67 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
68 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
69 * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
70 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
71 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
72 * playback only). User after setting up stream writes the data buffer
73 * before starting the stream.
74 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
75 * decoding/encoding and rendering/capturing data.
76 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
77 * by calling SNDRV_COMPRESS_DRAIN.
78 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
79 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
80 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
81 */
/*
 * snd_compr_open() - open(2) handler for the compress device node
 *
 * The open mode selects the stream direction (write-only = playback,
 * read-only = capture, read-write = accel).  The device is looked up by
 * minor number, per-file and runtime state are allocated, and the
 * driver's open op is invoked under the device lock.  The card
 * reference taken by snd_lookup_minor_data() is dropped on every path.
 */
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	/* map open(2) access mode onto the stream direction */
	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else if ((f->f_flags & O_ACCMODE) == O_RDWR)
		dirn = SND_COMPRESS_ACCEL;
	else
		return -EINVAL;

	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	/* a node only serves the direction it was registered for */
	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);

	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	INIT_LIST_HEAD(&runtime->tasks);
#endif
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	/* the driver open op runs under the device lock */
	scoped_guard(mutex, &compr->lock)
		ret = compr->ops->open(&data->stream);
	if (ret) {
		kfree(runtime);
		kfree(data);
	}
	snd_card_unref(compr->card);
	return ret;
}
151
/*
 * snd_compr_free() - release(2) handler
 *
 * Stops the stream if it is still active, frees pending accel tasks,
 * calls the driver's free op and releases the core ring buffer (unless
 * it is a driver-owned DMA buffer) plus the runtime/file state.
 */
static int snd_compr_free(struct inode *inode, struct file *f)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_runtime *runtime = data->stream.runtime;

	/* make sure a queued error worker cannot touch freed state */
	cancel_delayed_work_sync(&data->stream.error_work);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;
	}

	snd_compr_task_free_all(&data->stream);

	data->stream.ops->free(&data->stream);
	/* a DMA buffer belongs to the driver, only kfree() core buffers */
	if (!data->stream.runtime->dma_buffer_p)
		kfree(data->stream.runtime->buffer);
	kfree(data->stream.runtime);
	kfree(data);
	return 0;
}
178
snd_compr_update_tstamp(struct snd_compr_stream * stream,struct snd_compr_tstamp * tstamp)179 static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
180 struct snd_compr_tstamp *tstamp)
181 {
182 if (!stream->ops->pointer)
183 return -ENOTSUPP;
184 stream->ops->pointer(stream, tstamp);
185 pr_debug("dsp consumed till %d total %d bytes\n",
186 tstamp->byte_offset, tstamp->copied_total);
187 if (stream->direction == SND_COMPRESS_PLAYBACK)
188 stream->runtime->total_bytes_transferred = tstamp->copied_total;
189 else
190 stream->runtime->total_bytes_available = tstamp->copied_total;
191 return 0;
192 }
193
/*
 * Compute available bytes in the ring buffer: free space for playback,
 * readable data for capture.  Also refreshes @avail->tstamp via the
 * driver pointer op; a failure there is ignored and avail is reported
 * from the cached counters.
 */
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
		struct snd_compr_avail *avail)
{
	memset(avail, 0, sizeof(*avail));
	snd_compr_update_tstamp(stream, &avail->tstamp);
	/* Still need to return avail even if tstamp can't be filled in */

	/* fresh playback setup with nothing written: whole buffer is free */
	if (stream->runtime->total_bytes_available == 0 &&
			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
			stream->direction == SND_COMPRESS_PLAYBACK) {
		pr_debug("detected init and someone forgot to do a write\n");
		return stream->runtime->buffer_size;
	}
	pr_debug("app wrote %lld, DSP consumed %lld\n",
			stream->runtime->total_bytes_available,
			stream->runtime->total_bytes_transferred);
	/* counters equal: buffer fully free (playback) or empty (capture) */
	if (stream->runtime->total_bytes_available ==
			stream->runtime->total_bytes_transferred) {
		if (stream->direction == SND_COMPRESS_PLAYBACK) {
			pr_debug("both pointers are same, returning full avail\n");
			return stream->runtime->buffer_size;
		} else {
			pr_debug("both pointers are same, returning no avail\n");
			return 0;
		}
	}

	/* queued-but-unconsumed bytes ... */
	avail->avail = stream->runtime->total_bytes_available -
			stream->runtime->total_bytes_transferred;
	/* ... for playback, free space is the remainder of the buffer */
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		avail->avail = stream->runtime->buffer_size - avail->avail;

	pr_debug("ret avail as %lld\n", avail->avail);
	return avail->avail;
}
229
snd_compr_get_avail(struct snd_compr_stream * stream)230 static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
231 {
232 struct snd_compr_avail avail;
233
234 return snd_compr_calc_avail(stream, &avail);
235 }
236
237 static int
snd_compr_ioctl_avail(struct snd_compr_stream * stream,unsigned long arg)238 snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
239 {
240 struct snd_compr_avail ioctl_avail;
241 size_t avail;
242
243 if (stream->direction == SND_COMPRESS_ACCEL)
244 return -EBADFD;
245
246 avail = snd_compr_calc_avail(stream, &ioctl_avail);
247 ioctl_avail.avail = avail;
248
249 switch (stream->runtime->state) {
250 case SNDRV_PCM_STATE_OPEN:
251 return -EBADFD;
252 case SNDRV_PCM_STATE_XRUN:
253 return -EPIPE;
254 default:
255 break;
256 }
257
258 if (copy_to_user((__u64 __user *)arg,
259 &ioctl_avail, sizeof(ioctl_avail)))
260 return -EFAULT;
261 return 0;
262 }
263
/*
 * Copy userspace data into the circular runtime buffer at the current
 * application pointer, wrapping at the buffer end, then let the driver
 * know via the ack op.
 *
 * Caller guarantees @count fits within the available free space.
 * Returns bytes copied, or -EFAULT on a failed user copy.
 */
static int snd_compr_write_data(struct snd_compr_stream *stream,
	       const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus: app_pointer = total_bytes_available % buffer_size */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
			runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
			(app_pointer * runtime->buffer_size);

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %ld at %lld\n",
			(unsigned long)count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		/* contiguous copy, no wrap-around needed */
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		/* wraps: copy up to buffer end, then rest from the start */
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}
294
/*
 * write(2) handler for playback streams.
 *
 * Copies at most the currently free buffer space (partial writes are
 * possible), bumps the application counter, and moves SETUP -> PREPARED
 * on the first write so that START becomes legal.  Not valid for accel
 * streams.  Runs under the device lock.
 */
static ssize_t snd_compr_write(struct file *f, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);
	/* write is allowed when stream is running or has been setup */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
		break;
	default:
		return -EBADFD;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can write to buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		/* driver copies directly; const is cast away because the
		 * copy op shares one signature for both directions
		 */
		char __user* cbuf = (char __user*)buf;
		retval = stream->ops->copy(stream, cbuf, avail);
	} else {
		retval = snd_compr_write_data(stream, buf, avail);
	}
	if (retval > 0)
		stream->runtime->total_bytes_available += retval;

	/* while initiating the stream, write should be called before START
	 * call, so in setup move state */
	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
		pr_debug("stream prepared, Houston we are good to go\n");
	}

	return retval;
}
344
345
/*
 * read(2) handler for capture streams.
 *
 * Requires a driver copy op (the core keeps no capture ring buffer of
 * its own).  Not valid for accel streams.  Runs under the device lock.
 */
static ssize_t snd_compr_read(struct file *f, char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	if (stream->direction == SND_COMPRESS_ACCEL)
		return -EBADFD;
	guard(mutex)(&stream->device->lock);

	/* read is allowed when stream is running, paused, draining and setup
	 * (yes setup is state which we transition to after stop, so if user
	 * wants to read data after stop we allow that)
	 */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_SUSPENDED:
	case SNDRV_PCM_STATE_DISCONNECTED:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can read from buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy)
		retval = stream->ops->copy(stream, buf, avail);
	else
		return -ENXIO;
	if (retval > 0)
		stream->runtime->total_bytes_transferred += retval;

	return retval;
}
391
/* mmap is not supported on compress device nodes */
static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
	return -ENXIO;
}
396
snd_compr_get_poll(struct snd_compr_stream * stream)397 static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
398 {
399 if (stream->direction == SND_COMPRESS_PLAYBACK)
400 return EPOLLOUT | EPOLLWRNORM;
401 else
402 return EPOLLIN | EPOLLRDNORM;
403 }
404
/*
 * poll(2) handler.
 *
 * OPEN/XRUN report POLLERR.  Accel streams are special-cased: writable
 * while free task slots remain, readable once the oldest queued task
 * has finished.  Otherwise, readiness means at least one fragment of
 * space (playback) or data (capture); waking from DRAINING also moves
 * the stream back to SETUP.
 */
static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	struct snd_compr_runtime *runtime;
	size_t avail;
	__poll_t retval = 0;

	if (snd_BUG_ON(!data))
		return EPOLLERR;

	stream = &data->stream;
	runtime = stream->runtime;

	guard(mutex)(&stream->device->lock);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_XRUN:
		return snd_compr_get_poll(stream) | EPOLLERR;
	default:
		break;
	}

	poll_wait(f, &runtime->sleep, wait);

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	if (stream->direction == SND_COMPRESS_ACCEL) {
		struct snd_compr_task_runtime *task;
		/* room for more tasks -> writable */
		if (runtime->fragments > runtime->active_tasks)
			retval |= EPOLLOUT | EPOLLWRNORM;
		task = list_first_entry_or_null(&runtime->tasks,
						struct snd_compr_task_runtime,
						list);
		/* oldest task done -> results are readable */
		if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
			retval |= EPOLLIN | EPOLLRDNORM;
		return retval;
	}
#endif

	avail = snd_compr_get_avail(stream);
	pr_debug("avail is %ld\n", (unsigned long)avail);
	/* check if we have at least one fragment to fill */
	switch (runtime->state) {
	case SNDRV_PCM_STATE_DRAINING:
		/* stream has been woken up after drain is complete
		 * draining done so set stream state to stopped
		 */
		retval = snd_compr_get_poll(stream);
		runtime->state = SNDRV_PCM_STATE_SETUP;
		break;
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= runtime->fragment_size)
			retval = snd_compr_get_poll(stream);
		break;
	default:
		return snd_compr_get_poll(stream) | EPOLLERR;
	}

	return retval;
}
468
469 static int
snd_compr_get_caps(struct snd_compr_stream * stream,unsigned long arg)470 snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
471 {
472 int retval;
473 struct snd_compr_caps caps;
474
475 if (!stream->ops->get_caps)
476 return -ENXIO;
477
478 memset(&caps, 0, sizeof(caps));
479 retval = stream->ops->get_caps(stream, &caps);
480 if (retval)
481 goto out;
482 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
483 retval = -EFAULT;
484 out:
485 return retval;
486 }
487
#ifndef COMPR_CODEC_CAPS_OVERFLOW
/*
 * SNDRV_COMPRESS_GET_CODEC_CAPS ioctl: fetch per-codec capabilities.
 * The struct is large, so it lives on the heap (auto-freed via
 * __free(kfree)) rather than on the kernel stack.
 */
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_codec_caps *caps __free(kfree) = NULL;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	retval = stream->ops->get_codec_caps(stream, caps);
	if (retval)
		return retval;
	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		return -EFAULT;
	return retval;
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
510
/**
 * snd_compr_malloc_pages - allocate a DMA buffer for the stream runtime
 * @stream: compressed stream
 * @size: requested buffer size in bytes
 *
 * Allocates a DMA buffer using the device/type preset in
 * stream->dma_buffer and installs it as the runtime buffer.
 *
 * Return: 1 on success (note the non-standard positive success value),
 * or a negative error code on failure.
 */
int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
{
	struct snd_dma_buffer *dmab;
	int ret;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return -ENOMEM;
	dmab->dev = stream->dma_buffer.dev;
	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
	if (ret < 0) {
		kfree(dmab);
		return ret;
	}

	snd_compr_set_runtime_buffer(stream, dmab);
	stream->runtime->dma_bytes = size;
	return 1;
}
EXPORT_SYMBOL(snd_compr_malloc_pages);
533
/**
 * snd_compr_free_pages - release the runtime DMA buffer
 * @stream: compressed stream
 *
 * Frees a buffer previously installed via snd_compr_malloc_pages().
 * The driver's own preset buffer (stream->dma_buffer) is left alone;
 * only a dynamically allocated one is released.
 *
 * Return: 0 on success, -EINVAL on a NULL stream/runtime.
 */
int snd_compr_free_pages(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	runtime = stream->runtime;
	if (runtime->dma_area == NULL)
		return 0;
	if (runtime->dma_buffer_p != &stream->dma_buffer) {
		/* It's a newly allocated buffer. Release it now. */
		snd_dma_free_pages(runtime->dma_buffer_p);
		kfree(runtime->dma_buffer_p);
	}

	snd_compr_set_runtime_buffer(stream, NULL);
	return 0;
}
EXPORT_SYMBOL(snd_compr_free_pages);
553
554 /* revisit this with snd_pcm_preallocate_xxx */
snd_compr_allocate_buffer(struct snd_compr_stream * stream,struct snd_compr_params * params)555 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
556 struct snd_compr_params *params)
557 {
558 unsigned int buffer_size;
559 void *buffer = NULL;
560
561 if (stream->direction == SND_COMPRESS_ACCEL)
562 goto params;
563
564 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
565 if (stream->ops->copy) {
566 buffer = NULL;
567 /* if copy is defined the driver will be required to copy
568 * the data from core
569 */
570 } else {
571 if (stream->runtime->dma_buffer_p) {
572
573 if (buffer_size > stream->runtime->dma_buffer_p->bytes)
574 dev_err(stream->device->dev,
575 "Not enough DMA buffer");
576 else
577 buffer = stream->runtime->dma_buffer_p->area;
578
579 } else {
580 buffer = kmalloc(buffer_size, GFP_KERNEL);
581 }
582
583 if (!buffer)
584 return -ENOMEM;
585 }
586
587 stream->runtime->buffer = buffer;
588 stream->runtime->buffer_size = buffer_size;
589 params:
590 stream->runtime->fragment_size = params->buffer.fragment_size;
591 stream->runtime->fragments = params->buffer.fragments;
592 return 0;
593 }
594
595 static int
snd_compress_check_input(struct snd_compr_stream * stream,struct snd_compr_params * params)596 snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
597 {
598 u32 max_fragments;
599
600 /* first let's check the buffer parameter's */
601 if (params->buffer.fragment_size == 0)
602 return -EINVAL;
603
604 if (stream->direction == SND_COMPRESS_ACCEL)
605 max_fragments = 64; /* safe value */
606 else
607 max_fragments = U32_MAX / params->buffer.fragment_size;
608
609 if (params->buffer.fragments > max_fragments ||
610 params->buffer.fragments == 0)
611 return -EINVAL;
612
613 /* now codec parameters */
614 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
615 return -EINVAL;
616
617 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
618 return -EINVAL;
619
620 return 0;
621 }
622
/*
 * SNDRV_COMPRESS_SET_PARAMS ioctl: validate parameters, allocate the
 * ring buffer and hand the codec parameters to the driver.
 *
 * Permitted only right after open (OPEN state), or at any point once a
 * next track has been signalled for gapless playback; in the gapless
 * case the state machine and flags are deliberately left untouched.
 */
static int
snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_params *params __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
		/*
		 * we should allow parameter change only when stream has been
		 * opened not in other cases
		 */
		params = memdup_user((void __user *)arg, sizeof(*params));
		if (IS_ERR(params))
			return PTR_ERR(params);

		retval = snd_compress_check_input(stream, params);
		if (retval)
			return retval;

		retval = snd_compr_allocate_buffer(stream, params);
		if (retval)
			return -ENOMEM;

		retval = stream->ops->set_params(stream, params);
		if (retval)
			return retval;

		/* gapless next-track param change: keep state and flags */
		if (stream->next_track)
			return retval;

		stream->metadata_set = false;
		stream->next_track = false;

		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
	} else {
		return -EPERM;
	}
	return retval;
}
662
663 static int
snd_compr_get_params(struct snd_compr_stream * stream,unsigned long arg)664 snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
665 {
666 struct snd_codec *params __free(kfree) = NULL;
667 int retval;
668
669 if (!stream->ops->get_params)
670 return -EBADFD;
671
672 params = kzalloc(sizeof(*params), GFP_KERNEL);
673 if (!params)
674 return -ENOMEM;
675 retval = stream->ops->get_params(stream, params);
676 if (retval)
677 return retval;
678 if (copy_to_user((char __user *)arg, params, sizeof(*params)))
679 return -EFAULT;
680 return retval;
681 }
682
683 static int
snd_compr_get_metadata(struct snd_compr_stream * stream,unsigned long arg)684 snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
685 {
686 struct snd_compr_metadata metadata;
687 int retval;
688
689 if (!stream->ops->get_metadata)
690 return -ENXIO;
691
692 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
693 return -EFAULT;
694
695 retval = stream->ops->get_metadata(stream, &metadata);
696 if (retval != 0)
697 return retval;
698
699 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
700 return -EFAULT;
701
702 return 0;
703 }
704
705 static int
snd_compr_set_metadata(struct snd_compr_stream * stream,unsigned long arg)706 snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
707 {
708 struct snd_compr_metadata metadata;
709 int retval;
710
711 if (!stream->ops->set_metadata)
712 return -ENXIO;
713 /*
714 * we should allow parameter change only when stream has been
715 * opened not in other cases
716 */
717 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
718 return -EFAULT;
719
720 retval = stream->ops->set_metadata(stream, &metadata);
721 stream->metadata_set = true;
722
723 return retval;
724 }
725
726 static inline int
snd_compr_tstamp(struct snd_compr_stream * stream,unsigned long arg)727 snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
728 {
729 struct snd_compr_tstamp tstamp = {0};
730 int ret;
731
732 ret = snd_compr_update_tstamp(stream, &tstamp);
733 if (ret == 0)
734 ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
735 &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
736 return ret;
737 }
738
/*
 * SNDRV_COMPRESS_PAUSE ioctl.
 *
 * A running stream pauses normally (state -> PAUSED).  A draining
 * stream may be paused only when the driver opted in via
 * use_pause_in_draining; that case is tracked with the
 * pause_in_draining flag and the state stays DRAINING.
 */
static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->device->use_pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->pause_in_draining = true;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
761
/*
 * SNDRV_COMPRESS_RESUME ioctl.
 *
 * Releases a regular pause (back to RUNNING), or a pause taken while
 * draining (clears pause_in_draining; the state remains DRAINING).
 */
static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_PAUSED:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->pause_in_draining = false;
		break;
	default:
		return -EPERM;
	}
	return retval;
}
784
snd_compr_start(struct snd_compr_stream * stream)785 static int snd_compr_start(struct snd_compr_stream *stream)
786 {
787 int retval;
788
789 switch (stream->runtime->state) {
790 case SNDRV_PCM_STATE_SETUP:
791 if (stream->direction != SND_COMPRESS_CAPTURE)
792 return -EPERM;
793 break;
794 case SNDRV_PCM_STATE_PREPARED:
795 break;
796 default:
797 return -EPERM;
798 }
799
800 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
801 if (!retval)
802 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
803 return retval;
804 }
805
/*
 * SNDRV_COMPRESS_STOP ioctl.
 *
 * Legal from any active state.  On success the gapless/pause flags are
 * cleared, any drain waiter is woken via snd_compr_drain_notify()
 * (which presumably also resets the state -- defined in the header,
 * confirm there) and the byte counters are zeroed.
 */
static int snd_compr_stop(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		return -EPERM;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	if (!retval) {
		/* clear flags and stop any drain wait */
		stream->partial_drain = false;
		stream->metadata_set = false;
		stream->pause_in_draining = false;
		snd_compr_drain_notify(stream);
		stream->runtime->total_bytes_available = 0;
		stream->runtime->total_bytes_transferred = 0;
	}
	return retval;
}
831
/*
 * Deferred fatal-error handler, queued by snd_compr_stop_error().
 * Takes the device lock (the error may have been raised from a context
 * already holding it), stops the stream and wakes any sleeper.
 */
static void error_delayed_work(struct work_struct *work)
{
	struct snd_compr_stream *stream;

	stream = container_of(work, struct snd_compr_stream, error_work.work);

	guard(mutex)(&stream->device->lock);

	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	wake_up(&stream->runtime->sleep);
}
843
/**
 * snd_compr_stop_error: Report a fatal error on a stream
 * @stream: pointer to stream
 * @state: state to transition the stream to
 *
 * Stop the stream and set its state.
 *
 * Should be called with compressed device lock held.
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compr_stop_error(struct snd_compr_stream *stream,
			 snd_pcm_state_t state)
{
	if (stream->runtime->state == state)
		return 0;

	stream->runtime->state = state;

	pr_debug("Changing state to: %d\n", state);

	/* the actual stop is deferred to a worker because the caller
	 * already holds the device lock (see error_delayed_work())
	 */
	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_compr_stop_error);
870
/*
 * Block until the driver signals drain completion (state leaves
 * DRAINING) or a signal arrives.  The device lock is dropped across the
 * wait and re-acquired before returning, so callers must re-validate
 * any state cached across this call.
 */
static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
	int ret;

	/*
	 * We are called with lock held. So drop the lock while we wait for
	 * drain complete notification from the driver
	 *
	 * It is expected that driver will notify the drain completion and then
	 * stream will be moved to SETUP state, even if draining resulted in an
	 * error. We can trigger next track after this.
	 */
	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
	mutex_unlock(&stream->device->lock);

	/* we wait for drain to complete here, drain can return when
	 * interruption occurred, wait returned error or success.
	 * For the first two cases we don't do anything different here and
	 * return after waking up
	 */

	ret = wait_event_interruptible(stream->runtime->sleep,
			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
	if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
	else if (ret)
		pr_debug("wait for drain failed with %d\n", ret);


	wake_up(&stream->runtime->sleep);
	mutex_lock(&stream->device->lock);

	return ret;
}
905
/*
 * SNDRV_COMPRESS_DRAIN ioctl: ask the driver to drain all queued data
 * and block until completion (see snd_compress_wait_for_drain()).
 * Rejected before the stream has ever run or while paused; XRUN
 * reports -EPIPE.
 */
static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	if (retval) {
		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
		/* wake any poller stuck on the sleep queue */
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	return snd_compress_wait_for_drain(stream);
}
931
snd_compr_next_track(struct snd_compr_stream * stream)932 static int snd_compr_next_track(struct snd_compr_stream *stream)
933 {
934 int retval;
935
936 /* only a running stream can transition to next track */
937 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
938 return -EPERM;
939
940 /* next track doesn't have any meaning for capture streams */
941 if (stream->direction == SND_COMPRESS_CAPTURE)
942 return -EPERM;
943
944 /* you can signal next track if this is intended to be a gapless stream
945 * and current track metadata is set
946 */
947 if (stream->metadata_set == false)
948 return -EPERM;
949
950 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
951 if (retval != 0)
952 return retval;
953 stream->metadata_set = false;
954 stream->next_track = true;
955 return 0;
956 }
957
/*
 * SNDRV_COMPRESS_PARTIAL_DRAIN ioctl: drain up to the end of the
 * current track in a gapless sequence.  Playback-only, and legal only
 * after next-track has been signalled.  Blocks like a full drain.
 */
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	/* the transition is consumed; the next track starts after drain */
	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}
993
994 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
995
996 static struct snd_compr_task_runtime *
snd_compr_find_task(struct snd_compr_stream * stream,__u64 seqno)997 snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
998 {
999 struct snd_compr_task_runtime *task;
1000
1001 list_for_each_entry(task, &stream->runtime->tasks, list) {
1002 if (task->seqno == seqno)
1003 return task;
1004 }
1005 return NULL;
1006 }
1007
/* Drop the task's dma-buf references (if present) and free it. */
static void snd_compr_task_free(struct snd_compr_task_runtime *task)
{
	if (task->output)
		dma_buf_put(task->output);
	if (task->input)
		dma_buf_put(task->input);
	kfree(task);
}
1016
snd_compr_seqno_next(struct snd_compr_stream * stream)1017 static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1018 {
1019 u64 seqno = ++stream->runtime->task_seqno;
1020 if (seqno == 0)
1021 seqno = ++stream->runtime->task_seqno;
1022 return seqno;
1023 }
1024
/*
 * Create one accel task: the driver allocates the input/output
 * dma-bufs, fds are installed for userspace, and the task is queued on
 * the runtime list.
 *
 * NOTE(review): if installing the output fd fails after the input fd
 * was already installed, the cleanup path drops the buffer references
 * via snd_compr_task_free() while userspace still holds the input fd --
 * verify the refcounting cannot underflow here.  Also dma_buf_get()'s
 * return value is discarded; presumably only the extra reference on
 * the fd-installed buffer is wanted -- confirm against the dma-buf API.
 */
static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
{
	struct snd_compr_task_runtime *task;
	int retval;

	/* cap outstanding tasks at the configured fragment count */
	if (stream->runtime->total_tasks >= stream->runtime->fragments)
		return -EBUSY;
	/* a new task must not carry a previous seqno or input size */
	if (utask->origin_seqno != 0 || utask->input_size != 0)
		return -EINVAL;
	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (task == NULL)
		return -ENOMEM;
	task->seqno = utask->seqno = snd_compr_seqno_next(stream);
	task->input_size = utask->input_size;
	retval = stream->ops->task_create(stream, task);
	if (retval < 0)
		goto cleanup;
	utask->input_fd = dma_buf_fd(task->input, O_WRONLY|O_CLOEXEC);
	if (utask->input_fd < 0) {
		retval = utask->input_fd;
		goto cleanup;
	}
	utask->output_fd = dma_buf_fd(task->output, O_RDONLY|O_CLOEXEC);
	if (utask->output_fd < 0) {
		retval = utask->output_fd;
		goto cleanup;
	}
	/* keep dmabuf reference until freed with task free ioctl */
	dma_buf_get(utask->input_fd);
	dma_buf_get(utask->output_fd);
	list_add_tail(&task->list, &stream->runtime->tasks);
	stream->runtime->total_tasks++;
	return 0;
cleanup:
	snd_compr_task_free(task);
	return retval;
}
1062
/*
 * SNDRV_COMPRESS_TASK_CREATE ioctl handler: copy the task description from
 * userspace, create the task, and copy the updated description (new seqno
 * plus the installed dma-buf fds) back to the caller.
 */
static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	/* tasks may only be created once stream parameters are set (SETUP) */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		/* no_free_ptr(): never kfree() an error-encoded pointer */
		return PTR_ERR(no_free_ptr(task));
	retval = snd_compr_task_new(stream, task);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}
1079
/*
 * Validate a task before (re)starting it and copy the user-supplied
 * parameters into the runtime task.  Returns 0 on success or a negative
 * error code (-EINVAL for a missing/oversized task, -EBUSY for a task
 * already in FINISHED or a later state).
 */
static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
					struct snd_compr_task *utask)
{
	if (task == NULL)
		return -EINVAL;
	if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
		return -EBUSY;
	/* the input payload must fit into the input dma-buf */
	if (utask->input_size > task->input->size)
		return -EINVAL;
	task->flags = utask->flags;
	task->input_size = utask->input_size;
	task->state = SND_COMPRESS_TASK_STATE_IDLE;
	return 0;
}
1094
/*
 * Start a task.  With a non-zero origin_seqno, an existing task is reused:
 * it gets a fresh seqno and moves to the list tail.  Otherwise the task
 * identified by utask->seqno is started, provided it is currently idle.
 */
static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
{
	struct snd_compr_task_runtime *task;
	int retval;

	if (utask->origin_seqno > 0) {
		/* reuse path: look up by the original seqno, then renumber */
		task = snd_compr_find_task(stream, utask->origin_seqno);
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
		task->seqno = utask->seqno = snd_compr_seqno_next(stream);
		utask->origin_seqno = 0;
		/* keep the task list in submission order */
		list_move_tail(&task->list, &stream->runtime->tasks);
	} else {
		task = snd_compr_find_task(stream, utask->seqno);
		if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
			return -EBUSY;
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
	}
	retval = stream->ops->task_start(stream, task);
	if (retval >= 0) {
		/* only account the task as active once the driver accepted it */
		task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
		stream->runtime->active_tasks++;
	}
	return retval;
}
1123
/*
 * SNDRV_COMPRESS_TASK_START ioctl handler: copy the task description from
 * userspace, start the task, and write the possibly-updated description
 * (new seqno on the reuse path) back to the caller.
 */
static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	/* task ioctls are only valid in the SETUP state */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		/* no_free_ptr(): never kfree() an error-encoded pointer */
		return PTR_ERR(no_free_ptr(task));
	retval = snd_compr_task_start(stream, task);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}
1140
/*
 * Stop a single active task: ask the driver to stop it, drop the active
 * count, move the task to the list tail and mark it idle.  Non-active
 * tasks are left untouched.
 */
static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
		return;
	stream->ops->task_stop(stream, task);
	/* guard against an underflow of the active task counter */
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	list_move_tail(&task->list, &stream->runtime->tasks);
	task->state = SND_COMPRESS_TASK_STATE_IDLE;
}
1152
/*
 * Stop (if active) and fully destroy a single task: driver-side cleanup,
 * unlink from the stream list, release dma-buf references and memory,
 * then decrement the total task count.
 */
static void snd_compr_task_free_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	snd_compr_task_stop_one(stream, task);
	stream->ops->task_free(stream, task);
	list_del(&task->list);
	snd_compr_task_free(task);
	stream->runtime->total_tasks--;
}
1162
snd_compr_task_free_all(struct snd_compr_stream * stream)1163 static void snd_compr_task_free_all(struct snd_compr_stream *stream)
1164 {
1165 struct snd_compr_task_runtime *task, *temp;
1166
1167 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1168 snd_compr_task_free_one(stream, task);
1169 }
1170
1171 typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
1172 struct snd_compr_task_runtime *task);
1173
/*
 * Apply @fcn to one task (identified by the 64-bit seqno read from @arg) or,
 * when the seqno is 0, to all tasks in reverse order.
 *
 * Two fixes versus the previous version:
 *  - a 64-bit get_user() is not supported on all architectures, so the
 *    sequence number is fetched with copy_from_user() instead;
 *  - @fcn may be snd_compr_task_free_one(), which unlinks and frees the
 *    current entry, so the "all tasks" walk must use the _safe iterator
 *    to avoid a use-after-free.
 */
static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
			      snd_compr_seq_func_t fcn)
{
	struct snd_compr_task_runtime *task, *temp;
	__u64 seqno;

	/* task ioctls are only valid in the SETUP state */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	if (copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno)))
		return -EFAULT;
	if (seqno == 0) {
		/* seqno 0 means "all tasks"; iterate safely, fcn may free */
		list_for_each_entry_safe_reverse(task, temp,
						 &stream->runtime->tasks, list)
			fcn(stream, task);
		return 0;
	}
	task = snd_compr_find_task(stream, seqno);
	if (task == NULL)
		return -EINVAL;
	fcn(stream, task);
	return 0;
}
1200
snd_compr_task_status(struct snd_compr_stream * stream,struct snd_compr_task_status * status)1201 static int snd_compr_task_status(struct snd_compr_stream *stream,
1202 struct snd_compr_task_status *status)
1203 {
1204 struct snd_compr_task_runtime *task;
1205
1206 task = snd_compr_find_task(stream, status->seqno);
1207 if (task == NULL)
1208 return -EINVAL;
1209 status->input_size = task->input_size;
1210 status->output_size = task->output_size;
1211 status->state = task->state;
1212 return 0;
1213 }
1214
/*
 * SNDRV_COMPRESS_TASK_STATUS ioctl handler: copy the query from userspace,
 * fill in the task status and copy the result back.
 */
static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task_status *status __free(kfree) = NULL;
	int retval;

	/* task ioctls are only valid in the SETUP state */
	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	status = memdup_user((void __user *)arg, sizeof(*status));
	if (IS_ERR(status))
		/* no_free_ptr(): never kfree() an error-encoded pointer */
		return PTR_ERR(no_free_ptr(status));
	retval = snd_compr_task_status(stream, status);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, status, sizeof(*status)))
			retval = -EFAULT;
	return retval;
}
1231
/**
 * snd_compr_task_finished - Notify that the task was finished
 * @stream: pointer to stream
 * @task: runtime task structure
 *
 * Set the finished task state, decrement the active task count and wake up
 * any waiters sleeping on the stream.  Takes the device mutex, so this must
 * be called from a context that may sleep.
 */
void snd_compr_task_finished(struct snd_compr_stream *stream,
			     struct snd_compr_task_runtime *task)
{
	guard(mutex)(&stream->device->lock);
	/* guard against an underflow of the active task counter */
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	task->state = SND_COMPRESS_TASK_STATE_FINISHED;
	wake_up(&stream->runtime->sleep);
}
EXPORT_SYMBOL_GPL(snd_compr_task_finished);
1249
1250 #endif /* CONFIG_SND_COMPRESS_ACCEL */
1251
/*
 * Main ioctl dispatcher for the compress device.  Generic ioctls are handled
 * first; then accel streams accept only the TASK_* ioctls, while
 * playback/capture streams accept the stream-control ioctls.  All handlers
 * run under the per-device mutex.
 */
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;

	guard(mutex)(&stream->device->lock);
	/* ioctls common to all stream directions */
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		return put_user(SNDRV_COMPRESS_VERSION,
				(int __user *)arg) ? -EFAULT : 0;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		return snd_compr_get_caps(stream, arg);
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		return snd_compr_get_codec_caps(stream, arg);
#endif
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		return snd_compr_set_params(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		return snd_compr_get_params(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		return snd_compr_set_metadata(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		return snd_compr_get_metadata(stream, arg);
	}

	/* accel streams understand only the TASK_* ioctls */
	if (stream->direction == SND_COMPRESS_ACCEL) {
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
		switch (_IOC_NR(cmd)) {
		case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
			return snd_compr_task_create(stream, arg);
		case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
		case _IOC_NR(SNDRV_COMPRESS_TASK_START):
			return snd_compr_task_start_ioctl(stream, arg);
		case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
		case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
			return snd_compr_task_status_ioctl(stream, arg);
		}
#endif
		return -ENOTTY;
	}

	/* stream-control ioctls for playback/capture streams */
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		return snd_compr_tstamp(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		return snd_compr_ioctl_avail(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		return snd_compr_pause(stream);
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		return snd_compr_resume(stream);
	case _IOC_NR(SNDRV_COMPRESS_START):
		return snd_compr_start(stream);
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		return snd_compr_stop(stream);
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		return snd_compr_drain(stream);
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		return snd_compr_partial_drain(stream);
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		return snd_compr_next_track(stream);
	}

	return -ENOTTY;
}
1324
1325 /* support of 32bit userspace on 64bit platforms */
1326 #ifdef CONFIG_COMPAT
/* Compat ioctl: only the user pointer needs translation; the commands and
 * payload layouts are identical for 32-bit and 64-bit userspace. */
static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
1332 #endif
1333
/* file operations for the compress character device */
static const struct file_operations snd_compr_file_ops = {
		.owner =	THIS_MODULE,
		.open =		snd_compr_open,
		.release =	snd_compr_free,
		.write =	snd_compr_write,
		.read =		snd_compr_read,
		.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
		.compat_ioctl = snd_compr_ioctl_compat,
#endif
		.mmap =		snd_compr_mmap,
		.poll =		snd_compr_poll,
};
1347
snd_compress_dev_register(struct snd_device * device)1348 static int snd_compress_dev_register(struct snd_device *device)
1349 {
1350 int ret;
1351 struct snd_compr *compr;
1352
1353 if (snd_BUG_ON(!device || !device->device_data))
1354 return -EBADFD;
1355 compr = device->device_data;
1356
1357 pr_debug("reg device %s, direction %d\n", compr->name,
1358 compr->direction);
1359 /* register compressed device */
1360 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1361 compr->card, compr->device,
1362 &snd_compr_file_ops, compr, compr->dev);
1363 if (ret < 0) {
1364 pr_err("snd_register_device failed %d\n", ret);
1365 return ret;
1366 }
1367 return ret;
1368
1369 }
1370
snd_compress_dev_disconnect(struct snd_device * device)1371 static int snd_compress_dev_disconnect(struct snd_device *device)
1372 {
1373 struct snd_compr *compr;
1374
1375 compr = device->device_data;
1376 snd_unregister_device(compr->dev);
1377 return 0;
1378 }
1379
1380 #ifdef CONFIG_SND_VERBOSE_PROCFS
snd_compress_proc_info_read(struct snd_info_entry * entry,struct snd_info_buffer * buffer)1381 static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1382 struct snd_info_buffer *buffer)
1383 {
1384 struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1385
1386 snd_iprintf(buffer, "card: %d\n", compr->card->number);
1387 snd_iprintf(buffer, "device: %d\n", compr->device);
1388 snd_iprintf(buffer, "stream: %s\n",
1389 compr->direction == SND_COMPRESS_PLAYBACK
1390 ? "PLAYBACK" : "CAPTURE");
1391 snd_iprintf(buffer, "id: %s\n", compr->id);
1392 }
1393
snd_compress_proc_init(struct snd_compr * compr)1394 static int snd_compress_proc_init(struct snd_compr *compr)
1395 {
1396 struct snd_info_entry *entry;
1397 char name[16];
1398
1399 sprintf(name, "compr%i", compr->device);
1400 entry = snd_info_create_card_entry(compr->card, name,
1401 compr->card->proc_root);
1402 if (!entry)
1403 return -ENOMEM;
1404 entry->mode = S_IFDIR | 0555;
1405 compr->proc_root = entry;
1406
1407 entry = snd_info_create_card_entry(compr->card, "info",
1408 compr->proc_root);
1409 if (entry)
1410 snd_info_set_text_ops(entry, compr,
1411 snd_compress_proc_info_read);
1412 compr->proc_info_entry = entry;
1413
1414 return 0;
1415 }
1416
/* Tear down the procfs entries: the "info" child first, then the directory. */
static void snd_compress_proc_done(struct snd_compr *compr)
{
	snd_info_free_entry(compr->proc_info_entry);
	compr->proc_info_entry = NULL;
	snd_info_free_entry(compr->proc_root);
	compr->proc_root = NULL;
}
1424
/* Copy the ID string into the device, truncating to the id field size. */
static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
	strscpy(compr->id, id, sizeof(compr->id));
}
1429 #else
/* no-op stubs when CONFIG_SND_VERBOSE_PROCFS is disabled */
static inline int snd_compress_proc_init(struct snd_compr *compr)
{
	return 0;
}

static inline void snd_compress_proc_done(struct snd_compr *compr)
{
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
}
1442 #endif
1443
snd_compress_dev_free(struct snd_device * device)1444 static int snd_compress_dev_free(struct snd_device *device)
1445 {
1446 struct snd_compr *compr;
1447
1448 compr = device->device_data;
1449 snd_compress_proc_done(compr);
1450 put_device(compr->dev);
1451 return 0;
1452 }
1453
1454 /**
1455 * snd_compress_new: create new compress device
1456 * @card: sound card pointer
1457 * @device: device number
1458 * @dirn: device direction, should be of type enum snd_compr_direction
1459 * @id: ID string
1460 * @compr: compress device pointer
1461 *
1462 * Return: zero if successful, or a negative error code
1463 */
snd_compress_new(struct snd_card * card,int device,int dirn,const char * id,struct snd_compr * compr)1464 int snd_compress_new(struct snd_card *card, int device,
1465 int dirn, const char *id, struct snd_compr *compr)
1466 {
1467 static const struct snd_device_ops ops = {
1468 .dev_free = snd_compress_dev_free,
1469 .dev_register = snd_compress_dev_register,
1470 .dev_disconnect = snd_compress_dev_disconnect,
1471 };
1472 int ret;
1473
1474 #if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1475 if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
1476 return -EINVAL;
1477 #endif
1478
1479 compr->card = card;
1480 compr->device = device;
1481 compr->direction = dirn;
1482 mutex_init(&compr->lock);
1483
1484 snd_compress_set_id(compr, id);
1485
1486 ret = snd_device_alloc(&compr->dev, card);
1487 if (ret)
1488 return ret;
1489 dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1490
1491 ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1492 if (ret == 0)
1493 snd_compress_proc_init(compr);
1494 else
1495 put_device(compr->dev);
1496
1497 return ret;
1498 }
1499 EXPORT_SYMBOL_GPL(snd_compress_new);
1500
/* module metadata */
MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");
1504