1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * compress_core.c - compress offload core 4 * 5 * Copyright (C) 2011 Intel Corporation 6 * Authors: Vinod Koul <vinod.koul@linux.intel.com> 7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com> 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 */ 12 #define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__ 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt) 14 15 #include <linux/file.h> 16 #include <linux/fs.h> 17 #include <linux/list.h> 18 #include <linux/math64.h> 19 #include <linux/mm.h> 20 #include <linux/mutex.h> 21 #include <linux/poll.h> 22 #include <linux/slab.h> 23 #include <linux/sched.h> 24 #include <linux/types.h> 25 #include <linux/uio.h> 26 #include <linux/uaccess.h> 27 #include <linux/dma-buf.h> 28 #include <linux/module.h> 29 #include <linux/compat.h> 30 #include <sound/core.h> 31 #include <sound/initval.h> 32 #include <sound/info.h> 33 #include <sound/compress_params.h> 34 #include <sound/compress_offload.h> 35 #include <sound/compress_driver.h> 36 37 /* struct snd_compr_codec_caps overflows the ioctl bit size for some 38 * architectures, so we need to disable the relevant ioctls. 39 */ 40 #if _IOC_SIZEBITS < 14 41 #define COMPR_CODEC_CAPS_OVERFLOW 42 #endif 43 44 struct snd_compr_file { 45 unsigned long caps; 46 struct snd_compr_stream stream; 47 }; 48 49 static void error_delayed_work(struct work_struct *work); 50 51 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 52 static void snd_compr_task_free_all(struct snd_compr_stream *stream); 53 #else 54 static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { } 55 #endif 56 57 /* 58 * a note on stream states used: 59 * we use following states in the compressed core 60 * SNDRV_PCM_STATE_OPEN: When stream has been opened. 61 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by 62 * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this 63 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain. 64 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for 65 * playback only). User after setting up stream writes the data buffer 66 * before starting the stream. 67 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is 68 * decoding/encoding and rendering/capturing data. 69 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done 70 * by calling SNDRV_COMPRESS_DRAIN. 71 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling 72 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling 73 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively. 74 */ 75 static int snd_compr_open(struct inode *inode, struct file *f) 76 { 77 struct snd_compr *compr; 78 struct snd_compr_file *data; 79 struct snd_compr_runtime *runtime; 80 enum snd_compr_direction dirn; 81 int maj = imajor(inode); 82 int ret; 83 84 if ((f->f_flags & O_ACCMODE) == O_WRONLY) 85 dirn = SND_COMPRESS_PLAYBACK; 86 else if ((f->f_flags & O_ACCMODE) == O_RDONLY) 87 dirn = SND_COMPRESS_CAPTURE; 88 else if ((f->f_flags & O_ACCMODE) == O_RDWR) 89 dirn = SND_COMPRESS_ACCEL; 90 else 91 return -EINVAL; 92 93 if (maj == snd_major) 94 compr = snd_lookup_minor_data(iminor(inode), 95 SNDRV_DEVICE_TYPE_COMPRESS); 96 else 97 return -EBADFD; 98 99 if (compr == NULL) { 100 pr_err("no device data!!!\n"); 101 return -ENODEV; 102 } 103 104 if (dirn != compr->direction) { 105 pr_err("this device doesn't support this direction\n"); 106 snd_card_unref(compr->card); 107 return -EINVAL; 108 } 109 110 data = kzalloc(sizeof(*data), GFP_KERNEL); 111 if (!data) { 112 snd_card_unref(compr->card); 113 return -ENOMEM; 114 } 115 116 INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work); 117 118 data->stream.ops = compr->ops; 119 data->stream.direction = dirn; 120 data->stream.private_data = compr->private_data; 121 data->stream.device = compr; 122 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); 123 if (!runtime) { 124 kfree(data); 125 snd_card_unref(compr->card); 126 return -ENOMEM; 127 } 128 runtime->state = SNDRV_PCM_STATE_OPEN; 129 init_waitqueue_head(&runtime->sleep); 130 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 131 INIT_LIST_HEAD(&runtime->tasks); 132 #endif 133 data->stream.runtime = runtime; 134 f->private_data = (void *)data; 135 scoped_guard(mutex, &compr->lock) 136 ret = compr->ops->open(&data->stream); 137 if (ret) { 138 kfree(runtime); 139 kfree(data); 140 } 141 snd_card_unref(compr->card); 142 return ret; 143 } 144 145 static int snd_compr_free(struct inode *inode, struct file *f) 146 { 147 struct snd_compr_file *data = f->private_data; 148 struct snd_compr_runtime *runtime = data->stream.runtime; 149 150 cancel_delayed_work_sync(&data->stream.error_work); 151 152 switch (runtime->state) { 153 case SNDRV_PCM_STATE_RUNNING: 154 case SNDRV_PCM_STATE_DRAINING: 155 case SNDRV_PCM_STATE_PAUSED: 156 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP); 157 break; 158 default: 159 break; 160 } 161 162 snd_compr_task_free_all(&data->stream); 163 164 data->stream.ops->free(&data->stream); 165 if (!data->stream.runtime->dma_buffer_p) 166 kfree(data->stream.runtime->buffer); 167 kfree(data->stream.runtime); 168 kfree(data); 169 return 0; 170 } 171 172 static void 173 snd_compr_tstamp32_from_64(struct snd_compr_tstamp *tstamp32, 174 const struct snd_compr_tstamp64 *tstamp64) 175 { 176 tstamp32->byte_offset = tstamp64->byte_offset; 177 tstamp32->copied_total = (u32)tstamp64->copied_total; 178 tstamp32->pcm_frames = (u32)tstamp64->pcm_frames; 179 tstamp32->pcm_io_frames = (u32)tstamp64->pcm_io_frames; 180 tstamp32->sampling_rate = tstamp64->sampling_rate; 181 } 182 183 static int snd_compr_update_tstamp(struct snd_compr_stream *stream, 184 struct snd_compr_tstamp64 *tstamp) 185 { 186 int ret; 187 188 if (!stream->ops->pointer) 189 return -ENOTSUPP; 190 191 switch (stream->runtime->state) { 192 case SNDRV_PCM_STATE_OPEN: 193 return -EBADFD; 194 default: 195 break; 196 } 197 198 ret = stream->ops->pointer(stream, tstamp); 199 if (ret != 0) 200 return ret; 201 pr_debug("dsp consumed till %u total %llu bytes\n", tstamp->byte_offset, 202 tstamp->copied_total); 203 if (stream->direction == SND_COMPRESS_PLAYBACK) 204 stream->runtime->total_bytes_transferred = tstamp->copied_total; 205 else 206 stream->runtime->total_bytes_available = tstamp->copied_total; 207 return 0; 208 } 209 210 static size_t snd_compr_calc_avail(struct snd_compr_stream *stream, 211 struct snd_compr_avail64 *avail) 212 { 213 memset(avail, 0, sizeof(*avail)); 214 snd_compr_update_tstamp(stream, &avail->tstamp); 215 /* Still need to return avail even if tstamp can't be filled in */ 216 217 if (stream->runtime->total_bytes_available == 0 && 218 stream->runtime->state == SNDRV_PCM_STATE_SETUP && 219 stream->direction == SND_COMPRESS_PLAYBACK) { 220 pr_debug("detected init and someone forgot to do a write\n"); 221 return stream->runtime->buffer_size; 222 } 223 pr_debug("app wrote %llu, DSP consumed %llu\n", 224 stream->runtime->total_bytes_available, 225 stream->runtime->total_bytes_transferred); 226 if (stream->runtime->total_bytes_available == 227 stream->runtime->total_bytes_transferred) { 228 if (stream->direction == SND_COMPRESS_PLAYBACK) { 229 pr_debug("both pointers are same, returning full avail\n"); 230 return stream->runtime->buffer_size; 231 } else { 232 pr_debug("both pointers are same, returning no avail\n"); 233 return 0; 234 } 235 } 236 237 avail->avail = stream->runtime->total_bytes_available - 238 stream->runtime->total_bytes_transferred; 239 if (stream->direction == SND_COMPRESS_PLAYBACK) 240 avail->avail = stream->runtime->buffer_size - avail->avail; 241 242 pr_debug("ret avail as %zu\n", (size_t)avail->avail); 243 return avail->avail; 244 } 245 246 static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream) 247 { 248 struct snd_compr_avail64 avail; 249 250 return snd_compr_calc_avail(stream, &avail); 251 } 252 253 static void snd_compr_avail32_from_64(struct snd_compr_avail *avail32, 254 const struct snd_compr_avail64 *avail64) 255 { 256 avail32->avail = avail64->avail; 257 snd_compr_tstamp32_from_64(&avail32->tstamp, &avail64->tstamp); 258 } 259 260 static int snd_compr_ioctl_avail(struct snd_compr_stream *stream, 261 unsigned long arg, bool is_32bit) 262 { 263 struct snd_compr_avail64 ioctl_avail64; 264 struct snd_compr_avail ioctl_avail32; 265 size_t avail; 266 const void *copy_from = &ioctl_avail64; 267 size_t copy_size = sizeof(ioctl_avail64); 268 269 if (stream->direction == SND_COMPRESS_ACCEL) 270 return -EBADFD; 271 272 avail = snd_compr_calc_avail(stream, &ioctl_avail64); 273 ioctl_avail64.avail = avail; 274 if (is_32bit) { 275 snd_compr_avail32_from_64(&ioctl_avail32, &ioctl_avail64); 276 copy_from = &ioctl_avail32; 277 copy_size = sizeof(ioctl_avail32); 278 } 279 280 switch (stream->runtime->state) { 281 case SNDRV_PCM_STATE_OPEN: 282 return -EBADFD; 283 case SNDRV_PCM_STATE_XRUN: 284 return -EPIPE; 285 default: 286 break; 287 } 288 289 if (copy_to_user((__u64 __user *)arg, copy_from, copy_size)) 290 return -EFAULT; 291 return 0; 292 } 293 294 static int snd_compr_write_data(struct snd_compr_stream *stream, 295 const char __user *buf, size_t count) 296 { 297 void *dstn; 298 size_t copy; 299 struct snd_compr_runtime *runtime = stream->runtime; 300 /* 64-bit Modulus */ 301 u64 app_pointer = div64_u64(runtime->total_bytes_available, 302 runtime->buffer_size); 303 app_pointer = runtime->total_bytes_available - 304 (app_pointer * runtime->buffer_size); 305 306 dstn = runtime->buffer + app_pointer; 307 pr_debug("copying %lu at %llu\n", (unsigned long)count, app_pointer); 308 if (count < runtime->buffer_size - app_pointer) { 309 if (copy_from_user(dstn, buf, count)) 310 return -EFAULT; 311 } else { 312 copy = runtime->buffer_size - app_pointer; 313 if (copy_from_user(dstn, buf, copy)) 314 return -EFAULT; 315 if (copy_from_user(runtime->buffer, buf + copy, count - copy)) 316 return -EFAULT; 317 } 318 /* if DSP cares, let it know data has been written */ 319 if (stream->ops->ack) 320 stream->ops->ack(stream, count); 321 return count; 322 } 323 324 static ssize_t snd_compr_write(struct file *f, const char __user *buf, 325 size_t count, loff_t *offset) 326 { 327 struct snd_compr_file *data = f->private_data; 328 struct snd_compr_stream *stream; 329 size_t avail; 330 int retval; 331 332 if (snd_BUG_ON(!data)) 333 return -EFAULT; 334 335 stream = &data->stream; 336 if (stream->direction == SND_COMPRESS_ACCEL) 337 return -EBADFD; 338 guard(mutex)(&stream->device->lock); 339 /* write is allowed when stream is running or has been setup */ 340 switch (stream->runtime->state) { 341 case SNDRV_PCM_STATE_SETUP: 342 case SNDRV_PCM_STATE_PREPARED: 343 case SNDRV_PCM_STATE_RUNNING: 344 break; 345 default: 346 return -EBADFD; 347 } 348 349 avail = snd_compr_get_avail(stream); 350 pr_debug("avail returned %lu\n", (unsigned long)avail); 351 /* calculate how much we can write to buffer */ 352 if (avail > count) 353 avail = count; 354 355 if (stream->ops->copy) { 356 char __user* cbuf = (char __user*)buf; 357 retval = stream->ops->copy(stream, cbuf, avail); 358 } else { 359 retval = snd_compr_write_data(stream, buf, avail); 360 } 361 if (retval > 0) 362 stream->runtime->total_bytes_available += retval; 363 364 /* while initiating the stream, write should be called before START 365 * call, so in setup move state */ 366 if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) { 367 stream->runtime->state = SNDRV_PCM_STATE_PREPARED; 368 pr_debug("stream prepared, Houston we are good to go\n"); 369 } 370 371 return retval; 372 } 373 374 375 static ssize_t snd_compr_read(struct file *f, char __user *buf, 376 size_t count, loff_t *offset) 377 { 378 struct snd_compr_file *data = f->private_data; 379 struct snd_compr_stream *stream; 380 size_t avail; 381 int retval; 382 383 if (snd_BUG_ON(!data)) 384 return -EFAULT; 385 386 stream = &data->stream; 387 if (stream->direction == SND_COMPRESS_ACCEL) 388 return -EBADFD; 389 guard(mutex)(&stream->device->lock); 390 391 /* read is allowed when stream is running, paused, draining and setup 392 * (yes setup is state which we transition to after stop, so if user 393 * wants to read data after stop we allow that) 394 */ 395 switch (stream->runtime->state) { 396 case SNDRV_PCM_STATE_OPEN: 397 case SNDRV_PCM_STATE_PREPARED: 398 case SNDRV_PCM_STATE_SUSPENDED: 399 case SNDRV_PCM_STATE_DISCONNECTED: 400 return -EBADFD; 401 case SNDRV_PCM_STATE_XRUN: 402 return -EPIPE; 403 } 404 405 avail = snd_compr_get_avail(stream); 406 pr_debug("avail returned %lu\n", (unsigned long)avail); 407 /* calculate how much we can read from buffer */ 408 if (avail > count) 409 avail = count; 410 411 if (stream->ops->copy) 412 retval = stream->ops->copy(stream, buf, avail); 413 else 414 return -ENXIO; 415 if (retval > 0) 416 stream->runtime->total_bytes_transferred += retval; 417 418 return retval; 419 } 420 421 static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma) 422 { 423 return -ENXIO; 424 } 425 426 static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream) 427 { 428 if (stream->direction == SND_COMPRESS_PLAYBACK) 429 return EPOLLOUT | EPOLLWRNORM; 430 else 431 return EPOLLIN | EPOLLRDNORM; 432 } 433 434 static __poll_t snd_compr_poll(struct file *f, poll_table *wait) 435 { 436 struct snd_compr_file *data = f->private_data; 437 struct snd_compr_stream *stream; 438 struct snd_compr_runtime *runtime; 439 size_t avail; 440 __poll_t retval = 0; 441 442 if (snd_BUG_ON(!data)) 443 return EPOLLERR; 444 445 stream = &data->stream; 446 runtime = stream->runtime; 447 448 guard(mutex)(&stream->device->lock); 449 450 switch (runtime->state) { 451 case SNDRV_PCM_STATE_OPEN: 452 case SNDRV_PCM_STATE_XRUN: 453 return snd_compr_get_poll(stream) | EPOLLERR; 454 default: 455 break; 456 } 457 458 poll_wait(f, &runtime->sleep, wait); 459 460 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 461 if (stream->direction == SND_COMPRESS_ACCEL) { 462 struct snd_compr_task_runtime *task; 463 if (runtime->fragments > runtime->active_tasks) 464 retval |= EPOLLOUT | EPOLLWRNORM; 465 task = list_first_entry_or_null(&runtime->tasks, 466 struct snd_compr_task_runtime, 467 list); 468 if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED) 469 retval |= EPOLLIN | EPOLLRDNORM; 470 return retval; 471 } 472 #endif 473 474 avail = snd_compr_get_avail(stream); 475 pr_debug("avail is %lu\n", (unsigned long)avail); 476 /* check if we have at least one fragment to fill */ 477 switch (runtime->state) { 478 case SNDRV_PCM_STATE_DRAINING: 479 /* stream has been woken up after drain is complete 480 * draining done so set stream state to stopped 481 */ 482 retval = snd_compr_get_poll(stream); 483 runtime->state = SNDRV_PCM_STATE_SETUP; 484 break; 485 case SNDRV_PCM_STATE_RUNNING: 486 case SNDRV_PCM_STATE_PREPARED: 487 case SNDRV_PCM_STATE_PAUSED: 488 if (avail >= runtime->fragment_size) 489 retval = snd_compr_get_poll(stream); 490 break; 491 default: 492 return snd_compr_get_poll(stream) | EPOLLERR; 493 } 494 495 return retval; 496 } 497 498 static int 499 snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg) 500 { 501 int retval; 502 struct snd_compr_caps caps; 503 504 if (!stream->ops->get_caps) 505 return -ENXIO; 506 507 memset(&caps, 0, sizeof(caps)); 508 retval = stream->ops->get_caps(stream, &caps); 509 if (retval) 510 goto out; 511 if (copy_to_user((void __user *)arg, &caps, sizeof(caps))) 512 retval = -EFAULT; 513 out: 514 return retval; 515 } 516 517 #ifndef COMPR_CODEC_CAPS_OVERFLOW 518 static int 519 snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) 520 { 521 int retval; 522 523 if (!stream->ops->get_codec_caps) 524 return -ENXIO; 525 526 struct snd_compr_codec_caps *caps __free(kfree) = 527 kzalloc_obj(*caps); 528 if (!caps) 529 return -ENOMEM; 530 531 retval = stream->ops->get_codec_caps(stream, caps); 532 if (retval) 533 return retval; 534 if (copy_to_user((void __user *)arg, caps, sizeof(*caps))) 535 return -EFAULT; 536 return retval; 537 } 538 #endif /* !COMPR_CODEC_CAPS_OVERFLOW */ 539 540 int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size) 541 { 542 struct snd_dma_buffer *dmab; 543 int ret; 544 545 if (snd_BUG_ON(!(stream) || !(stream)->runtime)) 546 return -EINVAL; 547 dmab = kzalloc_obj(*dmab); 548 if (!dmab) 549 return -ENOMEM; 550 dmab->dev = stream->dma_buffer.dev; 551 ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab); 552 if (ret < 0) { 553 kfree(dmab); 554 return ret; 555 } 556 557 snd_compr_set_runtime_buffer(stream, dmab); 558 stream->runtime->dma_bytes = size; 559 return 1; 560 } 561 EXPORT_SYMBOL(snd_compr_malloc_pages); 562 563 int snd_compr_free_pages(struct snd_compr_stream *stream) 564 { 565 struct snd_compr_runtime *runtime; 566 567 if (snd_BUG_ON(!(stream) || !(stream)->runtime)) 568 return -EINVAL; 569 runtime = stream->runtime; 570 if (runtime->dma_area == NULL) 571 return 0; 572 if (runtime->dma_buffer_p != &stream->dma_buffer) { 573 /* It's a newly allocated buffer. Release it now. */ 574 snd_dma_free_pages(runtime->dma_buffer_p); 575 kfree(runtime->dma_buffer_p); 576 } 577 578 snd_compr_set_runtime_buffer(stream, NULL); 579 return 0; 580 } 581 EXPORT_SYMBOL(snd_compr_free_pages); 582 583 /* revisit this with snd_pcm_preallocate_xxx */ 584 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, 585 struct snd_compr_params *params) 586 { 587 unsigned int buffer_size; 588 void *buffer = NULL; 589 590 if (stream->direction == SND_COMPRESS_ACCEL) 591 goto params; 592 593 buffer_size = params->buffer.fragment_size * params->buffer.fragments; 594 if (stream->ops->copy) { 595 buffer = NULL; 596 /* if copy is defined the driver will be required to copy 597 * the data from core 598 */ 599 } else { 600 if (stream->runtime->dma_buffer_p) { 601 602 if (buffer_size > stream->runtime->dma_buffer_p->bytes) 603 dev_err(stream->device->dev, 604 "Not enough DMA buffer"); 605 else 606 buffer = stream->runtime->dma_buffer_p->area; 607 608 } else { 609 buffer = kmalloc(buffer_size, GFP_KERNEL); 610 } 611 612 if (!buffer) 613 return -ENOMEM; 614 } 615 616 stream->runtime->buffer = buffer; 617 stream->runtime->buffer_size = buffer_size; 618 params: 619 stream->runtime->fragment_size = params->buffer.fragment_size; 620 stream->runtime->fragments = params->buffer.fragments; 621 return 0; 622 } 623 624 static int 625 snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params) 626 { 627 u32 max_fragments; 628 629 /* first let's check the buffer parameter's */ 630 if (params->buffer.fragment_size == 0) 631 return -EINVAL; 632 633 if (stream->direction == SND_COMPRESS_ACCEL) 634 max_fragments = 64; /* safe value */ 635 else 636 max_fragments = U32_MAX / params->buffer.fragment_size; 637 638 if (params->buffer.fragments > max_fragments || 639 params->buffer.fragments == 0) 640 return -EINVAL; 641 642 /* now codec parameters */ 643 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX) 644 return -EINVAL; 645 646 if (params->codec.ch_in == 0 || params->codec.ch_out == 0) 647 return -EINVAL; 648 649 return 0; 650 } 651 652 static int 653 snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg) 654 { 655 int retval; 656 657 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) { 658 /* 659 * we should allow parameter change only when stream has been 660 * opened not in other cases 661 */ 662 struct snd_compr_params *params __free(kfree) = 663 memdup_user((void __user *)arg, sizeof(*params)); 664 665 if (IS_ERR(params)) 666 return PTR_ERR(params); 667 668 retval = snd_compress_check_input(stream, params); 669 if (retval) 670 return retval; 671 672 retval = snd_compr_allocate_buffer(stream, params); 673 if (retval) 674 return -ENOMEM; 675 676 retval = stream->ops->set_params(stream, params); 677 if (retval) 678 return retval; 679 680 if (stream->next_track) 681 return retval; 682 683 stream->metadata_set = false; 684 stream->next_track = false; 685 686 stream->runtime->state = SNDRV_PCM_STATE_SETUP; 687 } else { 688 return -EPERM; 689 } 690 return retval; 691 } 692 693 static int 694 snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg) 695 { 696 int retval; 697 698 if (!stream->ops->get_params) 699 return -EBADFD; 700 701 struct snd_codec *params __free(kfree) = 702 kzalloc_obj(*params); 703 if (!params) 704 return -ENOMEM; 705 retval = stream->ops->get_params(stream, params); 706 if (retval) 707 return retval; 708 if (copy_to_user((char __user *)arg, params, sizeof(*params))) 709 return -EFAULT; 710 return retval; 711 } 712 713 static int 714 snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg) 715 { 716 struct snd_compr_metadata metadata; 717 int retval; 718 719 if (!stream->ops->get_metadata) 720 return -ENXIO; 721 722 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata))) 723 return -EFAULT; 724 725 retval = stream->ops->get_metadata(stream, &metadata); 726 if (retval != 0) 727 return retval; 728 729 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata))) 730 return -EFAULT; 731 732 return 0; 733 } 734 735 static int 736 snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg) 737 { 738 struct snd_compr_metadata metadata; 739 int retval; 740 741 if (!stream->ops->set_metadata) 742 return -ENXIO; 743 /* 744 * we should allow parameter change only when stream has been 745 * opened not in other cases 746 */ 747 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata))) 748 return -EFAULT; 749 750 retval = stream->ops->set_metadata(stream, &metadata); 751 stream->metadata_set = true; 752 753 return retval; 754 } 755 756 static inline int snd_compr_tstamp(struct snd_compr_stream *stream, 757 unsigned long arg, bool is_32bit) 758 { 759 struct snd_compr_tstamp64 tstamp64 = { 0 }; 760 struct snd_compr_tstamp tstamp32 = { 0 }; 761 const void *copy_from = &tstamp64; 762 size_t copy_size = sizeof(tstamp64); 763 int ret; 764 765 ret = snd_compr_update_tstamp(stream, &tstamp64); 766 if (ret == 0) { 767 if (is_32bit) { 768 snd_compr_tstamp32_from_64(&tstamp32, &tstamp64); 769 copy_from = &tstamp32; 770 copy_size = sizeof(tstamp32); 771 } 772 ret = copy_to_user((void __user *)arg, copy_from, copy_size) ? 773 -EFAULT : 774 0; 775 } 776 return ret; 777 } 778 779 static int snd_compr_pause(struct snd_compr_stream *stream) 780 { 781 int retval; 782 783 switch (stream->runtime->state) { 784 case SNDRV_PCM_STATE_RUNNING: 785 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); 786 if (!retval) 787 stream->runtime->state = SNDRV_PCM_STATE_PAUSED; 788 break; 789 case SNDRV_PCM_STATE_DRAINING: 790 if (!stream->device->use_pause_in_draining) 791 return -EPERM; 792 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); 793 if (!retval) 794 stream->pause_in_draining = true; 795 break; 796 default: 797 return -EPERM; 798 } 799 return retval; 800 } 801 802 static int snd_compr_resume(struct snd_compr_stream *stream) 803 { 804 int retval; 805 806 switch (stream->runtime->state) { 807 case SNDRV_PCM_STATE_PAUSED: 808 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE); 809 if (!retval) 810 stream->runtime->state = SNDRV_PCM_STATE_RUNNING; 811 break; 812 case SNDRV_PCM_STATE_DRAINING: 813 if (!stream->pause_in_draining) 814 return -EPERM; 815 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE); 816 if (!retval) 817 stream->pause_in_draining = false; 818 break; 819 default: 820 return -EPERM; 821 } 822 return retval; 823 } 824 825 static int snd_compr_start(struct snd_compr_stream *stream) 826 { 827 int retval; 828 829 switch (stream->runtime->state) { 830 case SNDRV_PCM_STATE_SETUP: 831 if (stream->direction != SND_COMPRESS_CAPTURE) 832 return -EPERM; 833 break; 834 case SNDRV_PCM_STATE_PREPARED: 835 break; 836 default: 837 return -EPERM; 838 } 839 840 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START); 841 if (!retval) 842 stream->runtime->state = SNDRV_PCM_STATE_RUNNING; 843 return retval; 844 } 845 846 static int snd_compr_stop(struct snd_compr_stream *stream) 847 { 848 int retval; 849 850 switch (stream->runtime->state) { 851 case SNDRV_PCM_STATE_OPEN: 852 case SNDRV_PCM_STATE_SETUP: 853 case SNDRV_PCM_STATE_PREPARED: 854 return -EPERM; 855 default: 856 break; 857 } 858 859 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); 860 if (!retval) { 861 /* clear flags and stop any drain wait */ 862 stream->partial_drain = false; 863 stream->metadata_set = false; 864 stream->pause_in_draining = false; 865 snd_compr_drain_notify(stream); 866 stream->runtime->total_bytes_available = 0; 867 stream->runtime->total_bytes_transferred = 0; 868 } 869 return retval; 870 } 871 872 static void error_delayed_work(struct work_struct *work) 873 { 874 struct snd_compr_stream *stream; 875 876 stream = container_of(work, struct snd_compr_stream, error_work.work); 877 878 guard(mutex)(&stream->device->lock); 879 880 stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); 881 wake_up(&stream->runtime->sleep); 882 } 883 884 /** 885 * snd_compr_stop_error: Report a fatal error on a stream 886 * @stream: pointer to stream 887 * @state: state to transition the stream to 888 * 889 * Stop the stream and set its state. 890 * 891 * Should be called with compressed device lock held. 892 * 893 * Return: zero if successful, or a negative error code 894 */ 895 int snd_compr_stop_error(struct snd_compr_stream *stream, 896 snd_pcm_state_t state) 897 { 898 if (stream->runtime->state == state) 899 return 0; 900 901 stream->runtime->state = state; 902 903 pr_debug("Changing state to: %d\n", state); 904 905 queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0); 906 907 return 0; 908 } 909 EXPORT_SYMBOL_GPL(snd_compr_stop_error); 910 911 static int snd_compress_wait_for_drain(struct snd_compr_stream *stream) 912 { 913 int ret; 914 915 /* 916 * We are called with lock held. So drop the lock while we wait for 917 * drain complete notification from the driver 918 * 919 * It is expected that driver will notify the drain completion and then 920 * stream will be moved to SETUP state, even if draining resulted in an 921 * error. We can trigger next track after this. 922 */ 923 stream->runtime->state = SNDRV_PCM_STATE_DRAINING; 924 mutex_unlock(&stream->device->lock); 925 926 /* we wait for drain to complete here, drain can return when 927 * interruption occurred, wait returned error or success. 928 * For the first two cases we don't do anything different here and 929 * return after waking up 930 */ 931 932 ret = wait_event_interruptible(stream->runtime->sleep, 933 (stream->runtime->state != SNDRV_PCM_STATE_DRAINING)); 934 if (ret == -ERESTARTSYS) 935 pr_debug("wait aborted by a signal\n"); 936 else if (ret) 937 pr_debug("wait for drain failed with %d\n", ret); 938 939 940 wake_up(&stream->runtime->sleep); 941 mutex_lock(&stream->device->lock); 942 943 return ret; 944 } 945 946 static int snd_compr_drain(struct snd_compr_stream *stream) 947 { 948 int retval; 949 950 switch (stream->runtime->state) { 951 case SNDRV_PCM_STATE_OPEN: 952 case SNDRV_PCM_STATE_SETUP: 953 case SNDRV_PCM_STATE_PREPARED: 954 case SNDRV_PCM_STATE_PAUSED: 955 return -EPERM; 956 case SNDRV_PCM_STATE_XRUN: 957 return -EPIPE; 958 default: 959 break; 960 } 961 962 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN); 963 if (retval) { 964 pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval); 965 wake_up(&stream->runtime->sleep); 966 return retval; 967 } 968 969 return snd_compress_wait_for_drain(stream); 970 } 971 972 static int snd_compr_next_track(struct snd_compr_stream *stream) 973 { 974 int retval; 975 976 /* only a running stream can transition to next track */ 977 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) 978 return -EPERM; 979 980 /* next track doesn't have any meaning for capture streams */ 981 if (stream->direction == SND_COMPRESS_CAPTURE) 982 return -EPERM; 983 984 /* you can signal next track if this is intended to be a gapless stream 985 * and current track metadata is set 986 */ 987 if (stream->metadata_set == false) 988 return -EPERM; 989 990 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK); 991 if (retval != 0) 992 return retval; 993 stream->metadata_set = false; 994 stream->next_track = true; 995 return 0; 996 } 997 998 static int snd_compr_partial_drain(struct snd_compr_stream *stream) 999 { 1000 int retval; 1001 1002 switch (stream->runtime->state) { 1003 case SNDRV_PCM_STATE_OPEN: 1004 case SNDRV_PCM_STATE_SETUP: 1005 case SNDRV_PCM_STATE_PREPARED: 1006 case SNDRV_PCM_STATE_PAUSED: 1007 return -EPERM; 1008 case SNDRV_PCM_STATE_XRUN: 1009 return -EPIPE; 1010 default: 1011 break; 1012 } 1013 1014 /* partial drain doesn't have any meaning for capture streams */ 1015 if (stream->direction == SND_COMPRESS_CAPTURE) 1016 return -EPERM; 1017 1018 /* stream can be drained only when next track has been signalled */ 1019 if (stream->next_track == false) 1020 return -EPERM; 1021 1022 stream->partial_drain = true; 1023 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); 1024 if (retval) { 1025 pr_debug("Partial drain returned failure\n"); 1026 wake_up(&stream->runtime->sleep); 1027 return retval; 1028 } 1029 1030 stream->next_track = false; 1031 return snd_compress_wait_for_drain(stream); 1032 } 1033 1034 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 1035 1036 static struct snd_compr_task_runtime * 1037 snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno) 1038 { 1039 struct snd_compr_task_runtime *task; 1040 1041 list_for_each_entry(task, &stream->runtime->tasks, list) { 1042 if (task->seqno == seqno) 1043 return task; 1044 } 1045 return NULL; 1046 } 1047 1048 static void snd_compr_task_free(struct snd_compr_task_runtime *task) 1049 { 1050 if (task->output) 1051 dma_buf_put(task->output); 1052 if (task->input) 1053 dma_buf_put(task->input); 1054 kfree(task); 1055 } 1056 1057 static u64 snd_compr_seqno_next(struct snd_compr_stream *stream) 1058 { 1059 u64 seqno = ++stream->runtime->task_seqno; 1060 if (seqno == 0) 1061 seqno = ++stream->runtime->task_seqno; 1062 return seqno; 1063 } 1064 1065 static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask) 1066 { 1067 struct snd_compr_task_runtime *task; 1068 int retval, fd_i, fd_o; 1069 1070 if (stream->runtime->total_tasks >= stream->runtime->fragments) 1071 return -EBUSY; 1072 if (utask->origin_seqno != 0 || utask->input_size != 0) 1073 return -EINVAL; 1074 task = kzalloc_obj(*task); 1075 if (task == NULL) 1076 return -ENOMEM; 1077 task->seqno = utask->seqno = snd_compr_seqno_next(stream); 1078 task->input_size = utask->input_size; 1079 retval = stream->ops->task_create(stream, task); 1080 if (retval < 0) 1081 goto cleanup; 1082 /* similar functionality as in dma_buf_fd(), but ensure that both 1083 file descriptors are allocated before fd_install() */ 1084 if (!task->input || !task->input->file || !task->output || !task->output->file) { 1085 retval = -EINVAL; 1086 goto cleanup; 1087 } 1088 fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC); 1089 if (fd_i < 0) 1090 goto cleanup; 1091 fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC); 1092 if (fd_o < 0) { 1093 put_unused_fd(fd_i); 1094 goto cleanup; 1095 } 1096 /* keep dmabuf reference until freed with task free ioctl */ 1097 get_dma_buf(task->input); 1098 get_dma_buf(task->output); 1099 fd_install(fd_i, task->input->file); 1100 fd_install(fd_o, task->output->file); 1101 utask->input_fd = fd_i; 1102 utask->output_fd = fd_o; 1103 list_add_tail(&task->list, &stream->runtime->tasks); 1104 stream->runtime->total_tasks++; 1105 return 0; 1106 cleanup: 1107 snd_compr_task_free(task); 1108 return retval; 1109 } 1110 1111 static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg) 1112 { 1113 int retval; 1114 1115 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP) 1116 return -EPERM; 1117 1118 struct snd_compr_task *task __free(kfree) = 1119 memdup_user((void __user *)arg, sizeof(*task)); 1120 if (IS_ERR(task)) 1121 return PTR_ERR(task); 1122 retval = snd_compr_task_new(stream, task); 1123 if (retval >= 0) 1124 if (copy_to_user((void __user *)arg, task, sizeof(*task))) 1125 retval = -EFAULT; 1126 return retval; 1127 } 1128 1129 static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task, 1130 struct snd_compr_task *utask) 1131 { 1132 if (task == NULL) 1133 return -EINVAL; 1134 if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED) 1135 return -EBUSY; 1136 if (utask->input_size > task->input->size) 1137 return -EINVAL; 1138 task->flags = utask->flags; 1139 task->input_size = utask->input_size; 1140 task->state = SND_COMPRESS_TASK_STATE_IDLE; 1141 return 0; 1142 } 1143 1144 static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask) 1145 { 1146 struct snd_compr_task_runtime *task; 1147 int retval; 1148 1149 if (utask->origin_seqno > 0) { 1150 task = snd_compr_find_task(stream, utask->origin_seqno); 1151 retval = snd_compr_task_start_prepare(task, utask); 1152 if (retval < 0) 1153 return retval; 1154 task->seqno = utask->seqno = snd_compr_seqno_next(stream); 1155 utask->origin_seqno = 0; 1156 list_move_tail(&task->list, &stream->runtime->tasks); 1157 } else { 1158 task = snd_compr_find_task(stream, utask->seqno); 1159 if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE) 1160 return -EBUSY; 1161 retval = snd_compr_task_start_prepare(task, utask); 1162 if (retval < 0) 1163 return retval; 1164 } 1165 retval = stream->ops->task_start(stream, task); 1166 if (retval >= 0) { 1167 task->state = SND_COMPRESS_TASK_STATE_ACTIVE; 1168 stream->runtime->active_tasks++; 1169 } 1170 return retval; 1171 } 1172 1173 static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg) 1174 { 1175 int retval; 1176 1177 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP) 1178 return -EPERM; 1179 1180 struct snd_compr_task *task __free(kfree) = 1181 memdup_user((void __user *)arg, sizeof(*task)); 1182 if (IS_ERR(task)) 1183 return PTR_ERR(task); 1184 retval = snd_compr_task_start(stream, task); 1185 if (retval >= 0) 1186 if (copy_to_user((void __user *)arg, task, sizeof(*task))) 1187 retval = -EFAULT; 1188 return retval; 1189 } 1190 1191 static void snd_compr_task_stop_one(struct snd_compr_stream *stream, 1192 struct snd_compr_task_runtime *task) 1193 { 1194 if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE) 1195 return; 1196 stream->ops->task_stop(stream, task); 1197 if (!snd_BUG_ON(stream->runtime->active_tasks == 0)) 1198 stream->runtime->active_tasks--; 1199 list_move_tail(&task->list, &stream->runtime->tasks); 1200 task->state = SND_COMPRESS_TASK_STATE_IDLE; 1201 } 1202 1203 static void snd_compr_task_free_one(struct snd_compr_stream *stream, 1204 struct snd_compr_task_runtime *task) 1205 { 1206 snd_compr_task_stop_one(stream, task); 1207 stream->ops->task_free(stream, task); 1208 list_del(&task->list); 1209 snd_compr_task_free(task); 1210 stream->runtime->total_tasks--; 1211 } 1212 1213 static void snd_compr_task_free_all(struct snd_compr_stream *stream) 1214 { 1215 struct snd_compr_task_runtime *task, *temp; 1216 1217 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list) 1218 snd_compr_task_free_one(stream, task); 1219 } 1220 1221 typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream, 1222 struct snd_compr_task_runtime *task); 1223 1224 static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg, 1225 snd_compr_seq_func_t fcn) 1226 { 1227 struct snd_compr_task_runtime *task, *temp; 1228 __u64 seqno; 1229 int retval; 1230 1231 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP) 1232 return -EPERM; 1233 retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno)); 1234 if (retval) 1235 return -EFAULT; 1236 retval = 0; 1237 if (seqno == 0) { 1238 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list) 1239 fcn(stream, task); 1240 } else { 1241 task = snd_compr_find_task(stream, seqno); 1242 if (task == NULL) { 1243 retval = -EINVAL; 1244 } else { 1245 fcn(stream, task); 1246 } 1247 } 1248 return retval; 1249 } 1250 1251 static int snd_compr_task_status(struct snd_compr_stream *stream, 1252 struct snd_compr_task_status *status) 1253 { 1254 struct snd_compr_task_runtime *task; 1255 1256 task = snd_compr_find_task(stream, status->seqno); 1257 if (task == NULL) 1258 return -EINVAL; 1259 status->input_size = task->input_size; 1260 status->output_size = task->output_size; 1261 status->state = task->state; 1262 return 0; 1263 } 1264 1265 static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg) 1266 { 1267 int retval; 1268 1269 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP) 1270 return -EPERM; 1271 1272 struct snd_compr_task_status *status __free(kfree) = 1273 memdup_user((void __user *)arg, sizeof(*status)); 1274 if (IS_ERR(status)) 1275 return PTR_ERR(status); 1276 retval = snd_compr_task_status(stream, status); 1277 if (retval >= 0) 1278 if (copy_to_user((void __user *)arg, status, sizeof(*status))) 1279 retval = -EFAULT; 1280 return retval; 1281 } 1282 1283 /** 1284 * snd_compr_task_finished: Notify that the task was finished 1285 * @stream: pointer to stream 1286 * @task: runtime task structure 1287 * 1288 * Set the finished task state and notify waiters. 1289 */ 1290 void snd_compr_task_finished(struct snd_compr_stream *stream, 1291 struct snd_compr_task_runtime *task) 1292 { 1293 guard(mutex)(&stream->device->lock); 1294 if (!snd_BUG_ON(stream->runtime->active_tasks == 0)) 1295 stream->runtime->active_tasks--; 1296 task->state = SND_COMPRESS_TASK_STATE_FINISHED; 1297 wake_up(&stream->runtime->sleep); 1298 } 1299 EXPORT_SYMBOL_GPL(snd_compr_task_finished); 1300 1301 MODULE_IMPORT_NS("DMA_BUF"); 1302 #endif /* CONFIG_SND_COMPRESS_ACCEL */ 1303 1304 static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 1305 { 1306 struct snd_compr_file *data = f->private_data; 1307 struct snd_compr_stream *stream; 1308 1309 if (snd_BUG_ON(!data)) 1310 return -EFAULT; 1311 1312 stream = &data->stream; 1313 1314 guard(mutex)(&stream->device->lock); 1315 switch (cmd) { 1316 case SNDRV_COMPRESS_IOCTL_VERSION: 1317 return put_user(SNDRV_COMPRESS_VERSION, 1318 (int __user *)arg) ? -EFAULT : 0; 1319 case SNDRV_COMPRESS_GET_CAPS: 1320 return snd_compr_get_caps(stream, arg); 1321 #ifndef COMPR_CODEC_CAPS_OVERFLOW 1322 case SNDRV_COMPRESS_GET_CODEC_CAPS: 1323 return snd_compr_get_codec_caps(stream, arg); 1324 #endif 1325 case SNDRV_COMPRESS_SET_PARAMS: 1326 return snd_compr_set_params(stream, arg); 1327 case SNDRV_COMPRESS_GET_PARAMS: 1328 return snd_compr_get_params(stream, arg); 1329 case SNDRV_COMPRESS_SET_METADATA: 1330 return snd_compr_set_metadata(stream, arg); 1331 case SNDRV_COMPRESS_GET_METADATA: 1332 return snd_compr_get_metadata(stream, arg); 1333 } 1334 1335 if (stream->direction == SND_COMPRESS_ACCEL) { 1336 #if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 1337 switch (cmd) { 1338 case SNDRV_COMPRESS_TASK_CREATE: 1339 return snd_compr_task_create(stream, arg); 1340 case SNDRV_COMPRESS_TASK_FREE: 1341 return snd_compr_task_seq(stream, arg, snd_compr_task_free_one); 1342 case SNDRV_COMPRESS_TASK_START: 1343 return snd_compr_task_start_ioctl(stream, arg); 1344 case SNDRV_COMPRESS_TASK_STOP: 1345 return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one); 1346 case SNDRV_COMPRESS_TASK_STATUS: 1347 return snd_compr_task_status_ioctl(stream, arg); 1348 } 1349 #endif 1350 return -ENOTTY; 1351 } 1352 1353 switch (cmd) { 1354 case SNDRV_COMPRESS_TSTAMP: 1355 return snd_compr_tstamp(stream, arg, true); 1356 case SNDRV_COMPRESS_TSTAMP64: 1357 return snd_compr_tstamp(stream, arg, false); 1358 case SNDRV_COMPRESS_AVAIL: 1359 return snd_compr_ioctl_avail(stream, arg, true); 1360 case SNDRV_COMPRESS_AVAIL64: 1361 return snd_compr_ioctl_avail(stream, arg, false); 1362 case SNDRV_COMPRESS_PAUSE: 1363 return snd_compr_pause(stream); 1364 case SNDRV_COMPRESS_RESUME: 1365 return snd_compr_resume(stream); 1366 case SNDRV_COMPRESS_START: 1367 return snd_compr_start(stream); 1368 case SNDRV_COMPRESS_STOP: 1369 return snd_compr_stop(stream); 1370 case SNDRV_COMPRESS_DRAIN: 1371 return snd_compr_drain(stream); 1372 case SNDRV_COMPRESS_PARTIAL_DRAIN: 1373 return snd_compr_partial_drain(stream); 1374 case SNDRV_COMPRESS_NEXT_TRACK: 1375 return snd_compr_next_track(stream); 1376 } 1377 1378 return -ENOTTY; 1379 } 1380 1381 /* support of 32bit userspace on 64bit platforms */ 1382 #ifdef CONFIG_COMPAT 1383 static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd, 1384 unsigned long arg) 1385 { 1386 return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 1387 } 1388 #endif 1389 1390 static const struct file_operations snd_compr_file_ops = { 1391 .owner = THIS_MODULE, 1392 .open = snd_compr_open, 1393 .release = snd_compr_free, 1394 .write = snd_compr_write, 1395 .read = snd_compr_read, 1396 .unlocked_ioctl = snd_compr_ioctl, 1397 #ifdef CONFIG_COMPAT 1398 .compat_ioctl = snd_compr_ioctl_compat, 1399 #endif 1400 .mmap = snd_compr_mmap, 1401 .poll = snd_compr_poll, 1402 }; 1403 1404 static int snd_compress_dev_register(struct snd_device *device) 1405 { 1406 int ret; 1407 struct snd_compr *compr; 1408 1409 if (snd_BUG_ON(!device || !device->device_data)) 1410 return -EBADFD; 1411 compr = device->device_data; 1412 1413 pr_debug("reg device %s, direction %d\n", compr->name, 1414 compr->direction); 1415 /* register compressed device */ 1416 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, 1417 compr->card, compr->device, 1418 &snd_compr_file_ops, compr, compr->dev); 1419 if (ret < 0) { 1420 pr_err("snd_register_device failed %d\n", ret); 1421 return ret; 1422 } 1423 return ret; 1424 1425 } 1426 1427 static int snd_compress_dev_disconnect(struct snd_device *device) 1428 { 1429 struct snd_compr *compr; 1430 1431 compr = device->device_data; 1432 snd_unregister_device(compr->dev); 1433 return 0; 1434 } 1435 1436 #ifdef CONFIG_SND_VERBOSE_PROCFS 1437 static void snd_compress_proc_info_read(struct snd_info_entry *entry, 1438 struct snd_info_buffer *buffer) 1439 { 1440 struct snd_compr *compr = (struct snd_compr *)entry->private_data; 1441 1442 snd_iprintf(buffer, "card: %d\n", compr->card->number); 1443 snd_iprintf(buffer, "device: %d\n", compr->device); 1444 snd_iprintf(buffer, "stream: %s\n", 1445 compr->direction == SND_COMPRESS_PLAYBACK 1446 ? "PLAYBACK" : "CAPTURE"); 1447 snd_iprintf(buffer, "id: %s\n", compr->id); 1448 } 1449 1450 static int snd_compress_proc_init(struct snd_compr *compr) 1451 { 1452 struct snd_info_entry *entry; 1453 char name[16]; 1454 1455 sprintf(name, "compr%i", compr->device); 1456 entry = snd_info_create_card_entry(compr->card, name, 1457 compr->card->proc_root); 1458 if (!entry) 1459 return -ENOMEM; 1460 entry->mode = S_IFDIR | 0555; 1461 compr->proc_root = entry; 1462 1463 entry = snd_info_create_card_entry(compr->card, "info", 1464 compr->proc_root); 1465 if (entry) 1466 snd_info_set_text_ops(entry, compr, 1467 snd_compress_proc_info_read); 1468 compr->proc_info_entry = entry; 1469 1470 return 0; 1471 } 1472 1473 static void snd_compress_proc_done(struct snd_compr *compr) 1474 { 1475 snd_info_free_entry(compr->proc_info_entry); 1476 compr->proc_info_entry = NULL; 1477 snd_info_free_entry(compr->proc_root); 1478 compr->proc_root = NULL; 1479 } 1480 1481 static inline void snd_compress_set_id(struct snd_compr *compr, const char *id) 1482 { 1483 strscpy(compr->id, id, sizeof(compr->id)); 1484 } 1485 #else 1486 static inline int snd_compress_proc_init(struct snd_compr *compr) 1487 { 1488 return 0; 1489 } 1490 1491 static inline void snd_compress_proc_done(struct snd_compr *compr) 1492 { 1493 } 1494 1495 static inline void snd_compress_set_id(struct snd_compr *compr, const char *id) 1496 { 1497 } 1498 #endif 1499 1500 static int snd_compress_dev_free(struct snd_device *device) 1501 { 1502 struct snd_compr *compr; 1503 1504 compr = device->device_data; 1505 snd_compress_proc_done(compr); 1506 put_device(compr->dev); 1507 return 0; 1508 } 1509 1510 /** 1511 * snd_compress_new: create new compress device 1512 * @card: sound card pointer 1513 * @device: device number 1514 * @dirn: device direction, should be of type enum snd_compr_direction 1515 * @id: ID string 1516 * @compr: compress device pointer 1517 * 1518 * Return: zero if successful, or a negative error code 1519 */ 1520 int snd_compress_new(struct snd_card *card, int device, 1521 int dirn, const char *id, struct snd_compr *compr) 1522 { 1523 static const struct snd_device_ops ops = { 1524 .dev_free = snd_compress_dev_free, 1525 .dev_register = snd_compress_dev_register, 1526 .dev_disconnect = snd_compress_dev_disconnect, 1527 }; 1528 int ret; 1529 1530 #if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) 1531 if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL)) 1532 return -EINVAL; 1533 #endif 1534 1535 compr->card = card; 1536 compr->device = device; 1537 compr->direction = dirn; 1538 mutex_init(&compr->lock); 1539 1540 snd_compress_set_id(compr, id); 1541 1542 ret = snd_device_alloc(&compr->dev, card); 1543 if (ret) 1544 return ret; 1545 dev_set_name(compr->dev, "comprC%iD%i", card->number, device); 1546 1547 ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops); 1548 if (ret == 0) 1549 snd_compress_proc_init(compr); 1550 else 1551 put_device(compr->dev); 1552 1553 return ret; 1554 } 1555 EXPORT_SYMBOL_GPL(snd_compress_new); 1556 1557 MODULE_DESCRIPTION("ALSA Compressed offload framework"); 1558 MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>"); 1559 MODULE_LICENSE("GPL v2"); 1560