// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 */

#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif

static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist, n;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		if (runtime->silence_start != appl_ptr) {
			n = appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ?
			runtime->buffer_size - ofs : frames;
		err = fill_silence_frames(substream, ofs, transfer);
		snd_BUG_ON(err < 0);
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	}
}

#ifdef CONFIG_SND_DEBUG
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ? 'c' : 'p',
		 substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG

#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/* call with stream lock held */
void __snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		struct timespec64 tstamp;

		snd_pcm_gettime(runtime, &tstamp);
		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
	}
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);			\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
					   (in_interrupt) ? 'Q' : 'P', ##args);	\
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)

#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */

#define hw_ptr_error(substream, fmt, args...) do { } while (0)

#endif

int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	avail = snd_pcm_avail(substream);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			__snd_pcm_xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

static void update_audio_tstamp(struct snd_pcm_substream *substream,
				struct timespec64 *curr_tstamp,
				struct timespec64 *audio_tstamp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	u64 audio_frames, audio_nsecs;
	struct timespec64 driver_tstamp;

	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
		return;

	if (!(substream->ops->get_time_info) ||
		(runtime->audio_tstamp_report.actual_type ==
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {

		/*
		 * provide audio timestamp derived from pointer position
		 * add delay only if requested
		 */

		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;

		if (runtime->audio_tstamp_config.report_delay) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				audio_frames -= runtime->delay;
			else
				audio_frames += runtime->delay;
		}
		audio_nsecs = div_u64(audio_frames * 1000000000LL,
				runtime->rate);
		*audio_tstamp = ns_to_timespec64(audio_nsecs);
	}

	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
	}


	/*
	 * re-take a driver timestamp to let apps detect if the reference tstamp
	 * read by low-level hardware was provided with a delay
	 */
	snd_pcm_gettime(substream->runtime, &driver_tstamp);
	runtime->driver_tstamp = driver_tstamp;
}

static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;
	unsigned long curr_jiffies;
	struct timespec64 curr_tstamp;
	struct timespec64 audio_tstamp;
	int crossed_boundary = 0;

	old_hw_ptr = runtime->status->hw_ptr;

	/*
	 * group pointer, time and jiffies reads to allow for more
	 * accurate correlations/corrections.
	 * The values are stored at the end of this routine after
	 * corrections for hw_ptr position
	 */
	pos = substream->ops->pointer(substream);
	curr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		if ((substream->ops->get_time_info) &&
			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
			substream->ops->get_time_info(substream, &curr_tstamp,
						&audio_tstamp,
						&runtime->audio_tstamp_config,
						&runtime->audio_tstamp_report);

			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
				snd_pcm_gettime(runtime, &curr_tstamp);
		} else
			snd_pcm_gettime(runtime, &curr_tstamp);
	}

	if (pos == SNDRV_PCM_POS_XRUN) {
		__snd_pcm_xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		if (printk_ratelimit()) {
			char name[16];
			snd_pcm_debug_name(substream, name, sizeof(name));
			pcm_err(substream->pcm,
				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
				name, pos, runtime->buffer_size,
				runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	trace_hwptr(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary) {
					hw_base = 0;
					crossed_boundary++;
				}
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary) {
			hw_base = 0;
			crossed_boundary++;
		}
		new_hw_ptr = hw_base + pos;
	}
      __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary) {
				hw_base = 0;
				crossed_boundary++;
			}
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardware with the BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according to jiffies, not the pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary) {
				new_hw_ptr -= runtime->boundary;
				crossed_boundary--;
			}
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     (long)pos, (long)hdelta,
			     (long)runtime->period_size, jdelta,
			     ((hdelta * HZ) / runtime->rate), hw_base,
			     (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream, in_interrupt,
			     "Lost interrupts?",
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr,
			     (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr) {
		runtime->hw_ptr_jiffies = curr_jiffies;
		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
		return 0;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
	runtime->hw_ptr_jiffies = curr_jiffies;
	if (crossed_boundary) {
		snd_BUG_ON(crossed_boundary != 1);
		runtime->hw_ptr_wrap += runtime->boundary;
	}

	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
		     const struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_set_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
 */
void snd_pcm_set_sync(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->sync.id32[0] = substream->pcm->card->number;
	runtime->sync.id32[1] = -1;
	runtime->sync.id32[2] = -1;
	runtime->sync.id32[3] = -1;
}
EXPORT_SYMBOL(snd_pcm_set_sync);
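
/*
 * Usage sketch for snd_pcm_set_ops(): a hypothetical driver normally attaches
 * its operator table right after snd_pcm_new().  The foo_* names below are
 * illustrative only and are not defined in this file.
 *
 *	static const struct snd_pcm_ops foo_playback_ops = {
 *		.open      = foo_open,
 *		.ioctl     = snd_pcm_lib_ioctl,
 *		.hw_params = foo_hw_params,
 *		.prepare   = foo_prepare,
 *		.trigger   = foo_trigger,
 *		.pointer   = foo_pointer,
 *	};
 *
 *	err = snd_pcm_new(card, "foo", 0, 1, 1, &pcm);
 *	if (err < 0)
 *		return err;
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &foo_playback_ops);
 */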

/*
 *  Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;
	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);

static int snd_interval_refine_first(struct snd_interval *i)
{
	const unsigned int last_max = i->max;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	if (i->openmin)
		i->max++;
	/* only exclude max value if also excluded before refine */
	i->openmax = (i->openmax && i->max >= last_max);
	return 1;
}

static int snd_interval_refine_last(struct snd_interval *i)
{
	const unsigned int last_min = i->min;

	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	if (i->openmax)
		i->min--;
	/* only exclude min value if also excluded before refine */
	i->openmin = (i->openmin && i->min <= last_min);
	return 1;
}

void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max, b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
		      unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
		      const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */


/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of ratnum_t
 * @rats: ratnum_t array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, const struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count,
			       const struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      const unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

/**
 * snd_interval_ranges - refine the interval value from the list of ranges
 * @i: the interval value to refine
 * @count: the number of elements in the list of ranges
 * @ranges: the ranges list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list of ranges.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
			const struct snd_interval *ranges, unsigned int mask)
{
	unsigned int k;
	struct snd_interval range_union;
	struct snd_interval range;

	if (!count) {
		snd_interval_none(i);
		return -EINVAL;
	}
	snd_interval_any(&range_union);
	range_union.min = UINT_MAX;
	range_union.max = 0;
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		snd_interval_copy(&range, &ranges[k]);
		if (snd_interval_refine(&range, i) < 0)
			continue;
		if (snd_interval_empty(&range))
			continue;

		if (range.min < range_union.min) {
			range_union.min = range.min;
			range_union.openmin = 1;
		}
		if (range.min == range_union.min && !range.openmin)
			range_union.openmin = 0;
		if (range.max > range_union.max) {
			range_union.max = range.max;
			range_union.openmax = 1;
		}
		if (range.max == range_union.max && !range.openmax)
			range_union.openmax = 0;
	}
	return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);

static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = i->min % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		i->openmin = 0;
		changed = 1;
	}
	n = i->max % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		i->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_pcm_hw_rule *c;
	unsigned int k;
	va_list args;
	va_start(args, dep);
	if (constrs->rules_num >= constrs->rules_all) {
		struct snd_pcm_hw_rule *new;
		unsigned int new_rules = constrs->rules_all + 16;
		new = krealloc_array(constrs->rules, new_rules,
				     sizeof(*c), GFP_KERNEL);
		if (!new) {
			va_end(args);
			return -ENOMEM;
		}
		constrs->rules = new;
		constrs->rules_all = new_rules;
	}
	c = &constrs->rules[constrs->rules_num];
	c->cond = cond;
	c->func = func;
	c->var = var;
	c->private = private;
	k = 0;
	while (1) {
		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
			va_end(args);
			return -EINVAL;
		}
		c->deps[k++] = dep;
		if (dep < 0)
			break;
		dep = va_arg(args, int);
	}
	constrs->rules_num++;
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);
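
/*
 * Usage sketch for snd_pcm_hw_rule_add(): a hypothetical rule that limits the
 * channel count depending on the refined rate interval.  The callback and the
 * chosen limits are illustrative only, not part of this file; the point is
 * that the rule refines the interval of rule->var from its listed deps.
 *
 *	static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					     struct snd_pcm_hw_rule *rule)
 *	{
 *		const struct snd_interval *rate =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = {
 *			.min = 1,
 *			.max = rate->min > 96000 ? 2 : 8,
 *			.integer = 1,
 *		};
 *		return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  foo_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */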

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);

/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply the constraint of integer to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}


/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
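
/*
 * Usage sketch for snd_pcm_hw_constraint_list(): restricting the rate to a
 * fixed set from a hypothetical driver's open callback.  The list has to stay
 * valid for the lifetime of the substream, hence the static storage; the
 * foo_* names are illustrative only.
 *
 *	static const unsigned int foo_rates[] = { 44100, 48000, 96000 };
 *	static const struct snd_pcm_hw_constraint_list foo_rate_constraints = {
 *		.count = ARRAY_SIZE(foo_rates),
 *		.list = foo_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &foo_rate_constraints);
 */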

static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}


/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;
	unsigned int msbits = l >> 16;
	const struct snd_interval *i =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 * specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				   snd_pcm_hw_rule_msbits,
				   (void*) l,
				   SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
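
/*
 * Usage sketch for snd_pcm_hw_constraint_msbits(): a hypothetical codec that
 * carries 24 valid bits in a 32-bit container would advertise it like this,
 * so that applications querying hw_params see msbits = 24 for 32-bit sample
 * formats.
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */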

static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static const unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
					   struct snd_pcm_hw_rule *rule)
{
	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
	struct snd_interval *rate;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	return snd_interval_list(rate, 1, &base_rate, 0);
}

/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
			       unsigned int base_rate)
{
	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
				   SNDRV_PCM_HW_PARAM_RATE,
				   snd_pcm_hw_rule_noresample_func,
				   (void *)(uintptr_t)base_rate,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
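
/*
 * Usage sketch for snd_pcm_hw_rule_noresample(): a hypothetical driver whose
 * hardware runs natively at 48 kHz can let applications opt out of rate
 * conversion by adding this rule from its open callback.
 *
 *	err = snd_pcm_hw_rule_noresample(substream->runtime, 48000);
 *	if (err < 0)
 *		return err;
 */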

static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();
}

void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
	unsigned int k;
	memset(params, 0, sizeof(*params));
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
		_snd_pcm_hw_param_any(params, k);
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		_snd_pcm_hw_param_any(params, k);
	params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);

/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	if (hw_is_mask(var)) {
		const struct snd_mask *mask = hw_param_mask_c(params, var);
		if (!snd_mask_single(mask))
			return -EINVAL;
		if (dir)
			*dir = 0;
		return snd_mask_value(mask);
	}
	if (hw_is_interval(var)) {
		const struct snd_interval *i = hw_param_interval_c(params, var);
		if (!snd_interval_single(i))
			return -EINVAL;
		if (dir)
			*dir = i->openmin;
		return snd_interval_value(i);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);

void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_none(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else if (hw_is_interval(var)) {
		snd_interval_none(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else {
		snd_BUG();
	}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);

static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
				   snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_first(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_first(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
			   struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_first(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);

static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_last(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_last(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed > 0) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}


/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
			  struct snd_pcm_hw_params *params,
			  snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_last(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);

static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
				   void *arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream) &&
	    snd_pcm_update_hw_ptr(substream) >= 0)
		runtime->status->hw_ptr %= runtime->buffer_size;
	else {
		runtime->status->hw_ptr = 0;
		runtime->hw_ptr_wrap = 0;
	}
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}

static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
					  void *arg)
{
	struct snd_pcm_channel_info *info = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int width;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
		info->offset = -1;
		return 0;
	}
	width = snd_pcm_format_physical_width(runtime->format);
	if (width < 0)
		return width;
	info->offset = 0;
	switch (runtime->access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		info->first = info->channel * width;
		info->step = runtime->channels * width;
		break;
	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
	{
		size_t size = runtime->dma_bytes / runtime->channels;
		info->first = info->channel * size * 8;
		info->step = width;
		break;
	}
	default:
		snd_BUG();
		break;
	}
	return 0;
}

static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
				       void *arg)
{
	struct snd_pcm_hw_params *params = arg;
	snd_pcm_format_t format;
	int channels;
	ssize_t frame_size;

	params->fifo_size = substream->runtime->hw.fifo_size;
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
		format = params_format(params);
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
	}
	return 0;
}

/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed - update the pcm status for the next period
 * @substream: the pcm substream instance
 *
 * This function is called from the interrupt handler when the
 * PCM has processed the period size. It will update the current
 * pointer, wake up sleepers, etc.
 *
 * Even if more than one period has elapsed since the last call, you
 * have to call this only once.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	if (snd_BUG_ON(!substream))
		return;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (PCM_RUNTIME_CHECK(substream))
		goto _unlock;
	runtime = substream->runtime;

	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
 _unlock:
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
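
/*
 * Usage sketch for snd_pcm_period_elapsed(): a hypothetical interrupt handler
 * acknowledges its hardware and then reports the elapsed period.  The helper
 * takes the stream lock itself, so it is called without the lock held; the
 * foo_* names are illustrative only.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		foo_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */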

/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored on availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
			  snd_pcm_uframes_t *availp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	wait_queue_entry_t wait;
	int err = 0;
	snd_pcm_uframes_t avail = 0;
	long wait_time, tout;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&runtime->tsleep, &wait);

	if (runtime->no_period_wakeup)
		wait_time = MAX_SCHEDULE_TIMEOUT;
	else {
		/* use wait time from substream if available */
		if (substream->wait_time) {
			wait_time = substream->wait_time;
		} else {
			wait_time = 10;

			if (runtime->rate) {
				long t = runtime->period_size * 2 /
					 runtime->rate;
				wait_time = max(t, wait_time);
			}
			wait_time = msecs_to_jiffies(wait_time * 1000);
		}
	}

	for (;;) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}

		/*
		 * We need to check if space became available already
		 * (and thus the wakeup happened already) first to close
		 * the race of space already having become available.
		 * This check must happen after being added to the waitqueue
		 * and having current state be INTERRUPTIBLE.
		 */
		avail = snd_pcm_avail(substream);
		if (avail >= runtime->twake)
			break;
		snd_pcm_stream_unlock_irq(substream);

		tout = schedule_timeout(wait_time);

		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_DRAINING:
			if (is_playback)
				err = -EPIPE;
			else
				avail = 0; /* indicate draining */
			goto _endloop;
		case SNDRV_PCM_STATE_OPEN:
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_DISCONNECTED:
			err = -EBADFD;
			goto _endloop;
		case SNDRV_PCM_STATE_PAUSED:
			continue;
		}
		if (!tout) {
			pcm_dbg(substream->pcm,
				"%s write error (DMA or IRQ trouble?)\n",
"playback" : "capture"); 1904 err = -EIO; 1905 break; 1906 } 1907 } 1908 _endloop: 1909 set_current_state(TASK_RUNNING); 1910 remove_wait_queue(&runtime->tsleep, &wait); 1911 *availp = avail; 1912 return err; 1913 } 1914 1915 typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, 1916 int channel, unsigned long hwoff, 1917 void *buf, unsigned long bytes); 1918 1919 typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, 1920 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f); 1921 1922 /* calculate the target DMA-buffer position to be written/read */ 1923 static void *get_dma_ptr(struct snd_pcm_runtime *runtime, 1924 int channel, unsigned long hwoff) 1925 { 1926 return runtime->dma_area + hwoff + 1927 channel * (runtime->dma_bytes / runtime->channels); 1928 } 1929 1930 /* default copy_user ops for write; used for both interleaved and non- modes */ 1931 static int default_write_copy(struct snd_pcm_substream *substream, 1932 int channel, unsigned long hwoff, 1933 void *buf, unsigned long bytes) 1934 { 1935 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff), 1936 (void __user *)buf, bytes)) 1937 return -EFAULT; 1938 return 0; 1939 } 1940 1941 /* default copy_kernel ops for write */ 1942 static int default_write_copy_kernel(struct snd_pcm_substream *substream, 1943 int channel, unsigned long hwoff, 1944 void *buf, unsigned long bytes) 1945 { 1946 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes); 1947 return 0; 1948 } 1949 1950 /* fill silence instead of copy data; called as a transfer helper 1951 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when 1952 * a NULL buffer is passed 1953 */ 1954 static int fill_silence(struct snd_pcm_substream *substream, int channel, 1955 unsigned long hwoff, void *buf, unsigned long bytes) 1956 { 1957 struct snd_pcm_runtime *runtime = substream->runtime; 1958 1959 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) 1960 return 0; 1961 if (substream->ops->fill_silence) 1962 return substream->ops->fill_silence(substream, channel, 1963 hwoff, bytes); 1964 1965 snd_pcm_format_set_silence(runtime->format, 1966 get_dma_ptr(runtime, channel, hwoff), 1967 bytes_to_samples(runtime, bytes)); 1968 return 0; 1969 } 1970 1971 /* default copy_user ops for read; used for both interleaved and non- modes */ 1972 static int default_read_copy(struct snd_pcm_substream *substream, 1973 int channel, unsigned long hwoff, 1974 void *buf, unsigned long bytes) 1975 { 1976 if (copy_to_user((void __user *)buf, 1977 get_dma_ptr(substream->runtime, channel, hwoff), 1978 bytes)) 1979 return -EFAULT; 1980 return 0; 1981 } 1982 1983 /* default copy_kernel ops for read */ 1984 static int default_read_copy_kernel(struct snd_pcm_substream *substream, 1985 int channel, unsigned long hwoff, 1986 void *buf, unsigned long bytes) 1987 { 1988 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes); 1989 return 0; 1990 } 1991 1992 /* call transfer function with the converted pointers and sizes; 1993 * for interleaved mode, it's one shot for all samples 1994 */ 1995 static int interleaved_copy(struct snd_pcm_substream *substream, 1996 snd_pcm_uframes_t hwoff, void *data, 1997 snd_pcm_uframes_t off, 1998 snd_pcm_uframes_t frames, 1999 pcm_transfer_f transfer) 2000 { 2001 struct snd_pcm_runtime *runtime = substream->runtime; 2002 2003 /* convert to bytes */ 2004 hwoff = frames_to_bytes(runtime, hwoff); 2005 off = frames_to_bytes(runtime, off); 2006 frames = frames_to_bytes(runtime, frames); 2007 return 

/* call the transfer function with the converted pointers and sizes for each
 * non-interleaved channel; when the buffer is NULL, silence is filled
 * instead of copying the data
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t hwoff, void *data,
			       snd_pcm_uframes_t off,
			       snd_pcm_uframes_t frames,
			       pcm_transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int channels = runtime->channels;
	void **bufs = data;
	int c, err;

	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode, we copy for each channel, thus the
	 * per-channel copies add up to n_samples bytes x channels = whole
	 * frames.
	 */
	off = samples_to_bytes(runtime, off);
	frames = samples_to_bytes(runtime, frames);
	hwoff = samples_to_bytes(runtime, hwoff);
	for (c = 0; c < channels; ++c, ++bufs) {
		if (!data || !*bufs)
			err = fill_silence(substream, c, hwoff, NULL, frames);
		else
			err = transfer(substream, c, hwoff, *bufs + off,
				       frames);
		if (err < 0)
			return err;
	}
	return 0;
}

/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence()
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
{
	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
		return interleaved_copy(substream, off, NULL, 0, frames,
					fill_silence);
	else
		return noninterleaved_copy(substream, off, NULL, 0, frames,
					   fill_silence);
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		return 0;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	case SNDRV_PCM_STATE_SUSPENDED:
		return -ESTRPIPE;
	default:
		return -EBADFD;
	}
}

/* update to the given appl_ptr and call the ack callback if needed;
 * when an error is returned, roll back to the original value
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
	int ret;

	if (old_appl_ptr == appl_ptr)
		return 0;

	runtime->control->appl_ptr = appl_ptr;
	if (substream->ops->ack) {
		ret = substream->ops->ack(substream);
		if (ret < 0) {
			runtime->control->appl_ptr = old_appl_ptr;
			return ret;
		}
	}

	trace_applptr(substream, old_appl_ptr, appl_ptr);

	return 0;
}
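
/*
 * A minimal sketch of a driver-side .ack op, which pcm_lib_apply_appl_ptr()
 * above invokes whenever appl_ptr moves; the new value is already visible in
 * runtime->control->appl_ptr at that point, and a negative return rolls it
 * back.  foo_hw_queue_up_to() is hypothetical.
 *
 *	static int foo_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		struct snd_pcm_runtime *runtime = substream->runtime;
 *		snd_pcm_uframes_t appl_ptr =
 *			READ_ONCE(runtime->control->appl_ptr);
 *
 *		return foo_hw_queue_up_to(substream, appl_ptr);
 *	}
 */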

/* the common loop for read/write data */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
				     void *data, bool interleaved,
				     snd_pcm_uframes_t size, bool in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	pcm_copy_f writer;
	pcm_transfer_f transfer;
	bool nonblock;
	bool is_playback;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;

	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	if (interleaved) {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
		    runtime->channels > 1)
			return -EINVAL;
		writer = interleaved_copy;
	} else {
		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
			return -EINVAL;
		writer = noninterleaved_copy;
	}

	if (!data) {
		if (is_playback)
			transfer = fill_silence;
		else
			return -EINVAL;
	} else if (in_kernel) {
		if (substream->ops->copy_kernel)
			transfer = substream->ops->copy_kernel;
		else
			transfer = is_playback ?
				default_write_copy_kernel : default_read_copy_kernel;
	} else {
		if (substream->ops->copy_user)
			transfer = (pcm_transfer_f)substream->ops->copy_user;
		else
			transfer = is_playback ?
				default_write_copy : default_read_copy;
	}

	if (size == 0)
		return 0;

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
		goto _end_unlock;

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);

	/*
	 * If size < start_threshold, don't start the capture stream here
	 * but wait indefinitely; another thread may start it instead.
	 */
	if (!is_playback &&
	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
	    size >= runtime->start_threshold) {
		err = snd_pcm_start(substream);
		if (err < 0)
			goto _end_unlock;
	}

	avail = snd_pcm_avail(substream);

	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (!is_playback &&
			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
		appl_ofs = appl_ptr % runtime->buffer_size;
		cont = runtime->buffer_size - appl_ofs;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			err = -EINVAL;
			goto _end_unlock;
		}
		snd_pcm_stream_unlock_irq(substream);
		err = writer(substream, appl_ofs, data, offset, frames,
			     transfer);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
		if (err < 0)
			goto _end_unlock;
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
		if (err < 0)
			goto _end_unlock;

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		if (is_playback &&
		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
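
/*
 * As a usage sketch: __snd_pcm_lib_xfer() is normally not called directly
 * but reached through the inline wrappers declared in <sound/pcm.h>, such as
 * snd_pcm_lib_write()/snd_pcm_lib_read() for user-space buffers and
 * snd_pcm_kernel_write() and friends for kernel buffers.  They boil down to
 * a call of roughly this shape (flag mapping shown for illustration only):
 *
 *	snd_pcm_sframes_t n;
 *
 *	n = __snd_pcm_lib_xfer(substream, buf,
 *			       true,	// interleaved access
 *			       frames,
 *			       true);	// buf is a kernel pointer
 */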

/*
 * standard channel mapping helpers
 */

/* default channel maps for multi-channel playbacks, up to 8 channels */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);

/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
	{ .channels = 1,
	  .map = { SNDRV_CHMAP_MONO } },
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 6,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
	{ .channels = 8,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
	{ }
};
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);

static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
{
	if (ch > info->max_channels)
		return false;
	return !info->channel_mask || (info->channel_mask & (1U << ch));
}

static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = info->max_channels;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for the channel map ctl element;
 * stores the first channel map matching the current number of channels
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (!info->chmap)
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(long) * info->max_channels);
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;
		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;
}

/* tlv callback for the channel map ctl element;
 * expands the pre-defined channel maps in TLV form
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (!info->chmap)
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;
		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
			dst++;
		}
	}
	if (put_user(count, tlv + 1))
		return -EFAULT;
	return 0;
}

static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	info->pcm->streams[info->stream].chmap_kctl = NULL;
	kfree(info);
}

/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: where to store the allocated struct snd_pcm_chmap instance, if non-NULL
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
			   const struct snd_pcm_chmap_elem *chmap,
			   int max_channels,
			   unsigned long private_value,
			   struct snd_pcm_chmap **info_ret)
{
	struct snd_pcm_chmap *info;
	struct snd_kcontrol_new knew = {
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
		.info = pcm_chmap_ctl_info,
		.get = pcm_chmap_ctl_get,
		.tlv.c = pcm_chmap_ctl_tlv,
	};
	int err;

	if (WARN_ON(pcm->streams[stream].chmap_kctl))
		return -EBUSY;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pcm = pcm;
	info->stream = stream;
	info->chmap = chmap;
	info->max_channels = max_channels;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		knew.name = "Playback Channel Map";
	else
		knew.name = "Capture Channel Map";
	knew.device = pcm->device;
	knew.count = pcm->streams[stream].substream_count;
	knew.private_value = private_value;
	info->kctl = snd_ctl_new1(&knew, info);
	if (!info->kctl) {
		kfree(info);
		return -ENOMEM;
	}
	info->kctl->private_free = pcm_chmap_ctl_private_free;
	err = snd_ctl_add(pcm->card, info->kctl);
	if (err < 0)
		return err;
	pcm->streams[stream].chmap_kctl = info->kctl;
	if (info_ret)
		*info_ret = info;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
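
/*
 * A minimal usage sketch for the function above: a driver with a stereo
 * playback stream would typically register the standard channel maps right
 * after creating the PCM; error handling stays with the caller.
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 2, 0, NULL);
 *	if (err < 0)
 *		return err;
 */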