// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA sequencer Timing queue handling
 * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999	Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When the owner client is deleted, all owned queues are deleted, too.
 *     - The owner of an unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - The owner field in the SET_QUEUE_OWNER ioctl must be identical with
 *       the caller client, i.e. changing the owner to a third client is not
 *       allowed.
 *
 *   Aug. 30, 2000	Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked while a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterwards by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
	return num_queues;
}

/*----------------------------------------------------------------*/

/* assign a queue id and insert the queue into the list */
static int queue_list_add(struct snd_seq_queue *q)
{
	int i;

	guard(spinlock_irqsave)(&queue_list_lock);
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (!queue_list[i]) {
			queue_list[i] = q;
			q->queue = i;
			num_queues++;
			return i;
		}
	}
	return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
	struct snd_seq_queue *q;

	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[id];
	if (q) {
		guard(spinlock)(&q->owner_lock);
		if (q->owner == client) {
			/* found */
			q->klocked = 1;
			queue_list[id] = NULL;
			num_queues--;
			return q;
		}
	}
	return NULL;
}

/*----------------------------------------------------------------*/

/* create a new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
	struct snd_seq_queue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	spin_lock_init(&q->owner_lock);
	spin_lock_init(&q->check_lock);
	mutex_init(&q->timer_mutex);
	snd_use_lock_init(&q->use_lock);
	q->queue = -1;

	q->tickq = snd_seq_prioq_new();
	q->timeq = snd_seq_prioq_new();
	q->timer = snd_seq_timer_new();
	if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
		snd_seq_prioq_delete(&q->tickq);
		snd_seq_prioq_delete(&q->timeq);
		snd_seq_timer_delete(&q->timer);
		kfree(q);
		return NULL;
	}

	q->owner = owner;
	q->locked = locked;
	q->klocked = 0;

	return q;
}

/* delete a queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
	/* stop and release the timer */
	mutex_lock(&q->timer_mutex);
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
	mutex_unlock(&q->timer_mutex);
	/* wait until all accesses are released */
	snd_use_lock_sync(&q->use_lock);
	/* release resources... */
	snd_seq_prioq_delete(&q->tickq);
	snd_seq_prioq_delete(&q->timeq);
	snd_seq_timer_delete(&q->timer);

	kfree(q);
}
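
/*
 * A minimal sketch of the reference pattern described in the header
 * comment above: queueptr() takes the queue's use_lock, the matching
 * queuefree() releases it, and queue_delete() waits for all such
 * references in snd_use_lock_sync().  Illustrative only;
 * "do_something" is a hypothetical placeholder.
 *
 *	struct snd_seq_queue *q = queueptr(id);
 *	if (q) {
 *		do_something(q);	// use the queue
 *		queuefree(q);		// release the use_lock from queueptr()
 *	}
 */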

/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
	int i;

	/* clear list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (queue_list[i])
			queue_delete(queue_list[i]);
	}
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return a pointer to the new queue or ERR_PTR(-errno) on error.
 * The new queue's use_lock is set to 1.  It is the caller's responsibility
 * to call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
	struct snd_seq_queue *q;

	q = queue_new(client, locked);
	if (q == NULL)
		return ERR_PTR(-ENOMEM);
	q->info_flags = info_flags;
	queue_use(q, client, 1);
	snd_use_lock_use(&q->use_lock);
	if (queue_list_add(q) < 0) {
		snd_use_lock_free(&q->use_lock);
		queue_delete(q);
		return ERR_PTR(-ENOMEM);
	}
	return q;
}

/* delete a queue - the queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return -EINVAL;
	q = queue_list_remove(queueid, client);
	if (q == NULL)
		return -EINVAL;
	queue_delete(q);

	return 0;
}


/* return a pointer to the queue structure for the specified id */
struct snd_seq_queue *queueptr(int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return NULL;
	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[queueid];
	if (q)
		snd_use_lock_use(&q->use_lock);
	return q;
}

/* return the (first) queue matching the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (q) {
			if (strncmp(q->name, name, sizeof(q->name)) == 0)
				return q;
			queuefree(q);
		}
	}
	return NULL;
}


/* -------------------------------------------------------- */

#define MAX_CELL_PROCESSES_IN_QUEUE	1000

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	snd_seq_tick_time_t cur_tick;
	snd_seq_real_time_t cur_time;
	int processed = 0;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_blocked) {
			q->check_again = 1;
			return;	/* another thread is already checking queues */
		}
		q->check_blocked = 1;
	}

__again:
	/* Process tick queue... */
	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest is processed in the next batch */
	}

	/* Process time queue... */
	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest is processed in the next batch */
	}

out:
	/* free lock */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_again) {
			q->check_again = 0;
			if (processed < MAX_CELL_PROCESSES_IN_QUEUE)
				goto __again;
		}
		q->check_blocked = 0;
	}
}
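
/*
 * Illustration (not code) of the check_blocked/check_again handshake in
 * snd_seq_check_queue() above, assuming two concurrent callers:
 *
 *	CPU0				CPU1
 *	sets check_blocked = 1
 *	dispatches cells ...		sees check_blocked set,
 *					sets check_again = 1, returns
 *	sees check_again set,
 *	clears it, loops to __again
 *	sets check_blocked = 0
 *
 * The second caller never blocks; it delegates the re-check to the
 * thread already inside the dispatch loop.
 */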


/* enqueue an event into a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	int dest, err;
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!cell))
		return -EINVAL;
	dest = cell->event.queue;	/* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;

		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time,
					      &q->timer->cur_time);
			break;
		}
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
	}
	/* enqueue the event in the real-time or tick (MIDI) queue */
	switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
	case SNDRV_SEQ_TIME_STAMP_TICK:
		err = snd_seq_prioq_cell_in(q->tickq, cell);
		break;

	case SNDRV_SEQ_TIME_STAMP_REAL:
	default:
		err = snd_seq_prioq_cell_in(q->timeq, cell);
		break;
	}

	if (err < 0) {
		queuefree(q); /* unlock */
		return err;
	}

	/* trigger dispatching */
	snd_seq_check_queue(q, atomic, hop);

	queuefree(q); /* unlock */

	return 0;
}
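
/*
 * Worked example of the REL -> ABS conversion above (values invented):
 * a tick-timed event enqueued with a relative timestamp of 96 while the
 * queue's current tick is 480 is rewritten to an absolute tick of
 * 480 + 96 = 576 before insertion into q->tickq, and its time-mode flag
 * is switched to SNDRV_SEQ_TIME_MODE_ABS.
 */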


/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
	return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
	int access_ok;

	guard(spinlock_irqsave)(&q->owner_lock);
	access_ok = check_access(q, client);
	if (access_ok)
		q->klocked = 1;
	return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
	guard(spinlock_irqsave)(&q->owner_lock);
	q->klocked = 0;
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int access_ok;

	if (!q)
		return 0;
	scoped_guard(spinlock_irqsave, &q->owner_lock)
		access_ok = check_access(q, client);
	queuefree(q);
	return access_ok;
}
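
/*
 * Summary of the access rule implemented by check_access() above: the
 * owner may always modify the queue; any other client may do so only
 * while the queue is neither locked by its owner (q->locked) nor
 * temporarily kernel-locked (q->klocked).  For example (client numbers
 * invented), with owner == 128 and locked == 1, client 129 gets -EPERM
 * from the queue_access_lock() callers below, while client 128 succeeds.
 */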


/*----------------------------------------------------------------*/

/*
 * change the queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
	struct snd_seq_queue *q = queueptr(queueid);

	if (q == NULL)
		return -EINVAL;

	if (!queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	scoped_guard(spinlock_irqsave, &q->owner_lock) {
		q->locked = locked ? 1 : 0;
		q->owner = client;
	}
	queue_access_unlock(q);
	queuefree(q);

	return 0;
}


/*----------------------------------------------------------------*/

/* open the timer -
 * the q->use mutex should be held before calling this function to avoid
 * a conflict with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
	int result = 0;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	tmr = queue->timer;
	result = snd_seq_timer_open(queue);
	if (result < 0) {
		snd_seq_timer_defaults(tmr);
		result = snd_seq_timer_open(queue);
	}
	queuefree(queue);
	return result;
}

/* close the timer -
 * the q->use mutex should be held before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
	struct snd_seq_queue *queue;
	int result = 0;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	snd_seq_timer_close(queue);
	queuefree(queue);
	return result;
}

/* change the queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
				  struct snd_seq_queue_tempo *info)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int result;

	if (q == NULL)
		return -EINVAL;
	if (!queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
	if (result >= 0 && info->skew_base > 0)
		result = snd_seq_timer_set_skew(q->timer, info->skew_value,
						info->skew_base);
	queue_access_unlock(q);
	queuefree(q);
	return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
}

/* use or unuse this queue -
 * if it is the first client, the timer is opened with default settings;
 * if no client uses it any longer, the timer is closed.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
	struct snd_seq_queue *queue;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	mutex_lock(&queue->timer_mutex);
	queue_use(queue, client, use);
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);
	return 0;
}

/*
 * check whether the queue is used by the client;
 * return a negative value if the queue is invalid,
 * 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
	struct snd_seq_queue *q;
	int result;

	q = queueptr(queueid);
	if (q == NULL)
		return -EINVAL; /* invalid queue */
	result = test_bit(client, q->clients_bitmap) ? 1 : 0;
	queuefree(q);
	return result;
}


/*----------------------------------------------------------------*/

/* final stage notification -
 * remove cells belonging to the client that no longer exists
 * (for non-owned queues) or delete the queue (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
	int i;
	struct snd_seq_queue *q;

	/* delete own queues from the queue list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queue_list_remove(i, client);
		if (q)
			queue_delete(q);
	}

	/* remove cells from the remaining queues -
	 * they are not owned by this client
	 */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
		queuefree(q);
	}
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		snd_seq_prioq_leave(q->tickq, client, 0);
		snd_seq_prioq_leave(q->timeq, client, 0);
		queuefree(q);
	}
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap) &&
		    (!(info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
		     q->queue == info->queue)) {
			snd_seq_prioq_remove_events(q->tickq, client, info);
			snd_seq_prioq_remove_events(q->timeq, client, info);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
				  int atomic, int hop)
{
	struct snd_seq_event sev;

	sev = *ev;

	sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
	sev.time.tick = q->timer->tick.cur_tick;
	sev.queue = q->queue;
	sev.data.queue.queue = q->queue;

	/* broadcast events from the Timer port */
	sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (!snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (!snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;
	}
}


/*
 * Queue control via the timer control port:
 * this function is exported as a callback of the timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!ev))
		return -EINVAL;
	q = queueptr(ev->data.queue.queue);

	if (q == NULL)
		return -EINVAL;

	if (!queue_access_lock(q, ev->source.client)) {
		queuefree(q);
		return -EPERM;
	}

	snd_seq_queue_process_event(q, ev, atomic, hop);

	queue_access_unlock(q);
	queuefree(q);
	return 0;
}
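
/*
 * Sketch of a queue-control event as consumed above (field values are
 * illustrative; "my_client" and "my_queue" are hypothetical).  A tempo
 * of 500000 us per quarter note corresponds to 60000000 / 500000 =
 * 120 BPM, matching the conversion used in the proc output below.
 *
 *	struct snd_seq_event ev = {
 *		.type = SNDRV_SEQ_EVENT_TEMPO,
 *		.source.client = my_client,
 *		.data.queue.queue = my_queue,
 *		.data.queue.param.value = 500000,
 *	};
 *	err = snd_seq_control_queue(&ev, atomic, hop);
 */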


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	int i, bpm;
	struct snd_seq_queue *q;
	struct snd_seq_timer *tmr;
	bool locked;
	int owner;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;

		tmr = q->timer;
		/* tempo is in microseconds per quarter note */
		if (tmr->tempo)
			bpm = 60000000 / tmr->tempo;
		else
			bpm = 0;

		scoped_guard(spinlock_irq, &q->owner_lock) {
			locked = q->locked;
			owner = q->owner;
		}

		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client    : %d\n", owner);
		snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
		snd_iprintf(buffer, "current BPM        : %d\n", bpm);
		snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
		queuefree(q);
	}
}
#endif /* CONFIG_SND_PROC_FS */
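
/*
 * Example of the proc output produced by snd_seq_info_queues_read()
 * above for one queue (all values invented for illustration):
 *
 *	queue 0: [default]
 *	owned by client    : 128
 *	lock status        : Free
 *	queued time events : 0
 *	queued tick events : 0
 *	timer state        : Running
 *	timer PPQ          : 96
 *	current tempo      : 500000
 *	current BPM        : 120
 *	current time       : 12.345678901 s
 *	current tick       : 2304
 */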