// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999	Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical to the
 *       caller client, i.e. changing owner to a third client is not
 *       allowed.
 *
 *   Aug. 30, 2000	Takashi Iwai
 *     - Queues are managed in a static array again, but in a better way.
 *       The API itself is identical.
 *     - The queue is locked when a struct snd_seq_queue pointer is returned
 *       via queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
        return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
        int i;

        guard(spinlock_irqsave)(&queue_list_lock);
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (! queue_list[i]) {
                        queue_list[i] = q;
                        q->queue = i;
                        num_queues++;
                        return i;
                }
        }
        return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
        struct snd_seq_queue *q;

        guard(spinlock_irqsave)(&queue_list_lock);
        q = queue_list[id];
        if (q) {
                guard(spinlock)(&q->owner_lock);
                if (q->owner == client) {
                        /* found */
                        q->klocked = 1;
                        queue_list[id] = NULL;
                        num_queues--;
                        return q;
                }
        }
        return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
        struct snd_seq_queue *q;

        q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                return NULL;

        spin_lock_init(&q->owner_lock);
        spin_lock_init(&q->check_lock);
        mutex_init(&q->timer_mutex);
        snd_use_lock_init(&q->use_lock);
        q->queue = -1;

        q->tickq = snd_seq_prioq_new();
        q->timeq = snd_seq_prioq_new();
        q->timer = snd_seq_timer_new();
        if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
                snd_seq_prioq_delete(&q->tickq);
                snd_seq_prioq_delete(&q->timeq);
                snd_seq_timer_delete(&q->timer);
                kfree(q);
                return NULL;
        }

        q->owner = owner;
        q->locked = locked;
        q->klocked = 0;

        return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
        /* stop and release the timer */
        mutex_lock(&q->timer_mutex);
        snd_seq_timer_stop(q->timer);
        snd_seq_timer_close(q);
        mutex_unlock(&q->timer_mutex);
        /* wait until access free */
        snd_use_lock_sync(&q->use_lock);
        /* release resources... */
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);

        kfree(q);
}
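
/* Note on the teardown order in queue_delete() above: the timer is
 * stopped and closed first (under timer_mutex), then snd_use_lock_sync()
 * blocks until every reader that obtained this queue via queueptr() has
 * dropped its reference, and only then are the prioqs and the timer
 * freed.  Releasing the resources before the sync could let a
 * concurrent dispatcher run on freed cells.
 */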

/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
        int i;

        /* clear list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                if (queue_list[i])
                        queue_delete(queue_list[i]);
        }
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
        struct snd_seq_queue *q;

        q = queue_new(client, locked);
        if (q == NULL)
                return ERR_PTR(-ENOMEM);
        q->info_flags = info_flags;
        queue_use(q, client, 1);
        snd_use_lock_use(&q->use_lock);
        if (queue_list_add(q) < 0) {
                snd_use_lock_free(&q->use_lock);
                queue_delete(q);
                return ERR_PTR(-ENOMEM);
        }
        return q;
}

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return -EINVAL;
        q = queue_list_remove(queueid, client);
        if (q == NULL)
                return -EINVAL;
        queue_delete(q);

        return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
        struct snd_seq_queue *q;

        if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
                return NULL;
        guard(spinlock_irqsave)(&queue_list_lock);
        q = queue_list[queueid];
        if (q)
                snd_use_lock_use(&q->use_lock);
        return q;
}

/* return the (first) queue matching with the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
        int i;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                struct snd_seq_queue *q __free(snd_seq_queue) =
                        queueptr(i);

                if (q) {
                        if (strncmp(q->name, name, sizeof(q->name)) == 0)
                                return no_free_ptr(q);
                }
        }
        return NULL;
}
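
/* A minimal sketch of the lookup protocol used throughout this file:
 * queueptr() takes q->use_lock, and the __free(snd_seq_queue) cleanup
 * attribute drops it automatically when the pointer goes out of scope;
 * no_free_ptr() opts out of the cleanup when ownership is handed to the
 * caller, as in snd_seq_queue_find_name() above.
 *
 *	{
 *		struct snd_seq_queue *q __free(snd_seq_queue) =
 *			queueptr(id);
 *
 *		if (!q)
 *			return -EINVAL;
 *		...		// q cannot be deleted in this scope
 *	}			// use_lock released here
 */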

/* -------------------------------------------------------- */

#define MAX_CELL_PROCESSES_IN_QUEUE	1000

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
        struct snd_seq_event_cell *cell;
        snd_seq_tick_time_t cur_tick;
        snd_seq_real_time_t cur_time;
        int processed = 0;

        if (q == NULL)
                return;

        /* make this function non-reentrant */
        scoped_guard(spinlock_irqsave, &q->check_lock) {
                if (q->check_blocked) {
                        q->check_again = 1;
                        return; /* other thread is already checking queues */
                }
                q->check_blocked = 1;
        }

 __again:
        /* Process tick queue... */
        cur_tick = snd_seq_timer_get_cur_tick(q->timer);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
                if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
                        goto out; /* the rest is processed in the next batch */
        }

        /* Process time queue... */
        cur_time = snd_seq_timer_get_cur_time(q->timer, false);
        for (;;) {
                cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
                if (!cell)
                        break;
                snd_seq_dispatch_event(cell, atomic, hop);
                if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
                        goto out; /* the rest is processed in the next batch */
        }

 out:
        /* free lock */
        scoped_guard(spinlock_irqsave, &q->check_lock) {
                if (q->check_again) {
                        q->check_again = 0;
                        if (processed < MAX_CELL_PROCESSES_IN_QUEUE)
                                goto __again;
                }
                q->check_blocked = 0;
        }
}


/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
        int dest, err;

        if (snd_BUG_ON(!cell))
                return -EINVAL;
        dest = cell->event.queue;	/* destination queue */

        struct snd_seq_queue *q __free(snd_seq_queue) =
                queueptr(dest);
        if (q == NULL)
                return -EINVAL;
        /* handle relative time stamps, convert them into absolute */
        if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
                switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
                case SNDRV_SEQ_TIME_STAMP_TICK:
                        cell->event.time.tick += q->timer->tick.cur_tick;
                        break;

                case SNDRV_SEQ_TIME_STAMP_REAL:
                        snd_seq_inc_real_time(&cell->event.time.time,
                                              &q->timer->cur_time);
                        break;
                }
                cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
                cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
        }
        /* enqueue event in the real-time or midi queue */
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
                err = snd_seq_prioq_cell_in(q->tickq, cell);
                break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
        default:
                err = snd_seq_prioq_cell_in(q->timeq, cell);
                break;
        }

        if (err < 0)
                return err;

        /* trigger dispatching */
        snd_seq_check_queue(q, atomic, hop);

        return 0;
}
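
/* Worked example for the relative-to-absolute conversion above, using
 * hypothetical numbers: an event carrying SNDRV_SEQ_TIME_MODE_REL |
 * SNDRV_SEQ_TIME_STAMP_TICK with time.tick = 96, enqueued while
 * q->timer->tick.cur_tick = 1920, is rewritten in place to an absolute
 * stamp of tick 2016 and flagged SNDRV_SEQ_TIME_MODE_ABS before it is
 * inserted into q->tickq.  The prioq orders it against the other
 * absolute stamps, and snd_seq_check_queue() dispatches it once the
 * queue timer reaches that tick.
 */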

/*----------------------------------------------------------------*/

static inline int check_access(struct snd_seq_queue *q, int client)
{
        return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
        int access_ok;

        guard(spinlock_irqsave)(&q->owner_lock);
        access_ok = check_access(q, client);
        if (access_ok)
                q->klocked = 1;
        return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
        guard(spinlock_irqsave)(&q->owner_lock);
        q->klocked = 0;
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
        struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);

        if (! q)
                return 0;
        guard(spinlock_irqsave)(&q->owner_lock);
        return check_access(q, client);
}

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
        struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);

        if (q == NULL)
                return -EINVAL;

        if (!queue_access_lock(q, client))
                return -EPERM;

        scoped_guard(spinlock_irqsave, &q->owner_lock) {
                q->locked = locked ? 1 : 0;
                q->owner = client;
        }
        queue_access_unlock(q);

        return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
        int result = 0;
        struct snd_seq_timer *tmr;
        struct snd_seq_queue *queue __free(snd_seq_queue) =
                queueptr(queueid);

        if (queue == NULL)
                return -EINVAL;
        tmr = queue->timer;
        result = snd_seq_timer_open(queue);
        if (result < 0) {
                snd_seq_timer_defaults(tmr);
                result = snd_seq_timer_open(queue);
        }
        return result;
}

/* close timer -
 * q->use mutex should be down before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
        int result = 0;
        struct snd_seq_queue *queue __free(snd_seq_queue) =
                queueptr(queueid);

        if (queue == NULL)
                return -EINVAL;
        snd_seq_timer_close(queue);
        return result;
}

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                                  struct snd_seq_queue_tempo *info)
{
        struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
        int result;

        if (q == NULL)
                return -EINVAL;
        if (!queue_access_lock(q, client))
                return -EPERM;

        result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
                                             info->tempo_base);
        if (result >= 0 && info->skew_base > 0)
                result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                                                info->skew_base);
        queue_access_unlock(q);
        return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
        } else {
                if (test_and_clear_bit(client, queue->clients_bitmap))
                        queue->clients--;
        }
        if (queue->clients) {
                if (use && queue->clients == 1)
                        snd_seq_timer_defaults(queue->timer);
                snd_seq_timer_open(queue);
        } else {
                snd_seq_timer_close(queue);
        }
}

/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
        struct snd_seq_queue *queue __free(snd_seq_queue) =
                queueptr(queueid);

        if (queue == NULL)
                return -EINVAL;
        guard(mutex)(&queue->timer_mutex);
        queue_use(queue, client, use);
        return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
        struct snd_seq_queue *q __free(snd_seq_queue) =
                queueptr(queueid);

        if (q == NULL)
                return -EINVAL; /* invalid queue */
        return test_bit(client, q->clients_bitmap) ? 1 : 0;
}
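
/* Sketch of the use-count transitions handled by queue_use() above,
 * with hypothetical clients A and B on queue id qid:
 *
 *	snd_seq_queue_use(qid, A, 1);	// clients 0 -> 1: timer gets
 *					//  defaults and is opened
 *	snd_seq_queue_use(qid, B, 1);	// clients 1 -> 2: timer opened
 *					//  again, settings preserved
 *	snd_seq_queue_use(qid, A, 0);	// clients 2 -> 1: timer kept open
 *	snd_seq_queue_use(qid, B, 0);	// clients 1 -> 0: timer closed
 */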

/*----------------------------------------------------------------*/

/* final stage notification -
 * remove cells for a client that no longer exists (for non-owned queues)
 * or delete the queue (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
        int i;

        /* delete own queues from queue list */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                struct snd_seq_queue *q = queue_list_remove(i, client);
                if (q)
                        queue_delete(q);
        }

        /* remove cells from existing queues -
         * they are not owned by this client
         */
        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
                if (!q)
                        continue;
                if (test_bit(client, q->clients_bitmap)) {
                        snd_seq_prioq_leave(q->tickq, client, 0);
                        snd_seq_prioq_leave(q->timeq, client, 0);
                        snd_seq_queue_use(q->queue, client, 0);
                }
        }
}


/*----------------------------------------------------------------*/

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
        int i;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
                if (!q)
                        continue;
                if (test_bit(client, q->clients_bitmap) &&
                    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
                     q->queue == info->queue)) {
                        snd_seq_prioq_remove_events(q->tickq, client, info);
                        snd_seq_prioq_remove_events(q->timeq, client, info);
                }
        }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                                  int atomic, int hop)
{
        struct snd_seq_event sev;

        sev = *ev;

        sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
        sev.time.tick = q->timer->tick.cur_tick;
        sev.queue = q->queue;
        sev.data.queue.queue = q->queue;

        /* broadcast events from Timer port */
        sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
        sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
        sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
        snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                                        struct snd_seq_event *ev,
                                        int atomic, int hop)
{
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_START:
                snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
                snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
                if (! snd_seq_timer_start(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_CONTINUE:
                if (! snd_seq_timer_continue(q->timer))
                        queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_STOP:
                snd_seq_timer_stop(q->timer);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_TEMPO:
                snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
                queue_broadcast_event(q, ev, atomic, hop);
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TICK:
                if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_SETPOS_TIME:
                if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;

        case SNDRV_SEQ_EVENT_QUEUE_SKEW:
                if (snd_seq_timer_set_skew(q->timer,
                                           ev->data.queue.param.skew.value,
                                           ev->data.queue.param.skew.base) == 0) {
                        queue_broadcast_event(q, ev, atomic, hop);
                }
                break;
        }
}
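
/* A minimal, hypothetical sketch of a queue-control event as it reaches
 * the handler above via snd_seq_control_queue() below; my_client and
 * my_queue_id are placeholders.  The source client must pass
 * queue_access_lock(), and the addressed queue is taken from
 * ev->data.queue.queue:
 *
 *	struct snd_seq_event ev = {
 *		.type = SNDRV_SEQ_EVENT_START,
 *		.source.client = my_client,
 *		.data.queue.queue = my_queue_id,
 *	};
 *	err = snd_seq_control_queue(&ev, atomic, hop);
 */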

/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
        if (snd_BUG_ON(!ev))
                return -EINVAL;

        struct snd_seq_queue *q __free(snd_seq_queue) =
                queueptr(ev->data.queue.queue);

        if (q == NULL)
                return -EINVAL;

        if (!queue_access_lock(q, ev->source.client))
                return -EPERM;

        snd_seq_queue_process_event(q, ev, atomic, hop);

        queue_access_unlock(q);
        return 0;
}


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                              struct snd_info_buffer *buffer)
{
        int i, bpm;
        struct snd_seq_timer *tmr;
        bool locked;
        int owner;

        for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
                struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
                if (!q)
                        continue;

                tmr = q->timer;
                if (tmr->tempo)
                        bpm = (60000 * tmr->tempo_base) / tmr->tempo;
                else
                        bpm = 0;

                scoped_guard(spinlock_irq, &q->owner_lock) {
                        locked = q->locked;
                        owner = q->owner;
                }

                snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
                snd_iprintf(buffer, "owned by client    : %d\n", owner);
                snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
                snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
                snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
                snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
                snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
                snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
                snd_iprintf(buffer, "tempo base         : %d ns\n", tmr->tempo_base);
                snd_iprintf(buffer, "current BPM        : %d\n", bpm);
                snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
                snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
                snd_iprintf(buffer, "\n");
        }
}
#endif /* CONFIG_SND_PROC_FS */