Lines Matching defs:q
50 static int queue_list_add(struct snd_seq_queue *q)
57 queue_list[i] = q;
58 q->queue = i;
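The matches at lines 50-58 are the queue registry: the first free slot of a fixed-size pointer table is claimed for the new queue and the slot index is written back into the object as its id. A minimal sketch of that slot-table pattern; struct my_obj, obj_table and obj_table_add are hypothetical names, and serialization of the table, which the real code handles with its own lock, is left out:

#define MAX_OBJS        32

struct my_obj {
        int id;                         /* plays the role of q->queue */
};

static struct my_obj *obj_table[MAX_OBJS];

/* Claim the first free entry and record its index in the object so later
 * lookups and removals can address it by id.  Returns the id or -1. */
static int obj_table_add(struct my_obj *o)
{
        int i;

        for (i = 0; i < MAX_OBJS; i++) {
                if (!obj_table[i]) {
                        obj_table[i] = o;
                        o->id = i;
                        return i;
                }
        }
        return -1;                      /* table full */
}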
68 struct snd_seq_queue *q;
71 q = queue_list[id];
72 if (q) {
73 guard(spinlock)(&q->owner_lock);
74 if (q->owner == client) {
76 q->klocked = 1;
79 return q;
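Lines 68-79 use the scope-based lock guard from <linux/cleanup.h>: guard(spinlock)(&q->owner_lock) acquires the lock and releases it automatically when the enclosing scope is left, so the early return of q inside the owner check needs no explicit unlock. A minimal, hypothetical illustration of the same construct (my_lock and my_flag are made up; kernel build context assumed):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static int my_flag;

/* The guard drops my_lock on every exit path, including the early return,
 * just like the owner_lock guard in the lines above. */
static int claim_flag(void)
{
        guard(spinlock)(&my_lock);
        if (my_flag)
                return -EBUSY;
        my_flag = 1;
        return 0;
}

The guard(spinlock_irqsave) and scoped_guard(...) forms seen later in the listing are the same mechanism with an irqsave lock class and an explicit block scope.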
90 struct snd_seq_queue *q;
92 q = kzalloc(sizeof(*q), GFP_KERNEL);
93 if (!q)
96 spin_lock_init(&q->owner_lock);
97 spin_lock_init(&q->check_lock);
98 mutex_init(&q->timer_mutex);
99 snd_use_lock_init(&q->use_lock);
100 q->queue = -1;
102 q->tickq = snd_seq_prioq_new();
103 q->timeq = snd_seq_prioq_new();
104 q->timer = snd_seq_timer_new();
105 if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
106 snd_seq_prioq_delete(&q->tickq);
107 snd_seq_prioq_delete(&q->timeq);
108 snd_seq_timer_delete(&q->timer);
109 kfree(q);
113 q->owner = owner;
114 q->locked = locked;
115 q->klocked = 0;
117 return q;
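Lines 90-117 are the constructor: the queue is zero-allocated, its locks and mutex are initialized, and the two priority queues plus the timer are allocated up front; if any of them is missing, the delete helpers are invoked unconditionally (lines 105-108, so they must tolerate NULL) and the half-built queue is freed. A condensed sketch of that all-or-nothing pattern; part_new()/part_delete() are hypothetical stand-ins for the prioq/timer constructors and destructors:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct part;
struct part *part_new(void);            /* hypothetical sub-allocator */
void part_delete(struct part **p);      /* hypothetical, NULL-safe like the real delete helpers */

struct whole {
        spinlock_t lock;
        struct part *a;
        struct part *b;
};

static struct whole *whole_new(void)
{
        struct whole *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return NULL;
        spin_lock_init(&w->lock);

        w->a = part_new();
        w->b = part_new();
        if (!w->a || !w->b) {
                /* roll everything back; NULL-safe deleters make this unconditional */
                part_delete(&w->a);
                part_delete(&w->b);
                kfree(w);
                return NULL;
        }
        return w;
}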
121 static void queue_delete(struct snd_seq_queue *q)
124 mutex_lock(&q->timer_mutex);
125 snd_seq_timer_stop(q->timer);
126 snd_seq_timer_close(q);
127 mutex_unlock(&q->timer_mutex);
129 snd_use_lock_sync(&q->use_lock);
131 snd_seq_prioq_delete(&q->tickq);
132 snd_seq_prioq_delete(&q->timeq);
133 snd_seq_timer_delete(&q->timer);
135 kfree(q);
158 * call snd_use_lock_free(&q->use_lock).
162 struct snd_seq_queue *q;
164 q = queue_new(client, locked);
165 if (q == NULL)
167 q->info_flags = info_flags;
168 queue_use(q, client, 1);
169 snd_use_lock_use(&q->use_lock);
170 if (queue_list_add(q) < 0) {
171 snd_use_lock_free(&q->use_lock);
172 queue_delete(q);
175 return q;
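Lines 158-175 show the use_lock discipline around a freshly created queue: a use reference is taken before the queue is published through queue_list_add(), and if publishing fails the reference is dropped again and the queue deleted; the comment fragment at line 158 reminds the caller that whoever receives the pointer must eventually call snd_use_lock_free(). Together with snd_use_lock_sync() at line 129, the helpers behave like a counter of in-flight users that the deleter drains before freeing. A simplified, hypothetical analogue of that counter (obj_use/obj_unuse/obj_sync are made-up names; the real helpers have their own wait loop):

#include <linux/atomic.h>
#include <linux/delay.h>

struct used_obj {
        atomic_t use_count;             /* readers currently holding the object */
};

static void obj_use(struct used_obj *o)         /* reader enters */
{
        atomic_inc(&o->use_count);
}

static void obj_unuse(struct used_obj *o)       /* reader leaves */
{
        atomic_dec(&o->use_count);
}

static void obj_sync(struct used_obj *o)        /* deleter waits for readers to drain */
{
        while (atomic_read(&o->use_count) > 0)
                msleep(1);
}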
181 struct snd_seq_queue *q;
185 q = queue_list_remove(queueid, client);
186 if (q == NULL)
188 queue_delete(q);
197 struct snd_seq_queue *q;
202 q = queue_list[queueid];
203 if (q)
204 snd_use_lock_use(&q->use_lock);
205 return q;
214 struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
215 q = queueptr(i);
216 if (q) {
217 if (strncmp(q->name, name, sizeof(q->name)) == 0)
218 return no_free_ptr(q);
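Lines 214-218 rely on the __free() auto-cleanup attribute: q is released automatically whenever the loop body is left, and only a matching name hands the still-referenced pointer to the caller through no_free_ptr(). A minimal sketch of such a cleanup class for a hypothetical refcounted item; item_get()/item_put() are invented stand-ins for queueptr() and the corresponding use-lock release:

#include <linux/cleanup.h>
#include <linux/string.h>

struct item {
        char name[16];
};

struct item *item_get(int id);          /* hypothetical: returns the item with a reference held, or NULL */
void item_put(struct item *it);         /* hypothetical: drops that reference */

/* Declares the cleanup class named "item" used by __free(item) below. */
DEFINE_FREE(item, struct item *, if (_T) item_put(_T))

#define MAX_ITEMS       8

static struct item *item_find_name(const char *name)
{
        int i;

        for (i = 0; i < MAX_ITEMS; i++) {
                struct item *it __free(item) = item_get(i);

                if (!it)
                        continue;
                if (!strcmp(it->name, name))
                        return no_free_ptr(it); /* keep the reference for the caller */
                /* on any other path item_put() runs automatically here */
        }
        return NULL;
}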
229 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
236 if (q == NULL)
240 scoped_guard(spinlock_irqsave, &q->check_lock) {
241 if (q->check_blocked) {
242 q->check_again = 1;
245 q->check_blocked = 1;
250 cur_tick = snd_seq_timer_get_cur_tick(q->timer);
252 cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
261 cur_time = snd_seq_timer_get_cur_time(q->timer, false);
263 cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
273 scoped_guard(spinlock_irqsave, &q->check_lock) {
274 if (q->check_again) {
275 q->check_again = 0;
279 q->check_blocked = 0;
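Lines 229-279 make the queue check non-reentrant: a caller that arrives while another check is running only raises check_again and leaves, and the running caller loops back as long as that flag was set while it was draining the tick and realtime priority queues. A condensed sketch of that blocked/again handshake; drain_due_events() is a hypothetical placeholder for the prioq processing, and the real code additionally caps how often it loops:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(check_lock);
static bool check_blocked;              /* a check is currently running */
static bool check_again;                /* new work arrived while it ran */

static void drain_due_events(void);     /* hypothetical: dispatch whatever is due now */

static void check_queue(void)
{
        /* non-reentrant: losers just flag the checker that is already running */
        scoped_guard(spinlock_irqsave, &check_lock) {
                if (check_blocked) {
                        check_again = true;
                        return;
                }
                check_blocked = true;
        }

again:
        drain_due_events();

        scoped_guard(spinlock_irqsave, &check_lock) {
                if (check_again) {
                        check_again = false;
                        goto again;     /* leaving the scope releases the lock first */
                }
                check_blocked = false;
        }
}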
288 struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
293 q = queueptr(dest);
294 if (q == NULL)
300 cell->event.time.tick += q->timer->tick.cur_tick;
305 &q->timer->cur_time);
314 err = snd_seq_prioq_cell_in(q->tickq, cell);
319 err = snd_seq_prioq_cell_in(q->timeq, cell);
327 snd_seq_check_queue(q, atomic, hop);
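Lines 288-327 are the enqueue path: the destination queue is looked up with a use reference held, a relative timestamp is rebased onto the queue's current tick or realtime position (lines 300-305), the cell goes into the matching priority queue, and snd_seq_check_queue() runs afterwards so an already-due event fires immediately. A tiny sketch of just the timestamp rebase, with hypothetical field names:

#include <linux/types.h>

/* Hypothetical tick-scheduled event: a relative offset becomes an absolute
 * tick against the queue's current position, so the priority queue only
 * ever compares absolute timestamps. */
struct tick_event {
        bool relative;
        unsigned int tick;
};

static void rebase_to_absolute(struct tick_event *ev, unsigned int cur_tick)
{
        if (ev->relative) {
                ev->tick += cur_tick;
                ev->relative = false;
        }
}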
335 static inline int check_access(struct snd_seq_queue *q, int client)
337 return (q->owner == client) || (!q->locked && !q->klocked);
343 static int queue_access_lock(struct snd_seq_queue *q, int client)
347 guard(spinlock_irqsave)(&q->owner_lock);
348 access_ok = check_access(q, client);
350 q->klocked = 1;
355 static inline void queue_access_unlock(struct snd_seq_queue *q)
357 guard(spinlock_irqsave)(&q->owner_lock);
358 q->klocked = 0;
364 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
366 if (! q)
368 guard(spinlock_irqsave)(&q->owner_lock);
369 return check_access(q, client);
379 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
381 if (q == NULL)
384 if (!queue_access_lock(q, client))
387 scoped_guard(spinlock_irqsave, &q->owner_lock) {
388 q->locked = locked ? 1 : 0;
389 q->owner = client;
391 queue_access_unlock(q);
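Lines 335-391 form the access-control cluster: check_access() admits the owner, or anyone if the queue is neither user-locked (locked) nor temporarily kernel-locked (klocked); queue_access_lock() performs that check and sets klocked inside one owner_lock critical section, and the owner/locked fields are rewritten only while that temporary lock is held. A condensed, hypothetical sketch of the same scheme (struct res and the res_*() names are invented; the caller is assumed to have initialized owner_lock):

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct res {
        spinlock_t owner_lock;
        int owner;
        bool locked;            /* user-requested lock: only the owner may touch it */
        bool klocked;           /* transient kernel-side lock while an update runs */
};

/* Caller must hold owner_lock. */
static bool res_may_access(const struct res *r, int client)
{
        return r->owner == client || (!r->locked && !r->klocked);
}

static bool res_access_lock(struct res *r, int client)
{
        guard(spinlock_irqsave)(&r->owner_lock);
        if (!res_may_access(r, client))
                return false;
        r->klocked = true;      /* keep other clients out until res_access_unlock() */
        return true;
}

static void res_access_unlock(struct res *r)
{
        guard(spinlock_irqsave)(&r->owner_lock);
        r->klocked = false;
}

static bool res_set_owner(struct res *r, int client, bool locked)
{
        if (!res_access_lock(r, client))
                return false;
        scoped_guard(spinlock_irqsave, &r->owner_lock) {
                r->locked = locked;
                r->owner = client;
        }
        res_access_unlock(r);
        return true;
}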
400 * q->use mutex should be down before calling this function to avoid
422 * q->use mutex should be down before calling this function
440 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
443 if (q == NULL)
445 if (!queue_access_lock(q, client))
448 result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
451 result = snd_seq_timer_set_skew(q->timer, info->skew_value,
453 queue_access_unlock(q);
499 struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
501 q = queueptr(queueid);
502 if (q == NULL)
504 return test_bit(client, q->clients_bitmap) ? 1 : 0;
520 struct snd_seq_queue *q = queue_list_remove(i, client);
521 if (q)
522 queue_delete(q);
529 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
530 if (!q)
532 if (test_bit(client, q->clients_bitmap)) {
533 snd_seq_prioq_leave(q->tickq, client, 0);
534 snd_seq_prioq_leave(q->timeq, client, 0);
535 snd_seq_queue_use(q->queue, client, 0);
550 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
551 if (!q)
553 if (test_bit(client, q->clients_bitmap) &&
555 q->queue == info->queue)) {
556 snd_seq_prioq_remove_events(q->tickq, client, info);
557 snd_seq_prioq_remove_events(q->timeq, client, info);
567 static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
575 sev.time.tick = q->timer->tick.cur_tick;
576 sev.queue = q->queue;
577 sev.data.queue.queue = q->queue;
590 static void snd_seq_queue_process_event(struct snd_seq_queue *q,
596 snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
597 snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
598 if (! snd_seq_timer_start(q->timer))
599 queue_broadcast_event(q, ev, atomic, hop);
603 if (! snd_seq_timer_continue(q->timer))
604 queue_broadcast_event(q, ev, atomic, hop);
608 snd_seq_timer_stop(q->timer);
609 queue_broadcast_event(q, ev, atomic, hop);
613 snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
614 queue_broadcast_event(q, ev, atomic, hop);
618 if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
619 queue_broadcast_event(q, ev, atomic, hop);
624 if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
625 queue_broadcast_event(q, ev, atomic, hop);
629 if (snd_seq_timer_set_skew(q->timer,
632 queue_broadcast_event(q, ev, atomic, hop);
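Lines 590-632 dispatch queue-control events: START first removes the source client's queued cells from both priority queues, each control then drives the queue's timer, and only a successful timer operation is re-broadcast to subscribers via queue_broadcast_event(). A reduced sketch of that act-then-broadcast-on-success shape; struct tmr, the timer_*() calls and broadcast() are hypothetical stand-ins:

struct tmr;

int timer_start(struct tmr *t);                 /* hypothetical: 0 on success */
int timer_continue(struct tmr *t);
void timer_stop(struct tmr *t);
int timer_set_tempo(struct tmr *t, int value);
void broadcast(struct tmr *t, int type);

enum { CTL_START, CTL_CONTINUE, CTL_STOP, CTL_TEMPO };

static void process_ctl(struct tmr *t, int type, int value)
{
        switch (type) {
        case CTL_START:
                if (!timer_start(t))
                        broadcast(t, type);
                break;
        case CTL_CONTINUE:
                if (!timer_continue(t))
                        broadcast(t, type);
                break;
        case CTL_STOP:
                timer_stop(t);
                broadcast(t, type);
                break;
        case CTL_TEMPO:
                if (!timer_set_tempo(t, value))
                        broadcast(t, type);
                break;
        }
}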
645 struct snd_seq_queue *q __free(snd_seq_queue) = NULL;
649 q = queueptr(ev->data.queue.queue);
651 if (q == NULL)
654 if (!queue_access_lock(q, ev->source.client))
657 snd_seq_queue_process_event(q, ev, atomic, hop);
659 queue_access_unlock(q);
677 struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
678 if (!q)
681 tmr = q->timer;
687 scoped_guard(spinlock_irq, &q->owner_lock) {
688 locked = q->locked;
689 owner = q->owner;
692 snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
695 snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
696 snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));