Searched refs:q_busy (Results 1 – 6 of 6) sorted by relevance
 56 if (q == &s->q_busy && in _cx18_enqueue()
140 spin_lock(&s->q_busy.lock); in cx18_queue_get_mdl()
141 list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) { in cx18_queue_get_mdl()
151 if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) { in cx18_queue_get_mdl()
158 atomic_dec(&s->q_busy.depth); in cx18_queue_get_mdl()
167 atomic_dec(&s->q_busy.depth); in cx18_queue_get_mdl()
171 spin_unlock(&s->q_busy.lock); in cx18_queue_get_mdl()
201 if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy) in cx18_queue_flush()
223 cx18_queue_flush(s, &s->q_busy, &s->q_free); in cx18_flush_queues()
238 cx18_queue_flush(s, &s->q_busy, q_idle); in cx18_unload_queues()
396 struct cx18_queue q_busy; /* busy - in use by firmware */ (member)
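The cx18 hits above all follow one pattern: MDLs handed to the firmware sit on a dedicated q_busy queue, and the driver reclaims them under the queue's spinlock while keeping an atomic depth counter in step with the list. The sketch below illustrates that lock/walk/decrement pattern using simplified, hypothetical demo_queue and demo_mdl structures; the real cx18_queue and cx18_mdl definitions carry more state, so treat this as an illustration of the technique rather than the driver's actual code.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/* Simplified stand-ins for cx18's queue and MDL structures (hypothetical). */
struct demo_queue {
	struct list_head list;   /* entries currently owned by this queue */
	atomic_t depth;          /* number of entries on the list         */
	spinlock_t lock;         /* protects both list and depth          */
};

struct demo_mdl {
	struct list_head list;
	int id;
	int skipped;             /* how often this entry was passed over  */
};

static void demo_queue_init(struct demo_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	atomic_set(&q->depth, 0);
	spin_lock_init(&q->lock);
}

/* Pull the entry with a matching id off the busy queue, mirroring the
 * lock / list_for_each_entry_safe / atomic_dec sequence seen in
 * cx18_queue_get_mdl(). Entries that are not the one we want record a
 * skip; the matching one is unlinked and the depth counter updated. */
static struct demo_mdl *demo_get_mdl(struct demo_queue *q_busy, int id)
{
	struct demo_mdl *mdl, *tmp, *ret = NULL;

	spin_lock(&q_busy->lock);
	list_for_each_entry_safe(mdl, tmp, &q_busy->list, list) {
		if (mdl->id != id) {
			mdl->skipped++;
			continue;
		}
		list_del_init(&mdl->list);
		atomic_dec(&q_busy->depth);
		ret = mdl;
		break;
	}
	spin_unlock(&q_busy->lock);
	return ret;
}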
464 u64_stats_t q_busy; (member)
379 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_singleq_frame()
844 qbusy = u64_stats_read(&stats->q_busy); in idpf_collect_queue_stats()
2169 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_maybe_stop_splitq()
2195 u64_stats_inc(&tx_q->q_stats.q_busy); in idpf_tx_buf_hw_update()
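The idpf hits show the other use of the symbol: here q_busy is a u64_stats_t counter bumped on the transmit hot path when the ring cannot accept a frame, and read back during stats collection with u64_stats_read. Below is a minimal sketch of that u64_stats pattern; demo_tx_stats and the helper names are illustrative stand-ins, not the real idpf queue-stats layout.

#include <linux/u64_stats_sync.h>

/* Illustrative stats block (hypothetical); idpf keeps more counters here. */
struct demo_tx_stats {
	struct u64_stats_sync sync;  /* seqcount-style protection for readers */
	u64_stats_t q_busy;          /* ring-busy / stop events               */
};

static void demo_stats_init(struct demo_tx_stats *s)
{
	u64_stats_init(&s->sync);
}

/* Writer side: called from the TX path when the ring has no room. */
static void demo_count_q_busy(struct demo_tx_stats *s)
{
	u64_stats_update_begin(&s->sync);
	u64_stats_inc(&s->q_busy);
	u64_stats_update_end(&s->sync);
}

/* Reader side: take a consistent snapshot without blocking the writer,
 * retrying if an update raced with the read. */
static u64 demo_read_q_busy(struct demo_tx_stats *s)
{
	unsigned int start;
	u64 qbusy;

	do {
		start = u64_stats_fetch_begin(&s->sync);
		qbusy = u64_stats_read(&s->q_busy);
	} while (u64_stats_fetch_retry(&s->sync, start));

	return qbusy;
}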