
Searched refs:q (Results 1 – 25 of 1109) sorted by relevance


/linux/lib/crypto/
gf128mul.c
57 #define gf128mul_dat(q) { \ argument
58 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
59 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
60 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
61 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
62 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
63 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
64 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
65 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
66 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
[all …]
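Aside: gf128mul_dat() is an x-macro. The table body expands its q() parameter once per byte value, so one 256-entry layout can stamp out several lookup tables from different per-byte transforms; the real file instantiates its multiplication tables the same way. A minimal userspace sketch of the idiom, cut down to 16 entries (TABLE_DAT, SQUARE and DOUBLE are invented names for illustration):

#include <stdint.h>
#include <stdio.h>

/* expand q(i) once per index; the real gf128mul_dat covers 0x00..0xff */
#define TABLE_DAT(q) { \
	q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07), \
	q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f) }

/* two different per-byte transforms sharing the same layout */
#define SQUARE(i) ((uint16_t)((i) * (i)))
#define DOUBLE(i) ((uint16_t)((i) << 1))

static const uint16_t tab_square[16] = TABLE_DAT(SQUARE);
static const uint16_t tab_double[16] = TABLE_DAT(DOUBLE);

int main(void)
{
	printf("%u %u\n", tab_square[0x0f], tab_double[0x0f]);	/* 225 30 */
	return 0;
}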
/linux/Documentation/networking/
tls-offload-layers.svg
1 (match inside SVG path data; coordinate text omitted)
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) in cache_offer() argument
52 struct funeth_rx_cache *c = &q->cache; in cache_offer()
58 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_offer()
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
69 struct funeth_rx_cache *c = &q->cache; in cache_get()
77 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in cache_get()
88 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_get()
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
103 if (cache_get(q, rb)) in funeth_alloc_page()
110 rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, in funeth_alloc_page()
[all …]
funeth_tx.c
56 static void *txq_end(const struct funeth_txq *q) in txq_end() argument
58 return (void *)q->hw_wb; in txq_end()
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
66 return txq_end(q) - p; in txq_to_end()
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
90 i < ngle && txq_to_end(q, gle); i++, gle++) in fun_write_gl()
93 if (txq_to_end(q, gle) == 0) { in fun_write_gl()
94 gle = (struct fun_dataop_gl *)q->desc; in fun_write_gl()
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
132 FUN_QSTAT_INC(q, tx_tls_fallback); in fun_tls_tx()
[all …]
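Aside: fun_write_gl() shows the classic descriptor-ring wrap: txq_end() is the first byte past the usable ring (the hardware writeback block), txq_to_end() measures what is left, and the cursor jumps back to q->desc when that hits zero. A hedged userspace sketch of the wrap logic only (struct gl_entry, ring_to_end() and write_gl() are invented stand-ins, not the driver's code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gl_entry { uint64_t addr; uint64_t len; };

struct txring {
	struct gl_entry *desc;	/* base of the descriptor ring */
	void *end;		/* first byte past the last usable slot */
};

/* bytes left between p and the end of the ring, as in txq_to_end() */
static size_t ring_to_end(const struct txring *r, void *p)
{
	return (size_t)((char *)r->end - (char *)p);
}

static void write_gl(struct txring *r, struct gl_entry *gle,
		     const struct gl_entry *src, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++, gle++) {
		if (ring_to_end(r, gle) == 0)	/* ran off the end: wrap */
			gle = r->desc;
		*gle = src[i];
	}
}

int main(void)
{
	struct gl_entry ring[4] = {{0, 0}};
	struct txring r = { .desc = ring, .end = &ring[4] };
	const struct gl_entry src[3] = { {1, 100}, {2, 200}, {3, 300} };

	write_gl(&r, &ring[3], src, 3);	/* slot 3, then wrap to 0 and 1 */
	printf("%llu %llu\n", (unsigned long long)ring[3].addr,
	       (unsigned long long)ring[0].addr);	/* 1 2 */
	return 0;
}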
/linux/drivers/gpu/drm/xe/
xe_guc_submit.c
52 exec_queue_to_guc(struct xe_exec_queue *q) in exec_queue_to_guc() argument
54 return &q->gt->uc.guc; in exec_queue_to_guc()
75 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
77 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
80 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
82 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
85 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
87 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
90 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
92 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
[all …]
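Aside: the exec-queue state here is one atomic word of flag bits, set with atomic_or(), cleared with atomic_and() of the complement, and tested with a plain atomic_read(). The same pattern in portable C11 atomics (a sketch, not the xe driver's code):

#include <stdatomic.h>
#include <stdio.h>

#define QUEUE_STATE_REGISTERED (1u << 0)
#define QUEUE_STATE_ENABLED    (1u << 1)

static _Atomic unsigned int state;

static int queue_registered(void)
{
	return atomic_load(&state) & QUEUE_STATE_REGISTERED;
}

static void set_queue_registered(void)
{
	atomic_fetch_or(&state, QUEUE_STATE_REGISTERED);	/* atomic_or() */
}

static void clear_queue_registered(void)
{
	atomic_fetch_and(&state, ~QUEUE_STATE_REGISTERED);	/* atomic_and() */
}

int main(void)
{
	set_queue_registered();
	printf("%d\n", queue_registered() != 0);	/* 1 */
	clear_queue_registered();
	printf("%d\n", queue_registered() != 0);	/* 0 */
	return 0;
}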
xe_exec_queue.c
110 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
113 static void xe_exec_queue_group_cleanup(struct xe_exec_queue *q) in xe_exec_queue_group_cleanup() argument
115 struct xe_exec_queue_group *group = q->multi_queue.group; in xe_exec_queue_group_cleanup()
119 if (xe_exec_queue_is_multi_queue_secondary(q)) { in xe_exec_queue_group_cleanup()
124 xe_exec_queue_put(xe_exec_queue_multi_queue_primary(q)); in xe_exec_queue_group_cleanup()
141 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
146 if (q->tlb_inval[i].dep_scheduler) in __xe_exec_queue_free()
147 xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler); in __xe_exec_queue_free()
149 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
150 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
[all …]
/linux/sound/core/seq/oss/
seq_oss_readq.c
35 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
37 q = kzalloc_obj(*q); in snd_seq_oss_readq_new()
38 if (!q) in snd_seq_oss_readq_new()
41 q->q = kzalloc_objs(union evrec, maxlen); in snd_seq_oss_readq_new()
42 if (!q->q) { in snd_seq_oss_readq_new()
43 kfree(q); in snd_seq_oss_readq_new()
47 q->maxlen = maxlen; in snd_seq_oss_readq_new()
48 q->qlen = 0; in snd_seq_oss_readq_new()
49 q->head = q->tail = 0; in snd_seq_oss_readq_new()
50 init_waitqueue_head(&q->midi_sleep); in snd_seq_oss_readq_new()
[all …]
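Aside: snd_seq_oss_readq_new() is the standard two-step allocation: when the inner event array fails, the outer object is freed before returning, so callers never see a half-built queue. A userspace sketch of the same shape (union evrec shrunk to a stub):

#include <stdlib.h>
#include <stdio.h>

union evrec { unsigned char raw[8]; };	/* stub for the real event record */

struct readq {
	union evrec *q;
	int maxlen, qlen, head, tail;
};

static struct readq *readq_new(int maxlen)
{
	struct readq *q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;
	q->q = calloc(maxlen, sizeof(*q->q));
	if (!q->q) {		/* inner alloc failed: roll back the outer one */
		free(q);
		return NULL;
	}
	q->maxlen = maxlen;
	return q;
}

int main(void)
{
	struct readq *q = readq_new(64);
	printf("%s\n", q ? "ok" : "nomem");
	if (q) { free(q->q); free(q); }
	return 0;
}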
seq_oss_writeq.c
27 struct seq_oss_writeq *q; in snd_seq_oss_writeq_new() local
30 q = kzalloc_obj(*q); in snd_seq_oss_writeq_new()
31 if (!q) in snd_seq_oss_writeq_new()
33 q->dp = dp; in snd_seq_oss_writeq_new()
34 q->maxlen = maxlen; in snd_seq_oss_writeq_new()
35 spin_lock_init(&q->sync_lock); in snd_seq_oss_writeq_new()
36 q->sync_event_put = 0; in snd_seq_oss_writeq_new()
37 q->sync_time = 0; in snd_seq_oss_writeq_new()
38 init_waitqueue_head(&q->sync_sleep); in snd_seq_oss_writeq_new()
47 return q; in snd_seq_oss_writeq_new()
[all …]
/linux/sound/core/seq/
seq_queue.c
50 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
57 queue_list[i] = q; in queue_list_add()
58 q->queue = i; in queue_list_add()
68 struct snd_seq_queue *q; in queue_list_remove() local
71 q = queue_list[id]; in queue_list_remove()
72 if (q) { in queue_list_remove()
73 guard(spinlock)(&q->owner_lock); in queue_list_remove()
74 if (q->owner == client) { in queue_list_remove()
76 q->klocked = 1; in queue_list_remove()
79 return q; in queue_list_remove()
[all …]
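Aside: queue_list_remove() takes q->owner_lock with the kernel's scope-based guard(spinlock)(), which unlocks automatically when the guard leaves scope, so early returns need no explicit unlock. A rough userspace equivalent built on the GCC/Clang cleanup attribute and a pthread mutex (a sketch of the idea, not the kernel's cleanup.h):

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* lock now, unlock automatically at end of scope */
#define guard_mutex(m) \
	__attribute__((cleanup(unlock_cleanup))) pthread_mutex_t *guard_ = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static int owner = 3;
static int klocked;

static int remove_if_owned(int client)
{
	guard_mutex(&owner_lock);
	if (owner != client)
		return 0;	/* unlocks here too */
	klocked = 1;
	return 1;
}

int main(void)
{
	printf("%d %d\n", remove_if_owned(3), klocked);	/* 1 1 */
	return 0;
}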
/linux/net/sched/
sch_choke.c
75 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
81 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
83 return q->flags & TC_RED_ECN; in use_ecn()
87 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
89 return q->flags & TC_RED_HARDDROP; in use_harddrop()
93 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
96 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
97 if (q->head == q->tail) in choke_zap_head_holes()
99 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
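Aside: choke_len() depends on the table size being a power of two: unsigned subtraction wraps modulo 2^32, and masking with tab_mask reduces that modulo the table size, so the occupancy is correct even after tail wraps past head. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int tab_mask = 0xff;		/* table of 256 slots */
	unsigned int head = 250, tail = 4;	/* tail already wrapped */

	/* (4 - 250) wraps to 4294967050; & 0xff leaves the 10 live slots */
	unsigned int len = (tail - head) & tab_mask;
	printf("%u\n", len);	/* 10 */
	return 0;
}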
sch_netem.c
210 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
212 struct clgstate *clg = &q->clg; in loss_4state()
213 u32 rnd = prandom_u32_state(&q->prng.prng_state); in loss_4state()
275 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
277 struct clgstate *clg = &q->clg; in loss_gilb_ell()
278 struct rnd_state *s = &q->prng.prng_state; in loss_gilb_ell()
297 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
299 switch (q->loss_model) { in loss_event()
302 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng); in loss_event()
310 return loss_4state(q); in loss_event()
[all …]
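Aside: loss_4state() and loss_gilb_ell() step a small Markov chain from per-qdisc PRNG state, which reproduces bursty rather than uniform loss. A simplified two-state Gilbert-Elliott sketch (parameters invented; the kernel's 4-state variant adds separate burst and gap sub-states):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct gilb_ell {
	bool bad;			/* current channel state */
	double p_gb, p_bg;		/* good->bad and bad->good transitions */
	double loss_good, loss_bad;	/* loss probability within each state */
};

static double rnd01(void)
{
	return rand() / ((double)RAND_MAX + 1.0);
}

static bool loss_event(struct gilb_ell *s)
{
	if (s->bad) {
		if (rnd01() < s->p_bg)
			s->bad = false;
		return rnd01() < s->loss_bad;
	}
	if (rnd01() < s->p_gb)
		s->bad = true;
	return rnd01() < s->loss_good;
}

int main(void)
{
	struct gilb_ell ge = { false, 0.01, 0.30, 0.001, 0.50 };
	int lost = 0;

	srand(1);
	for (int i = 0; i < 100000; i++)
		lost += loss_event(&ge);
	printf("lost %d of 100000\n", lost);	/* clustered, not uniform */
	return 0;
}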
sch_sfq.c
143 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
146 return &q->slots[val].dep; in sfq_dep_head()
147 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
150 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
153 return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); in sfq_hash()
159 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
166 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
169 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
171 return sfq_hash(q, skb) + 1; in sfq_classify()
187 if (TC_H_MIN(res.classid) <= q->divisor) in sfq_classify()
[all …]
sch_dualpi2.c
147 static u64 head_enqueue_time(struct Qdisc *q) in head_enqueue_time() argument
149 struct sk_buff *skb = qdisc_peek_head(q); in head_enqueue_time()
170 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q) in next_pi2_timeout() argument
172 return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate); in next_pi2_timeout()
185 static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q) in skb_apply_step() argument
187 return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step; in skb_apply_step()
190 static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb) in dualpi2_mark() argument
193 q->ecn_mark++; in dualpi2_mark()
199 static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q) in dualpi2_reset_c_protection() argument
209 dualpi2_calculate_c_protection(struct Qdisc *sch, struct dualpi2_sched_data *q, u32 wc) in dualpi2_calculate_c_protection() argument
230 dualpi2_classic_marking(struct dualpi2_sched_data *q, struct sk_buff *skb, u32 prob, bool overload) in dualpi2_classic_marking() argument
253 dualpi2_scalable_marking(struct dualpi2_sched_data *q, struct sk_buff *skb, u64 local_l_prob, u32 prob, bool overload) in dualpi2_scalable_marking() argument
282 must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q, struct sk_buff *skb) in must_drop() argument
341 dualpi2_skb_classify(struct dualpi2_sched_data *q, struct sk_buff *skb) in dualpi2_skb_classify() argument
388 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_enqueue_skb() local
444 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_qdisc_enqueue() local
517 dequeue_packet(struct Qdisc *sch, struct dualpi2_sched_data *q, int *credit_change, u64 now) in dequeue_packet() argument
551 do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb, u64 now) in do_step_aqm() argument
574 drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb, struct Qdisc *sch, enum skb_drop_reason reason) in drop_and_retry() argument
585 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_qdisc_dequeue() local
626 get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c, u64 *qdelay_l) in get_queue_delays() argument
641 struct dualpi2_sched_data *q = qdisc_priv(sch); in calculate_probability() local
711 struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer); in dualpi2_timer() local
762 struct dualpi2_sched_data *q; in dualpi2_change() local
890 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_reset_default() local
916 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_init() local
947 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_dump() local
1023 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_dump_stats() local
1050 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_reset() local
1069 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_destroy() local
1094 dualpi2_unbind(struct Qdisc *q, unsigned long cl) in dualpi2_unbind() argument
1101 struct dualpi2_sched_data *q = qdisc_priv(sch); in dualpi2_tcf_block() local
[all …]
sch_fq_pie.c
75 static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q, in fq_pie_hash() argument
78 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_pie_hash()
84 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_classify() local
91 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_pie_classify()
94 filter = rcu_dereference_bh(q->filter_list); in fq_pie_classify()
96 return fq_pie_hash(q, skb) + 1; in fq_pie_classify()
112 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_pie_classify()
134 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_qdisc_enqueue() local
152 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
155 memory_limited = q->memory_usage > q->memory_limit + skb->truesize; in fq_pie_qdisc_enqueue()
[all …]
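Aside: fq_pie_hash() spreads a 32-bit flow hash over flows_cnt buckets with reciprocal_scale(), a multiply-and-shift that avoids a division: (hash * n) >> 32 lands uniformly in [0, n) when the hash covers the full 32-bit range. The arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the kernel helper: no division needed */
static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t flows_cnt = 1024;

	printf("%u\n", reciprocal_scale(0x9e3779b9u, flows_cnt));
	return 0;	/* always a bucket in [0, 1024) */
}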
sch_red.c
55 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
57 return q->flags & TC_RED_ECN; in red_use_ecn()
60 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
62 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
65 static int red_use_nodrop(struct red_sched_data *q) in red_use_nodrop() argument
67 return q->flags & TC_RED_NODROP; in red_use_nodrop()
74 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
75 struct Qdisc *child = q->qdisc; in red_enqueue()
79 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
80 &q->vars, in red_enqueue()
[all …]
sch_fq_codel.c
70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
73 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_codel_hash()
79 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
86 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
89 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
91 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
107 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
140 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
154 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
155 if (q->backlogs[i] > maxbacklog) { in fq_codel_drop()
[all …]
sch_fq.c
79 /* Following field is only used for q->internal,
80 * because q->internal is not hashed in fq_root[]
93 struct rb_node rate_node; /* anchor in q->delayed tree */
197 static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow, in fq_flow_add_tail() argument
200 struct fq_perband_flows *pband = &q->band_flows[flow->band]; in fq_flow_add_tail()
213 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_unset_throttled() argument
215 rb_erase(&f->rate_node, &q->delayed); in fq_flow_unset_throttled()
216 q->throttled_flows--; in fq_flow_unset_throttled()
217 fq_flow_add_tail(q, f, OLD_FLOW); in fq_flow_unset_throttled()
220 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
258 fq_gc(struct fq_sched_data *q, struct rb_root *root, struct sock *sk) in fq_gc() argument
315 const struct fq_sched_data *q = qdisc_priv(sch); in fq_fastpath_check() local
357 struct fq_sched_data *q = qdisc_priv(sch); in fq_classify() local
535 fq_packet_beyond_horizon(const struct sk_buff *skb, const struct fq_sched_data *q, u64 now) in fq_packet_beyond_horizon() argument
545 struct fq_sched_data *q = qdisc_priv(sch); in fq_enqueue() local
607 fq_check_throttled(struct fq_sched_data *q, u64 now) in fq_check_throttled() argument
650 struct fq_sched_data *q = qdisc_priv(sch); in fq_dequeue() local
801 struct fq_sched_data *q = qdisc_priv(sch); in fq_reset() local
836 fq_rehash(struct fq_sched_data *q, struct rb_root *old_array, u32 old_log, struct rb_root *new_array, u32 new_log) in fq_rehash() argument
888 struct fq_sched_data *q = qdisc_priv(sch); in fq_resize() local
972 fq_load_weights(struct fq_sched_data *q, const struct nlattr *attr, struct netlink_ext_ack *extack) in fq_load_weights() argument
991 fq_load_priomap(struct fq_sched_data *q, const struct nlattr *attr, struct netlink_ext_ack *extack) in fq_load_priomap() argument
1017 struct fq_sched_data *q = qdisc_priv(sch); in fq_change() local
1157 struct fq_sched_data *q = qdisc_priv(sch); in fq_destroy() local
1167 struct fq_sched_data *q = qdisc_priv(sch); in fq_init() local
1212 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump() local
1285 struct fq_sched_data *q = qdisc_priv(sch); in fq_dump_stats() local
[all …]
sch_skbprio.c
40 static u16 calc_new_high_prio(const struct skbprio_sched_data *q) in calc_new_high_prio() argument
44 for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { in calc_new_high_prio()
45 if (!skb_queue_empty(&q->qdiscs[prio])) in calc_new_high_prio()
53 static u16 calc_new_low_prio(const struct skbprio_sched_data *q) in calc_new_low_prio() argument
57 for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { in calc_new_low_prio()
58 if (!skb_queue_empty(&q->qdiscs[prio])) in calc_new_low_prio()
72 struct skbprio_sched_data *q = qdisc_priv(sch); in skbprio_enqueue() local
81 qdisc = &q->qdiscs[prio]; in skbprio_enqueue()
84 if (sch->q.qlen < READ_ONCE(sch->limit)) { in skbprio_enqueue()
87 q->qstats[prio].backlog += qdisc_pkt_len(skb); in skbprio_enqueue()
[all …]
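Aside: when the band at highest_prio drains, skbprio rescans downward for the next non-empty band (and calc_new_low_prio() scans upward for the lowest). A sketch of the downward scan, with queue lengths standing in for skb_queue_empty():

#include <stdio.h>

#define SKBPRIO_MAX 64

struct skbprio {
	unsigned int qlen[SKBPRIO_MAX];	/* per-priority queue lengths */
	int highest_prio, lowest_prio;
};

/* next non-empty priority below the old highest; fall back to lowest */
static int calc_new_high_prio(const struct skbprio *s)
{
	for (int prio = s->highest_prio - 1; prio >= s->lowest_prio; prio--)
		if (s->qlen[prio])
			return prio;
	return s->lowest_prio;
}

int main(void)
{
	struct skbprio s = { .highest_prio = 10, .lowest_prio = 2 };

	s.qlen[5] = 3;	/* only band 5 still holds packets */
	printf("%d\n", calc_new_high_prio(&s));	/* 5 */
	return 0;
}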
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
47 #define dprintk(q, level, fmt, arg...) \ argument
50 pr_info("[%s] %s: " fmt, (q)->name, __func__, \
103 #define log_qop(q, op) \ argument
104 dprintk(q, 2, "call_qop(%s)%s\n", #op, \
105 (q)->ops->op ? "" : " (nop)")
107 #define call_qop(q, op, args...) \ argument
111 log_qop(q, op); \
112 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
114 (q)->cnt_ ## op++; \
118 #define call_void_qop(q, op, args...) \ argument
[all …]
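Aside: call_qop() treats a missing callback as a successful nop and uses ## token pasting to bump a per-op counter (cnt_start, cnt_stop, ...) for the core's debug accounting. A userspace sketch of the same statement-expression macro (a GCC/Clang extension, as in the kernel):

#include <stdio.h>

struct ops {
	int (*start)(void);
	int (*stop)(void);	/* left NULL: treated as a successful nop */
};

struct queue {
	const struct ops *ops;
	unsigned int cnt_start, cnt_stop;	/* like q->cnt_ ## op */
};

#define call_qop(q, op, args...) ({ \
	int err_ = (q)->ops->op ? (q)->ops->op(args) : 0; \
	if (!err_) \
		(q)->cnt_##op++; \
	err_; })

static int my_start(void) { return 0; }

int main(void)
{
	static const struct ops ops = { .start = my_start };
	struct queue q = { .ops = &ops };

	call_qop(&q, start);
	call_qop(&q, stop);	/* nop, still counted as success */
	printf("%u %u\n", q.cnt_start, q.cnt_stop);	/* 1 1 */
	return 0;
}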
/linux/net/xdp/
xsk_queue.h
125 static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) in __xskq_cons_read_addr_unchecked() argument
127 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in __xskq_cons_read_addr_unchecked()
128 u32 idx = cached_cons & q->ring_mask; in __xskq_cons_read_addr_unchecked()
133 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_read_addr_unchecked() argument
135 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
136 __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); in xskq_cons_read_addr_unchecked()
222 static inline bool xskq_has_descs(struct xsk_queue *q) in xskq_has_descs() argument
224 return q->cached_cons != q->cached_prod; in xskq_has_descs()
233 xskq_cons_read_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) in xskq_cons_read_desc() argument
249 xskq_cons_release_n(struct xsk_queue *q, u32 cnt) in xskq_cons_release_n() argument
254 parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool, struct xdp_desc *desc, struct parsed_desc *parsed) in parse_desc() argument
262 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, u32 max) in xskq_cons_read_desc_batch() argument
304 __xskq_cons_release(struct xsk_queue *q) in __xskq_cons_release() argument
309 __xskq_cons_peek(struct xsk_queue *q) in __xskq_cons_peek() argument
315 xskq_cons_get_entries(struct xsk_queue *q) in xskq_cons_get_entries() argument
321 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) in xskq_cons_nb_entries() argument
334 xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_peek_addr_unchecked() argument
341 xskq_cons_peek_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) in xskq_cons_peek_desc() argument
354 xskq_cons_release(struct xsk_queue *q) in xskq_cons_release() argument
359 xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt) in xskq_cons_cancel_n() argument
364 xskq_cons_present_entries(struct xsk_queue *q) in xskq_cons_present_entries() argument
372 xskq_get_prod(struct xsk_queue *q) in xskq_get_prod() argument
377 xskq_prod_nb_free(struct xsk_queue *q, u32 max) in xskq_prod_nb_free() argument
391 xskq_prod_is_full(struct xsk_queue *q) in xskq_prod_is_full() argument
396 xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt) in xskq_prod_cancel_n() argument
401 xskq_prod_reserve(struct xsk_queue *q) in xskq_prod_reserve() argument
411 xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) in xskq_prod_reserve_addr() argument
423 xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr) in xskq_prod_write_addr() argument
430 xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, u32 nb_entries) in xskq_prod_write_addr_batch() argument
443 xskq_prod_reserve_desc(struct xsk_queue *q, u64 addr, u32 len, u32 flags) in xskq_prod_reserve_desc() argument
461 __xskq_prod_submit(struct xsk_queue *q, u32 idx) in __xskq_prod_submit() argument
466 xskq_prod_submit(struct xsk_queue *q) in xskq_prod_submit() argument
471 xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries) in xskq_prod_submit_n() argument
476 xskq_prod_is_empty(struct xsk_queue *q) in xskq_prod_is_empty() argument
484 xskq_nb_invalid_descs(struct xsk_queue *q) in xskq_nb_invalid_descs() argument
489 xskq_nb_queue_empty_descs(struct xsk_queue *q) in xskq_nb_queue_empty_descs() argument
[all …]
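Aside: these helpers keep consumer-local cached copies of the shared ring indices; the consumer re-reads the producer index, with acquire ordering, only when its cached view looks empty, saving a cache-line bounce per descriptor. A single-producer/single-consumer sketch in C11 atomics (struct xq is an assumed layout, not the uapi ring):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct xq {
	_Atomic uint32_t producer;	/* shared: bumped by the producer */
	uint32_t cached_prod;		/* consumer's snapshot of producer */
	uint32_t cached_cons;		/* consumer's own position */
	uint32_t ring_mask;		/* ring size - 1, power of two */
	uint64_t *addrs;		/* ring payload */
};

static bool cons_peek_addr(struct xq *q, uint64_t *addr)
{
	if (q->cached_cons == q->cached_prod) {
		/* looks empty: re-read the shared index; the acquire load
		   pairs with the producer's release store, so entries
		   written before the bump are visible after it */
		q->cached_prod = atomic_load_explicit(&q->producer,
						      memory_order_acquire);
		if (q->cached_cons == q->cached_prod)
			return false;	/* really empty */
	}
	*addr = q->addrs[q->cached_cons & q->ring_mask];
	return true;
}

int main(void)
{
	uint64_t ring[4], addr = 0;
	struct xq q = { .ring_mask = 3, .addrs = ring };

	ring[0] = 0xabcd;	/* producer fills an entry, then bumps */
	atomic_store_explicit(&q.producer, 1, memory_order_release);
	return !(cons_peek_addr(&q, &addr) && addr == 0xabcd);	/* 0 = ok */
}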
/linux/block/
blk-pm.c
29 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
31 q->dev = dev; in blk_pm_runtime_init()
32 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
33 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
34 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
59 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
63 if (!q->dev) in blk_pre_runtime_suspend()
66 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); in blk_pre_runtime_suspend()
68 spin_lock_irq(&q->queue_lock); in blk_pre_runtime_suspend()
69 q->rpm_status = RPM_SUSPENDING; in blk_pre_runtime_suspend()
[all …]
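Aside: the blk-pm helpers publish a transient RPM_SUSPENDING status under the queue lock and roll back to RPM_ACTIVE if the suspend fails, so readers only ever observe legal states. A minimal sketch of that guarded transition (mutex instead of spinlock, two end states instead of the real four):

#include <pthread.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED };

struct queue {
	pthread_mutex_t lock;
	enum rpm_status rpm_status;
};

/* begin a suspend: publish the transient state under the lock */
static void pre_runtime_suspend(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->rpm_status = RPM_SUSPENDING;
	pthread_mutex_unlock(&q->lock);
}

/* finish: commit on success, roll back to ACTIVE on error */
static void post_runtime_suspend(struct queue *q, int err)
{
	pthread_mutex_lock(&q->lock);
	q->rpm_status = err ? RPM_ACTIVE : RPM_SUSPENDED;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, RPM_ACTIVE };

	pre_runtime_suspend(&q);
	post_runtime_suspend(&q, -1);	/* suspend failed: back to ACTIVE */
	printf("%d\n", q.rpm_status == RPM_ACTIVE);	/* 1 */
	return 0;
}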
/linux/drivers/infiniband/sw/rxe/
rxe_queue.h
83 void rxe_queue_reset(struct rxe_queue *q);
88 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
95 static inline u32 queue_next_index(struct rxe_queue *q, int index) in queue_next_index() argument
97 return (index + 1) & q->index_mask; in queue_next_index()
100 static inline u32 queue_get_producer(const struct rxe_queue *q, in queue_get_producer() argument
108 prod = smp_load_acquire(&q->buf->producer_index); in queue_get_producer()
112 prod = q->index; in queue_get_producer()
116 prod = q->buf->producer_index; in queue_get_producer()
120 prod = smp_load_acquire(&q->buf->producer_index); in queue_get_producer()
127 static inline u32 queue_get_consumer(const struct rxe_queue *q, in queue_get_consumer() argument
[all …]
/linux/drivers/net/wireless/broadcom/b43/
pio.c
24 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
37 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
49 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
54 q = pio->tx_queue_AC_BK; in parse_cookie()
57 q = pio->tx_queue_AC_BE; in parse_cookie()
60 q = pio->tx_queue_AC_VI; in parse_cookie()
63 q = pio->tx_queue_AC_VO; in parse_cookie()
66 q = pio->tx_queue_mcast; in parse_cookie()
69 if (B43_WARN_ON(!q)) in parse_cookie()
72 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) in parse_cookie()
[all …]
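Aside: generate_cookie()/parse_cookie() round-trip queue identity through the hardware: the queue index plus one sits in the top four bits of the 16-bit cookie (so zero stays an invalid cookie) and the packet slot in the low twelve. The packing arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint16_t make_cookie(unsigned int qidx, unsigned int slot)
{
	/* queue index + 1 in bits 15..12, packet slot in bits 11..0 */
	return (uint16_t)(((qidx + 1) << 12) | (slot & 0x0fff));
}

static void parse_cookie(uint16_t cookie, unsigned int *qidx,
			 unsigned int *slot)
{
	*qidx = (cookie >> 12) - 1;
	*slot = cookie & 0x0fff;
}

int main(void)
{
	unsigned int qidx, slot;

	parse_cookie(make_cookie(2, 77), &qidx, &slot);
	printf("%u %u\n", qidx, slot);	/* 2 77 */
	return 0;
}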
/linux/drivers/s390/cio/
qdio_main.c
116 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
119 int tmp_count = count, tmp_start = start, nr = q->nr; in qdio_do_eqbs()
122 qperf_inc(q, eqbs); in qdio_do_eqbs()
124 if (!q->is_input_q) in qdio_do_eqbs()
125 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
127 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
137 qperf_inc(q, eqbs_partial); in qdio_do_eqbs()
138 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", in qdio_do_eqbs()
143 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
146 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_do_eqbs()
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
169 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
171 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
174 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
189 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
233 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
237 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
240 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
266 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
[all …]
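Aside: rspq_to_qset() and txq_to_qset() recover the enclosing queue set from a pointer to an embedded member via container_of(), which is just pointer arithmetic on offsetof(). A standalone sketch (const handling simplified):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sge_rspq { unsigned int cidx; };

struct sge_qset {
	int id;
	struct sge_rspq rspq;	/* embedded member */
};

static struct sge_qset *rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

int main(void)
{
	struct sge_qset qs = { .id = 7 };

	/* from the embedded rspq back to its containing qset */
	printf("%d\n", rspq_to_qset(&qs.rspq)->id);	/* 7 */
	return 0;
}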
