
Searched full:q (Results 1 – 25 of 2164) sorted by relevance


/linux/lib/crypto/
gf128mul.c
57 #define gf128mul_dat(q) { \ argument
58 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
59 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
60 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
61 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
62 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
63 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
64 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
65 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
66 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
[all …]
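As context for the hit above: gf128mul_dat(q) is an "X macro" table generator; the caller passes a per-entry macro as q and the whole 256-entry lookup table is stamped out at compile time. A minimal, hedged sketch of the technique (shortened to four entries; DAT and XX are illustrative names, not the kernel's):

#include <stdio.h>

#define DAT(q) { q(0x00), q(0x01), q(0x02), q(0x03) }  /* per-entry macro q is applied to each index */
#define XX(i)  ((i) * 2)                               /* example transform for one table entry */

static const unsigned char tab[4] = DAT(XX);           /* expands to { 0, 2, 4, 6 } */

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("tab[%d] = %d\n", i, tab[i]);
	return 0;
}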
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) in cache_offer() argument
52 struct funeth_rx_cache *c = &q->cache; in cache_offer()
58 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_offer()
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
69 struct funeth_rx_cache *c = &q->cache; in cache_get()
77 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in cache_get()
88 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_get()
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
103 if (cache_get(q, rb)) in funeth_alloc_page()
110 rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, in funeth_alloc_page()
[all …]
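The cache_offer()/cache_get() pair above suggests a small recycling cache: freed RX pages are parked in a ring and handed back out before any new allocation. A minimal userspace sketch of that idea (the DMA mapping and all funeth names are elided; this is illustrative only):

#include <stdbool.h>

struct buf { void *page; };

struct buf_cache {
	struct buf bufs[64];          /* power-of-two ring */
	unsigned int prod, cons;      /* free-running indices, masked on use */
};

static bool cache_put(struct buf_cache *c, const struct buf *b)
{
	if (c->prod - c->cons >= 64)
		return false;         /* cache full: caller must really free */
	c->bufs[c->prod++ & 63] = *b;
	return true;
}

static bool cache_take(struct buf_cache *c, struct buf *b)
{
	if (c->prod == c->cons)
		return false;         /* empty: caller allocates a fresh page */
	*b = c->bufs[c->cons++ & 63];
	return true;
}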
funeth_tx.c
56 static void *txq_end(const struct funeth_txq *q) in txq_end() argument
58 return (void *)q->hw_wb; in txq_end()
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
66 return txq_end(q) - p; in txq_to_end()
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
90 i < ngle && txq_to_end(q, gle); i++, gle++) in fun_write_gl()
93 if (txq_to_end(q, gle) == 0) { in fun_write_gl()
94 gle = (struct fun_dataop_gl *)q->desc; in fun_write_gl()
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
132 FUN_QSTAT_INC(q, tx_tls_fallback); in fun_tls_tx()
[all …]
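txq_end()/txq_to_end() above measure the space left before the end of the descriptor ring, and fun_write_gl() jumps back to q->desc when that space hits zero. A hedged sketch of the wrap step (illustrative types, not the funeth layout):

struct desc { unsigned long long addr, len; };

struct txq {
	struct desc *base;   /* first descriptor in the ring */
	struct desc *end;    /* one past the last descriptor */
};

/* Advance a write cursor, wrapping to the ring base at the end. */
static struct desc *txq_advance(const struct txq *q, struct desc *d)
{
	return (++d == q->end) ? q->base : d;
}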
/linux/drivers/gpu/drm/xe/
xe_guc_submit.c
51 exec_queue_to_guc(struct xe_exec_queue *q) in exec_queue_to_guc() argument
53 return &q->gt->uc.guc; in exec_queue_to_guc()
76 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
78 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
81 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
83 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
86 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
88 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
91 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
93 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
[all …]
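The exec-queue helpers above keep queue lifecycle flags in one atomic word: test with a load and mask, set with atomic OR, clear with atomic AND of the complement. A sketch of the same pattern using C11 atomics in place of the kernel's atomic_t (flag names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define STATE_REGISTERED (1u << 0)
#define STATE_ENABLED    (1u << 1)

struct queue { atomic_uint state; };

static bool queue_registered(struct queue *q)
{
	return atomic_load(&q->state) & STATE_REGISTERED;
}

static void set_queue_registered(struct queue *q)
{
	atomic_fetch_or(&q->state, STATE_REGISTERED);
}

static void clear_queue_registered(struct queue *q)
{
	atomic_fetch_and(&q->state, ~STATE_REGISTERED);
}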
xe_exec_queue.c
63 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
66 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
71 if (q->tlb_inval[i].dep_scheduler) in __xe_exec_queue_free()
72 xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler); in __xe_exec_queue_free()
74 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
75 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
76 if (q->vm) in __xe_exec_queue_free()
77 xe_vm_put(q->vm); in __xe_exec_queue_free()
79 if (q->xef) in __xe_exec_queue_free()
80 xe_file_put(q->xef); in __xe_exec_queue_free()
[all …]
/linux/Documentation/networking/
tls-offload-layers.svg
1 (SVG path data; the "q" matches here are quadratic-Bézier path commands in the image, not code)
/linux/net/sched/
sch_choke.c
75 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
81 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
83 return q->flags & TC_RED_ECN; in use_ecn()
87 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
89 return q->flags & TC_RED_HARDDROP; in use_harddrop()
93 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
96 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
97 if (q->head == q->tail) in choke_zap_head_holes()
99 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
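choke_len() above relies on a power-of-two table: (tail - head) & tab_mask yields the element count even after the unsigned indices wrap around zero. A small self-checking example of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int mask = 8 - 1;                    /* table size must be a power of two */
	unsigned int head = 0xfffffffeu, tail = 0x1u; /* tail has wrapped past zero */

	assert(((tail - head) & mask) == 3);          /* still counts 3 occupied slots */
	return 0;
}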
sch_netem.c
210 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
212 struct clgstate *clg = &q->clg; in loss_4state()
213 u32 rnd = prandom_u32_state(&q->prng.prng_state); in loss_4state()
275 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
277 struct clgstate *clg = &q->clg; in loss_gilb_ell()
278 struct rnd_state *s = &q->prng.prng_state; in loss_gilb_ell()
297 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
299 switch (q->loss_model) { in loss_event()
302 return q->loss && q in loss_event()
357 packet_time_ns(u64 len,const struct netem_sched_data * q) packet_time_ns() argument
374 struct netem_sched_data *q = qdisc_priv(sch); tfifo_reset() local
393 struct netem_sched_data *q = qdisc_priv(sch); tfifo_enqueue() local
451 struct netem_sched_data *q = qdisc_priv(sch); netem_enqueue() local
654 get_slot_next(struct netem_sched_data * q,u64 now) get_slot_next() argument
673 netem_peek(struct netem_sched_data * q) netem_peek() argument
690 netem_erase_head(struct netem_sched_data * q,struct sk_buff * skb) netem_erase_head() argument
703 struct netem_sched_data *q = qdisc_priv(sch); netem_dequeue() local
787 struct netem_sched_data *q = qdisc_priv(sch); netem_reset() local
828 get_slot(struct netem_sched_data * q,const struct nlattr * attr) get_slot() argument
850 get_correlation(struct netem_sched_data * q,const struct nlattr * attr) get_correlation() argument
859 get_reorder(struct netem_sched_data * q,const struct nlattr * attr) get_reorder() argument
867 get_corrupt(struct netem_sched_data * q,const struct nlattr * attr) get_corrupt() argument
875 get_rate(struct netem_sched_data * q,const struct nlattr * attr) get_rate() argument
889 get_loss_clg(struct netem_sched_data * q,const struct nlattr * attr) get_loss_clg() argument
981 struct Qdisc *root, *q; check_netem_in_tree() local
1015 struct netem_sched_data *q = qdisc_priv(sch); netem_change() local
1131 struct netem_sched_data *q = qdisc_priv(sch); netem_init() local
1148 struct netem_sched_data *q = qdisc_priv(sch); netem_destroy() local
1157 dump_loss_model(const struct netem_sched_data * q,struct sk_buff * skb) dump_loss_model() argument
1209 const struct netem_sched_data *q = qdisc_priv(sch); netem_dump() local
1296 struct netem_sched_data *q = qdisc_priv(sch); netem_dump_class() local
1310 struct netem_sched_data *q = qdisc_priv(sch); netem_graft() local
1318 struct netem_sched_data *q = qdisc_priv(sch); netem_leaf() local
[all...]
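loss_gilb_ell() above implements a Gilbert-Elliott channel: a good and a bad state, each with its own transition and loss probability. A hedged userspace sketch of such a two-state model (parameter names and the use of rand() are illustrative; netem draws from its own PRNG state, and loss_4state() is the richer 4-state variant):

#include <stdbool.h>
#include <stdlib.h>

struct gilb_ell {
	bool bad;          /* current channel state */
	double p;          /* P(good -> bad) per packet */
	double r;          /* P(bad -> good) per packet */
	double loss_good;  /* loss probability while good */
	double loss_bad;   /* loss probability while bad */
};

static double u01(void) { return (double)rand() / RAND_MAX; }

static bool ge_loss_event(struct gilb_ell *g)
{
	if (g->bad) {
		if (u01() < g->r)
			g->bad = false;
		return u01() < g->loss_bad;
	}
	if (u01() < g->p)
		g->bad = true;
	return u01() < g->loss_good;
}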
sch_sfq.c
143 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
146 return &q->slots[val].dep; in sfq_dep_head()
147 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
150 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
153 return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); in sfq_hash()
159 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
166 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
169 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
171 return sfq_hash(q, skb) + 1; in sfq_classify()
187 if (TC_H_MIN(res.classid) <= q->divisor) in sfq_classify()
[all …]
H A Dsch_dualpi2.c108 u64 last_qdelay; /* Q delay val at the last probability update */
147 static u64 head_enqueue_time(struct Qdisc *q) in head_enqueue_time() argument
149 struct sk_buff *skb = qdisc_peek_head(q); in head_enqueue_time()
170 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q) in next_pi2_timeout() argument
172 return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate); in next_pi2_timeout()
185 static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q) in skb_apply_step() argument
187 return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step; in skb_apply_step()
190 static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb) in dualpi2_mark() argument
193 q in dualpi2_mark()
199 dualpi2_reset_c_protection(struct dualpi2_sched_data * q) dualpi2_reset_c_protection() argument
209 dualpi2_calculate_c_protection(struct Qdisc * sch,struct dualpi2_sched_data * q,u32 wc) dualpi2_calculate_c_protection() argument
230 dualpi2_classic_marking(struct dualpi2_sched_data * q,struct sk_buff * skb,u32 prob,bool overload) dualpi2_classic_marking() argument
253 dualpi2_scalable_marking(struct dualpi2_sched_data * q,struct sk_buff * skb,u64 local_l_prob,u32 prob,bool overload) dualpi2_scalable_marking() argument
282 must_drop(struct Qdisc * sch,struct dualpi2_sched_data * q,struct sk_buff * skb) must_drop() argument
341 dualpi2_skb_classify(struct dualpi2_sched_data * q,struct sk_buff * skb) dualpi2_skb_classify() argument
388 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_enqueue_skb() local
444 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_qdisc_enqueue() local
517 dequeue_packet(struct Qdisc * sch,struct dualpi2_sched_data * q,int * credit_change,u64 now) dequeue_packet() argument
551 do_step_aqm(struct dualpi2_sched_data * q,struct sk_buff * skb,u64 now) do_step_aqm() argument
574 drop_and_retry(struct dualpi2_sched_data * q,struct sk_buff * skb,struct Qdisc * sch,enum skb_drop_reason reason) drop_and_retry() argument
585 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_qdisc_dequeue() local
626 get_queue_delays(struct dualpi2_sched_data * q,u64 * qdelay_c,u64 * qdelay_l) get_queue_delays() argument
641 struct dualpi2_sched_data *q = qdisc_priv(sch); calculate_probability() local
711 struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer); dualpi2_timer() local
762 struct dualpi2_sched_data *q; dualpi2_change() local
890 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_reset_default() local
916 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_init() local
947 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_dump() local
1023 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_dump_stats() local
1050 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_reset() local
1069 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_destroy() local
1094 dualpi2_unbind(struct Qdisc * q,unsigned long cl) dualpi2_unbind() argument
1101 struct dualpi2_sched_data *q = qdisc_priv(sch); dualpi2_tcf_block() local
[all...]
sch_red.c
55 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
57 return q->flags & TC_RED_ECN; in red_use_ecn()
60 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
62 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
65 static int red_use_nodrop(struct red_sched_data *q) in red_use_nodrop() argument
67 return q->flags & TC_RED_NODROP; in red_use_nodrop()
74 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
75 struct Qdisc *child = q->qdisc; in red_enqueue()
79 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
80 &q->vars, in red_enqueue()
[all …]
sch_fq_pie.c
75 static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q, in fq_pie_hash() argument
78 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_pie_hash()
84 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_classify() local
91 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_pie_classify()
94 filter = rcu_dereference_bh(q->filter_list); in fq_pie_classify()
96 return fq_pie_hash(q, skb) + 1; in fq_pie_classify()
112 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_pie_classify()
134 struct fq_pie_sched_data *q = qdisc_priv(sch); in fq_pie_qdisc_enqueue() local
152 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
155 memory_limited = q->memory_usage > q->memory_limit + skb->truesize; in fq_pie_qdisc_enqueue()
[all …]
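fq_pie_hash() above buckets a packet hash with reciprocal_scale(), which maps a 32-bit value into [0, n) using a multiply and shift instead of a modulo. The kernel helper reduces to the one-liner below; a small usage sketch:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit value uniformly into [0, n) without a division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	uint32_t idx = reciprocal_scale(0x9e3779b9u, 1024); /* flow bucket for this hash */

	printf("bucket %u\n", idx);
	return 0;
}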
sch_fq_codel.c
70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
73 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_codel_hash()
79 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
86 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
89 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
91 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
107 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
140 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
154 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
155 if (q in fq_codel_drop()
188 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_enqueue() local
259 struct fq_codel_sched_data *q = qdisc_priv(sch); dequeue_func() local
284 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dequeue() local
336 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_reset() local
370 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_change() local
462 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_destroy() local
472 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_init() local
534 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump() local
581 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump_stats() local
622 fq_codel_unbind(struct Qdisc * q,unsigned long cl) fq_codel_unbind() argument
629 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_tcf_block() local
646 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_dump_class_stats() local
692 struct fq_codel_sched_data *q = qdisc_priv(sch); fq_codel_walk() local
[all...]
sch_fq.c
79 /* Following field is only used for q->internal,
80 * because q->internal is not hashed in fq_root[]
93 struct rb_node rate_node; /* anchor in q->delayed tree */
197 static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow, in fq_flow_add_tail() argument
200 struct fq_perband_flows *pband = &q->band_flows[flow->band]; in fq_flow_add_tail()
213 static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_unset_throttled() argument
215 rb_erase(&f->rate_node, &q->delayed); in fq_flow_unset_throttled()
216 q->throttled_flows--; in fq_flow_unset_throttled()
217 fq_flow_add_tail(q, f, OLD_FLOW); in fq_flow_unset_throttled()
220 static void fq_flow_set_throttled(struct fq_sched_data *q, struc argument
258 fq_gc(struct fq_sched_data * q,struct rb_root * root,struct sock * sk) fq_gc() argument
315 const struct fq_sched_data *q = qdisc_priv(sch); fq_fastpath_check() local
357 struct fq_sched_data *q = qdisc_priv(sch); fq_classify() local
535 fq_packet_beyond_horizon(const struct sk_buff * skb,const struct fq_sched_data * q,u64 now) fq_packet_beyond_horizon() argument
545 struct fq_sched_data *q = qdisc_priv(sch); fq_enqueue() local
607 fq_check_throttled(struct fq_sched_data * q,u64 now) fq_check_throttled() argument
650 struct fq_sched_data *q = qdisc_priv(sch); fq_dequeue() local
801 struct fq_sched_data *q = qdisc_priv(sch); fq_reset() local
836 fq_rehash(struct fq_sched_data * q,struct rb_root * old_array,u32 old_log,struct rb_root * new_array,u32 new_log) fq_rehash() argument
888 struct fq_sched_data *q = qdisc_priv(sch); fq_resize() local
972 fq_load_weights(struct fq_sched_data * q,const struct nlattr * attr,struct netlink_ext_ack * extack) fq_load_weights() argument
991 fq_load_priomap(struct fq_sched_data * q,const struct nlattr * attr,struct netlink_ext_ack * extack) fq_load_priomap() argument
1017 struct fq_sched_data *q = qdisc_priv(sch); fq_change() local
1157 struct fq_sched_data *q = qdisc_priv(sch); fq_destroy() local
1167 struct fq_sched_data *q = qdisc_priv(sch); fq_init() local
1212 struct fq_sched_data *q = qdisc_priv(sch); fq_dump() local
1285 struct fq_sched_data *q = qdisc_priv(sch); fq_dump_stats() local
[all...]
sch_multiq.c
32 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_classify() local
35 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify()
53 if (band >= q->bands) in multiq_classify()
54 return q->queues[0]; in multiq_classify()
56 return q->queues[band]; in multiq_classify()
79 sch->q.qlen++; in multiq_enqueue()
89 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_dequeue() local
94 for (band = 0; band < q->bands; band++) { in multiq_dequeue()
96 q->curband++; in multiq_dequeue()
97 if (q in multiq_dequeue()
120 struct multiq_sched_data *q = qdisc_priv(sch); multiq_peek() local
151 struct multiq_sched_data *q = qdisc_priv(sch); multiq_reset() local
162 struct multiq_sched_data *q = qdisc_priv(sch); multiq_destroy() local
174 struct multiq_sched_data *q = qdisc_priv(sch); multiq_tune() local
238 struct multiq_sched_data *q = qdisc_priv(sch); multiq_init() local
263 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump() local
283 struct multiq_sched_data *q = qdisc_priv(sch); multiq_graft() local
296 struct multiq_sched_data *q = qdisc_priv(sch); multiq_leaf() local
304 struct multiq_sched_data *q = qdisc_priv(sch); multiq_find() local
319 multiq_unbind(struct Qdisc * q,unsigned long cl) multiq_unbind() argument
326 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class() local
336 struct multiq_sched_data *q = qdisc_priv(sch); multiq_dump_class_stats() local
349 struct multiq_sched_data *q = qdisc_priv(sch); multiq_walk() local
364 struct multiq_sched_data *q = qdisc_priv(sch); multiq_tcf_block() local
[all...]
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
47 #define dprintk(q, level, fmt, arg...) \ argument
50 pr_info("[%s] %s: " fmt, (q)->name, __func__, \
103 #define log_qop(q, op) \ argument
104 dprintk(q, 2, "call_qop(%s)%s\n", #op, \
105 (q)->ops->op ? "" : " (nop)")
107 #define call_qop(q, op, args...) \ argument
111 log_qop(q, op); \
112 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
114 (q)->cnt_ ## op++; \
118 #define call_void_qop(q, op, args...) \ argument
[all …]
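call_qop()/log_qop() above wrap every driver callback invocation: log the op name, treat a missing callback as a no-op, and count successful calls per op via token pasting. A hedged sketch of that macro pattern (GNU statement expressions, as the kernel itself uses; struct and op names are illustrative):

#include <stdio.h>

struct ops { int (*start)(void *priv); };

struct queue {
	const struct ops *ops;
	void *priv;
	unsigned int cnt_start;   /* one counter per op, selected by ## pasting */
};

#define call_op(q, op, ...)						\
({									\
	int __err;							\
	printf("call_op(%s)%s\n", #op, (q)->ops->op ? "" : " (nop)");	\
	__err = (q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0;		\
	if (!__err)							\
		(q)->cnt_ ## op++;					\
	__err;								\
})

static int start_op(void *priv) { (void)priv; return 0; }

int main(void)
{
	const struct ops o = { .start = start_op };
	struct queue q = { .ops = &o, .priv = 0, .cnt_start = 0 };

	return call_op(&q, start, q.priv);
}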
/linux/net/xdp/
xsk_queue.h
120 static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr) in __xskq_cons_read_addr_unchecked() argument
122 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in __xskq_cons_read_addr_unchecked()
123 u32 idx = cached_cons & q->ring_mask; in __xskq_cons_read_addr_unchecked()
128 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) in xskq_cons_read_addr_unchecked() argument
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
131 __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); in xskq_cons_read_addr_unchecked()
217 static inline bool xskq_has_descs(struct xsk_queue *q) in xskq_has_descs() argument
219 return q->cached_cons != q->cached_prod; in xskq_has_descs()
222 static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q, in xskq_cons_is_valid_desc() argument
227 q->invalid_descs++; in xskq_cons_is_valid_desc()
[all …]
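The xsk_queue helpers above consume from a shared ring through cached copies of the producer and consumer indices: the shared ring is touched only when the cached view shows unread entries. A simplified single-producer/single-consumer sketch (synchronization on refreshing the shared indices is elided; layout is illustrative):

#include <stdbool.h>
#include <stdint.h>

struct umem_ring { uint64_t addrs[128]; };

struct cons_queue {
	struct umem_ring *ring;
	uint32_t ring_mask;      /* ring size - 1, size a power of two */
	uint32_t cached_cons;    /* consumer's private position */
	uint32_t cached_prod;    /* last observed producer index */
};

static bool cons_read_addr(struct cons_queue *q, uint64_t *addr)
{
	if (q->cached_cons == q->cached_prod)
		return false;    /* nothing unread in the cached view */
	*addr = q->ring->addrs[q->cached_cons++ & q->ring_mask];
	return true;
}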
/linux/drivers/spi/
spi-fsl-qspi.c
295 static bool needs_swap_endian(struct fsl_qspi *q) in needs_swap_endian() argument
297 return !!(q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN); in needs_swap_endian()
300 static bool needs_4x_clock(struct fsl_qspi *q) in needs_4x_clock() argument
302 return !!(q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK); in needs_4x_clock()
305 static bool needs_fill_txfifo(struct fsl_qspi *q) in needs_fill_txfifo() argument
307 return !!(q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890); in needs_fill_txfifo()
310 static bool needs_wakeup_wait_mode(struct fsl_qspi *q) in needs_wakeup_wait_mode() argument
312 return !!(q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618); in needs_wakeup_wait_mode()
315 static bool needs_amba_base_offset(struct fsl_qspi *q) in needs_amba_base_offset() argument
317 return !(q
277 needs_swap_endian(struct fsl_qspi * q) needs_swap_endian() argument
282 needs_4x_clock(struct fsl_qspi * q) needs_4x_clock() argument
287 needs_fill_txfifo(struct fsl_qspi * q) needs_fill_txfifo() argument
292 needs_wakeup_wait_mode(struct fsl_qspi * q) needs_wakeup_wait_mode() argument
311 fsl_qspi_endian_xchg(struct fsl_qspi * q,u32 a) fsl_qspi_endian_xchg() argument
323 qspi_writel(struct fsl_qspi * q,u32 val,void __iomem * addr) qspi_writel() argument
331 qspi_readl(struct fsl_qspi * q,void __iomem * addr) qspi_readl() argument
341 struct fsl_qspi *q = dev_id; fsl_qspi_irq_handler() local
355 fsl_qspi_check_buswidth(struct fsl_qspi * q,u8 width) fsl_qspi_check_buswidth() argument
370 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_supports_op() local
415 fsl_qspi_prepare_lut(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_prepare_lut() argument
471 fsl_qspi_clk_prep_enable(struct fsl_qspi * q) fsl_qspi_clk_prep_enable() argument
491 fsl_qspi_clk_disable_unprep(struct fsl_qspi * q) fsl_qspi_clk_disable_unprep() argument
507 fsl_qspi_invalidate(struct fsl_qspi * q) fsl_qspi_invalidate() argument
525 fsl_qspi_select_mem(struct fsl_qspi * q,struct spi_device * spi,const struct spi_mem_op * op) fsl_qspi_select_mem() argument
552 fsl_qspi_read_ahb(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_ahb() argument
559 fsl_qspi_fill_txfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_fill_txfifo() argument
584 fsl_qspi_read_rxfifo(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_read_rxfifo() argument
605 fsl_qspi_do_op(struct fsl_qspi * q,const struct spi_mem_op * op) fsl_qspi_do_op() argument
630 fsl_qspi_readl_poll_tout(struct fsl_qspi * q,void __iomem * base,u32 mask,u32 delay_us,u32 timeout_us) fsl_qspi_readl_poll_tout() argument
644 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_exec_op() local
706 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_adjust_op_size() local
721 fsl_qspi_default_setup(struct fsl_qspi * q) fsl_qspi_default_setup() argument
812 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller); fsl_qspi_get_name() local
849 struct fsl_qspi *q = data; fsl_qspi_disable() local
858 struct fsl_qspi *q = data; fsl_qspi_cleanup() local
871 struct fsl_qspi *q; fsl_qspi_probe() local
965 struct fsl_qspi *q = dev_get_drvdata(dev); fsl_qspi_resume() local
[all...]
/linux/drivers/net/wireless/broadcom/b43/
pio.c
24 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
37 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
49 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
54 q = pio->tx_queue_AC_BK; in parse_cookie()
57 q = pio->tx_queue_AC_BE; in parse_cookie()
60 q = pio->tx_queue_AC_VI; in parse_cookie()
63 q = pio->tx_queue_AC_VO; in parse_cookie()
66 q = pio->tx_queue_mcast; in parse_cookie()
69 if (B43_WARN_ON(!q)) in parse_cookie()
72 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) in parse_cookie()
[all …]
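generate_cookie()/parse_cookie() above round-trip a queue identity through firmware in one u16: the queue index goes in the top four bits, offset by one (apparently so an all-zero cookie stays invalid), and the packet slot in the low twelve. A self-checking sketch of that packing:

#include <assert.h>
#include <stdint.h>

static uint16_t make_cookie(unsigned int qindex, unsigned int slot)
{
	return (uint16_t)(((qindex + 1) << 12) | (slot & 0x0fff));
}

static void split_cookie(uint16_t cookie, unsigned int *qindex, unsigned int *slot)
{
	*qindex = (cookie >> 12) - 1;
	*slot = cookie & 0x0fff;
}

int main(void)
{
	unsigned int qi, sl;

	split_cookie(make_cookie(3, 42), &qi, &sl);
	assert(qi == 3 && sl == 42);
	return 0;
}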
/linux/drivers/s390/cio/
qdio_main.c
107 * @q: queue to manipulate
116 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
119 int tmp_count = count, tmp_start = start, nr = q->nr; in qdio_do_eqbs()
122 qperf_inc(q, eqbs); in qdio_do_eqbs()
124 if (!q->is_input_q) in qdio_do_eqbs()
125 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
127 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
137 qperf_inc(q, eqbs_partial); in qdio_do_eqbs()
138 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", in qdio_do_eqbs()
143 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_queue.c
46 inline void rxe_queue_reset(struct rxe_queue *q) in rxe_queue_reset() argument
52 memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); in rxe_queue_reset()
58 struct rxe_queue *q; in rxe_queue_init() local
66 q = kzalloc(sizeof(*q), GFP_KERNEL); in rxe_queue_init()
67 if (!q) in rxe_queue_init()
70 q->rxe = rxe; in rxe_queue_init()
71 q->type = type; in rxe_queue_init()
74 q->elem_size = elem_size; in rxe_queue_init()
81 q->log2_elem_size = order_base_2(elem_size); in rxe_queue_init()
85 q->index_mask = num_slots - 1; in rxe_queue_init()
[all …]
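rxe_queue_init() above sizes the queue so that num_slots is a power of two and indices can be masked with index_mask = num_slots - 1 (order_base_2() computes the log2 of the element size). A sketch of the masking rule with a portable stand-in for the rounding helper:

#include <assert.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int num_slots = roundup_pow_of_two(100); /* -> 128 */
	unsigned int index_mask = num_slots - 1;

	assert(num_slots == 128);
	assert(((127 + 1) & index_mask) == 0); /* the index after the last wraps to 0 */
	return 0;
}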
rxe_queue.h
18 * - The driver indices are always masked off to q->index_mask
26 * - By passing the type in the parameter list separate from q
83 void rxe_queue_reset(struct rxe_queue *q);
88 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
95 static inline u32 queue_next_index(struct rxe_queue *q, int index) in queue_next_index() argument
97 return (index + 1) & q->index_mask; in queue_next_index()
100 static inline u32 queue_get_producer(const struct rxe_queue *q, in queue_get_producer() argument
108 prod = smp_load_acquire(&q->buf->producer_index); in queue_get_producer()
112 prod = q->index; in queue_get_producer()
116 prod = q->buf->producer_index; in queue_get_producer()
[all …]
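queue_get_producer() above uses smp_load_acquire() when the producer runs in another context: the acquire ensures the consumer observes the producer's element writes before it trusts the advanced index. A C11 sketch of the same pairing (buffer layout is illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct queue_buf {
	_Atomic uint32_t producer_index;
	uint64_t data[64];
};

/* Consumer side: pairs with the producer's release store below. */
static uint32_t get_producer(struct queue_buf *buf)
{
	return atomic_load_explicit(&buf->producer_index, memory_order_acquire);
}

/* Producer side: publish the element, then release-store the new index. */
static void publish(struct queue_buf *buf, uint32_t idx, uint64_t val)
{
	buf->data[idx & 63] = val;
	atomic_store_explicit(&buf->producer_index, idx + 1, memory_order_release);
}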
/linux/drivers/net/ethernet/pensando/ionic/
ionic_txrx.c
15 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
18 static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
22 static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
25 static void ionic_tx_clean(struct ionic_queue *q,
30 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell) in ionic_txq_post() argument
36 ionic_q_post(q, ring_dbell); in ionic_txq_post()
39 static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell) in ionic_rxq_post() argument
41 ionic_q_post(q, ring_dbell); in ionic_rxq_post()
44 bool ionic_txq_poke_doorbell(struct ionic_queue *q) in ionic_txq_poke_doorbell() argument
50 netdev = q->lif->netdev; in ionic_txq_poke_doorbell()
[all …]
/linux/block/
blk-pm.c
10 * @q: the queue of the device
14 * Initialize runtime-PM-related fields for @q and start auto suspend for
17 * request queue @q has been allocated, and runtime PM for it can not happen
29 void blk_pm_runtime_init(struct request_queue *q, struct device *dev) in blk_pm_runtime_init() argument
31 q->dev = dev; in blk_pm_runtime_init()
32 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
33 pm_runtime_set_autosuspend_delay(q->dev, -1); in blk_pm_runtime_init()
34 pm_runtime_use_autosuspend(q->dev); in blk_pm_runtime_init()
40 * @q: the queue of the device
59 int blk_pre_runtime_suspend(struct request_queue *q) in blk_pre_runtime_suspend() argument
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
169 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
171 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
174 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
182 * @q: the response queue to replenish
189 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
214 * @q: the Tx queue containing Tx descriptors for the packet
233 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
237 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
[all …]
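rspq_to_qset()/txq_to_qset() above recover the enclosing qset from a pointer to an embedded member via container_of(). A minimal self-checking version of that idiom (the kernel macro adds a type check omitted here):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rspq { int credits; };
struct qset { int id; struct rspq rspq; };

int main(void)
{
	struct qset qs = { .id = 7 };
	struct rspq *q = &qs.rspq;

	assert(container_of(q, struct qset, rspq)->id == 7);
	return 0;
}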
