Searched refs:rq (Results 1 – 25 of 449) sorted by relevance

/linux/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
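
The vnic_rq_alloc_bufs() snippet above allocates buffer bookkeeping in blocks and links each entry to its successor, wrapping the last entry back to bufs[0] so that to_use and to_clean can walk the ring without any index arithmetic. Below is a minimal user-space sketch of that circular-linking pattern; ring_buf, my_ring, and ring_alloc_bufs are illustrative stand-ins, not the driver's structures.

    #include <stdlib.h>

    struct ring_buf {
        struct ring_buf *next;   /* successor in the ring */
        unsigned int index;      /* descriptor index this entry tracks */
    };

    struct my_ring {
        unsigned int desc_count;
        struct ring_buf *bufs;   /* one contiguous block, for simplicity */
        struct ring_buf *to_use;
        struct ring_buf *to_clean;
    };

    /* Link bufs[0..desc_count-1] into a circle; the last entry wraps to bufs[0]. */
    static int ring_alloc_bufs(struct my_ring *ring)
    {
        unsigned int i;

        ring->bufs = calloc(ring->desc_count, sizeof(*ring->bufs));
        if (!ring->bufs)
            return -1;

        for (i = 0; i < ring->desc_count; i++) {
            ring->bufs[i].index = i;
            ring->bufs[i].next = (i + 1 < ring->desc_count) ?
                &ring->bufs[i + 1] : &ring->bufs[0];
        }

        ring->to_use = ring->to_clean = &ring->bufs[0];
        return 0;
    }

    int main(void)
    {
        struct my_ring ring = { .desc_count = 8 };
        struct ring_buf *b;
        unsigned int i;

        if (ring_alloc_bufs(&ring))
            return 1;
        /* walking desc_count steps from to_use lands back on bufs[0] */
        b = ring.to_use;
        for (i = 0; i < ring.desc_count; i++)
            b = b->next;
        free(ring.bufs);
        return b == &ring.bufs[0] ? 0 : 1;
    }

The wrap on the final entry is what lets the producer (to_use) and consumer (to_clean) pointers chase each other around the ring indefinitely.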
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
107 return rq->to_use->desc; in vnic_rq_next_desc()
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
112 return rq->to_use->index; in vnic_rq_next_index()
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
117 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
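
The vnic_rq_desc_used() arithmetic above (desc_count - desc_avail - 1) reflects the usual ring convention of keeping one descriptor permanently unused, so a completely full ring never looks identical to an empty one. A hedged, self-contained illustration of that accounting, using toy names only:

    #include <assert.h>

    struct toy_ring {
        unsigned int desc_count;   /* total descriptors in the ring */
        unsigned int desc_avail;   /* descriptors a producer may still post */
    };

    static unsigned int toy_desc_used(const struct toy_ring *r)
    {
        /* one slot is reserved so "full" never aliases "empty" */
        return r->desc_count - r->desc_avail - 1;
    }

    int main(void)
    {
        struct toy_ring r = { .desc_count = 64, .desc_avail = 63 };

        assert(toy_desc_used(&r) == 0);    /* freshly initialised: empty */
        r.desc_avail = 0;
        assert(toy_desc_used(&r) == 63);   /* at most count - 1 in flight */
        return 0;
    }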
/linux/kernel/sched/
sched.h
79 struct rq;
104 extern void calc_global_load_tick(struct rq *this_rq);
105 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
107 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
361 extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
412 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
416 extern void fair_server_init(struct rq *rq);
417 extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
670 void (*func)(struct rq *rq);
732 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
[all …]
pelt.h
11 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
12 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
13 bool update_other_load_avgs(struct rq *rq);
16 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
18 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
20 return READ_ONCE(rq->avg_hw.load_avg); in hw_load_avg()
24 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) in update_hw_load_avg() argument
29 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
36 int update_irq_load_avg(struct rq *rq, u64 running);
39 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
[all …]
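
pelt.h above pairs real declarations (e.g. update_hw_load_avg()) with static inline stubs so that callers compile unchanged whether or not the corresponding PELT signal is configured in. A small sketch of that config-gating pattern as it might appear in a header; CONFIG_FOO and update_foo_load are illustrative names, not the scheduler's:

    #ifdef CONFIG_FOO
    int update_foo_load(unsigned long long now, int running);
    #else
    static inline int update_foo_load(unsigned long long now, int running)
    {
        return 0;   /* feature compiled out: nothing to update */
    }
    #endif

Callers simply invoke update_foo_load(); when the option is off, the stub is inlined away and the call costs nothing.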
deadline.c
71 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
73 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
76 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se) in rq_of_dl_se()
78 struct rq *rq = dl_se->rq; in rq_of_dl_se() local
81 rq = task_rq(dl_task_of(dl_se)); in rq_of_dl_se()
83 return rq; in rq_of_dl_se()
183 struct rq *rq = cpu_rq(i); in __dl_update() local
185 rq->dl.extra_bw += bw; in __dl_update()
288 static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw) in dl_rq_change_utilization() argument
291 sub_running_bw(dl_se, &rq->dl); in dl_rq_change_utilization()
[all …]
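
rq_of_dl_rq() above recovers the enclosing struct rq from a pointer to its embedded dl member via container_of(). A minimal user-space sketch of the same idiom; the macro is open-coded with offsetof() here, and struct outer/inner are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int value; };

    struct outer {
        int id;
        struct inner in;   /* embedded, like rq->dl */
    };

    static struct outer *outer_of_inner(struct inner *in)
    {
        return container_of(in, struct outer, in);
    }

    int main(void)
    {
        struct outer o = { .id = 7, .in = { .value = 42 } };
        struct inner *ip = &o.in;

        printf("id = %d\n", outer_of_inner(ip)->id);   /* prints 7 */
        return 0;
    }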
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
34 if (rq) in rq_sched_info_dequeue()
35 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
[all …]
rt.c
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
182 return rt_rq->rq; in rq_of_rt_rq()
191 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
196 return rt_rq->rq; in rq_of_rt_se()
230 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
234 rt_rq->rq = rq; in init_tg_rt_entry()
244 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
304 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
306 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
309 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
25 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
30 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
32 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
35 static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf) in pick_task_stop() argument
37 if (!sched_stop_runnable(rq)) in pick_task_stop()
40 return rq->stop; in pick_task_stop()
44 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
46 add_nr_running(rq, 1); in enqueue_task_stop()
[all …]
core.c
123 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
299 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
304 rq->core->core_task_seq++; in sched_core_enqueue()
309 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
312 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
317 rq->core->core_task_seq++; in sched_core_dequeue()
320 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
329 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
330 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
331 resched_curr(rq); in sched_core_dequeue()
[all …]
pelt.c
347 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
349 if (___update_load_sum(now, &rq->avg_rt, in update_rt_rq_load_avg()
354 ___update_load_avg(&rq->avg_rt, 1); in update_rt_rq_load_avg()
355 trace_pelt_rt_tp(rq); in update_rt_rq_load_avg()
373 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
375 if (___update_load_sum(now, &rq->avg_dl, in update_dl_rq_load_avg()
380 ___update_load_avg(&rq->avg_dl, 1); in update_dl_rq_load_avg()
381 trace_pelt_dl_tp(rq); in update_dl_rq_load_avg()
404 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) in update_hw_load_avg() argument
406 if (___update_load_sum(now, &rq->avg_hw, in update_hw_load_avg()
[all …]
ext.h
11 void scx_tick(struct rq *rq);
17 bool scx_can_stop_tick(struct rq *rq);
18 void scx_rq_activate(struct rq *rq);
19 void scx_rq_deactivate(struct rq *rq);
45 static inline void scx_tick(struct rq *rq) {} in scx_tick() argument
51 static inline bool scx_can_stop_tick(struct rq *rq) { return true; } in scx_can_stop_tick() argument
52 static inline void scx_rq_activate(struct rq *rq) {} in scx_rq_activate() argument
53 static inline void scx_rq_deactivate(struct rq *rq) {} in scx_rq_deactivate() argument
62 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
64 static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) in scx_update_idle() argument
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
86 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
89 return rq->ring.desc_avail; in vnic_rq_desc_avail()
92 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
95 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
98 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
100 return rq->to_use->desc; in vnic_rq_next_desc()
103 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
105 return rq->to_use->index; in vnic_rq_next_index()
108 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
113 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
65 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
69 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
72 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
90 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, in mlx5e_read_enhanced_title_slot() argument
93 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_enhanced_title_slot()
98 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state))) in mlx5e_read_enhanced_title_slot()
101 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_read_enhanced_title_slot()
106 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1); in mlx5e_read_enhanced_title_slot()
[all …]
/linux/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_int.c
173 struct esas2r_request *rq, in esas2r_handle_outbound_rsp_err() argument
181 if (unlikely(rq->req_stat != RS_SUCCESS)) { in esas2r_handle_outbound_rsp_err()
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); in esas2r_handle_outbound_rsp_err()
184 if (rq->req_stat == RS_ABORTED) { in esas2r_handle_outbound_rsp_err()
185 if (rq->timeout > RQ_MAX_TIMEOUT) in esas2r_handle_outbound_rsp_err()
186 rq->req_stat = RS_TIMEOUT; in esas2r_handle_outbound_rsp_err()
187 } else if (rq->req_stat == RS_SCSI_ERROR) { in esas2r_handle_outbound_rsp_err()
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; in esas2r_handle_outbound_rsp_err()
197 rq->req_stat = RS_SUCCESS; in esas2r_handle_outbound_rsp_err()
198 rq->func_rsp.scsi_rsp.scsi_stat = in esas2r_handle_outbound_rsp_err()
[all …]
/linux/drivers/gpu/drm/i915/gt/
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
selftest_timeline.c
454 static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) in emit_ggtt_store_dw() argument
458 cs = intel_ring_begin(rq, 4); in emit_ggtt_store_dw()
462 if (GRAPHICS_VER(rq->i915) >= 8) { in emit_ggtt_store_dw()
467 } else if (GRAPHICS_VER(rq->i915) >= 4) { in emit_ggtt_store_dw()
479 intel_ring_advance(rq, cs); in emit_ggtt_store_dw()
487 struct i915_request *rq; in checked_tl_write() local
492 rq = ERR_PTR(err); in checked_tl_write()
503 rq = intel_engine_create_kernel_request(engine); in checked_tl_write()
504 if (IS_ERR(rq)) in checked_tl_write()
507 i915_request_get(rq); in checked_tl_write()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
19 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_xsk_alloc_rx_mpwqe() argument
21 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); in mlx5e_xsk_alloc_rx_mpwqe()
22 struct mlx5e_icosq *icosq = rq->icosq; in mlx5e_xsk_alloc_rx_mpwqe()
30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
35 batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs, in mlx5e_xsk_alloc_rx_mpwqe()
36 rq->mpwqe.pages_per_wqe); in mlx5e_xsk_alloc_rx_mpwqe()
44 for (; batch < rq->mpwqe.pages_per_wqe; batch++) { in mlx5e_xsk_alloc_rx_mpwqe()
45 xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_mpwqe()
50 pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs); in mlx5e_xsk_alloc_rx_mpwqe()
52 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_xsk_alloc_rx_mpwqe()
[all …]
/linux/block/
blk-flush.c
103 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
105 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
108 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
115 rq->bio = rq->biotail; in blk_flush_restore_request()
116 if (rq->bio) in blk_flush_restore_request()
117 rq->__sector = rq->bio->bi_iter.bi_sector; in blk_flush_restore_request()
120 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
121 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
124 static void blk_account_io_flush(struct request *rq) in blk_account_io_flush() argument
126 struct block_device *part = rq->q->disk->part0; in blk_account_io_flush()
[all …]
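
blk_flush_cur_seq() above computes the next flush stage as 1 << ffz(rq->flush.seq), i.e. the lowest bit not yet set in the per-request sequence mask. A hedged user-space sketch of that bit trick; ffz() is open-coded with a GCC builtin, and the stage bookkeeping is illustrative, not the block layer's constants:

    #include <stdio.h>

    /* find first zero bit, like the kernel's ffz(); undefined if none exists */
    static unsigned int my_ffz(unsigned long word)
    {
        return __builtin_ctzl(~word);
    }

    /* the next stage is the lowest bit that has not completed yet */
    static unsigned long next_stage(unsigned long seq)
    {
        return 1UL << my_ffz(seq);
    }

    int main(void)
    {
        unsigned long seq = 0;

        seq |= next_stage(seq);   /* completes bit 0 */
        seq |= next_stage(seq);   /* completes bit 1 */
        printf("next = %#lx, done = %#lx\n", next_stage(seq), seq);  /* 0x4, 0x3 */
        return 0;
    }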
blk-mq.c
43 #include "blk-rq-qos.h"
49 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
50 static void blk_mq_request_bypass_insert(struct request *rq,
93 static bool blk_mq_check_in_driver(struct request *rq, void *priv) in blk_mq_check_in_driver() argument
97 if (rq->rq_flags & RQF_IO_STAT && in blk_mq_check_in_driver()
98 (!bdev_is_partition(mi->part) || rq->part == mi->part) && in blk_mq_check_in_driver()
99 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_in_driver()
100 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_in_driver()
373 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
375 memset(rq, in blk_rq_init()
391 blk_mq_rq_time_init(struct request * rq,u64 alloc_time_ns) blk_mq_rq_time_init() argument
416 struct request *rq = tags->static_rqs[tag]; blk_mq_rq_ctx_init() local
468 struct request *rq; __blk_mq_alloc_requests_batch() local
505 struct request *rq; __blk_mq_alloc_requests() local
601 struct request *rq; blk_mq_rq_cache_fill() local
619 struct request *rq; blk_mq_alloc_cached_request() local
652 struct request *rq; blk_mq_alloc_request() local
703 struct request *rq; blk_mq_alloc_request_hctx() local
770 blk_mq_finish_request(struct request * rq) blk_mq_finish_request() argument
787 __blk_mq_free_request(struct request * rq) __blk_mq_free_request() argument
808 blk_mq_free_request(struct request * rq) blk_mq_free_request() argument
827 struct request *rq; blk_mq_free_plug_rqs() local
833 blk_dump_rq_flags(struct request * rq,char * msg) blk_dump_rq_flags() argument
1141 __blk_mq_end_request_acct(struct request * rq,u64 now) __blk_mq_end_request_acct() argument
1150 __blk_mq_end_request(struct request * rq,blk_status_t error) __blk_mq_end_request() argument
1167 blk_mq_end_request(struct request * rq,blk_status_t error) blk_mq_end_request() argument
1192 struct request *rq; blk_mq_end_request_batch() local
1241 struct request *rq, *next; blk_complete_reqs() local
1263 blk_mq_complete_need_ipi(struct request * rq) blk_mq_complete_need_ipi() argument
1290 blk_mq_complete_send_ipi(struct request * rq) blk_mq_complete_send_ipi() argument
1299 blk_mq_raise_softirq(struct request * rq) blk_mq_raise_softirq() argument
1310 blk_mq_complete_request_remote(struct request * rq) blk_mq_complete_request_remote() argument
1344 blk_mq_complete_request(struct request * rq) blk_mq_complete_request() argument
1359 blk_mq_start_request(struct request * rq) blk_mq_start_request() argument
1399 blk_add_rq_to_plug(struct blk_plug * plug,struct request * rq) blk_add_rq_to_plug() argument
1437 blk_execute_rq_nowait(struct request * rq,bool at_head) blk_execute_rq_nowait() argument
1461 blk_end_sync_rq(struct request * rq,blk_status_t ret) blk_end_sync_rq() argument
1470 blk_rq_is_poll(struct request * rq) blk_rq_is_poll() argument
1480 blk_rq_poll_completion(struct request * rq,struct completion * wait) blk_rq_poll_completion() argument
1498 blk_execute_rq(struct request * rq,bool at_head) blk_execute_rq() argument
1524 __blk_mq_requeue_request(struct request * rq) __blk_mq_requeue_request() argument
1539 blk_mq_requeue_request(struct request * rq,bool kick_requeue_list) blk_mq_requeue_request() argument
1564 struct request *rq; blk_mq_requeue_work() local
1609 blk_is_flush_data_rq(struct request * rq) blk_is_flush_data_rq() argument
1614 blk_mq_rq_inflight(struct request * rq,void * priv) blk_mq_rq_inflight() argument
1668 blk_mq_req_expired(struct request * rq,struct blk_expired_data * expired) blk_mq_req_expired() argument
1688 blk_mq_put_rq_ref(struct request * rq) blk_mq_put_rq_ref() argument
1698 blk_mq_check_expired(struct request * rq,void * priv) blk_mq_check_expired() argument
1716 blk_mq_handle_expired(struct request * rq,void * priv) blk_mq_handle_expired() argument
1819 struct request *rq; global() member
1857 __blk_mq_alloc_driver_tag(struct request * rq) __blk_mq_alloc_driver_tag() argument
1910 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_mark_tag_wait() argument
2022 blk_mq_handle_dev_resource(struct request * rq,struct list_head * list) blk_mq_handle_dev_resource() argument
2035 blk_mq_prep_dispatch_rq(struct request * rq,bool need_budget) blk_mq_prep_dispatch_rq() argument
2076 struct request *rq; blk_mq_release_budgets() local
2111 struct request *rq; blk_mq_dispatch_rq_list() local
2564 blk_mq_request_bypass_insert(struct request * rq,blk_insert_t flags) blk_mq_request_bypass_insert() argument
2580 struct request *rq; blk_mq_insert_requests() local
2613 blk_mq_insert_request(struct request * rq,blk_insert_t flags) blk_mq_insert_request() argument
2675 blk_mq_bio_to_request(struct request * rq,struct bio * bio,unsigned int nr_segs) blk_mq_bio_to_request() argument
2701 __blk_mq_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq,bool last) __blk_mq_issue_directly() argument
2733 blk_mq_get_budget_and_tag(struct request * rq) blk_mq_get_budget_and_tag() argument
2759 blk_mq_try_issue_directly(struct blk_mq_hw_ctx * hctx,struct request * rq) blk_mq_try_issue_directly() argument
2790 blk_mq_request_issue_directly(struct request * rq,bool last) blk_mq_request_issue_directly() argument
2808 struct request *rq; blk_mq_issue_direct() local
2854 struct request *rq = rq_list_pop(rqs); blk_mq_extract_queue_requests() local
2911 struct request *rq = rq_list_pop(rqs); blk_mq_dispatch_list() local
2998 struct request *rq = list_first_entry(list, struct request, blk_mq_try_issue_list_directly() local
3051 struct request *rq; blk_mq_get_new_requests() local
3074 struct request *rq; blk_mq_peek_cached_request() local
3089 blk_mq_use_cached_rq(struct request * rq,struct blk_plug * plug,struct bio * bio) blk_mq_use_cached_rq() argument
3138 struct request *rq; blk_mq_submit_bio() local
3261 blk_insert_cloned_request(struct request * rq) blk_insert_cloned_request() argument
3327 blk_rq_unprep_clone(struct request * rq) blk_rq_unprep_clone() argument
3356 blk_rq_prep_clone(struct request * rq,struct request * rq_src,struct bio_set * bs,gfp_t gfp_mask,int (* bio_ctr)(struct bio *,struct bio *,void *),void * data) blk_rq_prep_clone() argument
3413 blk_steal_bios(struct bio_list * list,struct request * rq) blk_steal_bios() argument
3454 struct request *rq = drv_tags->rqs[i]; blk_mq_clear_rq_mapping() local
3482 struct request *rq = tags->static_rqs[i]; blk_mq_free_rqs() local
3571 blk_mq_init_request(struct blk_mq_tag_set * set,struct request * rq,unsigned int hctx_idx,int node) blk_mq_init_request() argument
3642 struct request *rq = p; blk_mq_alloc_rqs() local
3666 blk_mq_has_request(struct request * rq,void * data) blk_mq_has_request() argument
5219 blk_rq_poll(struct request * rq,struct io_comp_batch * iob,unsigned int poll_flags) blk_rq_poll() argument
5237 blk_mq_rq_cpu(struct request * rq) blk_mq_rq_cpu() argument
[all...]
blk-crypto-internal.h
32 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
58 static inline void blk_crypto_rq_set_defaults(struct request *rq) in blk_crypto_rq_set_defaults() argument
60 rq->crypt_ctx = NULL; in blk_crypto_rq_set_defaults()
61 rq->crypt_keyslot = NULL; in blk_crypto_rq_set_defaults()
64 static inline bool blk_crypto_rq_is_encrypted(struct request *rq) in blk_crypto_rq_is_encrypted() argument
66 return rq->crypt_ctx; in blk_crypto_rq_is_encrypted()
69 static inline bool blk_crypto_rq_has_keyslot(struct request *rq) in blk_crypto_rq_has_keyslot() argument
71 return rq->crypt_keyslot; in blk_crypto_rq_has_keyslot()
100 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, in bio_crypt_rq_ctx_compatible() argument
124 static inline void blk_crypto_rq_set_defaults(struct request *rq) { } in blk_crypto_rq_set_defaults() argument
[all …]
/linux/include/linux/
blk-mq.h
232 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough() argument
234 return blk_op_is_passthrough(rq->cmd_flags); in blk_rq_is_passthrough()
244 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
246 #define rq_dma_dir(rq) \ argument
247 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
260 static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) in rq_list_add_tail() argument
262 rq->rq_next = NULL; in rq_list_add_tail()
264 rl->tail->rq_next = rq; in rq_list_add_tail()
266 rl->head = rq; in rq_list_add_tail()
267 rl->tail = rq; in rq_list_add_tail()
[all …]
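
rq_list_add_tail() above appends a request to an intrusive singly linked list that keeps both head and tail pointers, making the append O(1). A small sketch of the same structure with stand-in types; node and nlist are illustrative, not the blk-mq types:

    #include <stddef.h>

    struct node {
        struct node *next;
        int payload;
    };

    struct nlist {
        struct node *head;
        struct node *tail;
    };

    /* O(1) append: link off tail when the list is non-empty, else set head */
    static void nlist_add_tail(struct nlist *l, struct node *n)
    {
        n->next = NULL;
        if (l->tail)
            l->tail->next = n;
        else
            l->head = n;
        l->tail = n;
    }

Keeping the tail pointer in the list head is what spares each append a full traversal, mirroring the rl->tail bookkeeping in the snippet.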
/linux/drivers/gpu/drm/i915/selftests/
i915_perf.c
161 static int write_timestamp(struct i915_request *rq, int slot) in write_timestamp() argument
166 cs = intel_ring_begin(rq, 6); in write_timestamp()
171 if (GRAPHICS_VER(rq->i915) >= 8) in write_timestamp()
183 intel_ring_advance(rq, cs); in write_timestamp()
188 static ktime_t poll_status(struct i915_request *rq, int slot) in poll_status() argument
190 while (!intel_read_status_page(rq->engine, slot) && in poll_status()
191 !i915_request_completed(rq)) in poll_status()
201 struct i915_request *rq; in live_noa_delay() local
224 rq = intel_engine_create_kernel_request(stream->engine); in live_noa_delay()
225 if (IS_ERR(rq)) { in live_noa_delay()
[all …]
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
62 #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) argument
155 struct hinic_rq *rq, u16 global_qid) in hinic_rq_prepare_ctxt() argument
162 wq = rq->wq; in hinic_rq_prepare_ctxt()
181 HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); in hinic_rq_prepare_ctxt()
202 rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); in hinic_rq_prepare_ctxt()
203 rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); in hinic_rq_prepare_ctxt()
247 static int alloc_rq_skb_arr(struct hinic_rq *rq) in alloc_rq_skb_arr() argument
249 struct hinic_wq *wq = rq->wq; in alloc_rq_skb_arr()
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
253 rq->saved_skb = vzalloc(skb_arr_size); in alloc_rq_skb_arr()
[all …]
