
Searched refs:rq (Results 1 – 25 of 591) sorted by relevance


/linux/drivers/gpu/drm/i915/
i915_request.c
114 struct i915_request *rq = to_request(fence); in i915_fence_release() local
116 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
117 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
119 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); in i915_fence_release()
120 if (rq->batch_res) { in i915_fence_release()
121 i915_vma_resource_put(rq->batch_res); in i915_fence_release()
122 rq->batch_res = NULL; in i915_fence_release()
132 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
133 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
166 if (is_power_of_2(rq->execution_mask) && in i915_fence_release()
[all …]
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \ argument
65 const struct i915_request *rq__ = (rq); \
378 void __i915_request_skip(struct i915_request *rq);
379 bool i915_request_set_error_once(struct i915_request *rq, int error);
380 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
383 void __i915_request_queue(struct i915_request *rq,
385 void __i915_request_queue_bh(struct i915_request *rq);
387 bool i915_request_retire(struct i915_request *rq);
388 void i915_request_retire_upto(struct i915_request *rq);
400 i915_request_get(struct i915_request *rq) in i915_request_get() argument
[all …]
/linux/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
9 bool update_other_load_avgs(struct rq *rq);
12 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
14 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg()
13 hw_load_avg(struct rq * rq) hw_load_avg() argument
19 update_hw_load_avg(u64 now,struct rq * rq,u64 capacity) update_hw_load_avg() argument
24 hw_load_avg(struct rq * rq) hw_load_avg() argument
34 update_irq_load_avg(struct rq * rq,u64 running) update_irq_load_avg() argument
64 rq_clock_pelt(struct rq * rq) rq_clock_pelt() argument
73 _update_idle_rq_clock_pelt(struct rq * rq) _update_idle_rq_clock_pelt() argument
95 update_rq_clock_pelt(struct rq * rq,s64 delta) update_rq_clock_pelt() argument
133 update_idle_rq_clock_pelt(struct rq * rq) update_idle_rq_clock_pelt() argument
193 update_rt_rq_load_avg(u64 now,struct rq * rq,int running) update_rt_rq_load_avg() argument
199 update_dl_rq_load_avg(u64 now,struct rq * rq,int running) update_dl_rq_load_avg() argument
205 update_hw_load_avg(u64 now,struct rq * rq,u64 capacity) update_hw_load_avg() argument
210 hw_load_avg(struct rq * rq) hw_load_avg() argument
216 update_irq_load_avg(struct rq * rq,u64 running) update_irq_load_avg() argument
221 rq_clock_pelt(struct rq * rq) rq_clock_pelt() argument
227 update_rq_clock_pelt(struct rq * rq,s64 delta) update_rq_clock_pelt() argument
230 update_idle_rq_clock_pelt(struct rq * rq) update_idle_rq_clock_pelt() argument
[all...]
sched.h
78 struct rq;
109 extern void calc_global_load_tick(struct rq *this_rq);
110 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
112 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
319 /* nests inside the rq lock: */
366 extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
371 * dl_se::rq -- runqueue we belong to.
390 extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *r
661 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ global() member
738 struct rq *rq; global() member
1011 struct rq { global() struct
1013 __lock rq global() argument
1038 uclamp rq global() argument
1043 cfs rq global() argument
1044 rt rq global() argument
1045 dl rq global() argument
1049 leaf_cfs_rq_list rq global() argument
1050 tmp_alone_branch rq global() argument
1059 nr_uninterruptible rq global() argument
1061 curr rq global() argument
1062 idle rq global() argument
1063 stop rq global() argument
1064 next_balance rq global() argument
1065 prev_mm rq global() argument
1067 clock_update_flags rq global() argument
1068 clock rq global() argument
1070 ____cacheline_aligned rq global() argument
1071 clock_pelt rq global() argument
1072 lost_idle_time rq global() argument
1073 clock_pelt_idle rq global() argument
1074 clock_idle rq global() argument
1076 clock_pelt_idle_copy rq global() argument
1077 clock_idle_copy rq global() argument
1080 nr_iowait rq global() argument
1083 last_seen_need_resched_ns rq global() argument
1084 ticks_without_resched rq global() argument
1088 membarrier_state rq global() argument
1092 rd rq global() argument
1093 sd rq global() argument
1118 avg_irq rq global() argument
1135 prev_irq_time rq global() argument
1136 psi_irq_time rq global() argument
1142 prev_steal_time_rq rq global() argument
1159 rq_sched_info rq global() argument
1160 rq_cpu_time rq global() argument
1161 hrtick_timer rq global() argument
1187 push_work rq global() argument
1188 core rq global() argument
1204 core_tree rq global() argument
1216 cfsb_csd_list rq rq_of() argument
1229 cpu_of(struct rq * rq) cpu_of() argument
1262 sched_core_enabled(struct rq * rq) sched_core_enabled() argument
1276 rq_lockp(struct rq * rq) rq_lockp() argument
1284 __rq_lockp(struct rq * rq) __rq_lockp() argument
1303 sched_cpu_cookie_match(struct rq * rq,struct task_struct * p) sched_cpu_cookie_match() argument
1312 sched_core_cookie_match(struct rq * rq,struct task_struct * p) sched_core_cookie_match() argument
1335 sched_group_cookie_match(struct rq * rq,struct task_struct * p,struct sched_group * group) sched_group_cookie_match() argument
1365 sched_core_enabled(struct rq * rq) sched_core_enabled() argument
1375 rq_lockp(struct rq * rq) rq_lockp() argument
1380 __rq_lockp(struct rq * rq) __rq_lockp() argument
1385 sched_cpu_cookie_match(struct rq * rq,struct task_struct * p) sched_cpu_cookie_match() argument
1390 sched_core_cookie_match(struct rq * rq,struct task_struct * p) sched_core_cookie_match() argument
1395 sched_group_cookie_match(struct rq * rq,struct task_struct * p,struct sched_group * group) sched_group_cookie_match() argument
1404 lockdep_assert_rq_held(struct rq * rq) lockdep_assert_rq_held() argument
1413 raw_spin_rq_lock(struct rq * rq) raw_spin_rq_lock() argument
1418 raw_spin_rq_lock_irq(struct rq * rq) raw_spin_rq_lock_irq() argument
1424 raw_spin_rq_unlock_irq(struct rq * rq) raw_spin_rq_unlock_irq() argument
1430 _raw_spin_rq_lock_irqsave(struct rq * rq) _raw_spin_rq_lock_irqsave() argument
1440 raw_spin_rq_unlock_irqrestore(struct rq * rq,unsigned long flags) raw_spin_rq_unlock_irqrestore() argument
1446 raw_spin_rq_lock_irqsave(rq,flags) global() argument
1454 update_idle_core(struct rq * rq) update_idle_core() argument
1461 update_idle_core(struct rq * rq) update_idle_core() argument
1501 struct rq *rq = task_rq(p); cfs_rq_of() local
1543 assert_clock_updated(struct rq * rq) assert_clock_updated() argument
1552 rq_clock(struct rq * rq) rq_clock() argument
1560 rq_clock_task(struct rq * rq) rq_clock_task() argument
1568 rq_clock_skip_update(struct rq * rq) rq_clock_skip_update() argument
1578 rq_clock_cancel_skipupdate(struct rq * rq) rq_clock_cancel_skipupdate() argument
1593 rq_clock_start_loop_update(struct rq * rq) rq_clock_start_loop_update() argument
1600 rq_clock_stop_loop_update(struct rq * rq) rq_clock_stop_loop_update() argument
1631 rq_pin_lock(struct rq * rq,struct rq_flags * rf) rq_pin_lock() argument
1644 rq_unpin_lock(struct rq * rq,struct rq_flags * rf) rq_unpin_lock() argument
1654 rq_repin_lock(struct rq * rq,struct rq_flags * rf) rq_repin_lock() argument
1675 __task_rq_unlock(struct rq * rq,struct rq_flags * rf) __task_rq_unlock() argument
1683 task_rq_unlock(struct rq * rq,struct task_struct * p,struct rq_flags * rf) task_rq_unlock() argument
1697 rq_lock_irqsave(struct rq * rq,struct rq_flags * rf) rq_lock_irqsave() argument
1704 rq_lock_irq(struct rq * rq,struct rq_flags * rf) rq_lock_irq() argument
1711 rq_lock(struct rq * rq,struct rq_flags * rf) rq_lock() argument
1718 rq_unlock_irqrestore(struct rq * rq,struct rq_flags * rf) rq_unlock_irqrestore() argument
1725 rq_unlock_irq(struct rq * rq,struct rq_flags * rf) rq_unlock_irq() argument
1732 rq_unlock(struct rq * rq,struct rq_flags * rf) rq_unlock() argument
1757 struct rq *rq; this_rq_lock_irq() local
1825 queue_balance_callback(struct rq * rq,struct balance_callback * head,void (* func)(struct rq * rq)) queue_balance_callback() argument
1827 queue_balance_callback(struct rq * rq,struct balance_callback * head,void (* func)(struct rq * rq)) queue_balance_callback() argument
2004 sched_core_account_forceidle(struct rq * rq) sched_core_account_forceidle() argument
2012 sched_core_tick(struct rq * rq) sched_core_tick() argument
2020 sched_core_account_forceidle(struct rq * rq) sched_core_account_forceidle() argument
2022 sched_core_tick(struct rq * rq) sched_core_tick() argument
2173 task_current(struct rq * rq,struct task_struct * p) task_current() argument
2178 task_on_cpu(struct rq * rq,struct task_struct * p) task_on_cpu() argument
2345 put_prev_task(struct rq * rq,struct task_struct * prev) put_prev_task() argument
2351 set_next_task(struct rq * rq,struct task_struct * next) set_next_task() argument
2390 sched_stop_runnable(struct rq * rq) sched_stop_runnable() argument
2395 sched_dl_runnable(struct rq * rq) sched_dl_runnable() argument
2400 sched_rt_runnable(struct rq * rq) sched_rt_runnable() argument
2405 sched_fair_runnable(struct rq * rq) sched_fair_runnable() argument
2437 get_push_task(struct rq * rq) get_push_task() argument
2475 idle_set_state(struct rq * rq,struct cpuidle_state * idle_state) idle_set_state() argument
2481 idle_get_state(struct rq * rq) idle_get_state() argument
2490 idle_set_state(struct rq * rq,struct cpuidle_state * idle_state) idle_set_state() argument
2495 idle_get_state(struct rq * rq) idle_get_state() argument
2544 sched_update_tick_dependency(struct rq * rq) sched_update_tick_dependency() argument
2558 sched_update_tick_dependency(struct rq * rq) sched_update_tick_dependency() argument
2561 add_nr_running(struct rq * rq,unsigned count) add_nr_running() argument
2578 sub_nr_running(struct rq * rq,unsigned count) sub_nr_running() argument
2625 hrtick_enabled(struct rq * rq) hrtick_enabled() argument
2632 hrtick_enabled_fair(struct rq * rq) hrtick_enabled_fair() argument
2639 hrtick_enabled_dl(struct rq * rq) hrtick_enabled_dl() argument
2650 hrtick_enabled_fair(struct rq * rq) hrtick_enabled_fair() argument
2655 hrtick_enabled_dl(struct rq * rq) hrtick_enabled_dl() argument
2660 hrtick_enabled(struct rq * rq) hrtick_enabled() argument
2969 nohz_balance_exit_idle(struct rq * rq) nohz_balance_exit_idle() argument
3036 cpufreq_update_util(struct rq * rq,unsigned int flags) cpufreq_update_util() argument
3046 cpufreq_update_util(struct rq * rq,unsigned int flags) cpufreq_update_util() argument
3083 cpu_bw_dl(struct rq * rq) cpu_bw_dl() argument
3088 cpu_util_dl(struct rq * rq) cpu_util_dl() argument
3097 cpu_util_rt(struct rq * rq) cpu_util_rt() argument
3108 uclamp_rq_get(struct rq * rq,enum uclamp_id clamp_id) uclamp_rq_get() argument
3114 uclamp_rq_set(struct rq * rq,enum uclamp_id clamp_id,unsigned int value) uclamp_rq_set() argument
3120 uclamp_rq_is_idle(struct rq * rq) uclamp_rq_is_idle() argument
3126 uclamp_rq_is_capped(struct rq * rq) uclamp_rq_is_capped() argument
3193 uclamp_rq_is_capped(struct rq * rq) uclamp_rq_is_capped() argument
3201 uclamp_rq_get(struct rq * rq,enum uclamp_id clamp_id) uclamp_rq_get() argument
3210 uclamp_rq_set(struct rq * rq,enum uclamp_id clamp_id,unsigned int value) uclamp_rq_set() argument
3214 uclamp_rq_is_idle(struct rq * rq) uclamp_rq_is_idle() argument
3223 cpu_util_irq(struct rq * rq) cpu_util_irq() argument
3240 cpu_util_irq(struct rq * rq) cpu_util_irq() argument
3282 membarrier_switch_mm(struct rq * rq,struct mm_struct * prev_mm,struct mm_struct * next_mm) membarrier_switch_mm() argument
3300 membarrier_switch_mm(struct rq * rq,struct mm_struct * prev_mm,struct mm_struct * next_mm) membarrier_switch_mm() argument
3433 mm_cid_snapshot_time(struct rq * rq,struct mm_struct * mm) mm_cid_snapshot_time() argument
3441 __mm_cid_get(struct rq * rq,struct mm_struct * mm) __mm_cid_get() argument
3494 mm_cid_get(struct rq * rq,struct mm_struct * mm) mm_cid_get() argument
3517 switch_mm_cid(struct rq * rq,struct task_struct * prev,struct task_struct * next) switch_mm_cid() argument
3569 switch_mm_cid(struct rq * rq,struct task_struct * prev,struct task_struct * next) switch_mm_cid() argument
3572 task_tick_mm_cid(struct rq * rq,struct task_struct * curr) task_tick_mm_cid() argument
3621 splice_balance_callbacks(struct rq * rq) splice_balance_callbacks() argument
3626 balance_callbacks(struct rq * rq,struct balance_callback * head) balance_callbacks() argument
[all...]
deadline.c
67 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
69 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
72 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se) in rq_of_dl_se()
74 struct rq *rq = dl_se->rq; in rq_of_dl_se() local
77 rq = task_rq(dl_task_of(dl_se)); in rq_of_dl_se()
79 return rq; in rq_of_dl_se()
153 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
189 struct rq *r in __dl_update() local
325 struct rq *rq; dl_change_utilization() local
409 struct rq *rq = rq_of_dl_se(dl_se); task_non_contending() local
547 dl_overloaded(struct rq * rq) dl_overloaded() argument
552 dl_set_overload(struct rq * rq) dl_set_overload() argument
568 dl_clear_overload(struct rq * rq) dl_clear_overload() argument
585 has_pushable_dl_tasks(struct rq * rq) has_pushable_dl_tasks() argument
594 enqueue_pushable_dl_task(struct rq * rq,struct task_struct * p) enqueue_pushable_dl_task() argument
612 dequeue_pushable_dl_task(struct rq * rq,struct task_struct * p) dequeue_pushable_dl_task() argument
635 need_pull_dl_task(struct rq * rq,struct task_struct * prev) need_pull_dl_task() argument
646 deadline_queue_push_tasks(struct rq * rq) deadline_queue_push_tasks() argument
654 deadline_queue_pull_task(struct rq * rq) deadline_queue_pull_task() argument
661 dl_task_offline_migration(struct rq * rq,struct task_struct * p) dl_task_offline_migration() argument
734 enqueue_pushable_dl_task(struct rq * rq,struct task_struct * p) enqueue_pushable_dl_task() argument
739 dequeue_pushable_dl_task(struct rq * rq,struct task_struct * p) dequeue_pushable_dl_task() argument
753 deadline_queue_push_tasks(struct rq * rq) deadline_queue_push_tasks() argument
757 deadline_queue_pull_task(struct rq * rq) deadline_queue_pull_task() argument
769 replenish_dl_new_period(struct sched_dl_entity * dl_se,struct rq * rq) replenish_dl_new_period() argument
791 struct rq *rq = rq_of_dl_rq(dl_rq); setup_new_dl_entity() local
833 struct rq *rq = rq_of_dl_rq(dl_rq); replenish_dl_entity() local
950 update_dl_revised_wakeup(struct sched_dl_entity * dl_se,struct rq * rq) update_dl_revised_wakeup() argument
1013 struct rq *rq = rq_of_dl_se(dl_se); update_dl_entity() local
1048 struct rq *rq = rq_of_dl_rq(dl_rq); start_dl_timer() local
1090 __push_dl_task(struct rq * rq,struct rq_flags * rf) __push_dl_task() argument
1129 struct rq *rq; dl_task_timer() local
1132 struct rq *rq = rq_of_dl_se(dl_se); dl_task_timer() local
1267 struct rq *rq = rq_of_dl_se(dl_se); dl_check_constrained_dl() local
1302 grub_reclaim(u64 delta,struct rq * rq,struct sched_dl_entity * dl_se) grub_reclaim() argument
1325 update_curr_dl_se(struct rq * rq,struct sched_dl_entity * dl_se,s64 delta_exec) update_curr_dl_se() argument
1429 dl_server_init(struct sched_dl_entity * dl_se,struct rq * rq,dl_server_has_tasks_f has_tasks,dl_server_pick_f pick) dl_server_init() argument
1442 update_curr_dl(struct rq * rq) update_curr_dl() argument
1470 struct rq *rq; inactive_task_timer() local
1536 struct rq *rq = rq_of_dl_rq(dl_rq); inc_dl_deadline() local
1549 struct rq *rq = rq_of_dl_rq(dl_rq); dec_dl_deadline() local
1787 enqueue_task_dl(struct rq * rq,struct task_struct * p,int flags) enqueue_task_dl() argument
1849 dequeue_task_dl(struct rq * rq,struct task_struct * p,int flags) dequeue_task_dl() argument
1871 yield_task_dl(struct rq * rq) yield_task_dl() argument
1894 dl_task_is_earliest_deadline(struct task_struct * p,struct rq * rq) dl_task_is_earliest_deadline() argument
1908 struct rq *rq; select_task_rq_dl() local
1955 struct rq *rq; migrate_task_rq_dl() local
1985 check_preempt_equal_dl(struct rq * rq,struct task_struct * p) check_preempt_equal_dl() argument
2006 balance_dl(struct rq * rq,struct task_struct * p,struct rq_flags * rf) balance_dl() argument
2028 wakeup_preempt_dl(struct rq * rq,struct task_struct * p,int flags) wakeup_preempt_dl() argument
2048 start_hrtick_dl(struct rq * rq,struct sched_dl_entity * dl_se) start_hrtick_dl() argument
2053 start_hrtick_dl(struct rq * rq,struct sched_dl_entity * dl_se) start_hrtick_dl() argument
2058 set_next_task_dl(struct rq * rq,struct task_struct * p,bool first) set_next_task_dl() argument
2089 pick_task_dl(struct rq * rq) pick_task_dl() argument
2118 pick_next_task_dl(struct rq * rq) pick_next_task_dl() argument
2135 put_prev_task_dl(struct rq * rq,struct task_struct * p) put_prev_task_dl() argument
2158 task_tick_dl(struct rq * rq,struct task_struct * p,int queued) task_tick_dl() argument
2186 pick_dl_task(struct rq * rq,struct task_struct * p,int cpu) pick_dl_task() argument
2198 pick_earliest_pushable_dl_task(struct rq * rq,int cpu) pick_earliest_pushable_dl_task() argument
2312 find_lock_later_rq(struct task_struct * task,struct rq * rq) find_lock_later_rq() argument
2366 pick_next_pushable_dl_task(struct rq * rq) pick_next_pushable_dl_task() argument
2390 push_dl_task(struct rq * rq) push_dl_task() argument
2465 push_dl_tasks(struct rq * rq) push_dl_tasks() argument
2568 task_woken_dl(struct rq * rq,struct task_struct * p) task_woken_dl() argument
2584 struct rq *rq; set_cpus_allowed_dl() local
2614 rq_online_dl(struct rq * rq) rq_online_dl() argument
2625 rq_offline_dl(struct rq * rq) rq_offline_dl() argument
2646 struct rq *rq; dl_add_task_root_domain() local
2678 switched_from_dl(struct rq * rq,struct task_struct * p) switched_from_dl() argument
2732 switched_to_dl(struct rq * rq,struct task_struct * p) switched_to_dl() argument
2768 prio_changed_dl(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_dl() argument
[all...]
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
34 if (rq) in rq_sched_info_dequeue()
35 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
26 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
31 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
33 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
36 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
44 pick_next_task_stop(struct rq * rq) pick_next_task_stop() argument
55 enqueue_task_stop(struct rq * rq,struct task_struct * p,int flags) enqueue_task_stop() argument
61 dequeue_task_stop(struct rq * rq,struct task_struct * p,int flags) dequeue_task_stop() argument
66 yield_task_stop(struct rq * rq) yield_task_stop() argument
71 put_prev_task_stop(struct rq * rq,struct task_struct * prev) put_prev_task_stop() argument
84 task_tick_stop(struct rq * rq,struct task_struct * curr,int queued) task_tick_stop() argument
88 switched_to_stop(struct rq * rq,struct task_struct * p) switched_to_stop() argument
94 prio_changed_stop(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_stop() argument
99 update_curr_stop(struct rq * rq) update_curr_stop() argument
[all...]
core.c
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
268 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
273 rq->core->core_task_seq++; in sched_core_enqueue()
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
281 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
286 rq->core->core_task_seq++; in sched_core_dequeue()
289 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
299 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
300 resched_curr(rq); in sched_core_dequeue()
[all …]
rt.c
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
180 return rt_rq->rq; in rq_of_rt_rq()
188 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
192 return rt_rq->rq; in rq_of_rt_se()
220 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
224 rt_rq->rq = rq; in init_tg_rt_entry()
234 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
291 static inline struct rq *rq_of_rt_r in rt_task_of()
306 struct rq *rq = rq_of_rt_se(rt_se); rt_rq_of_se() local
323 need_pull_rt_task(struct rq * rq,struct task_struct * prev) need_pull_rt_task() argument
329 rt_overloaded(struct rq * rq) rt_overloaded() argument
334 rt_set_overload(struct rq * rq) rt_set_overload() argument
353 rt_clear_overload(struct rq * rq) rt_clear_overload() argument
363 has_pushable_tasks(struct rq * rq) has_pushable_tasks() argument
374 rt_queue_push_tasks(struct rq * rq) rt_queue_push_tasks() argument
382 rt_queue_pull_task(struct rq * rq) rt_queue_pull_task() argument
387 enqueue_pushable_task(struct rq * rq,struct task_struct * p) enqueue_pushable_task() argument
403 dequeue_pushable_task(struct rq * rq,struct task_struct * p) dequeue_pushable_task() argument
424 enqueue_pushable_task(struct rq * rq,struct task_struct * p) enqueue_pushable_task() argument
428 dequeue_pushable_task(struct rq * rq,struct task_struct * p) dequeue_pushable_task() argument
432 rt_queue_push_tasks(struct rq * rq) rt_queue_push_tasks() argument
514 for_each_rt_rq(rt_rq,iter,rq) global() argument
533 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
621 for_each_rt_rq(rt_rq,iter,rq) global() argument
634 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
735 __disable_runtime(struct rq * rq) __disable_runtime() argument
817 __enable_runtime(struct rq * rq) __enable_runtime() argument
878 struct rq *rq = rq_of_rt_rq(rt_rq); do_sched_rt_period_timer() local
1000 update_curr_rt(struct rq * rq) update_curr_rt() argument
1036 struct rq *rq = rq_of_rt_rq(rt_rq); dequeue_top_rt_rq() local
1053 struct rq *rq = rq_of_rt_rq(rt_rq); enqueue_top_rt_rq() local
1077 struct rq *rq = rq_of_rt_rq(rt_rq); inc_rt_prio_smp() local
1093 struct rq *rq = rq_of_rt_rq(rt_rq); dec_rt_prio_smp() local
1448 struct rq *rq = rq_of_rt_se(rt_se); enqueue_rt_entity() local
1460 struct rq *rq = rq_of_rt_se(rt_se); dequeue_rt_entity() local
1479 enqueue_task_rt(struct rq * rq,struct task_struct * p,int flags) enqueue_task_rt() argument
1495 dequeue_task_rt(struct rq * rq,struct task_struct * p,int flags) dequeue_task_rt() argument
1523 requeue_task_rt(struct rq * rq,struct task_struct * p,int head) requeue_task_rt() argument
1534 yield_task_rt(struct rq * rq) yield_task_rt() argument
1546 struct rq *rq; select_task_rq_rt() local
1614 check_preempt_equal_prio(struct rq * rq,struct task_struct * p) check_preempt_equal_prio() argument
1641 balance_rt(struct rq * rq,struct task_struct * p,struct rq_flags * rf) balance_rt() argument
1662 wakeup_preempt_rt(struct rq * rq,struct task_struct * p,int flags) wakeup_preempt_rt() argument
1687 set_next_task_rt(struct rq * rq,struct task_struct * p,bool first) set_next_task_rt() argument
1731 _pick_next_task_rt(struct rq * rq) _pick_next_task_rt() argument
1746 pick_task_rt(struct rq * rq) pick_task_rt() argument
1758 pick_next_task_rt(struct rq * rq) pick_next_task_rt() argument
1768 put_prev_task_rt(struct rq * rq,struct task_struct * p) put_prev_task_rt() argument
1793 pick_rt_task(struct rq * rq,struct task_struct * p,int cpu) pick_rt_task() argument
1806 pick_highest_pushable_task(struct rq * rq,int cpu) pick_highest_pushable_task() argument
1916 find_lock_lowest_rq(struct task_struct * task,struct rq * rq) find_lock_lowest_rq() argument
1976 pick_next_pushable_task(struct rq * rq) pick_next_pushable_task() argument
2001 push_rt_task(struct rq * rq,bool pull) push_rt_task() argument
2121 push_rt_tasks(struct rq * rq) push_rt_tasks() argument
2228 tell_cpu_to_push(struct rq * rq) tell_cpu_to_push() argument
2266 struct rq *rq; rto_push_irq_work_func() local
2414 task_woken_rt(struct rq * rq,struct task_struct * p) task_woken_rt() argument
2428 rq_online_rt(struct rq * rq) rq_online_rt() argument
2439 rq_offline_rt(struct rq * rq) rq_offline_rt() argument
2453 switched_from_rt(struct rq * rq,struct task_struct * p) switched_from_rt() argument
2484 switched_to_rt(struct rq * rq,struct task_struct * p) switched_to_rt() argument
2515 prio_changed_rt(struct rq * rq,struct task_struct * p,int oldprio) prio_changed_rt() argument
2552 watchdog(struct rq * rq,struct task_struct * p) watchdog() argument
2576 watchdog(struct rq * rq,struct task_struct * p) watchdog() argument
2587 task_tick_rt(struct rq * rq,struct task_struct * p,int queued) task_tick_rt() argument
2621 get_rr_interval_rt(struct rq * rq,struct task_struct * task) get_rr_interval_rt() argument
[all...]
/linux/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
107 return rq->to_use->desc; in vnic_rq_next_desc()
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
112 return rq->to_use->index; in vnic_rq_next_index()
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
117 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
84 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
87 return rq->ring.desc_avail; in vnic_rq_desc_avail()
90 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
93 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
96 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
98 return rq->to_use->desc; in vnic_rq_next_desc()
101 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
103 return rq->to_use->index; in vnic_rq_next_index()
106 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
111 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
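
The fnic and enic vnic_rq.h hits above expose the same ring accounting: vnic_rq_desc_used() is computed as desc_count - desc_avail - 1, so one descriptor of the ring never counts as usable. Below is a minimal, standalone C sketch of that arithmetic using hypothetical names and values (struct ring, ring_desc_used); it illustrates the accounting shown in the hits, not the driver's own code.

#include <stdio.h>

/* Hypothetical ring state mirroring the desc_count/desc_avail fields above. */
struct ring {
    unsigned int desc_count;  /* total descriptors in the ring */
    unsigned int desc_avail;  /* descriptors still free to post */
};

/* Same arithmetic as vnic_rq_desc_used(): used = count - avail - 1. */
static unsigned int ring_desc_used(const struct ring *r)
{
    return r->desc_count - r->desc_avail - 1;
}

int main(void)
{
    /* Assumed starting point: all usable descriptors (count - 1) are free. */
    struct ring r = { .desc_count = 64, .desc_avail = 63 };

    printf("used = %u\n", ring_desc_used(&r));  /* prints 0 */
    r.desc_avail -= 8;                          /* post 8 descriptors */
    printf("used = %u\n", ring_desc_used(&r));  /* prints 8 */
    return 0;
}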
/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
13 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_rcs() argument
42 if (GRAPHICS_VER(rq->i915) == 9) in gen8_emit_flush_rcs()
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
58 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
74 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
79 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_xcs() argument
83 cs = intel_ring_begin(rq, 4); in gen8_emit_flush_xcs()
99 if (rq->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush_xcs()
107 intel_ring_advance(rq, cs); in gen8_emit_flush_xcs()
112 int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen11_emit_flush_rcs() argument
[all …]
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
selftest_execlists.c
28 static bool is_active(struct i915_request *rq) in is_active() argument
30 if (i915_request_is_active(rq)) in is_active()
33 if (i915_request_on_hold(rq)) in is_active()
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
43 struct i915_request *rq, in wait_for_submit() argument
53 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
69 struct i915_request *rq, in wait_for_reset() argument
81 if (i915_request_completed(rq)) in wait_for_reset()
125 struct i915_request *rq; live_sanitycheck() local
182 struct i915_request *rq[2]; live_unlite_restore() local
344 struct i915_request *rq; live_unlite_ring() local
493 struct i915_request *rq; live_pin_rewind() local
601 struct i915_request *rq; live_hold_reset() local
720 struct i915_request *rq; live_error_interrupt() local
820 emit_semaphore_chain(struct i915_request * rq,struct i915_vma * vma,int idx) emit_semaphore_chain() argument
860 struct i915_request *rq; semaphore_queue() local
895 struct i915_request *rq; release_queue() local
946 struct i915_request *rq; slice_semaphore_queue() local
1056 struct i915_request *rq; create_rewinder() local
1130 struct i915_request *rq[3] = {}; live_timeslice_rewind() local
1259 struct i915_request *rq; nop_request() local
1330 struct i915_request *rq, *nop; live_timeslice_queue() local
1430 struct i915_request *rq; live_timeslice_nopreempt() local
1721 struct i915_request *rq; spinner_create_request() local
1759 struct i915_request *rq; live_preempt() local
1852 struct i915_request *rq; live_late_preempt() local
2058 struct i915_request *rq; __cancel_active0() local
2102 struct i915_request *rq[2] = {}; __cancel_active1() local
2173 struct i915_request *rq[3] = {}; __cancel_queued() local
2268 struct i915_request *rq; __cancel_hostile() local
2325 struct i915_request *rq; __cancel_fail() local
2576 struct i915_request *rq; live_chain_preempt() local
2706 struct i915_request *rq; create_gang() local
2801 struct i915_request *rq; __live_preempt_ring() local
2975 struct i915_request *rq = NULL; live_preempt_gang() local
3153 struct i915_request *rq; create_gpr_client() local
3214 struct i915_request *rq; preempt_user() local
3299 struct i915_request *rq; live_preempt_user() local
3398 struct i915_request *rq; live_preempt_timeout() local
3490 struct i915_request *rq; smoke_submit() local
3754 struct i915_request *rq; nop_virtual_engine() local
3771 struct i915_request *rq; nop_virtual_engine() local
4027 struct i915_request *rq; slicein_virtual_engine() local
4094 struct i915_request *rq; sliceout_virtual_engine() local
4226 struct i915_request *rq; preserved_virtual_engine() local
4337 struct i915_request *rq; reset_virtual_engine() local
[all...]
selftest_timeline.c
454 static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) in emit_ggtt_store_dw() argument
458 cs = intel_ring_begin(rq, 4); in emit_ggtt_store_dw()
462 if (GRAPHICS_VER(rq->i915) >= 8) { in emit_ggtt_store_dw()
467 } else if (GRAPHICS_VER(rq->i915) >= 4) { in emit_ggtt_store_dw()
479 intel_ring_advance(rq, cs); in emit_ggtt_store_dw()
487 struct i915_request *rq; in checked_tl_write() local
492 rq = ERR_PTR(err); in checked_tl_write()
503 rq = intel_engine_create_kernel_request(engine); in checked_tl_write()
504 if (IS_ERR(rq)) in checked_tl_write()
507 i915_request_get(rq); in checked_tl_write()
[all …]
gen2_engine_cs.c
16 int gen2_emit_flush(struct i915_request *rq, u32 mode) in gen2_emit_flush() argument
25 cs = intel_ring_begin(rq, 2 + 4 * num_store_dw); in gen2_emit_flush()
38 intel_ring_advance(rq, cs); in gen2_emit_flush()
43 int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen4_emit_flush_rcs() argument
79 if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5) in gen4_emit_flush_rcs()
87 cs = intel_ring_begin(rq, i); in gen4_emit_flush_rcs()
105 *cs++ = intel_gt_scratch_offset(rq->engine->gt, in gen4_emit_flush_rcs()
115 *cs++ = intel_gt_scratch_offset(rq->engine->gt, in gen4_emit_flush_rcs()
124 intel_ring_advance(rq, cs); in gen4_emit_flush_rcs()
129 int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode) in gen4_emit_flush_vcs() argument
[all …]
/linux/fs/erofs/
decompressor.c
20 struct z_erofs_decompress_req *rq; member
68 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_prepare_dstpages() local
73 EROFS_SB(rq->sb)->lz4.max_distance_pages; in z_erofs_lz4_prepare_dstpages()
79 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_dstpages()
86 if (!rq->fillgaps && test_bit(j, bounced)) { in z_erofs_lz4_prepare_dstpages()
89 availables[top++] = rq->out[i - lz4_max_distance_pages]; in z_erofs_lz4_prepare_dstpages()
114 victim = __erofs_allocpage(pagepool, rq->gfp, true); in z_erofs_lz4_prepare_dstpages()
119 rq->out[i] = victim; in z_erofs_lz4_prepare_dstpages()
128 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_handle_overlap() local
133 if (rq->inplace_io) { in z_erofs_lz4_handle_overlap()
[all …]
/linux/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, in mlx5e_read_enhanced_title_slot() argument
92 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_enhanced_title_slot()
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state))) in mlx5e_read_enhanced_title_slot()
100 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_read_enhanced_title_slot()
105 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1); in mlx5e_read_enhanced_title_slot()
[all …]
/linux/block/
blk-flush.c
103 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
105 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
108 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
115 rq->bio = rq->biotail; in blk_flush_restore_request()
116 if (rq->bio) in blk_flush_restore_request()
117 rq->__sector = rq->bio->bi_iter.bi_sector; in blk_flush_restore_request()
120 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
121 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
124 static void blk_account_io_flush(struct request *rq) in blk_account_io_flush() argument
126 struct block_device *part = rq->q->disk->part0; in blk_account_io_flush()
[all …]
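
In the blk-flush.c hit above, blk_flush_cur_seq() returns 1 << ffz(rq->flush.seq): the next flush stage is the lowest bit not yet set in the sequence bitmap. The standalone C sketch below reproduces that bit trick, using __builtin_ctzl as a stand-in for the kernel's ffz(); the names and values here are hypothetical and only illustrate the arithmetic.

#include <stdio.h>

/*
 * Stand-in for the kernel's ffz(): index of the lowest clear bit.
 * Assumes x is not all ones (otherwise ~x == 0 and ctzl is undefined).
 */
static unsigned int lowest_zero_bit(unsigned long x)
{
    return (unsigned int)__builtin_ctzl(~x);
}

int main(void)
{
    /* Hypothetical flush-sequence bitmap: stages 0 and 1 already completed. */
    unsigned long seq = 0x3;

    /* Mirrors "1 << ffz(rq->flush.seq)": the next pending stage's bit. */
    printf("next stage bit: %#lx\n", 1UL << lowest_zero_bit(seq));
    return 0;
}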
blk-mq.c
47 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
48 static void blk_mq_request_bypass_insert(struct request *rq,
91 static bool blk_mq_check_inflight(struct request *rq, void *priv) in blk_mq_check_inflight() argument
95 if (rq->part && blk_do_io_stat(rq) && in blk_mq_check_inflight()
96 (!bdev_is_partition(mi->part) || rq->part == mi->part) && in blk_mq_check_inflight()
97 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
98 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
314 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
316 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
318 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
[all …]
/linux/include/linux/
blk-mq.h
218 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough() argument
220 return blk_op_is_passthrough(rq->cmd_flags); in blk_rq_is_passthrough()
228 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
230 #define rq_dma_dir(rq) \ argument
231 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
233 #define rq_list_add(listptr, rq) do { \ argument
234 (rq)->rq_next = *(listptr); \
235 *(listptr) = rq; \
238 #define rq_list_add_tail(lastpptr, rq) do { \ argument
239 (rq)->rq_next = NULL; \
[all …]
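
The blk-mq.h hits above include the rq_list_add() macro, which pushes a request onto a singly linked list threaded through rq_next by making the new request the head. The following standalone C sketch reproduces that head-insertion pattern with a hypothetical struct req; it is illustrative only, not the kernel macro itself.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical request type; the list is threaded through rq_next. */
struct req {
    int id;
    struct req *rq_next;
};

/* Head insertion, the same pattern as the rq_list_add() macro above. */
static void req_list_add(struct req **listptr, struct req *rq)
{
    rq->rq_next = *listptr;
    *listptr = rq;
}

int main(void)
{
    struct req a = { .id = 1, .rq_next = NULL };
    struct req b = { .id = 2, .rq_next = NULL };
    struct req *list = NULL;

    req_list_add(&list, &a);
    req_list_add(&list, &b);    /* b becomes the new head, followed by a */

    for (struct req *r = list; r != NULL; r = r->rq_next)
        printf("req %d\n", r->id);
    return 0;
}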
