/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

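/* Return true if any engine's workload queue of this vGPU is non-empty. */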
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

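/*
 * Per-vGPU scheduling state. lru_list links the vGPU into the
 * scheduler's LRU run queue; sched_in_time and sched_out_time bracket
 * the slice currently running on hardware; sched_time accumulates total
 * run time; left_ts and allocated_ts track the remaining and per-period
 * timeslice budget derived from the weight in sched_ctl.
 */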
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;

	ktime_t sched_in_time;
	ktime_t sched_out_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

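/*
 * Global scheduler state: the hrtimer that drives periodic scheduling
 * decisions every @period nanoseconds, and the head of the LRU run
 * queue of schedulable vGPUs.
 */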
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
};

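/*
 * Charge the vGPU being scheduled out for the time it just spent on the
 * hardware: accumulate it into sched_time and subtract it from left_ts.
 * left_ts may go negative, recording debt to be repaid in later periods.
 */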
static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;

	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;

	vgpu_data->sched_time += delta_ts;
	vgpu_data->left_ts -= delta_ts;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

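/*
 * Rebalance timeslices across the active vGPUs in proportion to their
 * scheduling weights. Each balance period hands every vGPU its weighted
 * share of GVT_TS_BALANCE_PERIOD_MS; at stage 0 (once every
 * GVT_TS_BALANCE_STAGE_NUM periods) the remaining budget is reset
 * outright, discarding any accumulated debt or surplus.
 *
 * Illustrative example (values are not from the source): two active
 * vGPUs with weights 2 and 6 give total_weight = 8, so per period they
 * are allocated 100ms * 2/8 = 25ms and 100ms * 6/8 = 75ms respectively.
 */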
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0: the timeslice is
	 * allocated again from scratch, without carrying over previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
						vgpu_data->sched_ctl.weight /
						total_weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should include the
			 * leftover/debt slice from the previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

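/*
 * Perform the actual vGPU switch from current_vgpu to next_vgpu. The
 * switch is deferred while any engine still runs a workload: setting
 * need_reschedule stops the dispatch thread from queueing more work,
 * and a later call completes the switch and wakes the dispatch thread
 * once all engines have drained.
 */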
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* No need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * After the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu.
	 */
	scheduler->need_reschedule = true;

	/* still have an uncompleted workload on some engine? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		vgpu_data->sched_out_time = cur_time;
		vgpu_update_timeslice(scheduler->current_vgpu);
	}
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

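/*
 * Pick the least-recently-scheduled vGPU that both has a pending
 * workload and still has timeslice left; scanning the run queue in LRU
 * order keeps the selection fair among equally busy vGPUs.
 */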
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vGPU with a pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it has timeslice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanoseconds (1ms) */
#define GVT_DEFAULT_TIME_SLICE 1000000

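/*
 * Core of the time-based scheduling (tbs) policy: choose the next vGPU
 * to run and move it to the tail of the LRU run queue. When no busy
 * vGPU has timeslice left, fall back to the special idle vGPU.
 */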
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vGPU, or the scheduler already has a target */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the chosen vGPU to the tail of lru_list */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
				&sched_data->lru_runq_head);
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

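/*
 * Scheduler entry point, run from the GVT service thread whenever
 * INTEL_GVT_REQUEST_SCHED is raised by the scheduling timer. Timeslices
 * are rebalanced once every GVT_TS_BALANCE_PERIOD_MS ticks of that
 * timer.
 */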
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	static uint64_t timer_check;

	mutex_lock(&gvt->lock);

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
			gvt_balance_timeslice(sched_data);
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->lock);
}

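/*
 * hrtimer callback: hand the actual work off to the service thread via
 * INTEL_GVT_REQUEST_SCHED, then re-arm the timer one period ahead.
 */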
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

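/*
 * Set up the global policy state: an empty LRU run queue and a periodic
 * hrtimer (not yet started) firing every GVT_DEFAULT_TIME_SLICE ns.
 */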
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

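/*
 * Add the vGPU to the LRU run queue (unless it is already queued) and
 * start the scheduling timer on the first activation.
 */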
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
}

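/*
 * The time-based policy packaged behind the generic policy interface.
 * The intel_gvt_ and intel_vgpu_ wrappers below only delegate through
 * sched_ops, so an alternative policy could be plugged in by supplying
 * a different intel_gvt_sched_policy_ops instance.
 */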
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

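/*
 * Take a vGPU out of scheduling entirely: remove it from the run queue,
 * clear it from the scheduler's next/current slots, and release its
 * ownership of any engine's MMIO context (switching the context back to
 * the host, i.e. a NULL owner) under mmio_context_lock.
 */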
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	int ring_id;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
}