/* drivers/gpu/drm/i915/gvt/sched_policy.c */
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

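/*
 * A vGPU is considered busy if any of its per-engine workload
 * queues still holds a queued workload.
 */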
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!list_empty(workload_q_head(vgpu, engine)))
			return true;
	}

	return false;
}

/* We give a vGPU 2 seconds of higher priority during start */
#define GVT_SCHED_VGPU_PRI_TIME  2

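/* Per-vGPU scheduling state: LRU run queue linkage, timeslice
 * accounting, and the priority-boost window applied while the vGPU
 * is starting up.
 */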
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;
	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

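/* Global scheduler state: the scheduling tick timer, its period,
 * and the LRU run queue of active vGPUs.
 */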
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};

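/*
 * Charge the time a vGPU just spent on the hardware against its
 * remaining timeslice budget. The idle vGPU is never charged.
 */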
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

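/*
 * Rebalance timeslices once per GVT_TS_BALANCE_PERIOD_MS. On every
 * GVT_TS_BALANCE_STAGE_NUM-th invocation (stage 0), each vGPU on the
 * run queue has its budget reset to a weighted share of the period:
 *
 *   fair_timeslice = (GVT_TS_BALANCE_PERIOD_MS / total_weight) * weight
 *
 * e.g. with weights 2 and 8, the two vGPUs get 20ms and 80ms. In the
 * remaining stages a fresh share is added on top of whatever budget
 * is left over (or owed) from the previous stages.
 */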
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where it is
	 * allocated again without carrying over any previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left/debt slice carried over from previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

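/*
 * Commit a pending vGPU switch: once no engine is still running a
 * workload of the current vGPU, account the time it consumed, make
 * next_vgpu the current one and wake up the workload dispatch
 * threads.
 */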
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu,
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workloads? */
	for_each_engine(engine, gvt->gt, i) {
		if (scheduler->current_workload[engine->id])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->gt, i)
		wake_up(&scheduler->waitq[engine->id]);
}

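/*
 * Scan the LRU run queue for a vGPU with pending workloads. A vGPU
 * inside its start-up priority window is picked unconditionally;
 * otherwise a vGPU is only eligible while it still has timeslice
 * budget left.
 */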
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vgpu with pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			} else
				vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has timeslice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanoseconds */
#define GVT_DEFAULT_TIME_SLICE 1000000

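/*
 * Core of the time-based scheduler (TBS): pick the next busy vGPU
 * from the run queue, falling back to the idle vGPU when nothing is
 * runnable. A regularly scheduled vGPU is moved to the LRU tail so
 * that the other vGPUs get their turn first.
 */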
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

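/*
 * Scheduler service routine, run on behalf of the tick timer:
 * rebalance the timeslices once per GVT_TS_BALANCE_PERIOD_MS, then
 * charge the current vGPU for the time it consumed and try to pick
 * the next one.
 */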
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}

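/*
 * Tick timer callback: ask the GVT service thread to run the
 * scheduler and re-arm the timer one period ahead.
 */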
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has already been removed; stop the timer
	 * once no vGPU is left
	 */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}

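/*
 * Put a vGPU on the run queue with a GVT_SCHED_VGPU_PRI_TIME-second
 * priority-boost window, and start the tick timer if it is not
 * already running.
 */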
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}

static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU
 * data: sched_data and sched_ctl. We treat both as part of the global
 * scheduler state, protected by gvt->sched_lock. Callers must decide
 * for themselves whether vgpu_lock should be held outside.
 */

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}

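/*
 * Take a vGPU off the run queue and clear it from the scheduler's
 * current/next slots. For every engine whose MMIO context the vGPU
 * still owns, the context is switched away and the ownership
 * cleared, under a runtime PM reference.
 */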
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for_each_engine(engine, vgpu->gvt->gt, id) {
		if (scheduler->engine_owner[engine->id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, engine);
			scheduler->engine_owner[engine->id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	mutex_unlock(&vgpu->gvt->sched_lock);
}