/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "gvt.h"
#include "i915_drv.h"
#include "sched_policy.h"

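/* A vGPU counts as busy if any engine's workload queue is non-empty. */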
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!list_empty(workload_q_head(vgpu, engine)))
			return true;
	}

	return false;
}

/* Give a vGPU higher scheduling priority for its first 2 seconds */
#define GVT_SCHED_VGPU_PRI_TIME  2

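/*
 * Per-vGPU scheduling state: sched_in_time and sched_time record when
 * the vGPU was last scheduled in and its accumulated run time, while
 * left_ts and allocated_ts are the remaining and per-period timeslice
 * budgets used by the balancing logic below. pri_sched and pri_time
 * implement the 2-second priority window above.
 */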
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;
	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

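/*
 * Global scheduler state: an LRU run queue of active vGPUs and the
 * hrtimer that drives the scheduling tick every @period nanoseconds.
 */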
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};

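/*
 * Charge the time elapsed since the last sched-in to @vgpu's run time
 * and debit it from the remaining timeslice. The idle vGPU is never
 * accounted.
 */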
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

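/*
 * Each 100 ms balance period is split proportionally to the vGPUs'
 * weights. Worked example with hypothetical weights: two vGPUs of
 * weight 4 and weight 8 give total_weight = 12, so
 *
 *   fair_timeslice = 100 ms / 12 * weight
 *
 * i.e. roughly 33 ms and 66 ms per period respectively.
 */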
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* At stage 0 the timeslice accumulation is reset: budgets are
	 * allocated afresh without carrying over debt from previous
	 * stages.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100 ms adds the
			 * leftover (or debt) slice from previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

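/*
 * Perform the actual vGPU switch: while any engine still has an
 * in-flight workload we only flag the reschedule and return; once all
 * engines have drained, account the outgoing vGPU's time, promote
 * next_vgpu to current_vgpu and wake the per-engine dispatch threads.
 */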
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* No need to switch if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose a next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * After this flag is set, the workload dispatch thread stops
	 * dispatching workloads for the current vGPU.
	 */
	scheduler->need_reschedule = true;

	/* any engine still running an uncompleted workload? */
	for_each_engine(engine, gvt->gt, i) {
		if (scheduler->current_workload[engine->id])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->gt, i)
		wake_up(&scheduler->waitq[engine->id]);
}

static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vGPU with pending workloads */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			}
			vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanoseconds (a 1 ms scheduling tick) */
#define GVT_DEFAULT_TIME_SLICE 1000000

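/*
 * One tick of the time-based scheduling ("tbs") policy: pick the next
 * busy vGPU from the LRU run queue, fall back to the idle vGPU when
 * nothing is runnable, then try to switch to it.
 */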
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vGPU, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}

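/*
 * hrtimer callback: defer the scheduling pass to the GVT service
 * thread (sched_lock is a mutex and cannot be taken in timer context),
 * then re-arm the timer one period ahead.
 */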
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc_obj(*data);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc_obj(*data);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* stop the timer once the last vGPU has been removed */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}

static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}

static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

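/*
 * Policy lifecycle as wired up below: intel_gvt_init_sched_policy()
 * installs tbs_schedule_ops and calls .init; each vGPU then goes
 * through .init_vgpu -> .start_schedule -> .stop_schedule ->
 * .clean_vgpu, and .clean tears the policy down when GVT itself is
 * cleaned up.
 */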
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU
 * data: sched_data and sched_ctl. We treat both as part of the global
 * scheduler, protected by gvt->sched_lock. Callers must decide for
 * themselves whether vgpu_lock should also be held outside.
 */

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

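	/*
	 * Hand engine MMIO context ownership back to the host. A runtime
	 * PM wakeref is held across the switch because
	 * intel_gvt_switch_mmio() touches hardware registers; passing
	 * NULL as the "next" vGPU restores the host context.
	 */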
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for_each_engine(engine, vgpu->gvt->gt, id) {
		if (scheduler->engine_owner[engine->id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, engine);
			scheduler->engine_owner[engine->id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	mutex_unlock(&vgpu->gvt->sched_lock);
}