/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include <linux/vmalloc.h>

#include <drm/drm_print.h>

#include "gvt.h"
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "sched_policy.h"

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	/* setup the ballooning information */
	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;

	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
	vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

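	/*
	 * The vgt_if layout is shared with the guest through the PVINFO
	 * page, so it must exactly fill the space reserved for it.
	 */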
	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

/*
 * A vGPU type name has the form GVTg_Vx_y, where x encodes the physical GPU
 * generation (e.g. V4 for BDW server, V5 for SKL server).
 *
 * Depending on the physical SKU resource, we might see vGPU types like
 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of
 * vGPU on the same physical GPU depending on the available resources. Each
 * vGPU type has a different avail_instance value to indicate how many vGPU
 * instances can be created for this type.
 */
#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num)	\
	(VGPU_MAX_WEIGHT / (vgpu_num))

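/*
 * Each entry below maps onto struct intel_vgpu_config:
 * { low (mappable) GM size, high (hidden) GM size, fence count,
 *   scheduling weight, virtual EDID, type name suffix }.
 */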
static const struct intel_vgpu_config intel_vgpu_configs[] = {
	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};

/**
 * intel_gvt_init_vgpu_types - initialize vGPU type list
 * @gvt: GVT device
 *
 * Initialize the vGPU type list based on the available resources.
 *
 */
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
	unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
	unsigned int i;

	gvt->types = kzalloc_objs(struct intel_vgpu_type, num_types);
	if (!gvt->types)
		return -ENOMEM;

	gvt->mdev_types = kzalloc_objs(*gvt->mdev_types, num_types);
	if (!gvt->mdev_types)
		goto out_free_types;

	for (i = 0; i < num_types; ++i) {
		const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];

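		/*
		 * The configs are ordered from smallest to largest; once the
		 * available low GM cannot hold even one instance of a type,
		 * no larger type can fit either, so stop here.
		 */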
		if (low_avail / conf->low_mm == 0)
			break;
		if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
			goto out_free_mdev_types;

		sprintf(gvt->types[i].name, "GVTg_V%u_%s",
			GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
		gvt->types[i].conf = conf;

		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
			     min(low_avail / conf->low_mm,
				 high_avail / conf->high_mm),
			     conf->low_mm, conf->high_mm, conf->fence,
			     conf->weight, vgpu_edid_str(conf->edid));

		gvt->mdev_types[i] = &gvt->types[i].type;
		gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
	}

	gvt->num_types = i;
	return 0;

out_free_mdev_types:
	kfree(gvt->mdev_types);
out_free_types:
	kfree(gvt->types);
	return -EINVAL;
}

void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
	kfree(gvt->mdev_types);
	kfree(gvt->types);
}

/**
 * intel_gvt_activate_vgpu - activate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to activate a virtual GPU.
 *
 */
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
	set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
}

/**
 * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to deactivate a virtual GPU.
 * The virtual GPU will be stopped.
 *
 */
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);

	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);

	if (atomic_read(&vgpu->submission.running_workload_num)) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_stop_schedule(vgpu);

	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_release_vgpu - release a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to release a virtual GPU.
 * The virtual GPU will be stopped and all runtime information will be
 * destroyed.
 *
 */
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
{
	intel_gvt_deactivate_vgpu(vgpu);

	mutex_lock(&vgpu->vgpu_lock);
	vgpu->d3_entered = false;
	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;

	drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
		 "vGPU is still active!\n");

	/*
	 * Remove the vGPU from the IDR first, so that later cleanup can tell
	 * whether the service needs to be stopped when no vGPU is active.
	 */
	mutex_lock(&gvt->lock);
	idr_remove(&gvt->vgpu_idr, vgpu->id);
	mutex_unlock(&gvt->lock);

	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_debugfs_remove_vgpu(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_submission(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
	intel_vgpu_clean_gtt(vgpu);
	intel_vgpu_detach_regions(vgpu);
	intel_vgpu_free_resource(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	intel_vgpu_dmabuf_cleanup(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);
}

#define IDLE_VGPU_IDR 0
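/*
 * IDR slot 0 is reserved for the idle vGPU; guest vGPU IDs are allocated
 * starting from IDLE_VGPU_IDR + 1 in intel_gvt_create_vgpu().
 */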

/**
 * intel_gvt_create_idle_vgpu - create an idle virtual GPU
 * @gvt: GVT device
 *
 * This function is called when a user wants to create an idle virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	enum intel_engine_id i;
	int ret;

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	vgpu->id = IDLE_VGPU_IDR;
	vgpu->gvt = gvt;
	mutex_init(&vgpu->vgpu_lock);

	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_free_vgpu;

	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
	return vgpu;

out_free_vgpu:
	vfree(vgpu);
	return ERR_PTR(ret);
}

/**
 * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy an idle virtual GPU.
 *
 */
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_vgpu_clean_sched_policy(vgpu);
	mutex_unlock(&vgpu->vgpu_lock);

	vfree(vgpu);
}

int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
			  const struct intel_vgpu_config *conf)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->gt->i915;
	int ret;

	gvt_dbg_core("low %u MB high %u MB fence %u\n",
		     BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
		     conf->fence);

	mutex_lock(&gvt->lock);
	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
			GFP_KERNEL);
	if (ret < 0)
		goto out_unlock;

	vgpu->id = ret;
	vgpu->sched_ctl.weight = conf->weight;
	mutex_init(&vgpu->vgpu_lock);
	mutex_init(&vgpu->dmabuf_lock);
	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
	idr_init_base(&vgpu->object_idr, 1);
	intel_vgpu_init_cfg_space(vgpu, 1);
	vgpu->d3_entered = false;

	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, conf);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_opregion(vgpu);
	if (ret)
		goto out_clean_gtt;

	ret = intel_vgpu_init_display(vgpu, conf->edid);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_setup_submission(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_submission;

	intel_gvt_debugfs_add_vgpu(vgpu);

	ret = intel_gvt_set_opregion(vgpu);
	if (ret)
		goto out_clean_sched_policy;

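	/*
	 * The virtual EDID is exposed on port B for Broadwell and Broxton,
	 * and on port D for the other supported platforms.
	 */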
	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
		ret = intel_gvt_set_edid(vgpu, PORT_B);
	else
		ret = intel_gvt_set_edid(vgpu, PORT_D);
	if (ret)
		goto out_clean_sched_policy;

	intel_gvt_update_reg_whitelist(vgpu);
	mutex_unlock(&gvt->lock);
	return 0;

out_clean_sched_policy:
	intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
	intel_vgpu_clean_submission(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_unlock:
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when a user wants to reset a virtual GPU through
 * device model reset or GT reset. The caller should hold the vgpu lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to its default state, as when it was created. This vGPU
 * function is required both for functionality and security concerns. The
 * ultimate goal of vGPU FLR is to allow a vGPU instance to be reused by
 * virtual machines. When we assign a vGPU to a virtual machine we must issue
 * such a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement) defined by the GPU Spec.
 * Unlike the FLR, a GT reset only resets the particular resources of a vGPU
 * named in the reset request. The guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter engine_mask specifies the engines that need to be reset.
 * If ALL_ENGINES is given for engine_mask, the caller requests a full GT
 * reset and all virtual GPU engines are reset. For FLR, engine_mask is
 * ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
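	/* A DMLR covers all engines; a GT reset only those in engine_mask. */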
	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);

	vgpu->resetting_eng = resetting_eng;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will be set to NULL after stopping the
	 * scheduler when the reset is triggered by the current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&vgpu->vgpu_lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->vgpu_lock);
	}

	intel_vgpu_reset_submission(vgpu, resetting_eng);
	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
		if (engine_mask == ALL_ENGINES)
			intel_vgpu_invalidate_ppgtt(vgpu);
		/* fence will not be reset during virtual reset */
		if (dmlr) {
			if (!vgpu->d3_entered) {
				intel_vgpu_invalidate_ppgtt(vgpu);
				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
			}
			intel_vgpu_reset_ggtt(vgpu, true);
			intel_vgpu_reset_resource(vgpu);
		}

		intel_vgpu_reset_mmio(vgpu, dmlr);
		populate_pvinfo_page(vgpu);

		if (dmlr) {
			intel_vgpu_reset_display(vgpu);
			intel_vgpu_reset_cfg_space(vgpu);
			/* only reset the failsafe mode when dmlr reset */
			vgpu->failsafe = false;
			/*
			 * PCI_D0 is set before dmlr, so reset d3_entered here
			 * after it has been used.
			 */
			if (vgpu->d3_entered)
				vgpu->d3_entered = false;
			else
				vgpu->pv_notified = false;
		}
	}

	vgpu->resetting_eng = 0;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to reset a virtual GPU.
 *
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->vgpu_lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->vgpu_lock);
}