/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	void *aperture_va;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	void *va_gopregion;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu);
	void (*clean)(struct intel_vgpu *vgpu);
	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	} vdev;
#endif

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;

	struct completion vblank_done;

};
/* Validate whether an error return value indicates an unhealthy VM */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t   offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

	struct engine_mmio *engine_mmio_list;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling triggered by a timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling triggered by an event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
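
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * periodic timer handler can kick the service thread to emulate vblanks,
 * while an event path can request an immediate reschedule. The callback
 * names below are hypothetical.
 *
 *	static void example_vblank_timer_fn(struct intel_gvt *gvt)
 *	{
 *		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
 *	}
 *
 *	static void example_workload_event(struct intel_gvt *gvt)
 *	{
 *		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 *	}
 */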

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* MB/byte conversion helpers and host-reserved GM/fence resources */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
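
/*
 * Layout sketch (illustrative): the global GM address space splits into a
 * CPU-mappable aperture at the bottom and a "hidden" (high) region above it.
 * Each vGPU gets one drm_mm_node carved out of each region. For example,
 * with a hypothetical vGPU whose low_gm_node starts at 128MB and whose
 * aperture_sz is 64MB, guest GM addresses [128MB, 192MB - 1] are its
 * aperture range, backed by host physical addresses starting at
 * gvt_aperture_pa_base() + 128MB.
 */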

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
	u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers.
   Explicitly separate use for typed MMIO reg or real offset. */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.sreg + (offset)))
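
/*
 * Usage sketch (illustrative): emulation handlers read and modify the
 * vGPU's virtual register file rather than touching hardware. The two
 * forms below are equivalent for a typed register; PIPECONF is used
 * purely as an example.
 *
 *	vgpu_vreg_t(vgpu, PIPECONF(pipe)) |= PIPECONF_ENABLE;
 *	vgpu_vreg(vgpu, i915_mmio_reg_offset(PIPECONF(pipe))) |= PIPECONF_ENABLE;
 */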

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
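
/*
 * Iteration sketch (illustrative): walk every active vGPU in the IDR pool.
 * Callers are expected to hold gvt->lock so the pool cannot change
 * underneath them. The per-vGPU hook below is hypothetical.
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	mutex_lock(&gvt->lock);
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		example_emulate_vblank(vgpu);
 *	mutex_unlock(&gvt->lock);
 */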

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}
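
/*
 * Example (illustrative): a 64-bit memory BAR is written as two 32-bit
 * halves. The low dword keeps its type/prefetch bits (bits 3:0); the high
 * dword is stored verbatim. The offsets below assume BAR0 at config
 * offset 0x10.
 *
 *	intel_vgpu_write_pci_bar(vgpu, 0x10, lower_32_bits(addr), true);
 *	intel_vgpu_write_pci_bar(vgpu, 0x14, upper_32_bits(addr), false);
 */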

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* Macros for validating GM addresses */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	 ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	 ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	  (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	  ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	   (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	  (gvt_gmadr_is_aperture(gvt, gmadr) || \
	    gvt_gmadr_is_hidden(gvt, gmadr))
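
/*
 * Validation sketch (illustrative): a guest GM address is valid only if it
 * falls inside one of the two ranges this vGPU owns; everything else must
 * be rejected before any translation is attempted.
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gmadr))
 *		return -EINVAL;
 */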

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
		bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* All the vGPU BARs are 64-bit */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}
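
/*
 * Usage sketch (illustrative): resolve the guest physical base of a BAR,
 * e.g. to decide whether a trapped guest access falls into the MMIO BAR.
 * The bounds check below assumes BAR0 (PCI_BASE_ADDRESS_0) is the MMIO BAR.
 *
 *	u64 mmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
 *	bool is_mmio = gpa >= mmio_gpa &&
 *		       gpa < mmio_gpa + vgpu->cfg_space.bar[0].size;
 */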

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
				struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
			const char *name);
	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
			struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
};


enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed by a VM
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by GPU commands, false otherwise.
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO allows unaligned access
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed with an unaligned address, false otherwise.
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO as accessed by a GPU command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its upper 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
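
/*
 * Mode-mask semantics sketch (illustrative): for a masked register, each
 * write carries its own write-enable mask in the upper 16 bits, so only
 * the bits whose mask bit is set actually change. For example, writing
 * 0x00010001 sets bit 0, while 0x00010000 clears it; the remaining bits
 * are untouched either way, which is why such registers need no
 * read-modify-write emulation.
 */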

int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);


#include "trace.h"
#include "mpt.h"

#endif