/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be NULL when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};
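
/*
 * Illustrative sketch (not part of the original header): a scheduling
 * policy behind @sched_ops would typically publish the next owner and
 * wake the per-engine worker threads roughly like this. The variable
 * names and exact call sequence are assumptions inferred from the
 * fields above, not a quote of the real policy code.
 *
 *	scheduler->next_vgpu = vgpu;
 *	scheduler->need_reschedule = true;
 *	for (i = 0; i < I915_NUM_ENGINES; i++)
 *		wake_up(&scheduler->waitq[i]);
 */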

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};
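
/*
 * Sketch of how the masks above carve up the guest's INDIRECT_CTX and
 * PER_CTX register values (an interpretation of the mask layout; the
 * CACHELINE_BYTES scaling of the size field and the bit-0 valid flag
 * are assumptions, not stated by this header):
 *
 *	wa_ctx->indirect_ctx.guest_gma = val & INDIRECT_CTX_ADDR_MASK;
 *	wa_ctx->indirect_ctx.size = (val & INDIRECT_CTX_SIZE_MASK) *
 *				    CACHELINE_BYTES;
 *	wa_ctx->per_ctx.guest_gma = val2 & PER_CTX_ADDR_MASK;
 *	wa_ctx->per_ctx.valid = val2 & 1;
 */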

struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	int ring_id;
	struct i915_request *req;
	/* true if this workload has been dispatched to i915 */
	bool dispatched;
	bool shadow;      /* true if the guest request has been shadowed */
	int status;

	struct intel_vgpu_mm *shadow_mm;

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};
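
/*
 * Minimal life-cycle sketch implied by the callbacks above (the worker
 * shape shown is an assumption; only prepare()/complete() and the
 * @status field come from this header):
 *
 *	ret = workload->prepare(workload);
 *	if (ret == 0)
 *		... dispatch workload->req to i915 and wait for it ...
 *	workload->status = ret;
 *	workload->complete(workload);
 */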

struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned int clflush;
	bool accessing;
	unsigned long bb_offset;
	bool ppgtt;
};
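
/*
 * Example walk (illustrative only): shadow batch buffers hang off
 * workload->shadow_bb through @list, so release code can iterate with
 * the standard list helpers:
 *
 *	struct intel_vgpu_shadow_bb *bb, *pos;
 *
 *	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list)
 *		... unpin bb->vma and put bb->obj ...
 */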

#define workload_q_head(vgpu, ring_id) \
	(&((vgpu)->submission.workload_q_head[ring_id]))
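
/*
 * Example use of workload_q_head() (hypothetical caller; the wake-up
 * pairing with the scheduler's waitq is an assumption):
 *
 *	if (!list_empty(workload_q_head(vgpu, ring_id)))
 *		wake_up(&scheduler->waitq[ring_id]);
 */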

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
			   struct execlist_ctx_descriptor_format *desc);
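
/*
 * Usage sketch (the ERR_PTR return convention shown is an assumption;
 * this header alone does not guarantee it):
 *
 *	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	intel_vgpu_queue_workload(workload);
 */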

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif