/* /linux/drivers/gpu/drm/i915/gvt/mmio.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda) */
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The offset of @gpa relative to the start of the vGPU's MMIO BAR (BAR0).
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return gpa - gttmmio_gpa;
}

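/*
 * Layout of the vGPU's BAR0: the virtual registers occupy the first
 * device_info.mmio_size bytes, and the virtual GGTT starts at
 * device_info.gtt_start_offset within the same BAR.
 */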
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
{
	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
	u64 aperture_sz = vgpu_aperture_sz(vgpu);

	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
}

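/*
 * Accesses that fall inside the aperture BAR (BAR2) are served directly
 * from the host virtual mapping of the vGPU's aperture slice rather than
 * being dispatched through the MMIO handlers.
 */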
static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
			    void *pdata, unsigned int size, bool is_read)
{
	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
	u64 offset = gpa - aperture_gpa;

	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
			     offset, size);
		return -EINVAL;
	}

	if (!vgpu->gm.aperture_va) {
		gvt_vgpu_err("BAR is not enabled\n");
		return -ENXIO;
	}

	if (is_read)
		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
	else
		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
	return 0;
}

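/*
 * Failsafe mode is entered when GVT-g detects an unsupported or
 * misbehaving guest. From then on, accesses are satisfied from the
 * virtual register file or the virtual GGTT page table, without
 * per-register handler dispatch, and errors are silently ignored.
 */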
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&gvt->lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
					bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
					bytes);
	} else if (reg_is_gtt(gvt, offset) &&
			vgpu->gtt.ggtt_mm->virtual_page_table) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);
	}
	mutex_unlock(&gvt->lock);
}


/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}

	mutex_lock(&gvt->lock);

	if (vgpu_gpa_is_aperture(vgpu, pa)) {
		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
		goto out;
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

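	/*
	 * A GGTT access must be an aligned read or write of a single
	 * 4- or 8-byte entry that lies entirely inside the GTT range.
	 */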
	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

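	/*
	 * An offset inside BAR0 but outside both the register file and
	 * the GGTT range is unexpected; warn once and fall back to
	 * treating the access as ordinary guest memory.
	 */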
	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;

err:
	gvt_vgpu_err("failed to emulate MMIO read %08x len %d\n",
			offset, bytes);
out:
	mutex_unlock(&gvt->lock);
	return ret;
}


/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&gvt->lock);

	if (vgpu_gpa_is_aperture(vgpu, pa)) {
		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
		goto out;
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

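	/*
	 * Dispatch to the per-register handler table; registers without
	 * a dedicated handler fall through to the default MMIO write.
	 */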
	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;

err:
	gvt_vgpu_err("failed to emulate MMIO write %08x len %d\n", offset,
		     bytes);
out:
	mutex_unlock(&gvt->lock);
	return ret;
}
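
/*
 * Both MMIO emulation entry points above are called by the hypervisor
 * backend (for example, the KVMGT VFIO region access handlers) when a
 * trapped guest access hits the vGPU's PCI BARs.
 */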

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is a device-model-level (full) reset
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;

		vgpu->mmio.disable_warn_untrack = false;
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/*
		 * Only reset the engine-related range, below 0x44200;
		 * the interrupt (including DE) and display MMIO ranges
		 * are left untouched.
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

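	/*
	 * A single allocation backs both register files: vreg (the
	 * guest-visible register values) occupies the first mmio_size
	 * bytes and sreg (the shadow copy) the second.
	 */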
	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu, true);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
336