/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *

 */

#include <linux/vmalloc.h>

#include <drm/display/drm_dp.h>
#include <drm/drm_print.h>

#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_fbc.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sbi_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "display_helpers.h"
#include "gvt.h"
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
#include "intel_mchbar_regs.h"
#include "sched_policy.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS  _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	struct drm_i915_private *i915 = gvt->gt->i915;

	if (IS_BROADWELL(i915))
		return D_BDW;
	else if (IS_SKYLAKE(i915))
		return D_SKL;
	else if (IS_KABYLAKE(i915))
		return D_KBL;
	else if (IS_BROXTON(i915))
		return D_BXT;
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		return D_CFL;

	return 0;
}

static bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
						  unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
			   u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
			   gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		p = intel_gvt_find_mmio_info(gvt, i);
		if (!p) {
			WARN(1, "assign a handler to a non-tracked mmio %x\n",
				i);
			return -ENODEV;
		}
		p->ro_mask = ro_mask;
		gvt->mmio.mmio_attribute[i / 4] = flags;
		if (read)
			p->read = read;
		if (write)
			p->write = write;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * The engine containing the offset within its mmio page.
 */
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->gt, id)
		if (engine->mmio_base == offset)
			return engine;

	return NULL;
}

#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
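/*
 * Each GEN6+ fence register is a 64-bit LO/HI pair, so consecutive fences
 * sit 8 bytes apart; the conversions above just scale by that stride.
 * Illustrative example (assuming the usual 0x100000 GEN6 fence base):
 * offset 0x100010 maps to fence 2, and fence_num_to_offset(2) gives
 * 0x100010 back.
 */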


void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected that the guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Not enough graphics resources for the guest\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT internal error for the guest\n");
		break;
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("access oob fence reg %d/%d\n",
			     fence_num, max_fence);

		/* If the guest accesses OOB fence regs without accessing
		 * pv_info first, we treat the guest as not supporting GVT
		 * and let the vGPU enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* All engines must be enabled together for vGPU,
			 * since we don't know which engine the ppgtt will
			 * bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int fence_num = offset_to_fence_num(off);
	intel_wakeref_t wakeref;
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	wakeref = mmio_hw_access_pre(gvt->gt);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(gvt->gt, wakeref);
	return 0;
}

#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))
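/*
 * Worked example of the masked-write semantics implemented by
 * CALC_MODE_MASK_REG (illustrative values, not from any spec): these
 * registers carry a write mask in bits 31:16 and data in bits 15:0, and a
 * low bit only changes when its mask bit is set in the same write.
 * With old = 0x0000000f and a guest write of new = 0x00030001
 * (mask = 0b11, data = 0b01):
 *   (new & GENMASK(31, 16))           = 0x00030000
 *   ((old & 0xffff) & ~(new >> 16))   = 0x0000000c  (bits 3:2 preserved)
 *   (new & (new >> 16))               = 0x00000001  (bit 0 set)
 * giving 0x0003000d: bit 0 set, bit 1 cleared, bits 3:2 untouched.
 */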

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_GT_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	intel_engine_mask_t engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= BIT(RCS0);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= BIT(VCS0);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS reset\n", vgpu->id);
			engine_mask |= BIT(BCS0);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS reset\n", vgpu->id);
			engine_mask |= BIT(VECS0);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 reset\n", vgpu->id);
			engine_mask |= BIT(VCS1);
		}
		if (data & GEN9_GRDOM_GUC) {
			gvt_dbg_mmio("vgpu%d: request GuC reset\n", vgpu->id);
			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
		}
		engine_mask &= vgpu->gvt->gt->info.engine_mask;
	}

	/* vgpu_lock already held by emulate mmio r/w */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;

	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/*
 * Only PIPE_A is enabled in the current vGPU display, and PIPE_A is tied to
 *   TRANSCODER_A in HW. The DDI/PORT can be any PORT_x, depending on
 *   setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initial
 *   enabled DPLL. Later the guest driver may set up a different DPLLx when
 *   setting the mode.
 * So the correct sequence to find the DP stream clock is:
 *   Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 *   Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 *   Pixel clock = h_total * v_total * refresh_rate
 *   stream clock = Pixel clock
 *   ls_clk = DP bitrate
 *   Link M/N = strm_clk / ls_clk
 */

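/*
 * Worked example with illustrative numbers (a 1080p-like mode, not taken
 * from any particular panel): dp_br = 540000 and a link M/N ratio of
 * 0.275 give a stream (pixel) clock of 540000 * 0.275 = 148500. With
 * (htotal + 1) = 2200 and (vtotal + 1) = 1125, the refresh rate is
 * 148500000 / (2200 * 1125) = 60 Hz, which vgpu_update_refresh_rate()
 * below stores scaled by 1000 in vrefresh_k.
 */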
static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_LCPLL_2700:
		dp_br = 270000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		dp_br = 135000 * 2;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		dp_br = 81000 * 2;
		break;
	case PORT_CLK_SEL_SPLL:
	{
		switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
		case SPLL_FREQ_810MHz:
			dp_br = 81000 * 2;
			break;
		case SPLL_FREQ_1350MHz:
			dp_br = 135000 * 2;
			break;
		case SPLL_FREQ_2700MHz:
			dp_br = 270000 * 2;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
				    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
			break;
		}
		break;
	}
	case PORT_CLK_SEL_WRPLL1:
	case PORT_CLK_SEL_WRPLL2:
	{
		u32 wrpll_ctl;
		int refclk, n, p, r;

		if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
		else
			wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));

		switch (wrpll_ctl & WRPLL_REF_MASK) {
		case WRPLL_REF_PCH_SSC:
			refclk = 135000;
			break;
		case WRPLL_REF_LCPLL:
			refclk = 2700000;
			break;
		default:
			gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
				    vgpu->id, port_name(port), wrpll_ctl);
			goto out;
		}

		r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
		p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
		n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

		dp_br = (refclk * n / 10) / (p * r) * 2;
		break;
	}
	default:
		gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
			    vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
		break;
	}

out:
	return dp_br;
}

static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	int refclk = 100000;
	enum dpio_phy phy = DPIO_PHY0;
	enum dpio_channel ch = DPIO_CH0;
	struct dpll clock = {};
	u32 temp;

	/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
	switch (port) {
	case PORT_A:
		phy = DPIO_PHY1;
		ch = DPIO_CH0;
		break;
	case PORT_B:
		phy = DPIO_PHY0;
		ch = DPIO_CH0;
		break;
	case PORT_C:
		phy = DPIO_PHY0;
		ch = DPIO_CH1;
		break;
	default:
		gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
		goto out;
	}

	temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
	if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
			    vgpu->id, port_name(port), temp);
		goto out;
	}

	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
	if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
					  vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
				vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
				 vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
	clock.m = clock.m1 * clock.m2;
	clock.p = clock.p1 * clock.p2 * 5;

	if (clock.n == 0 || clock.p == 0) {
		gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
		goto out;
	}

	clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
	clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);

	dp_br = clock.dot;

out:
	return dp_br;
}

static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
	u32 dp_br = 0;
	enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;

	/* Find the enabled DPLL for the DDI/PORT */
	if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
	    (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
		dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
			DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
	} else {
		gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
			    vgpu->id, port_name(port));
		return dp_br;
	}

	/* Find the PLL output frequency from the correct DPLL, and get the bit rate */
	switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
		DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
		case DPLL_CTRL1_LINK_RATE_810:
			dp_br = 81000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1080:
			dp_br = 108000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1350:
			dp_br = 135000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_1620:
			dp_br = 162000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2160:
			dp_br = 216000 * 2;
			break;
		case DPLL_CTRL1_LINK_RATE_2700:
			dp_br = 270000 * 2;
			break;
		default:
			dp_br = 0;
			gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
				    vgpu->id, port_name(port), dpll_id);
	}

	return dp_br;
}

static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	enum port port;
	u32 dp_br, link_m, link_n, htotal, vtotal;

	/* Find the DDI/PORT assigned to TRANSCODER_A; expect PORT_B or PORT_D */
	port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &
		TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
	if (port != PORT_B && port != PORT_D) {
		gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
		return;
	}

	/* Calculate DP bitrate from PLL */
	if (IS_BROADWELL(dev_priv))
		dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
	else if (IS_BROXTON(dev_priv))
		dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
	else
		dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);

	/* Get DP link symbol clock M/N */
	link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A));
	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A));

	/* Get H/V total from transcoder timing */
	htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
	vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

	if (dp_br && link_n && htotal && vtotal) {
		u64 pixel_clk = 0;
		u32 new_rate = 0;
		u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);

		/* Calculate pixel clock by (ls_clk * M / N) */
		pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
		pixel_clk *= MSEC_PER_SEC;

		/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));

		if (*old_rate != new_rate)
			*old_rate = new_rate;

		gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
			    vgpu->id, pipe_name(PIPE_A), new_rate);
	}
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & TRANSCONF_ENABLE) {
		vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
		vgpu_update_refresh_rate(vgpu);
		vgpu_update_vblank_emulation(vgpu, true);
	} else {
		vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
		vgpu_update_vblank_emulation(vgpu, false);
	}
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	_MMIO(0xd80),
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG, //_MMIO(0x2248)
	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
	PS_INVOCATION_COUNT, //_MMIO(0x2348)
	PS_DEPTH_COUNT, //_MMIO(0x2350)
	GEN8_CS_CHICKEN1, //_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x2754),
	_MMIO(0x28a0),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1, //_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0, //_MMIO(0x7300)
	GEN8_HDC_CHICKEN1, //_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0x83a8),
	_MMIO(0xb110),
	_MMIO(0xb118),
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
	_MMIO(0x64844),
};

/* a simple bsearch */
static inline bool in_whitelist(u32 reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}

static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return -EINVAL;
	}

	if (!in_whitelist(reg_nonpriv) &&
	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);
	} else
		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1         0
#define FDI_LINK_TRAIN_PATTERN2         1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If imr bit has been masked */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, i915_reg_t _start,
			       i915_reg_t _next, i915_reg_t _end)
{
	u32 start = i915_mmio_reg_offset(_start);
	u32 next = i915_mmio_reg_offset(_next);
	u32 end = i915_mmio_reg_offset(_end);
	u32 stride = next - start;

	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / stride;
}

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, FDI_RX_CTL(PIPE_A), FDI_RX_CTL(PIPE_B), FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, FDI_TX_CTL(PIPE_A), FDI_TX_CTL(PIPE_B), FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, FDI_RX_IMR(PIPE_A), FDI_RX_IMR(PIPE_B), FDI_RX_IMR(PIPE_C))
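/*
 * Illustrative use of calc_index() via the helpers above (assuming the
 * usual 0x1000 stride between the per-pipe FDI registers): an offset equal
 * to FDI_RX_CTL(PIPE_B) decodes to index 1 (PIPE_B), while any offset
 * outside the [PIPE_A, PIPE_C] register range decodes to INVALID_INDEX.
 */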

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported registers %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, DP_TP_CTL(PORT_A), DP_TP_CTL(PORT_B), DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}

static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(display, offset) \
	calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	u32 pipe = DSPSURF_TO_PIPE(display, offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);

	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;

	if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, SPRSURF(PIPE_A), SPRSURF(PIPE_B), SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 pipe = SPRSURF_TO_PIPE(offset);
	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);

	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int reg50080_mmio_write(struct intel_vgpu *vgpu,
			       unsigned int offset, void *p_data,
			       unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = dev_priv->display;
	enum pipe pipe = REG_50080_TO_PIPE(offset);
	enum plane_id plane = REG_50080_TO_PLANE(offset);
	int event = SKL_FLIP_EVENT(pipe, plane);

	write_vreg(vgpu, offset, p_data, bytes);
	if (plane == PLANE_PRIMARY) {
		vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
	} else {
		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
	}

	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);

	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum intel_gvt_event_type event;

	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
		event = AUX_CHANNEL_A;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_B)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
		event = AUX_CHANNEL_B;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_C)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
		event = AUX_CHANNEL_C;
	else if (reg == i915_mmio_reg_offset(PCH_DP_AUX_CH_CTL(AUX_CH_D)) ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
		event = AUX_CHANNEL_D;
	else {
		drm_WARN_ON(&dev_priv->drm, true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		u8 t)
{
	if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE |
			DP_LANE_CR_DONE << 4;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE |
			DP_LANE_CR_DONE << 4;
	} else if ((t & DP_TRAINING_PATTERN_MASK) ==
			DP_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_CHANNEL_EQ_DONE << 4;
		dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED |
			DP_LANE_SYMBOL_LOCKED << 4;
		/* set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
			DP_LANE_CHANNEL_EQ_DONE << 4;
		dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED |
			DP_LANE_SYMBOL_LOCKED << 4;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |=
			DP_INTERLANE_ALIGN_DONE;
	} else if ((t & DP_TRAINING_PATTERN_MASK) ==
			DP_TRAINING_PATTERN_DISABLE) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS |
			DP_RECEIVE_PORT_1_STATUS;
	}
}

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port)	\
		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))

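/*
 * The AUX transaction header lives in the first data register (CTL + 4)
 * and is unpacked below as:
 *   bits 31:24  ctrl (the command sits in the high nibble, op = ctrl >> 4)
 *   bits 23:8   DPCD address (16 of the 20 address bits are used here)
 *   bits  7:0   len (message size field)
 * Illustrative example: msg = 0x80010200 decodes to op 0x8
 * (DP_AUX_NATIVE_WRITE), addr 0x102 (DP_TRAINING_PATTERN_SET) and len 0,
 * i.e. a one-byte payload, since the hardware length is len + 1.
 */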
static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9 &&
	    offset != i915_mmio_reg_offset(DP_AUX_CH_CTL(port_index))) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
		   offset != i915_mmio_reg_offset(port_index ?
						  PCH_DP_AUX_CH_CTL(port_index) :
						  DP_AUX_CH_CTL(port_index))) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;

	if (op == DP_AUX_NATIVE_WRITE) {
		int t;
		u8 buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and “M” equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: the header (command + address + size)
		 * occupies 4 bytes, followed by (len + 1) bytes of data. See
		 * details at intel_dp_aux_transfer().
		 */
		if ((len + 1 + 4) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DP_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == DP_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * The read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the READ */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if ((len + 2) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;

				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
			vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI caching reached its maximum limit\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) {
		unsigned int sbi_offset;

		sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));

		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~SBI_STATUS_MASK;
	data |= SBI_STATUS_READY;

	data &= ~SBI_RESPONSE_MASK;
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) {
		unsigned int sbi_offset;

		sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR));

		write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA));
	}
	return 0;
}

#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
		_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}
	if (invalid_read)
		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	vgpu->pv_notified = true;
	return 0;
}

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	struct intel_vgpu_mm *mm;
	u64 *pdps;

	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		fallthrough;
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
		return PTR_ERR_OR_ZERO(mm);
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
		break;
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
	char *env[3] = {NULL, NULL, NULL};
	char vmid_str[20];
	char display_ready_str[20];

	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
	env[0] = display_ready_str;

	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
	env[1] = vmid_str;

	return kobject_uevent_env(kobj, KOBJ_ADD, env);
}

static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	bool invalid_write = false;

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		handle_g2v_notification(vgpu, data);
		break;
	/* add xhot and yhot to handled list to avoid error log */
	case _vgtif_reg(cursor_x_hot):
	case _vgtif_reg(cursor_y_hot):
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
		invalid_write = true;
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
		break;
	default:
		invalid_write = true;
		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}

	if (!invalid_write)
		write_vreg(vgpu, offset, p_data, bytes);

	return 0;
}

static int pf_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	u32 val = *(u32 *)p_data;

	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
	   offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) {
		drm_WARN_ONCE(&i915->drm, true,
			      "VM(%d): guest is trying to scale a plane\n",
			      vgpu->id);
		return 0;
	}

	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}

power_well_ctl_mmio_write(struct intel_vgpu * vgpu,unsigned int offset,void * p_data,unsigned int bytes)1602 static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1603 		unsigned int offset, void *p_data, unsigned int bytes)
1604 {
1605 	write_vreg(vgpu, offset, p_data, bytes);
1606 
1607 	if (vgpu_vreg(vgpu, offset) &
1608 	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
1609 		vgpu_vreg(vgpu, offset) |=
1610 			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1611 	else
1612 		vgpu_vreg(vgpu, offset) &=
1613 			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
1614 	return 0;
1615 }
1616 
1617 static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1618 		unsigned int offset, void *p_data, unsigned int bytes)
1619 {
1620 	write_vreg(vgpu, offset, p_data, bytes);
1621 
1622 	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1623 		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1624 	else
1625 		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1626 
1627 	return 0;
1628 }
1629 
1630 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1631 	unsigned int offset, void *p_data, unsigned int bytes)
1632 {
1633 	write_vreg(vgpu, offset, p_data, bytes);
1634 
1635 	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
1636 		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
1637 	return 0;
1638 }
1639 
1640 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1641 		void *p_data, unsigned int bytes)
1642 {
1643 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1644 	u32 mode;
1645 
1646 	write_vreg(vgpu, offset, p_data, bytes);
1647 	mode = vgpu_vreg(vgpu, offset);
1648 
1649 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1650 		drm_WARN_ONCE(&i915->drm, 1,
1651 				"VM(%d): iGVT-g doesn't support GuC\n",
1652 				vgpu->id);
1653 		return 0;
1654 	}
1655 
1656 	return 0;
1657 }
1658 
1659 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
1660 		void *p_data, unsigned int bytes)
1661 {
1662 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1663 	u32 trtte = *(u32 *)p_data;
1664 
1665 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
1666 		drm_WARN(&i915->drm, 1,
1667 				"VM(%d): Use physical address for TRTT!\n",
1668 				vgpu->id);
1669 		return -EINVAL;
1670 	}
1671 	write_vreg(vgpu, offset, p_data, bytes);
1672 
1673 	return 0;
1674 }
1675 
1676 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
1677 		void *p_data, unsigned int bytes)
1678 {
1679 	write_vreg(vgpu, offset, p_data, bytes);
1680 	return 0;
1681 }
1682 
1683 static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
1684 		void *p_data, unsigned int bytes)
1685 {
1686 	u32 v = 0;
1687 
1688 	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
1689 		v |= (1 << 0);
1690 
1691 	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
1692 		v |= (1 << 8);
1693 
1694 	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
1695 		v |= (1 << 16);
1696 
1697 	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
1698 		v |= (1 << 24);
1699 
1700 	vgpu_vreg(vgpu, offset) = v;
1701 
1702 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1703 }
1704 
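/*
 * Emulate the GEN6_PCODE_MAILBOX interface: fabricate plausible DATA
 * values for the commands the guest is known to issue (memory latency,
 * CDCLK control, RC6 VIDs) and always report the mailbox as ready and
 * error-free, since no real pcode access happens here.
 */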
1705 static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1706 		void *p_data, unsigned int bytes)
1707 {
1708 	u32 value = *(u32 *)p_data;
1709 	u32 cmd = value & 0xff;
1710 	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
1711 
1712 	switch (cmd) {
1713 	case GEN9_PCODE_READ_MEM_LATENCY:
1714 		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1715 		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1716 		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1717 		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
1718 			/*
1719 			 * "Read memory latency" command on gen9.
1720 			 * The memory latency values below were read
1721 			 * from a Skylake platform.
1722 			 */
1723 			if (!*data0)
1724 				*data0 = 0x1e1a1100;
1725 			else
1726 				*data0 = 0x61514b3d;
1727 		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
1728 			/*
1729 			 * "Read memory latency" command on gen9.
1730 			 * The memory latency values below were read
1731 			 * from a Broxton MRB.
1732 			 */
1733 			if (!*data0)
1734 				*data0 = 0x16080707;
1735 			else
1736 				*data0 = 0x16161616;
1737 		}
1738 		break;
1739 	case SKL_PCODE_CDCLK_CONTROL:
1740 		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
1741 		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
1742 		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
1743 		    IS_COMETLAKE(vgpu->gvt->gt->i915))
1744 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
1745 		break;
1746 	case GEN6_PCODE_READ_RC6VIDS:
1747 		*data0 |= 0x1;
1748 		break;
1749 	}
1750 
1751 	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
1752 		     vgpu->id, value, *data0);
1753 	/*
1754 	 * A clear PCODE_READY means the mailbox is ready for a pcode
1755 	 * read/write, and a clear PCODE_ERROR_MASK means no error occurred.
1756 	 * GVT-g always emulates pcode accesses as successful and ready,
1757 	 * since no real physical registers are touched here.
1758 	 */
1759 	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
1760 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1761 }
1762 
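/*
 * Emulate hardware status page address writes: validate that the guest
 * provided a GGTT-backed address, remember it per engine so the host can
 * mirror CSB updates into the right page, then store it in the vreg.
 */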
1763 static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
1764 		void *p_data, unsigned int bytes)
1765 {
1766 	u32 value = *(u32 *)p_data;
1767 	const struct intel_engine_cs *engine =
1768 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
1769 
1770 	if (value != 0 &&
1771 	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
1772 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
1773 			      offset, value);
1774 		return -EINVAL;
1775 	}
1776 
1777 	/*
1778 	 * All HWSP register writes need to be emulated so that the host can
1779 	 * update the VM's CSB status correctly. The registers listed here
1780 	 * cover BDW, SKL and other platforms with the same HWSP registers.
1781 	 */
1782 	if (unlikely(!engine)) {
1783 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
1784 			     offset);
1785 		return -EINVAL;
1786 	}
1787 	vgpu->hws_pga[engine->id] = value;
1788 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
1789 		     vgpu->id, value, offset);
1790 
1791 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1792 }
1793 
1794 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
1795 		unsigned int offset, void *p_data, unsigned int bytes)
1796 {
1797 	u32 v = *(u32 *)p_data;
1798 
1799 	if (IS_BROXTON(vgpu->gvt->gt->i915))
1800 		v &= (1 << 31) | (1 << 29);
1801 	else
1802 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
1803 			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
1804 	v |= (v >> 1);
1805 
1806 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
1807 }
1808 
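/*
 * Emulate LCPLL control: only the enable and lock bits are writable
 * (other bits are MBZ), and enabling the PLL immediately reports it as
 * locked.
 */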
1809 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1810 		void *p_data, unsigned int bytes)
1811 {
1812 	u32 v = *(u32 *)p_data;
1813 
1814 	/* other bits are MBZ. */
1815 	v &= (1 << 31) | (1 << 30);
1816 	v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
1817 
1818 	vgpu_vreg(vgpu, offset) = v;
1819 
1820 	return 0;
1821 }
1822 
1823 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
1824 		unsigned int offset, void *p_data, unsigned int bytes)
1825 {
1826 	u32 v = *(u32 *)p_data;
1827 
1828 	if (v & BXT_DE_PLL_PLL_ENABLE)
1829 		v |= BXT_DE_PLL_LOCK;
1830 
1831 	vgpu_vreg(vgpu, offset) = v;
1832 
1833 	return 0;
1834 }
1835 
1836 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
1837 		unsigned int offset, void *p_data, unsigned int bytes)
1838 {
1839 	u32 v = *(u32 *)p_data;
1840 
1841 	if (v & PORT_PLL_ENABLE)
1842 		v |= PORT_PLL_LOCK;
1843 
1844 	vgpu_vreg(vgpu, offset) = v;
1845 
1846 	return 0;
1847 }
1848 
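/*
 * Propagate the common-lane reset state of a BXT PHY family to the
 * per-DDI PHY_CTL registers so the guest sees the lanes as enabled once
 * the common reset is de-asserted.
 */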
1849 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1850 		unsigned int offset, void *p_data, unsigned int bytes)
1851 {
1852 	u32 v = *(u32 *)p_data;
1853 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1854 
1855 	switch (offset) {
1856 	case _PHY_CTL_FAMILY_EDP:
1857 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1858 		break;
1859 	case _PHY_CTL_FAMILY_DDI:
1860 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1861 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1862 		break;
1863 	}
1864 
1865 	vgpu_vreg(vgpu, offset) = v;
1866 
1867 	return 0;
1868 }
1869 
1870 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
1871 		unsigned int offset, void *p_data, unsigned int bytes)
1872 {
1873 	u32 v = vgpu_vreg(vgpu, offset);
1874 
1875 	v &= ~UNIQUE_TRANGE_EN_METHOD;
1876 
1877 	vgpu_vreg(vgpu, offset) = v;
1878 
1879 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1880 }
1881 
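/*
 * A write to a BXT PORT_PCS_DW12 group register is fanned out to the
 * per-lane copies as well; the fixed 0x400/0x600/0x800 deltas appear to
 * map the group register back onto its per-lane instances.
 */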
1882 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
1883 		unsigned int offset, void *p_data, unsigned int bytes)
1884 {
1885 	u32 v = *(u32 *)p_data;
1886 
1887 	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
1888 		vgpu_vreg(vgpu, offset - 0x600) = v;
1889 		vgpu_vreg(vgpu, offset - 0x800) = v;
1890 	} else {
1891 		vgpu_vreg(vgpu, offset - 0x400) = v;
1892 		vgpu_vreg(vgpu, offset - 0x600) = v;
1893 	}
1894 
1895 	vgpu_vreg(vgpu, offset) = v;
1896 
1897 	return 0;
1898 }
1899 
1900 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
1901 		unsigned int offset, void *p_data, unsigned int bytes)
1902 {
1903 	u32 v = *(u32 *)p_data;
1904 
1905 	if (v & BIT(0)) {
1906 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
1907 			~PHY_RESERVED;
1908 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
1909 			PHY_POWER_GOOD;
1910 	}
1911 
1912 	if (v & BIT(1)) {
1913 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
1914 			~PHY_RESERVED;
1915 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
1916 			PHY_POWER_GOOD;
1917 	}
1918 
1920 	vgpu_vreg(vgpu, offset) = v;
1921 
1922 	return 0;
1923 }
1924 
1925 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
1926 		unsigned int offset, void *p_data, unsigned int bytes)
1927 {
1928 	vgpu_vreg(vgpu, offset) = 0;
1929 	return 0;
1930 }
1931 
1932 /*
1933  * FixMe:
1934  * If the guest fills a non-privileged batch buffer on ApolloLake/Broxton as
1935  * Mesa i965 did in 717e7539124d ("i965: Use a WC map and memcpy for the batch
1936  * instead of pwrite."), the missing flush of the batch buffer filled by the
1937  * VM vCPU makes the host GPU hang on executing these MI_BATCH_BUFFERs.
1938  * Temporarily work around this by setting the SNOOP bit for PAT3, which is
1939  * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
1940  * Performance is still expected to be low; further improvement is needed.
1941  */
1942 static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
1943 			      void *p_data, unsigned int bytes)
1944 {
1945 	u64 pat =
1946 		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1947 		GEN8_PPAT(1, 0) |
1948 		GEN8_PPAT(2, 0) |
1949 		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
1950 		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1951 		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1952 		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1953 		GEN8_PPAT(7, CHV_PPAT_SNOOP);
1954 
1955 	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
1956 
1957 	return 0;
1958 }
1959 
1960 static int guc_status_read(struct intel_vgpu *vgpu,
1961 			   unsigned int offset, void *p_data,
1962 			   unsigned int bytes)
1963 {
1964 	/* keep MIA_IN_RESET before clearing */
1965 	read_vreg(vgpu, offset, p_data, bytes);
1966 	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
1967 	return 0;
1968 }
1969 
1970 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1971 		unsigned int offset, void *p_data, unsigned int bytes)
1972 {
1973 	struct intel_gvt *gvt = vgpu->gvt;
1974 	const struct intel_engine_cs *engine =
1975 		intel_gvt_render_mmio_to_engine(gvt, offset);
1976 
1977 	/*
1978 	 * Read the HW reg in the following cases:
1979 	 * a. the offset is not a ring MMIO;
1980 	 * b. this vGPU currently owns the offset's ring on the HW;
1981 	 * c. the offset is a ring timestamp MMIO.
1982 	 */
1983 
1984 	if (!engine ||
1985 	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
1986 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
1987 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
1988 		intel_wakeref_t wakeref;
1989 
1990 		wakeref = mmio_hw_access_pre(gvt->gt);
1991 		vgpu_vreg(vgpu, offset) =
1992 			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
1993 		mmio_hw_access_post(gvt->gt, wakeref);
1994 	}
1995 
1996 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1997 }
1998 
1999 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2000 		void *p_data, unsigned int bytes)
2001 {
2002 	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2003 	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2004 	struct intel_vgpu_execlist *execlist;
2005 	u32 data = *(u32 *)p_data;
2006 	int ret = 0;
2007 
2008 	if (drm_WARN_ON(&i915->drm, !engine))
2009 		return -EINVAL;
2010 
2011 	/*
2012 	 * d3_entered indicates that PPGTT invalidation should be skipped on
2013 	 * vGPU reset: it is set on the D0->D3 PCI config write and cleared
2014 	 * after a vGPU reset when resuming.
2015 	 * On S0ix exit the device power state also transitions from D3 to D0,
2016 	 * as in S3 resume, but without a vGPU reset (which would be triggered
2017 	 * by the QEMU device model). After S0ix exit all engines continue to
2018 	 * work, yet d3_entered remains set, which breaks the next vGPU reset
2019 	 * (the expected PPGTT invalidation is missed).
2020 	 * Engines can only work in D0, so the first ELSP write gives GVT a
2021 	 * chance to clear d3_entered.
2022 	 */
2023 	if (vgpu->d3_entered)
2024 		vgpu->d3_entered = false;
2025 
2026 	execlist = &vgpu->submission.execlist[engine->id];
2027 
2028 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
2029 	if (execlist->elsp_dwords.index == 3) {
2030 		ret = intel_vgpu_submit_execlist(vgpu, engine);
2031 		if (ret)
2032 			gvt_vgpu_err("failed to submit workload on ring %s\n",
2033 				     engine->name);
2034 	}
2035 
2036 	++execlist->elsp_dwords.index;
2037 	execlist->elsp_dwords.index &= 0x3;
2038 	return ret;
2039 }
2040 
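/*
 * Emulate ring mode register writes: unsupported mode bits push the vGPU
 * into failsafe mode, and toggling GFX_RUN_LIST_ENABLE selects execlist
 * submission and starts the vGPU scheduler.
 */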
2041 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2042 		void *p_data, unsigned int bytes)
2043 {
2044 	u32 data = *(u32 *)p_data;
2045 	const struct intel_engine_cs *engine =
2046 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
2047 	bool enable_execlist;
2048 	int ret;
2049 
2050 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
2051 	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2052 	    IS_COMETLAKE(vgpu->gvt->gt->i915))
2053 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
2054 	write_vreg(vgpu, offset, p_data, bytes);
2055 
2056 	if (IS_MASKED_BITS_ENABLED(data, 1)) {
2057 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2058 		return 0;
2059 	}
2060 
2061 	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
2062 	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
2063 	    IS_MASKED_BITS_ENABLED(data, 2)) {
2064 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2065 		return 0;
2066 	}
2067 
2068 	/* When PPGTT mode is enabled, check whether the guest has notified
2069 	 * GVT via pvinfo; if not, treat it as a non-GVT-g-aware guest and
2070 	 * stop emulating its cfg space, MMIO, GTT, etc.
2071 	 */
2072 	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
2073 	    IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
2074 	    !vgpu->pv_notified) {
2075 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2076 		return 0;
2077 	}
2078 	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
2079 	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
2080 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
2081 
2082 		gvt_dbg_core("EXECLIST %s on ring %s\n",
2083 			     (enable_execlist ? "enabling" : "disabling"),
2084 			     engine->name);
2085 
2086 		if (!enable_execlist)
2087 			return 0;
2088 
2089 		ret = intel_vgpu_select_submission_ops(vgpu,
2090 						       engine->mask,
2091 						       INTEL_VGPU_EXECLIST_SUBMISSION);
2092 		if (ret)
2093 			return ret;
2094 
2095 		intel_vgpu_start_schedule(vgpu);
2096 	}
2097 	return 0;
2098 }
2099 
2100 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
2101 		unsigned int offset, void *p_data, unsigned int bytes)
2102 {
2103 	unsigned int id = 0;
2104 
2105 	write_vreg(vgpu, offset, p_data, bytes);
2106 	vgpu_vreg(vgpu, offset) = 0;
2107 
2108 	switch (offset) {
2109 	case 0x4260:
2110 		id = RCS0;
2111 		break;
2112 	case 0x4264:
2113 		id = VCS0;
2114 		break;
2115 	case 0x4268:
2116 		id = VCS1;
2117 		break;
2118 	case 0x426c:
2119 		id = BCS0;
2120 		break;
2121 	case 0x4270:
2122 		id = VECS0;
2123 		break;
2124 	default:
2125 		return -EINVAL;
2126 	}
2127 	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
2128 
2129 	return 0;
2130 }
2131 
2132 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
2133 	unsigned int offset, void *p_data, unsigned int bytes)
2134 {
2135 	u32 data;
2136 
2137 	write_vreg(vgpu, offset, p_data, bytes);
2138 	data = vgpu_vreg(vgpu, offset);
2139 
2140 	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
2141 		data |= RESET_CTL_READY_TO_RESET;
2142 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
2143 		data &= ~RESET_CTL_READY_TO_RESET;
2144 
2145 	vgpu_vreg(vgpu, offset) = data;
2146 	return 0;
2147 }
2148 
2149 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
2150 				    unsigned int offset, void *p_data,
2151 				    unsigned int bytes)
2152 {
2153 	u32 data = *(u32 *)p_data;
2154 
2155 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
2156 	write_vreg(vgpu, offset, p_data, bytes);
2157 
2158 	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
2159 	    IS_MASKED_BITS_ENABLED(data, 0x8))
2160 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
2161 
2162 	return 0;
2163 }
2164 
2165 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
2166 	ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2167 		s, f, am, rm, d, r, w); \
2168 	if (ret) \
2169 		return ret; \
2170 } while (0)
2171 
2172 #define MMIO_DH(reg, d, r, w) \
2173 	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
2174 
2175 #define MMIO_DFH(reg, d, f, r, w) \
2176 	MMIO_F(reg, 4, f, 0, 0, d, r, w)
2177 
2178 #define MMIO_GM(reg, d, r, w) \
2179 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
2180 
2181 #define MMIO_GM_RDR(reg, d, r, w) \
2182 	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
2183 
2184 #define MMIO_RO(reg, d, f, rm, r, w) \
2185 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
2186 
2187 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
2188 	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
2189 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
2190 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
2191 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
2192 	if (HAS_ENGINE(gvt->gt, VCS1)) \
2193 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
2194 } while (0)
2195 
2196 #define MMIO_RING_DFH(prefix, d, f, r, w) \
2197 	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
2198 
2199 #define MMIO_RING_GM(prefix, d, r, w) \
2200 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
2201 
2202 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
2203 	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
2204 
2205 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
2206 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
2207 
2208 static int init_generic_mmio_info(struct intel_gvt *gvt)
2209 {
2210 	struct drm_i915_private *dev_priv = gvt->gt->i915;
2211 	struct intel_display *display = dev_priv->display;
2212 	int ret;
2213 
2214 	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
2215 		intel_vgpu_reg_imr_handler);
2216 
2217 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
2218 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
2219 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
2220 
2221 	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
2222 
2223 
2224 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
2225 		gamw_echo_dev_rw_ia_write);
2226 
2227 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2228 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2229 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
2230 
2231 #define RING_REG(base) _MMIO((base) + 0x28)
2232 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2233 #undef RING_REG
2234 
2235 #define RING_REG(base) _MMIO((base) + 0x134)
2236 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2237 #undef RING_REG
2238 
2239 #define RING_REG(base) _MMIO((base) + 0x6c)
2240 	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
2241 #undef RING_REG
2242 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
2243 
2244 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
2245 	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
2246 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
2247 
2248 	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
2249 	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
2250 	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
2251 	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
2252 	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
2253 
2254 	/* RING MODE */
2255 #define RING_REG(base) _MMIO((base) + 0x29c)
2256 	MMIO_RING_DFH(RING_REG, D_ALL,
2257 		F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL,
2258 		ring_mode_mmio_write);
2259 #undef RING_REG
2260 
2261 	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2262 		NULL, NULL);
2263 	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2264 			NULL, NULL);
2265 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
2266 			mmio_read_from_hw, NULL);
2267 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
2268 			mmio_read_from_hw, NULL);
2269 
2270 	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2271 	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2272 		NULL, NULL);
2273 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2274 	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2275 	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2276 
2277 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2278 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2279 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2280 	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
2281 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2282 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2283 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
2284 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2285 		NULL, NULL);
2286 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
2287 		 NULL, NULL);
2288 	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
2289 	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
2290 	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
2291 	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
2292 	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
2293 	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
2294 	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2295 	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2296 	MMIO_DFH(HSW_HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2297 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2298 
2299 	/* display */
2300 	MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL,
2301 		pipeconf_mmio_write);
2302 	MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL,
2303 		pipeconf_mmio_write);
2304 	MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL,
2305 		pipeconf_mmio_write);
2306 	MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL,
2307 		pipeconf_mmio_write);
2308 	MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
2309 	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
2310 		reg50080_mmio_write);
2311 	MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
2312 	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
2313 		reg50080_mmio_write);
2314 	MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
2315 	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
2316 		reg50080_mmio_write);
2317 	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
2318 	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
2319 		reg50080_mmio_write);
2320 	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
2321 	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
2322 		reg50080_mmio_write);
2323 	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
2324 	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
2325 		reg50080_mmio_write);
2326 
2327 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
2328 		gmbus_mmio_write);
2329 	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
2330 
2331 	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2332 	       dp_aux_ch_ctl_mmio_write);
2333 	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2334 	       dp_aux_ch_ctl_mmio_write);
2335 	MMIO_F(PCH_DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
2336 	       dp_aux_ch_ctl_mmio_write);
2337 
2338 	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
2339 
2340 	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
2341 	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
2342 
2343 	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
2344 	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
2345 	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
2346 	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2347 	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2348 	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2349 	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
2350 	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
2351 	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
2352 	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
2353 	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
2354 	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
2355 	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
2356 	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
2357 	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
2358 	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
2359 
2360 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
2361 		PORTA_HOTPLUG_STATUS_MASK
2362 		| PORTB_HOTPLUG_STATUS_MASK
2363 		| PORTC_HOTPLUG_STATUS_MASK
2364 		| PORTD_HOTPLUG_STATUS_MASK,
2365 		NULL, NULL);
2366 
2367 	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
2368 	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
2369 	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
2370 	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
2371 	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
2372 
2373 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_A), 6 * 4, 0, 0, 0, D_ALL, NULL,
2374 	       dp_aux_ch_ctl_mmio_write);
2375 
2376 	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2377 	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2378 	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2379 	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2380 	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
2381 
2382 	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
2383 	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
2384 	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
2385 	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
2386 	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
2387 
2388 	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
2389 	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
2390 	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
2391 	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
2392 	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
2393 
2394 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
2395 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
2396 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
2397 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
2398 
2399 	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
2400 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2401 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2402 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2403 	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
2404 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
2405 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
2406 	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
2407 	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
2408 	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
2409 	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
2410 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
2411 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
2412 
2413 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
2414 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
2415 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
2416 
2417 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
2418 	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
2419 
2420 	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2421 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
2422 
2423 	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
2424 	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2425 	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2426 	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2427 	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
2428 	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
2429 
2430 	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2431 	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2432 	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2433 	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2434 
2435 	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2436 	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2437 	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2438 
2439 	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2440 	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2441 	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2442 	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2443 	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2444 	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2445 	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2446 	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2447 	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2448 	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2449 	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2450 	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2451 	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2452 	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2453 	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2454 	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2455 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2456 
2457 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2458 	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
2459 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2460 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2461 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
2462 	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2463 	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2464 	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2465 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2466 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2467 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2468 
2469 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2470 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
2471 	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
2472 
2473 	return 0;
2474 }
2475 
2476 static int init_bdw_mmio_info(struct intel_gvt *gvt)
2477 {
2478 	int ret;
2479 
2480 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2481 	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2482 	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2483 
2484 	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2485 	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2486 	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2487 
2488 	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2489 	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2490 	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2491 
2492 	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2493 	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2494 	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2495 
2496 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
2497 		intel_vgpu_reg_imr_handler);
2498 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
2499 		intel_vgpu_reg_ier_handler);
2500 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
2501 		intel_vgpu_reg_iir_handler);
2502 
2503 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
2504 		intel_vgpu_reg_imr_handler);
2505 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
2506 		intel_vgpu_reg_ier_handler);
2507 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
2508 		intel_vgpu_reg_iir_handler);
2509 
2510 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
2511 		intel_vgpu_reg_imr_handler);
2512 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
2513 		intel_vgpu_reg_ier_handler);
2514 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
2515 		intel_vgpu_reg_iir_handler);
2516 
2517 	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2518 	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2519 	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2520 
2521 	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2522 	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2523 	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2524 
2525 	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2526 	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2527 	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
2528 
2529 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2530 		intel_vgpu_reg_master_irq_handler);
2531 
2532 	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
2533 		mmio_read_from_hw, NULL);
2534 
2535 #define RING_REG(base) _MMIO((base) + 0xd0)
2536 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
2537 		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2538 		ring_reset_ctl_write);
2539 #undef RING_REG
2540 
2541 #define RING_REG(base) _MMIO((base) + 0x230)
2542 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
2543 #undef RING_REG
2544 
2545 #define RING_REG(base) _MMIO((base) + 0x234)
2546 	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
2547 		NULL, NULL);
2548 #undef RING_REG
2549 
2550 #define RING_REG(base) _MMIO((base) + 0x244)
2551 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2552 #undef RING_REG
2553 
2554 #define RING_REG(base) _MMIO((base) + 0x370)
2555 	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
2556 #undef RING_REG
2557 
2558 #define RING_REG(base) _MMIO((base) + 0x3a0)
2559 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2560 #undef RING_REG
2561 
2562 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
2563 
2564 #define RING_REG(base) _MMIO((base) + 0x270)
2565 	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
2566 #undef RING_REG
2567 
2568 	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
2569 
2570 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2571 
2572 	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2573 		NULL, NULL);
2574 	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2575 		NULL, NULL);
2576 	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2577 
2578 	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2579 	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
2580 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2581 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
2582 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
2583 
2584 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
2585 		D_BDW_PLUS, NULL, force_nonpriv_write);
2586 
2587 	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
2588 
2589 	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
2590 
2591 	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2592 	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2593 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2594 	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2595 
2596 	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
2597 
2598 	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2599 	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2600 	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2601 	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2602 	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2603 	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2604 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2605 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2606 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2607 	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2608 	return 0;
2609 }
2610 
2611 static int init_skl_mmio_info(struct intel_gvt *gvt)
2612 {
2613 	int ret;
2614 
2615 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2616 	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
2617 	MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2618 	MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
2619 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2620 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
2621 
2622 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2623 						dp_aux_ch_ctl_mmio_write);
2624 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2625 						dp_aux_ch_ctl_mmio_write);
2626 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2627 						dp_aux_ch_ctl_mmio_write);
2628 
2629 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
2630 
2631 	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
2632 
2633 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2634 	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2635 	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
2636 	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2637 	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
2638 	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
2639 
2640 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2641 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2642 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2643 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2644 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2645 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2646 
2647 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2648 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2649 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2650 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2651 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2652 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2653 
2654 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
2655 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
2656 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
2657 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
2658 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
2659 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
2660 
2661 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2662 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2663 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2664 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2665 
2666 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2667 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2668 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2669 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2670 
2671 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2672 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2673 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2674 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2675 
2676 	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
2677 	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
2678 	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
2679 
2680 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2681 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2682 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2683 
2684 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2685 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2686 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2687 
2688 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2689 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2690 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2691 
2692 	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
2693 	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
2694 	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
2695 
2696 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2697 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2698 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2699 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2700 
2701 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2702 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2703 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2704 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2705 
2706 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2707 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2708 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2709 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2710 
2711 	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2712 	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2713 	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2714 	MMIO_DH(PLANE_AUX_DIST(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2715 
2716 	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2717 	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2718 	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2719 	MMIO_DH(PLANE_AUX_DIST(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2720 
2721 	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2722 	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2723 	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2724 	MMIO_DH(PLANE_AUX_DIST(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2725 
2726 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
2727 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
2728 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
2729 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
2730 
2731 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
2732 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
2733 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
2734 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
2735 
2736 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
2737 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
2738 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
2739 	MMIO_DH(PLANE_AUX_OFFSET(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
2740 
2741 	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2742 
2743 	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2744 		NULL, NULL);
2745 	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
2746 		NULL, NULL);
2747 
2748 	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
2749 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2750 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2751 		NULL, NULL);
2752 
2753 	/* TRTT */
2754 	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2755 	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2756 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2757 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2758 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2759 	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
2760 		 NULL, gen9_trtte_write);
2761 	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
2762 		 NULL, gen9_trtt_chicken_write);
2763 
2764 	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2765 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
2766 
2767 #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
2768 	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2769 		      NULL, csfe_chicken1_mmio_write);
2770 #undef CSFE_CHICKEN1_REG
2771 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2772 		 NULL, NULL);
2773 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2774 		 NULL, NULL);
2775 
2776 	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
2777 	MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2778 
2779 	return 0;
2780 }
2781 
2782 static int init_bxt_mmio_info(struct intel_gvt *gvt)
2783 {
2784 	int ret;
2785 
2786 	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
2787 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
2788 		NULL, bxt_phy_ctl_family_write);
2789 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
2790 		NULL, bxt_phy_ctl_family_write);
2791 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
2792 		NULL, bxt_port_pll_enable_write);
2793 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
2794 		NULL, bxt_port_pll_enable_write);
2795 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
2796 		bxt_port_pll_enable_write);
2797 
2798 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
2799 		NULL, bxt_pcs_dw12_grp_write);
2800 	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT,
2801 		bxt_port_tx_dw3_read, NULL);
2802 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
2803 		NULL, bxt_pcs_dw12_grp_write);
2804 	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT,
2805 		bxt_port_tx_dw3_read, NULL);
2806 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
2807 		NULL, bxt_pcs_dw12_grp_write);
2808 	MMIO_DH(BXT_PORT_TX_DW3_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT,
2809 		bxt_port_tx_dw3_read, NULL);
2810 	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
2811 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
2812 	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2813 	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
2814 	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2815 	       0, 0, D_BXT, NULL, NULL);
2816 	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2817 	       0, 0, D_BXT, NULL, NULL);
2818 	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2819 	       0, 0, D_BXT, NULL, NULL);
2820 	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
2821 	       0, 0, D_BXT, NULL, NULL);
2822 
2823 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
2824 
2825 	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
2826 
2827 	return 0;
2828 }
2829 
2830 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2831 					      unsigned int offset)
2832 {
2833 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2834 	int num = gvt->mmio.num_mmio_block;
2835 	int i;
2836 
2837 	for (i = 0; i < num; i++, block++) {
2838 		if (offset >= i915_mmio_reg_offset(block->offset) &&
2839 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
2840 			return block;
2841 	}
2842 	return NULL;
2843 }
2844 
2845 /**
2846  * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
2847  * @gvt: GVT device
2848  *
2849  * This function is called at the driver unloading stage, to clean up the MMIO
2850  * information table of GVT device
2851  *
2852  */
2853 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2854 {
2855 	struct hlist_node *tmp;
2856 	struct intel_gvt_mmio_info *e;
2857 	int i;
2858 
2859 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
2860 		kfree(e);
2861 
2862 	kfree(gvt->mmio.mmio_block);
2863 	gvt->mmio.mmio_block = NULL;
2864 	gvt->mmio.num_mmio_block = 0;
2865 
2866 	vfree(gvt->mmio.mmio_attribute);
2867 	gvt->mmio.mmio_attribute = NULL;
2868 }
2869 
2870 static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
2871 		       u32 size)
2872 {
2873 	struct intel_gvt *gvt = iter->data;
2874 	struct intel_gvt_mmio_info *info, *p;
2875 	u32 start, end, i;
2876 
2877 	if (WARN_ON(!IS_ALIGNED(offset, 4)))
2878 		return -EINVAL;
2879 
2880 	start = offset;
2881 	end = offset + size;
2882 
2883 	for (i = start; i < end; i += 4) {
2884 		p = intel_gvt_find_mmio_info(gvt, i);
2885 		if (p) {
2886 			WARN(1, "dup mmio definition offset %x\n", i);
2887 
2888 			/* Return -EEXIST here to make the GVT-g load fail,
2889 			 * so that duplicated MMIO definitions are caught
2890 			 * as early as possible.
2891 			 */
2892 			return -EEXIST;
2893 		}
2894 
2895 		info = kzalloc_obj(*info);
2896 		if (!info)
2897 			return -ENOMEM;
2898 
2899 		info->offset = i;
2900 		info->read = intel_vgpu_default_mmio_read;
2901 		info->write = intel_vgpu_default_mmio_write;
2902 		INIT_HLIST_NODE(&info->node);
2903 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
2904 		gvt->mmio.num_tracked_mmio++;
2905 	}
2906 	return 0;
2907 }
2908 
2909 static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
2910 			     u32 offset, u32 size)
2911 {
2912 	struct intel_gvt *gvt = iter->data;
2913 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2914 	void *ret;
2915 
2916 	ret = krealloc(block,
2917 			 (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
2918 			 GFP_KERNEL);
2919 	if (!ret)
2920 		return -ENOMEM;
2921 
2922 	gvt->mmio.mmio_block = block = ret;
2923 
2924 	block += gvt->mmio.num_mmio_block;
2925 
2926 	memset(block, 0, sizeof(*block));
2927 
2928 	block->offset = _MMIO(offset);
2929 	block->size = size;
2930 
2931 	gvt->mmio.num_mmio_block++;
2932 
2933 	return 0;
2934 }
2935 
2936 static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
2937 			  u32 size)
2938 {
2939 	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
2940 		return handle_mmio(iter, offset, size);
2941 	else
2942 		return handle_mmio_block(iter, offset, size);
2943 }
2944 
2945 static int init_mmio_info(struct intel_gvt *gvt)
2946 {
2947 	struct intel_gvt_mmio_table_iter iter = {
2948 		.i915 = gvt->gt->i915,
2949 		.data = gvt,
2950 		.handle_mmio_cb = handle_mmio_cb,
2951 	};
2952 
2953 	return intel_gvt_iterate_mmio_table(&iter);
2954 }
2955 
2956 static int init_mmio_block_handlers(struct intel_gvt *gvt)
2957 {
2958 	struct gvt_mmio_block *block;
2959 
2960 	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
2961 	if (!block) {
2962 		WARN(1, "fail to assign handlers to mmio block %x\n",
2963 		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
2964 		return -ENODEV;
2965 	}
2966 
2967 	block->read = pvinfo_mmio_read;
2968 	block->write = pvinfo_mmio_write;
2969 
2970 	return 0;
2971 }
2972 
/**
 * intel_gvt_setup_mmio_info - set up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage to set up the MMIO
 * information table of the GVT device.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct drm_i915_private *i915 = gvt->gt->i915;
	/* one attribute entry per 4-byte register */
	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
	int ret;

	gvt->mmio.mmio_attribute = vzalloc(size);
	if (!gvt->mmio.mmio_attribute)
		return -ENOMEM;

	ret = init_mmio_info(gvt);
	if (ret)
		goto err;

	ret = init_mmio_block_handlers(gvt);
	if (ret)
		goto err;

	ret = init_generic_mmio_info(gvt);
	if (ret)
		goto err;

	/* Later platforms layer their tables on top of the BDW (and SKL) ones. */
	if (IS_BROADWELL(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_SKYLAKE(i915) ||
		   IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_BROXTON(i915)) {
		ret = init_bdw_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_bxt_mmio_info(gvt);
		if (ret)
			goto err;
	}

	return 0;
err:
	intel_gvt_clean_mmio_info(gvt);
	return ret;
}

/**
 * intel_gvt_for_each_tracked_mmio - iterate over each tracked MMIO offset
 * @gvt: a GVT device
 * @handler: callback invoked for each tracked MMIO offset
 * @data: private data passed to @handler
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
	void *data)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	struct intel_gvt_mmio_info *e;
	int i, j, ret;

	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
		ret = handler(gvt, e->offset, data);
		if (ret)
			return ret;
	}

	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
		/* pvinfo data doesn't come from hw mmio */
		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
			continue;

		for (j = 0; j < block->size; j += 4) {
			ret = handler(gvt,
				      i915_mmio_reg_offset(block->offset) + j,
				      data);
			if (ret)
				return ret;
		}
	}
	return 0;
}

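/*
 * Illustrative sketch, not part of the driver: a minimal handler for
 * intel_gvt_for_each_tracked_mmio() that simply counts the offsets it
 * is invoked with, e.g.:
 *
 *	u32 n = 0;
 *	intel_gvt_for_each_tracked_mmio(gvt, example_count_mmio, &n);
 */
static int __maybe_unused example_count_mmio(struct intel_gvt *gvt,
					     u32 offset, void *data)
{
	(*(u32 *)data)++;

	return 0; /* a non-zero return would abort the iteration */
}
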
/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_default_mmio_write() - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_mask_mmio_write - write a masked register
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * The upper 16 bits of the written value select which of the lower 16
 * bits are actually updated.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 mask, old_vreg;

	old_vreg = vgpu_vreg(vgpu, offset);
	write_vreg(vgpu, offset, p_data, bytes);
	mask = vgpu_vreg(vgpu, offset) >> 16;
	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
				  (vgpu_vreg(vgpu, offset) & mask);

	return 0;
}

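/*
 * Worked example (illustrative) of the masked-register semantics above:
 * starting from vreg == 0x00000000, a write of 0x00010001 selects bit 0
 * via the upper-half mask (0x0001) and sets it, yielding 0x00000001,
 * while a write of 0x00010000 selects bit 0 and clears it. Bits whose
 * mask bit is not set keep their previous value.
 */
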
/**
 * intel_vgpu_mmio_reg_rw - emulate tracked MMIO registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: true for a read access, false for a write
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
			   void *pdata, unsigned int bytes, bool is_read)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio_info;
	struct gvt_mmio_block *mmio_block;
	gvt_mmio_func func;
	u32 old_vreg = 0;
	u64 data = 0;
	u64 ro_mask;
	int ret;

	if (drm_WARN_ON(&i915->drm, bytes > 8))
		return -EINVAL;

	/* Handle special MMIO blocks. */
	mmio_block = find_mmio_block(gvt, offset);
	if (mmio_block) {
		func = is_read ? mmio_block->read : mmio_block->write;
		if (func)
			return func(vgpu, offset, pdata, bytes);
		goto default_rw;
	}

	/* Normal tracked MMIOs. */
	mmio_info = intel_gvt_find_mmio_info(gvt, offset);
	if (!mmio_info) {
		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
		goto default_rw;
	}

	if (is_read)
		return mmio_info->read(vgpu, offset, pdata, bytes);

	ro_mask = mmio_info->ro_mask;

	if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset))
		old_vreg = vgpu_vreg(vgpu, offset);

	if (likely(!ro_mask)) {
		ret = mmio_info->write(vgpu, offset, pdata, bytes);
	} else if (!~ro_mask) {
		gvt_vgpu_err("try to write RO reg %x\n", offset);
		return 0;
	} else {
		/* keep the RO bits in the virtual register */
		memcpy(&data, pdata, bytes);
		data &= ~ro_mask;
		data |= vgpu_vreg(vgpu, offset) & ro_mask;
		ret = mmio_info->write(vgpu, offset, &data, bytes);
	}

	/* higher 16 bits of mode ctl regs are mask bits for change */
	if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
		u32 mask = vgpu_vreg(vgpu, offset) >> 16;

		vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
					  (vgpu_vreg(vgpu, offset) & mask);
	}

	return ret;

default_rw:
	return is_read ?
		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}

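/*
 * Worked example (illustrative) of the ro_mask handling above: for a
 * register with ro_mask == 0x0000ffff whose vreg holds 0x1234abcd, a
 * guest write of 0xffffffff is merged to 0xffffabcd before it reaches
 * the tracked write handler, so the read-only low half keeps its vreg
 * value. A register whose ro_mask is all ones (!~ro_mask) is fully
 * read-only and the write is dropped with only an error message.
 */
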
/* Re-program each vGPU's fence registers from its vreg state. */
void intel_gvt_restore_fence(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int i, id;

	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
		intel_wakeref_t wakeref;

		wakeref = mmio_hw_access_pre(gvt->gt);
		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
			intel_vgpu_write_fence(vgpu, i,
					       vgpu_vreg64(vgpu, fence_num_to_offset(i)));
		mmio_hw_access_post(gvt->gt, wakeref);
	}
}

static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
	struct intel_vgpu *vgpu = data;
	struct drm_i915_private *dev_priv = gvt->gt->i915;

	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
		intel_uncore_write(&dev_priv->uncore, _MMIO(offset),
				   vgpu_vreg(vgpu, offset));

	return 0;
}

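/*
 * Illustrative note: mmio_attribute[] holds one attribute entry per
 * 4-byte register, hence the "offset >> 2" index above; e.g. register
 * offset 0x2030 maps to attribute slot 0x80c. F_PM_SAVE marks registers
 * whose vGPU register values must be written back to the hardware when
 * resuming from power management, as done below.
 */
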
/* Write back all F_PM_SAVE-marked registers for every vGPU. */
void intel_gvt_restore_mmio(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	idr_for_each_entry(&gvt->vgpu_idr, vgpu, id) {
		intel_wakeref_t wakeref;

		wakeref = mmio_hw_access_pre(gvt->gt);
		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
		mmio_hw_access_post(gvt->gt, wakeref);
	}
}