/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#define INVALID_OP    (~0U)

#define OP_LEN_MI           9
#define OP_LEN_2D           10
#define OP_LEN_3D_MEDIA     16
#define OP_LEN_MFX_VC       16
#define OP_LEN_VEBOX	    16

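/*
 * Bits 31:29 of a command DWord select its type, which in turn picks the
 * per-ring decode table (ring_decode_info[] below): type 0 is MI on every
 * ring, type 2 is 2D on the blitter ring, and type 3 is 3D/Media, MFX or
 * VEBOX depending on the ring.
 */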
#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)

struct sub_op_bits {
	int hi;
	int low;
};
struct decode_info {
	char *name;
	int op_len;
	int nr_sub_op;
	struct sub_op_bits *sub_op;
};

#define   MAX_CMD_BUDGET			0x7fffffff
#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)

#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_MATH                          0x1A
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
#define OP_MI_2E			    0x2E  /* BDW+ */
#define OP_MI_2F			    0x2F  /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
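/*
 * MI_BATCH_BUFFER_START layout as consumed by the masks above
 * (illustrative, derived from this file rather than a full spec):
 * DWord 0 carries the opcode, the address-space select in bit 8
 * (0 = GGTT, 1 = PPGTT) and the 2nd-level flag in bit 22; DWord 1 holds
 * address bits 31:2 and, when the device uses 64-bit gmadr, DWord 2
 * holds the upper 16 address bits.
 */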

/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2<<7) | (x))

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
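/*
 * Worked example: OP_PIPE_CONTROL below is OP_3D_MEDIA(0x3, 0x2, 0x00)
 * = (3 << 13) | (3 << 11) | (2 << 8) = 0x7a00, i.e. the 16 opcode bits
 * (command DWord bits 31:16) that get_opcode() extracts on the render
 * ring.
 */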

#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY			OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY			OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VEB command definitions below are taken from the vaapi
 * intel-driver project (BSD License):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))
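/*
 * Worked example: OP_MFX_PIPE_MODE_SELECT below is OP_MFX(2, 0, 0, 0)
 * = (3 << 13) | (2 << 11) = 0x7000; decode_info_mfx_vc maps the sub_opa
 * and sub_opb fields to command DWord bits 23:21 and 20:16.
 */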

#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS   7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
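/*
 * Example: a command carrying one graphics address in DWords 1-2
 * (low/high) is described with ADDR_FIX_1(1), since each address takes
 * a single bit regardless of its width; two separate addresses in
 * DWords 1 and 3 would use ADDR_FIX_2(1, 3) == 0xa.
 */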

struct cmd_info {
	char *name;
	u32 opcode;

#define F_LEN_MASK	(1U<<0)
#define F_LEN_CONST  1U
#define F_LEN_VAR    0U

/*
 * the command has its own ip advance logic,
 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<1)

#define F_POST_HANDLE	(1<<2)
	u32 flag;

#define R_RCS	(1 << RCS)
#define R_VCS1  (1 << VCS)
#define R_VCS2  (1 << VCS2)
#define R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	(1 << BCS)
#define R_VECS	(1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	uint16_t rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	uint16_t devices;

	/* which DWords are addresses that need to be fixed up.
	 * A 0 bit marks a 32-bit non-address operand in the command,
	 * a 1 bit marks an address operand, which can be 32-bit or
	 * 64-bit depending on the hardware (defined by
	 * "gmadr_bytes_in_cmd" in intel_gvt). No matter the address
	 * length, each address takes only one bit in the bitmap.
	 */
	uint16_t addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : number of length bias bits
	 * Note: length is in DWords
	 */
	uint8_t	len;

	parser_cmd_handler handler;
};
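/*
 * A hypothetical entry, for illustration only (not copied from the real
 * command table later in this file): a variable-length MI command with
 * one address in DWord 1, valid on all rings, might be declared as
 *
 *	{ .name = "cmd_foo", .opcode = OP_MI_STORE_DATA_IMM,
 *	  .flag = F_LEN_VAR, .rings = R_ALL, .devices = 0xffff,
 *	  .addr_bitmap = ADDR_FIX_1(1), .len = 10,
 *	  .handler = cmd_handler_mi_store_data_imm },
 */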

struct cmd_entry {
	struct hlist_node hlist;
	struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	int ring_id;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of ip_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;
	/* next instruction when returning from the batch buffer to the ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when returning from the 2nd level batch buffer to the batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when returning from a 2nd level batch buffer
	 */
	int saved_buf_addr_type;

	struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};

#define gmadr_dw_number(s)	\
	((s)->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};

static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS2] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};

static inline u32 get_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}
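/*
 * Worked example: MI_BATCH_BUFFER_START has DWord 0 == 0x31 << 23 (plus
 * flag bits), i.e. 0x18800000. CMD_TYPE() yields 0, selecting
 * decode_info_mi with op_len 9, so get_opcode() returns
 * 0x18800000 >> 23 == 0x31 == OP_MI_BATCH_BUFFER_START.
 */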

static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
		unsigned int opcode, int ring_id)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if ((opcode == e->info->opcode) &&
				(e->info->rings & (1 << ring_id)))
			return e->info;
	}
	return NULL;
}

static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
		u32 cmd, int ring_id)
{
	u32 opcode;

	opcode = get_opcode(cmd, ring_id);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, ring_id);
}

static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}

static inline void print_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;
	int i;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
			cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					d_info->sub_op[i].low));

	pr_err("\n");
}

static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}

static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
			s->ring_head, s->ring_tail);

	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
			s->buf_type == RING_BUFFER_INSTRUCTION ?
			"RING_BUFFER" : "BATCH_BUFFER",
			s->buf_addr_type == GTT_BUFFER ?
			"GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_dbg_cmd(" ip_va(NULL)");
		return;
	}

	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->ring_id);

	/* print the whole page to trace */
	pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		pr_err("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			pr_err("%08x ", cmd_val(s, i));
		pr_err("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8;
	}
}

static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
					s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else {
			len = (s->ip_gma - s->ring_head);
		}

		s->ip_va = s->rb_va + len;
	} else { /* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}

static inline int ip_gma_set(struct parser_exec_state *s,
		unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
		unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}

	return 0;
}

static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}
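/*
 * Example: for an F_LEN_VAR command with info->len == 6, the low six
 * bits of DWord 0 hold the hardware DWord Length field, which is biased
 * by two; a field value of 2 therefore decodes to a 4-DWord command.
 */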

static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this, some platform may need clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)

static bool is_shadowed_mmio(unsigned int offset)
{
	bool ret = false;

	if ((offset == 0x2168) || /* BB current head register UDW */
	    (offset == 0x2140) || /* BB current head register */
	    (offset == 0x211c) || /* second BB head register UDW */
	    (offset == 0x2114)) { /* second BB head register */
		ret = true;
	}
	return ret;
}

static inline bool is_force_nonpriv_mmio(unsigned int offset)
{
	return (offset >= 0x24d0 && offset < 0x2500);
}

static int force_nonpriv_reg_handler(struct parser_exec_state *s,
				     unsigned int offset, unsigned int index)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	unsigned int data = cmd_val(s, index + 1);

	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
			offset, data);
		return -EINVAL;
	}
	return 0;
}

static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EINVAL;
	}

	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return 0;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
		return 0;
	}

	if (is_force_nonpriv_mmio(offset) &&
	    force_nonpriv_reg_handler(s, offset, index))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(DERRMR) ||
		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
	return 0;
}

#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))
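/*
 * MI_LOAD_REGISTER_IMM is a header DWord followed by (register offset,
 * value) pairs, hence the i += 2 stride in cmd_handler_lri() below;
 * cmd_reg() masks the offset field (bits 22:2) out of the offset DWord.
 */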

static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);
	struct intel_gvt *gvt = s->vgpu->gvt;

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(gvt->dev_priv) &&
				(s->ring_id != RCS)) {
			if (s->ring_id == BCS &&
					cmd_reg(s, i) ==
					i915_mmio_reg_offset(DERRMR))
				ret |= 0; /* DERRMR writes from BCS are allowed */
			else
				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
	}
	return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
			ret |= ((cmd_reg_inhibit(s, i) ||
					(cmd_reg_inhibit(s, i + 1)))) ?
				-EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
	}
	return ret;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(gvt->dev_priv))
			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS2] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};

static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
						index_mode);
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
				s->workload->pending_events);
	return 0;
}
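/*
 * Post-sync op encoding in PIPE_CONTROL DWord 1 (bits 15:14), as
 * consumed above: 0 = none, 1 = write immediate data to the address in
 * DWord 2/3 (audited), while 2 and 3 route the write through a render
 * register (0x2350 and 0x2358 respectively), which is why those two
 * cases go through cmd_reg_handler() instead of the address audit.
 */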

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
			s->workload->pending_events);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}

struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
		return -EINVAL;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(info->pipe);
		info->stride_reg = DSPSTRIDE(info->pipe);
		info->surf_reg = DSPSURF(info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	return 0;
}

static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;

	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;

	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EINVAL;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(info->pipe);
	info->stride_reg = DSPSTRIDE(info->pipe);
	info->surf_reg = DSPSURF(info->pipe);

	return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
				GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
				GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
	intel_vgpu_trigger_virtual_event(vgpu, info->event);
	return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv))
		return gen8_decode_mi_display_flip(s, info);
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv)
		|| IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv))
		return gen8_check_mi_display_flip(s, info);
	return -ENODEV;
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv)
		|| IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv))
		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
	return -ENODEV;
}

static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
		return INTEL_GVT_INVALID_ADDR;

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			s->info->name);
		return -EINVAL;
	}

	if (index_mode) {
		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
			ret = -EINVAL;
			goto err;
		}
	} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
			(!vgpu_gmadr_is_valid(s->vgpu,
					      guest_gma + op_size - 1))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
			vgpu->id,
			vgpu_aperture_gmadr_base(vgpu),
			vgpu_aperture_gmadr_end(vgpu),
			vgpu_hidden_gmadr_base(vgpu),
			vgpu_hidden_gmadr_end(vgpu));
	return ret;
}

static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}

static inline int unexpected_cmd(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

	return -EINVAL;
}

static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}

static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_conditional_batch_buffer_end(
		struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	int ret = 0;

	/* Check post-sync and ppgtt bit */
	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
		gma = cmd_val(s, 1) & GENMASK(31, 3);
		if (gmadr_bytes == 8)
			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
		/* Store Data Index */
		if (cmd_val(s, 0) & (1 << 21))
			index_mode = true;
		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
	}
	/* Check notify bit */
	if ((cmd_val(s, 0) & (1 << 8)))
		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
				s->workload->pending_events);
	return ret;
}

static void addr_type_update_snb(struct parser_exec_state *s)
{
	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
		s->buf_addr_type = PPGTT_BUFFER;
	}
}

static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long end_gma, void *va)
{
	unsigned long copy_len, offset;
	unsigned long len = 0;
	unsigned long gpa;

	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

		offset = gma & (GTT_PAGE_SIZE - 1);

		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
			GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
	}
	return len;
}
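/*
 * copy_gma_to_hva() walks [gma, end_gma) one GTT page at a time: e.g. a
 * 12-byte read starting 8 bytes before a page boundary is split into an
 * 8-byte and a 4-byte chunk, each translated through
 * intel_vgpu_gma_to_gpa() separately, since contiguous graphics
 * addresses need not map to contiguous guest physical pages.
 */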
1560 
1561 
1562 /*
1563  * Check whether a batch buffer needs to be scanned. Currently
1564  * the only criteria is based on privilege.
1565  */
1566 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1567 {
1568 	struct intel_gvt *gvt = s->vgpu->gvt;
1569 
1570 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
1571 		|| IS_KABYLAKE(gvt->dev_priv)) {
1572 		/* BDW decides privilege based on address space */
1573 		if (cmd_val(s, 0) & (1 << 8))
1574 			return 0;
1575 	}
1576 	return 1;
1577 }
1578 
1579 static uint32_t find_bb_size(struct parser_exec_state *s)
1580 {
1581 	unsigned long gma = 0;
1582 	struct cmd_info *info;
1583 	uint32_t bb_size = 0;
1584 	uint32_t cmd_len = 0;
1585 	bool met_bb_end = false;
1586 	struct intel_vgpu *vgpu = s->vgpu;
1587 	u32 cmd;
1588 
1589 	/* get the start gm address of the batch buffer */
1590 	gma = get_gma_bb_from_cmd(s, 1);
1591 	cmd = cmd_val(s, 0);
1592 
1593 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1594 	if (info == NULL) {
1595 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1596 				cmd, get_opcode(cmd, s->ring_id));
1597 		return -EINVAL;
1598 	}
1599 	do {
1600 		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1601 				gma, gma + 4, &cmd);
1602 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1603 		if (info == NULL) {
1604 			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1605 				cmd, get_opcode(cmd, s->ring_id));
1606 			return -EINVAL;
1607 		}
1608 
1609 		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1610 			met_bb_end = true;
1611 		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1612 			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
1613 				/* chained batch buffer */
1614 				met_bb_end = true;
1615 			}
1616 		}
1617 		cmd_len = get_cmd_length(info, cmd) << 2;
1618 		bb_size += cmd_len;
1619 		gma += cmd_len;
1620 
1621 	} while (!met_bb_end);
1622 
1623 	return bb_size;
1624 }
1625 
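/*
 * Shadow one guest batch buffer: size it with find_bb_size(), allocate a
 * shadow GEM object of that size, copy the guest contents into it, and
 * then retarget the parser so ip_va walks the shadow copy while ip_gma
 * keeps tracking the guest buffer.
 */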
1626 static int perform_bb_shadow(struct parser_exec_state *s)
1627 {
1628 	struct intel_shadow_bb_entry *entry_obj;
1629 	struct intel_vgpu *vgpu = s->vgpu;
1630 	unsigned long gma = 0;
1631 	int bb_size;
1632 	void *dst = NULL;
1633 	int ret = 0;
1634 
1635 	/* get the start gm address of the batch buffer */
1636 	gma = get_gma_bb_from_cmd(s, 1);
1637 
1638 	/* get the size of the batch buffer */
1639 	bb_size = find_bb_size(s);
	if (bb_size < 0)
		return -EINVAL;
1640 
1641 	/* allocate shadow batch buffer */
1642 	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
1643 	if (entry_obj == NULL)
1644 		return -ENOMEM;
1645 
1646 	entry_obj->obj =
1647 		i915_gem_object_create(s->vgpu->gvt->dev_priv,
1648 				       roundup(bb_size, PAGE_SIZE));
1649 	if (IS_ERR(entry_obj->obj)) {
1650 		ret = PTR_ERR(entry_obj->obj);
1651 		goto free_entry;
1652 	}
1653 	entry_obj->len = bb_size;
1654 	INIT_LIST_HEAD(&entry_obj->list);
1655 
1656 	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
1657 	if (IS_ERR(dst)) {
1658 		ret = PTR_ERR(dst);
1659 		goto put_obj;
1660 	}
1661 
1662 	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1663 	if (ret) {
1664 		gvt_vgpu_err("failed to set shadow batch to CPU\n");
1665 		goto unmap_src;
1666 	}
1667 
1668 	entry_obj->va = dst;
1669 	entry_obj->bb_start_cmd_va = s->ip_va;
1670 
1671 	/* copy batch buffer to shadow batch buffer */
1672 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1673 			      gma, gma + bb_size,
1674 			      dst);
1675 	if (ret < 0) {
1676 		gvt_vgpu_err("fail to copy guest batch buffer\n");
1677 		goto unmap_src;
1678 	}
1679 
1680 	list_add(&entry_obj->list, &s->workload->shadow_bb);
1681 	/*
1682 	 * ip_va saves the virtual address of the shadow batch buffer, while
1683 	 * ip_gma saves the graphics address of the original batch buffer.
1684 	 * As the shadow batch buffer is just a copy of the original one,
1685 	 * it is safe to use the shadow batch buffer's va together with the
1686 	 * original batch buffer's gma. After all, we don't want to pin the
1687 	 * shadow buffer here (too early).
1688 	 */
1689 	s->ip_va = dst;
1690 	s->ip_gma = gma;
1691 
1692 	return 0;
1693 
1694 unmap_src:
1695 	i915_gem_object_unpin_map(entry_obj->obj);
1696 put_obj:
1697 	i915_gem_object_put(entry_obj->obj);
1698 free_entry:
1699 	kfree(entry_obj);
1700 	return ret;
1701 }
1702 
1703 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1704 {
1705 	bool second_level;
1706 	int ret = 0;
1707 	struct intel_vgpu *vgpu = s->vgpu;
1708 
1709 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1710 		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1711 		return -EINVAL;
1712 	}
1713 
1714 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1715 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1716 		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1717 		return -EINVAL;
1718 	}
1719 
1720 	s->saved_buf_addr_type = s->buf_addr_type;
1721 	addr_type_update_snb(s);
1722 	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1723 		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1724 		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1725 	} else if (second_level) {
1726 		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1727 		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1728 		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1729 	}
1730 
1731 	if (batch_buffer_needs_scan(s)) {
1732 		ret = perform_bb_shadow(s);
1733 		if (ret < 0)
1734 			gvt_vgpu_err("invalid shadow batch buffer\n");
1735 	} else {
1736 		/* emulate a batch buffer end so the return is handled correctly */
1737 		ret = cmd_handler_mi_batch_buffer_end(s);
1738 		if (ret < 0)
1739 			return ret;
1740 	}
1741 
1742 	return ret;
1743 }
1744 
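/*
 * Field order below, matching the initializers: name, opcode, flags,
 * ring mask, device mask, address-fix bitmap (which dwords carry gmadrs
 * that must be audited), a length in dwords (exact for F_LEN_CONST
 * commands), and an optional post-decode handler.
 */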
1745 static struct cmd_info cmd_info[] = {
1746 	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1747 
1748 	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1749 		0, 1, NULL},
1750 
1751 	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1752 		0, 1, cmd_handler_mi_user_interrupt},
1753 
1754 	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1755 		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1756 
1757 	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1758 
1759 	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1760 		NULL},
1761 
1762 	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1763 		NULL},
1764 
1765 	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1766 		NULL},
1767 
1768 	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1769 		NULL},
1770 
1771 	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1772 		D_ALL, 0, 1, NULL},
1773 
1774 	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1775 		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1776 		cmd_handler_mi_batch_buffer_end},
1777 
1778 	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1779 		0, 1, NULL},
1780 
1781 	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1782 		NULL},
1783 
1784 	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1785 		D_ALL, 0, 1, NULL},
1786 
1787 	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1788 		NULL},
1789 
1790 	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1791 		NULL},
1792 
1793 	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1794 		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1795 
1796 	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1797 		0, 8, NULL},
1798 
1799 	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1800 
1801 	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1802 
1803 	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1804 		D_BDW_PLUS, 0, 8, NULL},
1805 
1806 	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1807 		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1808 
1809 	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1810 		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1811 
1812 	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1813 		0, 8, cmd_handler_mi_store_data_index},
1814 
1815 	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1816 		D_ALL, 0, 8, cmd_handler_lri},
1817 
1818 	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1819 		cmd_handler_mi_update_gtt},
1820 
1821 	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1822 		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1823 
1824 	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1825 		cmd_handler_mi_flush_dw},
1826 
1827 	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1828 		10, cmd_handler_mi_clflush},
1829 
1830 	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1831 		D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1832 
1833 	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1834 		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1835 
1836 	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1837 		D_ALL, 0, 8, cmd_handler_lrr},
1838 
1839 	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1840 		D_ALL, 0, 8, NULL},
1841 
1842 	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1843 		ADDR_FIX_1(2), 8, NULL},
1844 
1845 	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1846 		ADDR_FIX_1(2), 8, NULL},
1847 
1848 	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1849 		8, cmd_handler_mi_op_2e},
1850 
1851 	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1852 		8, cmd_handler_mi_op_2f},
1853 
1854 	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1855 		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1856 		cmd_handler_mi_batch_buffer_start},
1857 
1858 	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1859 		F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1860 		cmd_handler_mi_conditional_batch_buffer_end},
1861 
1862 	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1863 		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1864 
1865 	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1866 		ADDR_FIX_2(4, 7), 8, NULL},
1867 
1868 	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1869 		0, 8, NULL},
1870 
1871 	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1872 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1873 
1874 	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1875 
1876 	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1877 		0, 8, NULL},
1878 
1879 	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1880 		ADDR_FIX_1(3), 8, NULL},
1881 
1882 	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1883 		D_ALL, 0, 8, NULL},
1884 
1885 	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1886 		ADDR_FIX_1(4), 8, NULL},
1887 
1888 	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1889 		ADDR_FIX_2(4, 5), 8, NULL},
1890 
1891 	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1892 		ADDR_FIX_1(4), 8, NULL},
1893 
1894 	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1895 		ADDR_FIX_2(4, 7), 8, NULL},
1896 
1897 	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1898 		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1899 
1900 	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1901 
1902 	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1903 		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1904 
1905 	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1906 		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1907 
1908 	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1909 		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1910 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1911 
1912 	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1913 		D_ALL, ADDR_FIX_1(4), 8, NULL},
1914 
1915 	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1916 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1917 
1918 	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1919 		D_ALL, ADDR_FIX_1(4), 8, NULL},
1920 
1921 	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1922 		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1923 
1924 	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1925 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1926 
1927 	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1928 		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1929 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1930 
1931 	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1932 		ADDR_FIX_2(4, 5), 8, NULL},
1933 
1934 	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1935 		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1936 
1937 	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1938 		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1939 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1940 
1941 	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1942 		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1943 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1944 
1945 	{"3DSTATE_BLEND_STATE_POINTERS",
1946 		OP_3DSTATE_BLEND_STATE_POINTERS,
1947 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1948 
1949 	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1950 		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1951 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1952 
1953 	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
1954 		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1955 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1956 
1957 	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
1958 		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1959 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1960 
1961 	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
1962 		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1963 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1964 
1965 	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
1966 		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
1967 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1968 
1969 	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
1970 		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
1971 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1972 
1973 	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1974 		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
1975 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1976 
1977 	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
1978 		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
1979 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1980 
1981 	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
1982 		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
1983 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1984 
1985 	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
1986 		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
1987 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1988 
1989 	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
1990 		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
1991 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1992 
1993 	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
1994 		0, 8, NULL},
1995 
1996 	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
1997 		0, 8, NULL},
1998 
1999 	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2000 		0, 8, NULL},
2001 
2002 	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2003 		0, 8, NULL},
2004 
2005 	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2006 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2007 
2008 	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2009 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2010 
2011 	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2012 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2013 
2014 	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2015 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2016 
2017 	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2018 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2019 
2020 	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2021 		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2022 
2023 	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2024 		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2025 
2026 	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2027 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2028 
2029 	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2030 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2031 
2032 	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2033 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2034 
2035 	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2036 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2037 
2038 	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2039 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2040 
2041 	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2042 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2043 
2044 	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2045 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2046 
2047 	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2048 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2049 
2050 	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2051 		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2052 
2053 	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2054 		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2055 
2056 	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2057 		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2058 
2059 	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2060 		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2061 
2062 	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2063 		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2064 
2065 	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2066 		D_BDW_PLUS, 0, 8, NULL},
2067 
2068 	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2069 		NULL},
2070 
2071 	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2072 		D_BDW_PLUS, 0, 8, NULL},
2073 
2074 	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2075 		D_BDW_PLUS, 0, 8, NULL},
2076 
2077 	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2078 		8, NULL},
2079 
2080 	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2081 		R_RCS, D_BDW_PLUS, 0, 8, NULL},
2082 
2083 	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2084 		8, NULL},
2085 
2086 	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2087 		NULL},
2088 
2089 	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2090 		NULL},
2091 
2092 	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2093 		NULL},
2094 
2095 	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2096 		D_BDW_PLUS, 0, 8, NULL},
2097 
2098 	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2099 		R_RCS, D_ALL, 0, 8, NULL},
2100 
2101 	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2102 		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2103 
2104 	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2105 		R_RCS, D_ALL, 0, 1, NULL},
2106 
2107 	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2108 
2109 	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2110 		R_RCS, D_ALL, 0, 8, NULL},
2111 
2112 	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2113 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2114 
2115 	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2116 
2117 	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2118 
2119 	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2120 
2121 	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2122 		D_BDW_PLUS, 0, 8, NULL},
2123 
2124 	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2125 		D_BDW_PLUS, 0, 8, NULL},
2126 
2127 	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2128 		D_ALL, 0, 8, NULL},
2129 
2130 	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2131 		D_BDW_PLUS, 0, 8, NULL},
2132 
2133 	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2134 		D_BDW_PLUS, 0, 8, NULL},
2135 
2136 	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2137 
2138 	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2139 
2140 	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2141 
2142 	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2143 		D_ALL, 0, 8, NULL},
2144 
2145 	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2146 
2147 	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2148 
2149 	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2150 		R_RCS, D_ALL, 0, 8, NULL},
2151 
2152 	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2153 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2154 
2155 	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2156 		0, 8, NULL},
2157 
2158 	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2159 		D_ALL, ADDR_FIX_1(2), 8, NULL},
2160 
2161 	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2162 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2163 
2164 	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2165 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2166 
2167 	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2168 		D_ALL, 0, 8, NULL},
2169 
2170 	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2171 		D_ALL, 0, 8, NULL},
2172 
2173 	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2174 		D_ALL, 0, 8, NULL},
2175 
2176 	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2177 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2178 
2179 	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2180 		D_BDW_PLUS, 0, 8, NULL},
2181 
2182 	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2183 		D_ALL, ADDR_FIX_1(2), 8, NULL},
2184 
2185 	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2186 		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2187 
2188 	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2189 		R_RCS, D_ALL, 0, 8, NULL},
2190 
2191 	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2192 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2193 
2194 	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2195 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2196 
2197 	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2198 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2199 
2200 	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2201 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2202 
2203 	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2204 		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2205 
2206 	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2207 		R_RCS, D_ALL, 0, 8, NULL},
2208 
2209 	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2210 		D_ALL, 0, 9, NULL},
2211 
2212 	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2213 		ADDR_FIX_2(2, 4), 8, NULL},
2214 
2215 	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2216 		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2217 		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2218 
2219 	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2220 		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2221 
2222 	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2223 		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2224 		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2225 
2226 	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2227 		D_BDW_PLUS, 0, 8, NULL},
2228 
2229 	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2230 		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2231 
2232 	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2233 
2234 	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2235 		1, NULL},
2236 
2237 	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2238 		ADDR_FIX_1(1), 8, NULL},
2239 
2240 	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2241 
2242 	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2243 		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2244 
2245 	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2246 		ADDR_FIX_1(1), 8, NULL},
2247 
2248 	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2249 
2250 	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2251 
2252 	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2253 		0, 8, NULL},
2254 
2255 	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2256 		D_SKL_PLUS, 0, 8, NULL},
2257 
2258 	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2259 		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2260 
2261 	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2262 		0, 16, NULL},
2263 
2264 	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2265 		0, 16, NULL},
2266 
2267 	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2268 
2269 	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2270 		0, 16, NULL},
2271 
2272 	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2273 		0, 16, NULL},
2274 
2275 	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2276 		0, 16, NULL},
2277 
2278 	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2279 		0, 8, NULL},
2280 
2281 	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2282 		NULL},
2283 
2284 	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2285 		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2286 
2287 	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2288 		R_VCS, D_ALL, 0, 12, NULL},
2289 
2290 	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2291 		R_VCS, D_ALL, 0, 12, NULL},
2292 
2293 	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2294 		R_VCS, D_BDW_PLUS, 0, 12, NULL},
2295 
2296 	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2297 		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2298 
2299 	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2300 		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2301 
2302 	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2303 
2304 	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2305 		R_VCS, D_ALL, 0, 12, NULL},
2306 
2307 	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2308 		R_VCS, D_ALL, 0, 12, NULL},
2309 
2310 	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2311 		R_VCS, D_ALL, 0, 12, NULL},
2312 
2313 	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2314 		R_VCS, D_ALL, 0, 12, NULL},
2315 
2316 	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2317 		R_VCS, D_ALL, 0, 12, NULL},
2318 
2319 	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2320 		R_VCS, D_ALL, 0, 12, NULL},
2321 
2322 	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2323 		R_VCS, D_ALL, 0, 6, NULL},
2324 
2325 	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2326 		R_VCS, D_ALL, 0, 12, NULL},
2327 
2328 	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2329 		R_VCS, D_ALL, 0, 12, NULL},
2330 
2331 	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2332 		R_VCS, D_ALL, 0, 12, NULL},
2333 
2334 	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2335 		R_VCS, D_ALL, 0, 12, NULL},
2336 
2337 	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2338 		R_VCS, D_ALL, 0, 12, NULL},
2339 
2340 	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2341 		R_VCS, D_ALL, 0, 12, NULL},
2342 
2343 	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2344 		R_VCS, D_ALL, 0, 12, NULL},
2345 	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2346 		R_VCS, D_ALL, 0, 12, NULL},
2347 
2348 	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2349 		R_VCS, D_ALL, 0, 12, NULL},
2350 
2351 	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2352 		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2353 
2354 	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2355 		R_VCS, D_ALL, 0, 12, NULL},
2356 
2357 	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2358 		R_VCS, D_ALL, 0, 12, NULL},
2359 
2360 	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2361 		R_VCS, D_ALL, 0, 12, NULL},
2362 
2363 	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2364 		R_VCS, D_ALL, 0, 12, NULL},
2365 
2366 	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2367 		R_VCS, D_ALL, 0, 12, NULL},
2368 
2369 	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2370 		R_VCS, D_ALL, 0, 12, NULL},
2371 
2372 	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2373 		R_VCS, D_ALL, 0, 12, NULL},
2374 
2375 	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2376 		R_VCS, D_ALL, 0, 12, NULL},
2377 
2378 	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2379 		R_VCS, D_ALL, 0, 12, NULL},
2380 
2381 	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2382 		R_VCS, D_ALL, 0, 12, NULL},
2383 
2384 	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2385 		R_VCS, D_ALL, 0, 12, NULL},
2386 
2387 	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2388 		0, 16, NULL},
2389 
2390 	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2391 
2392 	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2393 
2394 	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2395 		R_VCS, D_ALL, 0, 12, NULL},
2396 
2397 	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2398 		R_VCS, D_ALL, 0, 12, NULL},
2399 
2400 	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2401 		R_VCS, D_ALL, 0, 12, NULL},
2402 
2403 	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2404 
2405 	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2406 		0, 12, NULL},
2407 
2408 	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2409 		0, 20, NULL},
2410 };
2411 
2412 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2413 {
2414 	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2415 }
2416 
2417 #define GVT_MAX_CMD_LENGTH     20  /* In Dword */
2418 
2419 static void trace_cs_command(struct parser_exec_state *s,
2420 		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
2421 {
2422 	/* This buffer is used by ftrace to store all commands copied from
2423 	 * guest gma space. Commands can sometimes cross pages, which should
2424 	 * not be handled in the ftrace logic, so this is just used as a
2425 	 * 'bounce buffer'.
2426 	 */
2427 	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
2428 	int i;
2429 	u32 cmd_len = cmd_length(s);
2430 	/* The chosen value of GVT_MAX_CMD_LENGTH is based on the
2431 	 * following two considerations:
2432 	 * 1) From observation, most common ring commands are not that long.
2433 	 *    But there are exceptions, so it does make sense to observe
2434 	 *    longer commands.
2435 	 * 2) From the performance and debugging point of view, dumping the
2436 	 *    full contents of every command is not necessary.
2437 	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
2438 	 * the future for performance considerations.
2439 	 */
2440 	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
2441 		gvt_dbg_cmd("cmd length exceeds tracing limit!\n");
2442 		cmd_len = GVT_MAX_CMD_LENGTH;
2443 	}
2444 
2445 	for (i = 0; i < cmd_len; i++)
2446 		cmd_trace_buf[i] = cmd_val(s, i);
2447 
2448 	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
2449 			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
2450 			cost_pre_cmd_handler, cost_cmd_handler);
2451 }
2452 
2453 /* call the cmd handler, and advance ip */
2454 static int cmd_parser_exec(struct parser_exec_state *s)
2455 {
2456 	struct cmd_info *info;
2457 	u32 cmd;
2458 	int ret = 0;
2459 	cycles_t t0, t1, t2;
2460 	struct parser_exec_state s_before_advance_custom;
2461 	struct intel_vgpu *vgpu = s->vgpu;
2462 
2463 	t0 = get_cycles();
2464 
2465 	cmd = cmd_val(s, 0);
2466 
2467 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2468 	if (info == NULL) {
2469 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2470 				cmd, get_opcode(cmd, s->ring_id));
2471 		return -EINVAL;
2472 	}
2473 
2474 	gvt_dbg_cmd("%s\n", info->name);
2475 
2476 	s->info = info;
2477 
2478 	t1 = get_cycles();
2479 
2480 	s_before_advance_custom = *s;
2481 
2482 	if (info->handler) {
2483 		ret = info->handler(s);
2484 		if (ret < 0) {
2485 			gvt_vgpu_err("%s handler error\n", info->name);
2486 			return ret;
2487 		}
2488 	}
2489 	t2 = get_cycles();
2490 
2491 	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
2492 
2493 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2494 		ret = cmd_advance_default(s);
2495 		if (ret) {
2496 			gvt_vgpu_err("%s IP advance error\n", info->name);
2497 			return ret;
2498 		}
2499 	}
2500 	return 0;
2501 }
2502 
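/*
 * The ring buffer is circular, so the "valid" region depends on whether
 * the tail has wrapped past the head. A sketch with a 16KB ring:
 *   head = 0x1000, tail = 0x3000 -> valid gma range is [0x1000, 0x3000]
 *   head = 0x3000, tail = 0x1000 -> the valid range wraps around the top,
 *   i.e. [0x3000, ring end] plus [ring start, 0x1000]
 * gma_out_of_range() returns true when gma falls outside that region.
 */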
2503 static inline bool gma_out_of_range(unsigned long gma,
2504 		unsigned long gma_head, unsigned long gma_tail)
2505 {
2506 	if (gma_tail >= gma_head)
2507 		return (gma < gma_head) || (gma > gma_tail);
2508 	else
2509 		return (gma > gma_tail) && (gma < gma_head);
2510 }
2511 
2512 static int command_scan(struct parser_exec_state *s,
2513 		unsigned long rb_head, unsigned long rb_tail,
2514 		unsigned long rb_start, unsigned long rb_len)
2515 {
2516 
2517 	unsigned long gma_head, gma_tail, gma_bottom;
2518 	int ret = 0;
2519 	struct intel_vgpu *vgpu = s->vgpu;
2520 
2521 	gma_head = rb_start + rb_head;
2522 	gma_tail = rb_start + rb_tail;
2523 	gma_bottom = rb_start +  rb_len;
2524 
2525 	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
2526 
2527 	while (s->ip_gma != gma_tail) {
2528 		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2529 			if (s->ip_gma < rb_start ||
2530 				s->ip_gma >= gma_bottom) {
2531 				gvt_vgpu_err("ip_gma %lx out of ring scope "
2532 					"(base:0x%lx, bottom: 0x%lx)\n",
2533 					s->ip_gma, rb_start,
2534 					gma_bottom);
2535 				parser_exec_state_dump(s);
2536 				return -EINVAL;
2537 			}
2538 			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2539 				gvt_vgpu_err("ip_gma %lx out of range, "
2540 					"base 0x%lx head 0x%lx tail 0x%lx\n",
2541 					s->ip_gma, rb_start,
2542 					rb_head, rb_tail);
2543 				parser_exec_state_dump(s);
2544 				break;
2545 			}
2546 		}
2547 		ret = cmd_parser_exec(s);
2548 		if (ret) {
2549 			gvt_vgpu_err("cmd parser error\n");
2550 			parser_exec_state_dump(s);
2551 			break;
2552 		}
2553 	}
2554 
2555 	gvt_dbg_cmd("scan_end\n");
2556 
2557 	return ret;
2558 }
2559 
2560 static int scan_workload(struct intel_vgpu_workload *workload)
2561 {
2562 	unsigned long gma_head, gma_tail, gma_bottom;
2563 	struct parser_exec_state s;
2564 	int ret = 0;
2565 
2566 	/* ring base is page aligned */
2567 	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
2568 		return -EINVAL;
2569 
2570 	gma_head = workload->rb_start + workload->rb_head;
2571 	gma_tail = workload->rb_start + workload->rb_tail;
2572 	gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2573 
2574 	s.buf_type = RING_BUFFER_INSTRUCTION;
2575 	s.buf_addr_type = GTT_BUFFER;
2576 	s.vgpu = workload->vgpu;
2577 	s.ring_id = workload->ring_id;
2578 	s.ring_start = workload->rb_start;
2579 	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2580 	s.ring_head = gma_head;
2581 	s.ring_tail = gma_tail;
2582 	s.rb_va = workload->shadow_ring_buffer_va;
2583 	s.workload = workload;
2584 
2585 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2586 		gma_head == gma_tail)
2587 		return 0;
2588 
2589 	ret = ip_gma_set(&s, gma_head);
2590 	if (ret)
2591 		goto out;
2592 
2593 	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2594 		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2595 
2596 out:
2597 	return ret;
2598 }
2599 
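/*
 * Scan the shadowed indirect ctx as a small one-shot ring: head is the
 * buffer start and tail sits three dwords past indirect_ctx.size, which
 * (assuming the usual MI length encoding) covers exactly the three-dword
 * MI_BATCH_BUFFER_START that combine_wa_ctx() appends there.
 */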
2600 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2601 {
2602 
2603 	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2604 	struct parser_exec_state s;
2605 	int ret = 0;
2606 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2607 				struct intel_vgpu_workload,
2608 				wa_ctx);
2609 
2610 	/* ring base is page aligned */
2611 	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
2612 		return -EINVAL;
2613 
2614 	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2615 	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2616 			PAGE_SIZE);
2617 	gma_head = wa_ctx->indirect_ctx.guest_gma;
2618 	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2619 	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2620 
2621 	s.buf_type = RING_BUFFER_INSTRUCTION;
2622 	s.buf_addr_type = GTT_BUFFER;
2623 	s.vgpu = workload->vgpu;
2624 	s.ring_id = workload->ring_id;
2625 	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2626 	s.ring_size = ring_size;
2627 	s.ring_head = gma_head;
2628 	s.ring_tail = gma_tail;
2629 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2630 	s.workload = workload;
2631 
2632 	ret = ip_gma_set(&s, gma_head);
2633 	if (ret)
2634 		goto out;
2635 
2636 	ret = command_scan(&s, 0, ring_tail,
2637 		wa_ctx->indirect_ctx.guest_gma, ring_size);
2638 out:
2639 	return ret;
2640 }
2641 
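/*
 * Copy the guest ring buffer contents between head and tail into the
 * shadow ring buffer allocated from the request. When the guest tail has
 * wrapped (head > tail) this takes two copies: [head, top) first, then
 * [ring start, tail); otherwise a single [head, tail) copy suffices.
 */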
2642 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2643 {
2644 	struct intel_vgpu *vgpu = workload->vgpu;
2645 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2646 	u32 *cs;
2647 	int ret;
2648 
2649 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2650 
2651 	/* calculate workload ring buffer size */
2652 	workload->rb_len = (workload->rb_tail + guest_rb_size -
2653 			workload->rb_head) % guest_rb_size;
2654 
2655 	gma_head = workload->rb_start + workload->rb_head;
2656 	gma_tail = workload->rb_start + workload->rb_tail;
2657 	gma_top = workload->rb_start + guest_rb_size;
2658 
2659 	/* allocate shadow ring buffer */
2660 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
2661 	if (IS_ERR(cs))
2662 		return PTR_ERR(cs);
2663 
2664 	/* get shadow ring buffer va */
2665 	workload->shadow_ring_buffer_va = cs;
2666 
2667 	/* head > tail: the ring has wrapped, so copy [head, top) first */
2668 	if (gma_head > gma_tail) {
2669 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2670 				      gma_head, gma_top, cs);
2671 		if (ret < 0) {
2672 			gvt_vgpu_err("fail to copy guest ring buffer\n");
2673 			return ret;
2674 		}
2675 		cs += ret / sizeof(u32);
2676 		gma_head = workload->rb_start;
2677 	}
2678 
2679 	/* copy [head, tail), or [start, tail) after a wrap */
2680 	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
2681 	if (ret < 0) {
2682 		gvt_vgpu_err("fail to copy guest ring buffer\n");
2683 		return ret;
2684 	}
2685 	cs += ret / sizeof(u32);
2686 	intel_ring_advance(workload->req, cs);
2687 	return 0;
2688 }
2689 
2690 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2691 {
2692 	int ret;
2693 	struct intel_vgpu *vgpu = workload->vgpu;
2694 
2695 	ret = shadow_workload_ring_buffer(workload);
2696 	if (ret) {
2697 		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2698 		return ret;
2699 	}
2700 
2701 	ret = scan_workload(workload);
2702 	if (ret) {
2703 		gvt_vgpu_err("scan workload error\n");
2704 		return ret;
2705 	}
2706 	return 0;
2707 }
2708 
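/*
 * Shadow the indirect ctx workaround buffer into a CPU-mapped GEM object.
 * An extra cacheline beyond indirect_ctx.size is reserved so that
 * combine_wa_ctx() can later append the per-ctx jump right after the
 * guest contents.
 */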
2709 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2710 {
2711 	int ctx_size = wa_ctx->indirect_ctx.size;
2712 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2713 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2714 					struct intel_vgpu_workload,
2715 					wa_ctx);
2716 	struct intel_vgpu *vgpu = workload->vgpu;
2717 	struct drm_i915_gem_object *obj;
2718 	int ret = 0;
2719 	void *map;
2720 
2721 	obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
2722 				     roundup(ctx_size + CACHELINE_BYTES,
2723 					     PAGE_SIZE));
2724 	if (IS_ERR(obj))
2725 		return PTR_ERR(obj);
2726 
2727 	/* get the va of the shadow batch buffer */
2728 	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2729 	if (IS_ERR(map)) {
2730 		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2731 		ret = PTR_ERR(map);
2732 		goto put_obj;
2733 	}
2734 
2735 	ret = i915_gem_object_set_to_cpu_domain(obj, false);
2736 	if (ret) {
2737 		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2738 		goto unmap_src;
2739 	}
2740 
2741 	ret = copy_gma_to_hva(workload->vgpu,
2742 				workload->vgpu->gtt.ggtt_mm,
2743 				guest_gma, guest_gma + ctx_size,
2744 				map);
2745 	if (ret < 0) {
2746 		gvt_vgpu_err("fail to copy guest indirect ctx\n");
2747 		goto unmap_src;
2748 	}
2749 
2750 	wa_ctx->indirect_ctx.obj = obj;
2751 	wa_ctx->indirect_ctx.shadow_va = map;
2752 	return 0;
2753 
2754 unmap_src:
2755 	i915_gem_object_unpin_map(obj);
2756 put_obj:
2757 	i915_gem_object_put(obj);
2758 	return ret;
2759 }
2760 
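/*
 * Append a jump to the per-ctx workaround buffer at the end of the
 * shadowed indirect ctx. 0x18800001 decodes as an MI_BATCH_BUFFER_START
 * header (opcode 0x31 << 23, dword-length 1, i.e. three dwords total);
 * the next dword is the per-ctx guest gma, and the rest of the zeroed
 * cacheline reads as MI_NOOP padding.
 */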
2761 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2762 {
2763 	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2764 	unsigned char *bb_start_sva;
2765 
2766 	per_ctx_start[0] = 0x18800001;
2767 	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2768 
2769 	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2770 				wa_ctx->indirect_ctx.size;
2771 
2772 	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2773 
2774 	return 0;
2775 }
2776 
2777 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2778 {
2779 	int ret;
2780 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2781 					struct intel_vgpu_workload,
2782 					wa_ctx);
2783 	struct intel_vgpu *vgpu = workload->vgpu;
2784 
2785 	if (wa_ctx->indirect_ctx.size == 0)
2786 		return 0;
2787 
2788 	ret = shadow_indirect_ctx(wa_ctx);
2789 	if (ret) {
2790 		gvt_vgpu_err("fail to shadow indirect ctx\n");
2791 		return ret;
2792 	}
2793 
2794 	combine_wa_ctx(wa_ctx);
2795 
2796 	ret = scan_wa_ctx(wa_ctx);
2797 	if (ret) {
2798 		gvt_vgpu_err("scan wa ctx error\n");
2799 		return ret;
2800 	}
2801 
2802 	return 0;
2803 }
2804 
2805 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2806 		unsigned int opcode, int rings)
2807 {
2808 	struct cmd_info *info = NULL;
2809 	unsigned int ring;
2810 
2811 	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
2812 		info = find_cmd_entry(gvt, opcode, ring);
2813 		if (info)
2814 			break;
2815 	}
2816 	return info;
2817 }
2818 
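/*
 * Build the opcode -> cmd_info hash table for this device: every table
 * entry whose device mask matches the detected generation is wrapped in
 * a cmd_entry and hashed by opcode, with duplicates (the same opcode
 * already visible on an overlapping ring set) rejected up front.
 */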
2819 static int init_cmd_table(struct intel_gvt *gvt)
2820 {
2821 	int i;
2822 	struct cmd_entry *e;
2823 	struct cmd_info	*info;
2824 	unsigned int gen_type;
2825 
2826 	gen_type = intel_gvt_get_device_type(gvt);
2827 
2828 	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2829 		if (!(cmd_info[i].devices & gen_type))
2830 			continue;
2831 
2832 		e = kzalloc(sizeof(*e), GFP_KERNEL);
2833 		if (!e)
2834 			return -ENOMEM;
2835 
2836 		e->info = &cmd_info[i];
2837 		info = find_cmd_entry_any_ring(gvt,
2838 				e->info->opcode, e->info->rings);
2839 		if (info) {
2840 			gvt_err("%s %s duplicated\n", e->info->name,
2841 					info->name);
2842 			return -EEXIST;
2843 		}
2844 
2845 		INIT_HLIST_NODE(&e->hlist);
2846 		add_cmd_entry(gvt, e);
2847 		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2848 				e->info->name, e->info->opcode, e->info->flag,
2849 				e->info->devices, e->info->rings);
2850 	}
2851 	return 0;
2852 }
2853 
2854 static void clean_cmd_table(struct intel_gvt *gvt)
2855 {
2856 	struct hlist_node *tmp;
2857 	struct cmd_entry *e;
2858 	int i;
2859 
2860 	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2861 		kfree(e);
2862 
2863 	hash_init(gvt->cmd_table);
2864 }
2865 
2866 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2867 {
2868 	clean_cmd_table(gvt);
2869 }
2870 
2871 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2872 {
2873 	int ret;
2874 
2875 	ret = init_cmd_table(gvt);
2876 	if (ret) {
2877 		intel_gvt_clean_cmd_parser(gvt);
2878 		return ret;
2879 	}
2880 	return 0;
2881 }
2882