/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>

#include <drm/drm_print.h>
#include <drm/intel/intel_gmd_misc_regs.h>

#include "display/i9xx_plane_regs.h"
#include "display/intel_display_regs.h"
#include "display/intel_sprite_regs.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"

#include "display_helpers.h"
#include "gvt.h"
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_reg.h"
#include "trace.h"

#define INVALID_OP	(~0U)

#define OP_LEN_MI		9
#define OP_LEN_2D		10
#define OP_LEN_3D_MEDIA		16
#define OP_LEN_MFX_VC		16
#define OP_LEN_VEBOX		16

#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)

struct sub_op_bits {
	int hi;
	int low;
};
struct decode_info {
	const char *name;
	int op_len;
	int nr_sub_op;
	const struct sub_op_bits *sub_op;
};

#define MAX_CMD_BUDGET	0x7fffffff
#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING	(1<<15)
#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING	(1<<9)
#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING	(1<<1)

#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING	(1<<20)
#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING	(1<<10)
#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING	(1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP			0x0
#define OP_MI_SET_PREDICATE		0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT		0x2
#define OP_MI_WAIT_FOR_EVENT		0x3
#define OP_MI_FLUSH			0x4
#define OP_MI_ARB_CHECK			0x5
#define OP_MI_RS_CONTROL		0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD		0x7
#define OP_MI_ARB_ON_OFF		0x8
#define OP_MI_URB_ATOMIC_ALLOC		0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END		0xA
#define OP_MI_SUSPEND_FLUSH		0xB
#define OP_MI_PREDICATE			0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER		0xD  /* IVB+ */
#define OP_MI_SET_APPID			0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT		0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL	0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP		0x14
#define OP_MI_SEMAPHORE_MBOX		0x16
#define OP_MI_SET_CONTEXT		0x18
#define OP_MI_MATH			0x1A
#define OP_MI_URB_CLEAR			0x19
#define OP_MI_SEMAPHORE_SIGNAL		0x1B /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT		0x1C /* BDW+ */

#define OP_MI_STORE_DATA_IMM		0x20
#define OP_MI_STORE_DATA_INDEX		0x21
#define OP_MI_LOAD_REGISTER_IMM		0x22
#define OP_MI_UPDATE_GTT		0x23
#define OP_MI_STORE_REGISTER_MEM	0x24
#define OP_MI_FLUSH_DW			0x26
#define OP_MI_CLFLUSH			0x27
#define OP_MI_REPORT_PERF_COUNT		0x28
#define OP_MI_LOAD_REGISTER_MEM		0x29 /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG		0x2A /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM		0x2B /* HSW+ */
#define OP_MI_LOAD_URB_MEM		0x2C /* HSW+ */
#define OP_MI_STORE_URM_MEM		0x2D /* HSW+ */
#define OP_MI_2E			0x2E /* BDW+ */
#define OP_MI_2F			0x2F /* BDW+ */
#define OP_MI_BATCH_BUFFER_START	0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END	0x36

#define BATCH_BUFFER_ADDR_MASK		((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK	((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)	((x) >> 22 & 1U)

/* 2D command: Opcode (28:22) */
#define OP_2D(x)	((2<<7) | (x))

#define OP_XY_SETUP_BLT				OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT			OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT		OP_2D(0x11)
#define OP_XY_PIXEL_BLT				OP_2D(0x24)
#define OP_XY_SCANLINES_BLT			OP_2D(0x25)
#define OP_XY_TEXT_BLT				OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT		OP_2D(0x31)
#define OP_XY_COLOR_BLT				OP_2D(0x50)
#define OP_XY_PAT_BLT				OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT			OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT			OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT			OP_2D(0x54)
#define OP_XY_FULL_BLT				OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT			OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT		OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT	OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT		OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT	OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE			OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT		OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT	OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT	OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT			OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE		OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
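
/*
 * Worked example: OP_PIPE_CONTROL below is OP_3D_MEDIA(0x3, 0x2, 0x00)
 * = (3 << 13) | (3 << 11) | (2 << 8) | 0x00 = 0x7a00. Since
 * OP_LEN_3D_MEDIA is 16, get_opcode() extracts it as "cmd >> 16", so a
 * command dword of 0x7a000000 decodes to OP_PIPE_CONTROL.
 */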

#define OP_STATE_PREFETCH			OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS			OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP				OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)
#define OP_SWTESS_BASE_ADDRESS			OP_3D_MEDIA(0x0, 0x1, 0x03)

#define OP_3DSTATE_VF_STATISTICS_GM45		OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT			OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE			OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD			OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD	OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE			OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH			OP_3D_MEDIA(0x2, 0x0, 0x4)
#define OP_MEDIA_POOL_STATE			OP_3D_MEDIA(0x2, 0x0, 0x5)

#define OP_MEDIA_OBJECT				OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT			OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER			OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER				OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS			OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER			OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER		OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER		OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS		OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS		OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER			OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS		OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF				OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS		OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS	OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS				OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS				OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP				OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF				OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM				OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS			OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS			OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS			OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK			OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS			OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS			OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS				OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE				OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS				OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT			OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE				OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS				OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP	OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC	OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS		OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS	OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS	OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS	OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS	OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS	OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS	OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS	OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS	OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS	OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS	OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS	OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS			OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS			OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS			OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS			OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS		OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS		OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS		OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS		OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS		OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS		OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS		OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS		OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS		OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS		OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS		OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS		OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS		OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS	OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS	OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS	OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS	OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS	OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS	OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS	OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY			OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY			OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE		OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0	OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY			OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER		OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET		OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN		OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE		OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS		OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX			OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1	OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER		OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER	OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS		OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE		OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS	OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS	OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS	OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS	OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS	OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST			OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER			OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC	OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC		OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC	OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN		OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL				OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE				OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VBE cmd definitions below are from the vaapi intel driver
 * project (BSD License):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 *
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))
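
/*
 * The low byte of an OP_MFX/OP_VEB opcode splits into sub_opa (bits 7:5)
 * and sub_opb (bits 4:0); in the command dword these land at bits 23:21
 * and 20:16, matching the last two fields of sub_op_mfx_vc and
 * sub_op_vebox defined further below.
 */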

#define OP_MFX_PIPE_MODE_SELECT			OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE			OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE		OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE		OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE		OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5				OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER			OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE				OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE			OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT		OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT			OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT			OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT				OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE			OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE			OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE		OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE			OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE		OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE		OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE			OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE			OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR			OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT			OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT			OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE		OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE		OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE		OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE		OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT			OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE			OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE			OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT			OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE		OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT			OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0				OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8				OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9				OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE			OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE		OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT			OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE			OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE				OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE			OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS 7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)	(ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))

#define DWORD_FIELD(dword, end, start) \
	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))

#define OP_LENGTH_BIAS 2
#define CMD_LEN(value) ((value) + OP_LENGTH_BIAS)

static int gvt_check_valid_cmd_length(int len, int valid_len)
{
	if (valid_len != len) {
		gvt_err("len is not valid:  len=%u  valid_len=%u\n",
			len, valid_len);
		return -EFAULT;
	}
	return 0;
}
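
/*
 * Variable-length command handlers pair this check with CMD_LEN(): e.g.
 * cmd_handler_mi_display_flip() below validates with
 * gvt_check_valid_cmd_length(cmd_length(s), CMD_LEN(1)), bumping
 * valid_len first when optional qwords or fields are present.
 */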

struct cmd_info {
	const char *name;
	u32 opcode;

#define F_LEN_MASK	3U
#define F_LEN_CONST	1U
#define F_LEN_VAR	0U
/* value is const although LEN may be variable */
#define F_LEN_VAR_FIXED	(1<<1)

/*
 * command has its own ip advance logic
 * e.g. MI_BATCH_START, MI_BATCH_END
 */
#define F_IP_ADVANCE_CUSTOM	(1<<2)
	u32 flag;

#define R_RCS	BIT(RCS0)
#define R_VCS1	BIT(VCS0)
#define R_VCS2	BIT(VCS1)
#define R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	BIT(BCS0)
#define R_VECS	BIT(VECS0)
#define R_ALL	(R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	intel_engine_mask_t rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	u16 devices;

	/*
	 * which DWords are addresses that need fixing up.
	 * A bit value of 0 means a 32-bit non-address operand in the
	 * command; 1 means an address operand, which could be 32-bit or
	 * 64-bit depending on the architecture (defined by
	 * "gmadr_bytes_in_cmd" in intel_gvt). No matter the address
	 * length, each address only takes one bit in the bitmap.
	 */
	u16 addr_bitmap;

	/*
	 * flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : length bias bits
	 * Note: length is in DWord
	 */
	u32 len;

	parser_cmd_handler handler;

	/* valid length in DWord */
	u32 valid_len;
};

struct cmd_entry {
	struct hlist_node hlist;
	const struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
	RING_BUFFER_CTX,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of the instr_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;
	/* next instruction when return from batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when return from 2nd batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/*
	 * batch buffer address type (GTT or PPGTT)
	 * used when ret from 2nd level batch buffer
	 */
	int saved_buf_addr_type;
	bool is_ctx_wa;
	bool is_init_ctx;

	const struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};

#define gmadr_dw_number(s) \
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static const struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static const struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static const struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static const struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static const struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static const struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static const struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static const struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};
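
/*
 * Per-engine decode tables, indexed by CMD_TYPE() (bits 31:29 of the
 * command dword, hence eight slots per engine). A NULL slot means that
 * command type cannot be decoded on that engine, and get_opcode() will
 * return INVALID_OP for it.
 */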
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS0] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS1] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};

static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
	const struct decode_info *d_info;

	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}

static inline const struct cmd_info *
find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
	       const struct intel_engine_cs *engine)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if (opcode == e->info->opcode &&
		    e->info->rings & engine->mask)
			return e->info;
	}
	return NULL;
}

static inline const struct cmd_info *
get_cmd_info(struct intel_gvt *gvt, u32 cmd,
	     const struct intel_engine_cs *engine)
{
	u32 opcode;

	opcode = get_opcode(cmd, engine);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, engine);
}
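
/*
 * Worked example: for the dword 0x0a000000 the command type is 0, so
 * decode_info_mi applies; with OP_LEN_MI == 9 the opcode is
 * 0x0a000000 >> (32 - 9) = 0x14, i.e. OP_MI_DISPLAY_FLIP.
 */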
754 "GTT" : "PPGTT", s->ip_gma); 755 756 if (s->ip_va == NULL) { 757 gvt_dbg_cmd(" ip_va(NULL)"); 758 return; 759 } 760 761 gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n", 762 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), 763 cmd_val(s, 2), cmd_val(s, 3)); 764 765 print_opcode(cmd_val(s, 0), s->engine); 766 767 s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); 768 769 while (cnt < 1024) { 770 gvt_dbg_cmd("ip_va=%p: ", s->ip_va); 771 for (i = 0; i < 8; i++) 772 gvt_dbg_cmd("%08x ", cmd_val(s, i)); 773 gvt_dbg_cmd("\n"); 774 775 s->ip_va += 8 * sizeof(u32); 776 cnt += 8; 777 } 778 } 779 780 static inline void update_ip_va(struct parser_exec_state *s) 781 { 782 unsigned long len = 0; 783 784 if (WARN_ON(s->ring_head == s->ring_tail)) 785 return; 786 787 if (s->buf_type == RING_BUFFER_INSTRUCTION || 788 s->buf_type == RING_BUFFER_CTX) { 789 unsigned long ring_top = s->ring_start + s->ring_size; 790 791 if (s->ring_head > s->ring_tail) { 792 if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top) 793 len = (s->ip_gma - s->ring_head); 794 else if (s->ip_gma >= s->ring_start && 795 s->ip_gma <= s->ring_tail) 796 len = (ring_top - s->ring_head) + 797 (s->ip_gma - s->ring_start); 798 } else 799 len = (s->ip_gma - s->ring_head); 800 801 s->ip_va = s->rb_va + len; 802 } else {/* shadow batch buffer */ 803 s->ip_va = s->ret_bb_va; 804 } 805 } 806 807 static inline int ip_gma_set(struct parser_exec_state *s, 808 unsigned long ip_gma) 809 { 810 WARN_ON(!IS_ALIGNED(ip_gma, 4)); 811 812 s->ip_gma = ip_gma; 813 update_ip_va(s); 814 return 0; 815 } 816 817 static inline int ip_gma_advance(struct parser_exec_state *s, 818 unsigned int dw_len) 819 { 820 s->ip_gma += (dw_len << 2); 821 822 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 823 if (s->ip_gma >= s->ring_start + s->ring_size) 824 s->ip_gma -= s->ring_size; 825 update_ip_va(s); 826 } else { 827 s->ip_va += (dw_len << 2); 828 } 829 830 return 0; 831 } 832 833 static inline int get_cmd_length(const struct cmd_info *info, u32 cmd) 834 { 835 if ((info->flag & F_LEN_MASK) == F_LEN_CONST) 836 return info->len; 837 else 838 return (cmd & ((1U << info->len) - 1)) + 2; 839 return 0; 840 } 841 842 static inline int cmd_length(struct parser_exec_state *s) 843 { 844 return get_cmd_length(s->info, cmd_val(s, 0)); 845 } 846 847 /* do not remove this, some platform may need clflush here */ 848 #define patch_value(s, addr, val) do { \ 849 *addr = val; \ 850 } while (0) 851 852 static inline bool is_mocs_mmio(unsigned int offset) 853 { 854 return ((offset >= 0xc800) && (offset <= 0xcff8)) || 855 ((offset >= 0xb020) && (offset <= 0xb0a0)); 856 } 857 858 static int is_cmd_update_pdps(unsigned int offset, 859 struct parser_exec_state *s) 860 { 861 u32 base = s->workload->engine->mmio_base; 862 return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0)); 863 } 864 865 static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, 866 unsigned int offset, unsigned int index) 867 { 868 struct intel_vgpu *vgpu = s->vgpu; 869 struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; 870 struct intel_vgpu_mm *mm; 871 u64 pdps[GEN8_3LVL_PDPES]; 872 873 if (shadow_mm->ppgtt_mm.root_entry_type == 874 GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 875 pdps[0] = (u64)cmd_val(s, 2) << 32; 876 pdps[0] |= cmd_val(s, 4); 877 878 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); 879 if (!mm) { 880 gvt_vgpu_err("failed to get the 4-level shadow vm\n"); 881 return -EINVAL; 882 } 883 intel_vgpu_mm_get(mm); 884 list_add_tail(&mm->ppgtt_mm.link, 885 &s->workload->lri_shadow_mm); 886 *cmd_ptr(s, 2) = 
static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}

static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this, some platform may need clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)

static inline bool is_mocs_mmio(unsigned int offset)
{
	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
		((offset >= 0xb020) && (offset <= 0xb0a0));
}

static int is_cmd_update_pdps(unsigned int offset,
			      struct parser_exec_state *s)
{
	u32 base = s->workload->engine->mmio_base;
	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
}

static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
				       unsigned int offset, unsigned int index)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
	struct intel_vgpu_mm *mm;
	u64 pdps[GEN8_3LVL_PDPES];

	if (shadow_mm->ppgtt_mm.root_entry_type ==
	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		pdps[0] = (u64)cmd_val(s, 2) << 32;
		pdps[0] |= cmd_val(s, 4);

		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
		if (!mm) {
			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
			return -EINVAL;
		}
		intel_vgpu_mm_get(mm);
		list_add_tail(&mm->ppgtt_mm.link,
			      &s->workload->lri_shadow_mm);
		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
	} else {
		/*
		 * Currently all guests use PML4 tables and there is no
		 * guest with a 3-level table that uses LRI for PPGTT
		 * updates, so this path is simply untestable.
		 */
		GEM_BUG_ON(1);
		gvt_vgpu_err("invalid shared shadow vm type\n");
		return -EINVAL;
	}
	return 0;
}
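
/*
 * Common audit entry for every register access embedded in a command;
 * "cmd" names the access type ("lri", "lrm", "srm", "lrr-src"/"lrr-dst"
 * or "pipe_ctrl"). Accesses that are not cmd-accessible (or not
 * whitelisted for that access type) are rejected, and LRI writes are
 * additionally emulated against the vGPU's vreg state.
 */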
static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	u32 ctx_sr_ctl;
	u32 *vreg, vreg_old;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EFAULT;
	}

	if (is_init_ctx(s)) {
		struct intel_gvt_mmio_info *mmio_info;

		intel_gvt_mmio_set_cmd_accessible(gvt, offset);
		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
		if (mmio_info && mmio_info->write)
			intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
		return 0;
	}

	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return -EBADRQC;
	}

	if (!strncmp(cmd, "srm", 3) ||
	    !strncmp(cmd, "lrm", 3)) {
		if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
		    offset == 0x21f0 ||
		    (IS_BROADWELL(gvt->gt->i915) &&
		     offset == i915_mmio_reg_offset(INSTPM)))
			return 0;
		else {
			gvt_vgpu_err("%s access to register (%x)\n",
					cmd, offset);
			return -EPERM;
		}
	}

	if (!strncmp(cmd, "lrr-src", 7) ||
	    !strncmp(cmd, "lrr-dst", 7)) {
		if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
			return 0;
		else {
			gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
			return -EPERM;
		}
	}

	if (!strncmp(cmd, "pipe_ctrl", 9)) {
		/* TODO: add LRI POST logic here */
		return 0;
	}

	if (strncmp(cmd, "lri", 3))
		return -EPERM;

	/* below are all lri handlers */
	vreg = &vgpu_vreg(s->vgpu, offset);

	if (is_cmd_update_pdps(offset, s) &&
	    cmd_pdp_mmio_update_handler(s, offset, index))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(DERRMR) ||
	    offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	if (is_mocs_mmio(offset))
		*vreg = cmd_val(s, index + 1);

	vreg_old = *vreg;

	if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
		u32 cmdval_new, cmdval;
		struct intel_gvt_mmio_info *mmio_info;

		cmdval = cmd_val(s, index + 1);

		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
		if (!mmio_info) {
			cmdval_new = cmdval;
		} else {
			u64 ro_mask = mmio_info->ro_mask;
			int ret;

			if (likely(!ro_mask))
				ret = mmio_info->write(s->vgpu, offset,
						       &cmdval, 4);
			else {
				gvt_vgpu_err("try to write RO reg %x\n",
					     offset);
				ret = -EBADRQC;
			}
			if (ret)
				return ret;
			cmdval_new = *vreg;
		}
		if (cmdval_new != cmdval)
			patch_value(s, cmd_ptr(s, index + 1), cmdval_new);
	}

	/* only patch cmd; restore vreg value if changed in mmio write handler */
	*vreg = vreg_old;

	/*
	 * TODO
	 * In order to let a workload with inhibit context generate correct
	 * image data in memory, vreg values will be loaded to hw via LRIs
	 * in the workload with inhibit context. But as the indirect context
	 * is loaded prior to LRIs in the workload, we don't want reg values
	 * specified in the indirect context to be overwritten by LRIs in
	 * workloads. So, when scanning an indirect context, we update the
	 * reg values in it into vregs, so that LRIs in a workload with
	 * inhibit context will restore the correct values.
	 */
	if (GRAPHICS_VER(s->engine->i915) == 9 &&
	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
	    !strncmp(cmd, "lri", 3)) {
		intel_gvt_read_gpa(s->vgpu,
			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
		/* check inhibit context */
		if (ctx_sr_ctl & 1) {
			u32 data = cmd_val(s, index + 1);

			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
				intel_vgpu_mask_mmio_write(vgpu,
							   offset, &data, 4);
			else
				vgpu_vreg(vgpu, offset) = data;
		}
	}

	return 0;
}

#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))

static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
			if (s->engine->id == BCS0 &&
			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
		if (ret)
			break;
	}
	return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= ((cmd_reg_inhibit(s, i) ||
				 (cmd_reg_inhibit(s, i + 1)))) ?
				-EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
		if (ret)
			break;
	}
	return ret;
}
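
/*
 * MI_LOAD_REGISTER_MEM / MI_STORE_REGISTER_MEM entries pair a register
 * offset dword with gmadr_dw_number(s) address dwords, so the handlers
 * below step their loops by gmadr_dw_number(s) + 1, and only audit the
 * address when bit 22 of dword 0 is set.
 */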
static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static const struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS0] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS1] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};
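
/*
 * PIPE_CONTROL post-sync handling: bits 15:14 of dword 1 select the
 * post-sync operation. The handler below routes 2 and 3 to the 0x2350
 * and 0x2358 registers (apparently PS_DEPTH_COUNT and TIMESTAMP on the
 * render engine), and audits the write address for 1; in index mode the
 * address is also rebased onto the vGPU's hardware status page.
 */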
static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;
	u32 hws_pga, val;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
							 index_mode);
				if (ret)
					return ret;
				if (index_mode) {
					hws_pga = s->vgpu->hws_pga[s->engine->id];
					gma = hws_pga + gma;
					patch_value(s, cmd_ptr(s, 2), gma);
					val = cmd_val(s, 1) & (~(1 << 21));
					patch_value(s, cmd_ptr(s, 1), val);
				}
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
			s->workload->pending_events);
	return 0;
}

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
		s->workload->pending_events);
	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else if (s->buf_type == RING_BUFFER_CTX) {
		ret = ip_gma_set(s, s->ring_tail);
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}

struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = dev_priv->display;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
		return -EBADRQC;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(display, info->pipe);
		info->stride_reg = DSPSTRIDE(display, info->pipe);
		info->surf_reg = DSPSURF(display, info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		drm_WARN_ON(&dev_priv->drm, 1);
		return -EBADRQC;
	}
	return 0;
}
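
/*
 * On SKL+ the flip target is selected by bits 12:8 of dword 0 (decoded
 * below), while the gen8 variant above uses bits 21:19 as an index into
 * gen8_plane_code[].
 */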
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = dev_priv->display;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;

	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;

	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EBADRQC;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(display, info->pipe);
	info->stride_reg = DSPSTRIDE(display, info->pipe);
	info->surf_reg = DSPSURF(display, info->pipe);

	return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (GRAPHICS_VER(s->engine->i915) >= 9) {
		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
			GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
			  GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = dev_priv->display;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (GRAPHICS_VER(dev_priv) >= 9) {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	if (info->plane == PLANE_PRIMARY)
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++;

	if (info->async_flip)
		intel_vgpu_trigger_virtual_event(vgpu, info->event);
	else
		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);

	return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	if (IS_BROADWELL(s->engine->i915))
		return gen8_decode_mi_display_flip(s, info);
	if (GRAPHICS_VER(s->engine->i915) >= 9)
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_check_mi_display_flip(s, info);
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}

static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);
	u32 valid_len = CMD_LEN(1);

	/* Flip Type == Stereo 3D Flip */
	if (DWORD_FIELD(2, 1, 0) == 2)
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret)
		return ret;

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
		      MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
		      MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}
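
/*
 * Batch-buffer start addresses: BATCH_BUFFER_ADDR_MASK evaluates to
 * 0xfffffffc (bits 31:2 of the address dword) and
 * BATCH_BUFFER_ADDR_HIGH_MASK to 0xffff (bits 15:0 of the following
 * dword, consumed only when gmadr_bytes_in_cmd is 8).
 */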
static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	struct intel_vgpu *vgpu = s->vgpu;
	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
		return INTEL_GVT_INVALID_ADDR;
	}

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			     s->info->name);
		return -EFAULT;
	}

	if (index_mode)	{
		if (guest_gma >= I915_GTT_PAGE_SIZE) {
			ret = -EFAULT;
			goto err;
		}
	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
		ret = -EFAULT;
		goto err;
	}

	return 0;

err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
		     s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
	       vgpu->id,
	       vgpu_aperture_gmadr_base(vgpu),
	       vgpu_aperture_gmadr_end(vgpu),
	       vgpu_hidden_gmadr_base(vgpu),
	       vgpu_hidden_gmadr_end(vgpu));
	return ret;
}

static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	u32 valid_len = CMD_LEN(2);
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	/* check if QWORD */
	if (DWORD_FIELD(0, 21, 21))
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}

static inline int unexpected_cmd(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

	return -EBADRQC;
}

static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	u32 valid_len = CMD_LEN(1);
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	/* check inline data */
	if (cmd_val(s, 0) & BIT(18))
		valid_len = CMD_LEN(9);
	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}

static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_conditional_batch_buffer_end(
		struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	int ret = 0;
	u32 hws_pga, val;
	u32 valid_len = CMD_LEN(2);

	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret) {
		/* Check again for Qword */
		ret = gvt_check_valid_cmd_length(cmd_length(s),
						 ++valid_len);
		return ret;
	}

	/* Check post-sync and ppgtt bit */
	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
		gma = cmd_val(s, 1) & GENMASK(31, 3);
		if (gmadr_bytes == 8)
			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
		/* Store Data Index */
		if (cmd_val(s, 0) & (1 << 21))
			index_mode = true;
		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
		if (ret)
			return ret;
		if (index_mode) {
			hws_pga = s->vgpu->hws_pga[s->engine->id];
			gma = hws_pga + gma;
			patch_value(s, cmd_ptr(s, 1), gma);
			val = cmd_val(s, 0) & (~(1 << 21));
			patch_value(s, cmd_ptr(s, 0), val);
		}
	}
	/* Check notify bit */
	if ((cmd_val(s, 0) & (1 << 8)))
		set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
			s->workload->pending_events);
	return ret;
}

static void addr_type_update_snb(struct parser_exec_state *s)
{
	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
	    (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
		s->buf_addr_type = PPGTT_BUFFER;
	}
}


static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long end_gma, void *va)
{
	unsigned long copy_len, offset;
	unsigned long len = 0;
	unsigned long gpa;

	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

		offset = gma & (I915_GTT_PAGE_SIZE - 1);

		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
			I915_GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
	}
	return len;
}


/*
 * Check whether a batch buffer needs to be scanned. Currently
 * the only criteria is based on privilege.
 */
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	/* Decide privilege based on address space */
	if (cmd_val(s, 0) & BIT(8) &&
	    !(s->vgpu->scan_nonprivbb & s->engine->mask))
		return 0;

	return 1;
}

static const char *repr_addr_type(unsigned int type)
{
	return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
}
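
/*
 * find_bb_size() below walks the guest batch buffer one command at a
 * time (fetching each header dword with copy_gma_to_hva()) until it hits
 * MI_BATCH_BUFFER_END or a chained, non-second-level
 * MI_BATCH_BUFFER_START, accumulating get_cmd_length() of each command
 * and recording the offset of the terminating command.
 */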
1830 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; 1831 1832 *bb_size = 0; 1833 *bb_end_cmd_offset = 0; 1834 1835 /* get the start gm address of the batch buffer */ 1836 gma = get_gma_bb_from_cmd(s, 1); 1837 if (gma == INTEL_GVT_INVALID_ADDR) 1838 return -EFAULT; 1839 1840 cmd = cmd_val(s, 0); 1841 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); 1842 if (info == NULL) { 1843 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", 1844 cmd, get_opcode(cmd, s->engine), 1845 repr_addr_type(s->buf_addr_type), 1846 s->engine->name, s->workload); 1847 return -EBADRQC; 1848 } 1849 do { 1850 if (copy_gma_to_hva(s->vgpu, mm, 1851 gma, gma + 4, &cmd) < 0) 1852 return -EFAULT; 1853 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); 1854 if (info == NULL) { 1855 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", 1856 cmd, get_opcode(cmd, s->engine), 1857 repr_addr_type(s->buf_addr_type), 1858 s->engine->name, s->workload); 1859 return -EBADRQC; 1860 } 1861 1862 if (info->opcode == OP_MI_BATCH_BUFFER_END) { 1863 bb_end = true; 1864 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) { 1865 if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) 1866 /* chained batch buffer */ 1867 bb_end = true; 1868 } 1869 1870 if (bb_end) 1871 *bb_end_cmd_offset = *bb_size; 1872 1873 cmd_len = get_cmd_length(info, cmd) << 2; 1874 *bb_size += cmd_len; 1875 gma += cmd_len; 1876 } while (!bb_end); 1877 1878 return 0; 1879 } 1880 1881 static int audit_bb_end(struct parser_exec_state *s, void *va) 1882 { 1883 struct intel_vgpu *vgpu = s->vgpu; 1884 u32 cmd = *(u32 *)va; 1885 const struct cmd_info *info; 1886 1887 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); 1888 if (info == NULL) { 1889 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", 1890 cmd, get_opcode(cmd, s->engine), 1891 repr_addr_type(s->buf_addr_type), 1892 s->engine->name, s->workload); 1893 return -EBADRQC; 1894 } 1895 1896 if ((info->opcode == OP_MI_BATCH_BUFFER_END) || 1897 ((info->opcode == OP_MI_BATCH_BUFFER_START) && 1898 (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0))) 1899 return 0; 1900 1901 return -EBADRQC; 1902 } 1903 1904 static int perform_bb_shadow(struct parser_exec_state *s) 1905 { 1906 struct intel_vgpu *vgpu = s->vgpu; 1907 struct intel_vgpu_shadow_bb *bb; 1908 unsigned long gma = 0; 1909 unsigned long bb_size; 1910 unsigned long bb_end_cmd_offset; 1911 int ret = 0; 1912 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? 1913 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; 1914 unsigned long start_offset = 0; 1915 1916 /* Get the start gm address of the batch buffer */ 1917 gma = get_gma_bb_from_cmd(s, 1); 1918 if (gma == INTEL_GVT_INVALID_ADDR) 1919 return -EFAULT; 1920 1921 ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); 1922 if (ret) 1923 return ret; 1924 1925 bb = kzalloc_obj(*bb); 1926 if (!bb) 1927 return -ENOMEM; 1928 1929 bb->ppgtt = s->buf_addr_type != GTT_BUFFER; 1930 1931 /* 1932 * The start_offset stores the batch buffer's start gma's 1933 * offset relative to page boundary. So for a non-privileged batch 1934 * buffer, the shadowed gem object holds exactly the same page 1935 * layout as the original gem object. This is for the convenience of 1936 * replacing the whole non-privileged batch buffer page with this 1937 * shadowed one in PPGTT at the same gma address. (This replacing 1938 * action is not implemented yet, but may be necessary in the 1939 * future.) 1940 * For a privileged batch buffer, we just change the start gma address 1941 * to that of the shadowed page. 1942 */ 1943 if (bb->ppgtt) 1944 start_offset = gma & ~I915_GTT_PAGE_MASK; 1945 1946 bb->obj = i915_gem_object_create_shmem(s->engine->i915, 1947 round_up(bb_size + start_offset, 1948 PAGE_SIZE)); 1949 if (IS_ERR(bb->obj)) { 1950 ret = PTR_ERR(bb->obj); 1951 goto err_free_bb; 1952 } 1953 1954 bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB); 1955 if (IS_ERR(bb->va)) { 1956 ret = PTR_ERR(bb->va); 1957 goto err_free_obj; 1958 } 1959 1960 ret = copy_gma_to_hva(s->vgpu, mm, 1961 gma, gma + bb_size, 1962 bb->va + start_offset); 1963 if (ret < 0) { 1964 gvt_vgpu_err("fail to copy guest batch buffer\n"); 1965 ret = -EFAULT; 1966 goto err_unmap; 1967 } 1968 1969 ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); 1970 if (ret) 1971 goto err_unmap; 1972 1973 i915_gem_object_unlock(bb->obj); 1974 INIT_LIST_HEAD(&bb->list); 1975 list_add(&bb->list, &s->workload->shadow_bb); 1976 1977 bb->bb_start_cmd_va = s->ip_va; 1978 1979 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) 1980 bb->bb_offset = s->ip_va - s->rb_va; 1981 else 1982 bb->bb_offset = 0; 1983 1984 /* 1985 * ip_va saves the virtual address of the shadow batch buffer, while 1986 * ip_gma saves the graphics address of the original batch buffer. 1987 * As the shadow batch buffer is just a copy of the original one, 1988 * it is valid to use the shadow batch buffer's va and the original 1989 * batch buffer's gma as a pair. After all, we don't want to pin the 1990 * shadow buffer here (too early). 1991 */ 1992 s->ip_va = bb->va + start_offset; 1993 s->ip_gma = gma; 1994 return 0; 1995 err_unmap: 1996 i915_gem_object_unpin_map(bb->obj); 1997 err_free_obj: 1998 i915_gem_object_put(bb->obj); 1999 err_free_bb: 2000 kfree(bb); 2001 return ret; 2002 } 2003 2004 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) 2005 { 2006 bool second_level; 2007 int ret = 0; 2008 struct intel_vgpu *vgpu = s->vgpu; 2009 2010 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { 2011 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 2012 return -EFAULT; 2013 } 2014 2015 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; 2016 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { 2017 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); 2018 return -EFAULT; 2019 } 2020 2021 s->saved_buf_addr_type = s->buf_addr_type; 2022 addr_type_update_snb(s); 2023 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2024 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32); 2025 s->buf_type = BATCH_BUFFER_INSTRUCTION; 2026 } else if (second_level) { 2027 s->buf_type = BATCH_BUFFER_2ND_LEVEL; 2028 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32); 2029 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32); 2030 } 2031 2032 if (batch_buffer_needs_scan(s)) { 2033 ret = perform_bb_shadow(s); 2034 if (ret < 0) 2035 gvt_vgpu_err("invalid shadow batch buffer\n"); 2036 } else { 2037 /* emulate a batch buffer end so the return is handled correctly */ 2038 ret = cmd_handler_mi_batch_buffer_end(s); 2039 if (ret < 0) 2040 return ret; 2041 } 2042 return ret; 2043 } 2044 2045 static int mi_noop_index; 2046 2047 static const struct cmd_info cmd_info[] = { 2048 {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, 2049 2050 {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL, 2051 0, 1, NULL}, 2052 2053 {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL, 2054 0, 1,
cmd_handler_mi_user_interrupt}, 2055 2056 {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS, 2057 D_ALL, 0, 1, cmd_handler_mi_wait_for_event}, 2058 2059 {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, 2060 2061 {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1, 2062 NULL}, 2063 2064 {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1, 2065 NULL}, 2066 2067 {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1, 2068 NULL}, 2069 2070 {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1, 2071 NULL}, 2072 2073 {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS, 2074 D_ALL, 0, 1, NULL}, 2075 2076 {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END, 2077 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1, 2078 cmd_handler_mi_batch_buffer_end}, 2079 2080 {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 2081 0, 1, NULL}, 2082 2083 {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1, 2084 NULL}, 2085 2086 {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL, 2087 D_ALL, 0, 1, NULL}, 2088 2089 {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1, 2090 NULL}, 2091 2092 {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1, 2093 NULL}, 2094 2095 {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR, 2096 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip}, 2097 2098 {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED, 2099 R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)}, 2100 2101 {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL}, 2102 2103 {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, 2104 D_ALL, 0, 8, NULL, CMD_LEN(0)}, 2105 2106 {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, 2107 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8, 2108 NULL, CMD_LEN(0)}, 2109 2110 {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, 2111 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2), 2112 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)}, 2113 2114 {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, 2115 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm}, 2116 2117 {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL, 2118 0, 8, cmd_handler_mi_store_data_index}, 2119 2120 {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL, 2121 D_ALL, 0, 8, cmd_handler_lri}, 2122 2123 {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10, 2124 cmd_handler_mi_update_gtt}, 2125 2126 {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, 2127 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, 2128 cmd_handler_srm, CMD_LEN(2)}, 2129 2130 {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6, 2131 cmd_handler_mi_flush_dw}, 2132 2133 {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1), 2134 10, cmd_handler_mi_clflush}, 2135 2136 {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, 2137 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6, 2138 cmd_handler_mi_report_perf_count, CMD_LEN(2)}, 2139 2140 {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, 2141 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, 2142 cmd_handler_lrm, CMD_LEN(2)}, 2143 2144 {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, 2145 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, 2146 cmd_handler_lrr, CMD_LEN(1)}, 2147 2148 {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, 2149 F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, 
D_ALL, 0, 2150 8, NULL, CMD_LEN(2)}, 2151 2152 {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, 2153 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)}, 2154 2155 {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL, 2156 ADDR_FIX_1(2), 8, NULL}, 2157 2158 {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 2159 ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)}, 2160 2161 {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 2162 8, cmd_handler_mi_op_2f}, 2163 2164 {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START, 2165 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8, 2166 cmd_handler_mi_batch_buffer_start}, 2167 2168 {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END, 2169 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, 2170 cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)}, 2171 2172 {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST, 2173 R_RCS | R_BCS, D_ALL, 0, 2, NULL}, 2174 2175 {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL, 2176 ADDR_FIX_2(4, 7), 8, NULL}, 2177 2178 {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL, 2179 0, 8, NULL}, 2180 2181 {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT, 2182 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, 2183 2184 {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, 2185 2186 {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL, 2187 0, 8, NULL}, 2188 2189 {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL, 2190 ADDR_FIX_1(3), 8, NULL}, 2191 2192 {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS, 2193 D_ALL, 0, 8, NULL}, 2194 2195 {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL, 2196 ADDR_FIX_1(4), 8, NULL}, 2197 2198 {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL, 2199 ADDR_FIX_2(4, 5), 8, NULL}, 2200 2201 {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL, 2202 ADDR_FIX_1(4), 8, NULL}, 2203 2204 {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL, 2205 ADDR_FIX_2(4, 7), 8, NULL}, 2206 2207 {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS, 2208 D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, 2209 2210 {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL}, 2211 2212 {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS, 2213 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL}, 2214 2215 {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR, 2216 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, 2217 2218 {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT", 2219 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT, 2220 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, 2221 2222 {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS, 2223 D_ALL, ADDR_FIX_1(4), 8, NULL}, 2224 2225 {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT, 2226 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, 2227 2228 {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS, 2229 D_ALL, ADDR_FIX_1(4), 8, NULL}, 2230 2231 {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS, 2232 D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, 2233 2234 {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT, 2235 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL}, 2236 2237 {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT", 2238 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT, 2239 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL}, 2240 2241 {"XY_PAT_CHROMA_BLT", 
OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL, 2242 ADDR_FIX_2(4, 5), 8, NULL}, 2243 2244 {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE, 2245 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL}, 2246 2247 {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", 2248 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP, 2249 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2250 2251 {"3DSTATE_VIEWPORT_STATE_POINTERS_CC", 2252 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC, 2253 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2254 2255 {"3DSTATE_BLEND_STATE_POINTERS", 2256 OP_3DSTATE_BLEND_STATE_POINTERS, 2257 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2258 2259 {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS", 2260 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS, 2261 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2262 2263 {"3DSTATE_BINDING_TABLE_POINTERS_VS", 2264 OP_3DSTATE_BINDING_TABLE_POINTERS_VS, 2265 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2266 2267 {"3DSTATE_BINDING_TABLE_POINTERS_HS", 2268 OP_3DSTATE_BINDING_TABLE_POINTERS_HS, 2269 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2270 2271 {"3DSTATE_BINDING_TABLE_POINTERS_DS", 2272 OP_3DSTATE_BINDING_TABLE_POINTERS_DS, 2273 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2274 2275 {"3DSTATE_BINDING_TABLE_POINTERS_GS", 2276 OP_3DSTATE_BINDING_TABLE_POINTERS_GS, 2277 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2278 2279 {"3DSTATE_BINDING_TABLE_POINTERS_PS", 2280 OP_3DSTATE_BINDING_TABLE_POINTERS_PS, 2281 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2282 2283 {"3DSTATE_SAMPLER_STATE_POINTERS_VS", 2284 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS, 2285 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2286 2287 {"3DSTATE_SAMPLER_STATE_POINTERS_HS", 2288 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS, 2289 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2290 2291 {"3DSTATE_SAMPLER_STATE_POINTERS_DS", 2292 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS, 2293 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2294 2295 {"3DSTATE_SAMPLER_STATE_POINTERS_GS", 2296 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS, 2297 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2298 2299 {"3DSTATE_SAMPLER_STATE_POINTERS_PS", 2300 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS, 2301 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2302 2303 {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL, 2304 0, 8, NULL}, 2305 2306 {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL, 2307 0, 8, NULL}, 2308 2309 {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL, 2310 0, 8, NULL}, 2311 2312 {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL, 2313 0, 8, NULL}, 2314 2315 {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS, 2316 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2317 2318 {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS, 2319 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2320 2321 {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS, 2322 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2323 2324 {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS, 2325 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2326 2327 {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS, 2328 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2329 2330 {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS, 2331 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL}, 2332 2333 {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS, 2334 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL}, 2335 2336 {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS, 2337 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2338 2339 {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS, 2340 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2341 2342 {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS, 2343 
F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2344 2345 {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS, 2346 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2347 2348 {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS, 2349 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2350 2351 {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS, 2352 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2353 2354 {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS, 2355 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2356 2357 {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS, 2358 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2359 2360 {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS, 2361 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, 2362 2363 {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS, 2364 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, 2365 2366 {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS, 2367 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, 2368 2369 {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS, 2370 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, 2371 2372 {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS, 2373 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL}, 2374 2375 {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS, 2376 D_BDW_PLUS, 0, 8, NULL}, 2377 2378 {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, 2379 NULL}, 2380 2381 {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS, 2382 D_BDW_PLUS, 0, 8, NULL}, 2383 2384 {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS, 2385 D_BDW_PLUS, 0, 8, NULL}, 2386 2387 {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 2388 8, NULL}, 2389 2390 {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR, 2391 R_RCS, D_BDW_PLUS, 0, 8, NULL}, 2392 2393 {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 2394 8, NULL}, 2395 2396 {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, 2397 NULL}, 2398 2399 {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, 2400 NULL}, 2401 2402 {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8, 2403 NULL}, 2404 2405 {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS, 2406 D_BDW_PLUS, 0, 8, NULL}, 2407 2408 {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR, 2409 R_RCS, D_ALL, 0, 8, NULL}, 2410 2411 {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS, 2412 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL}, 2413 2414 {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST, 2415 R_RCS, D_ALL, 0, 1, NULL}, 2416 2417 {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2418 2419 {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR, 2420 R_RCS, D_ALL, 0, 8, NULL}, 2421 2422 {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS, 2423 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2424 2425 {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2426 2427 {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2428 2429 {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2430 2431 {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS, 2432 D_BDW_PLUS, 0, 8, NULL}, 2433 2434 {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS, 2435 D_BDW_PLUS, 0, 8, NULL}, 2436 2437 {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS, 
2438 D_ALL, 0, 8, NULL}, 2439 2440 {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS, 2441 D_BDW_PLUS, 0, 8, NULL}, 2442 2443 {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS, 2444 D_BDW_PLUS, 0, 8, NULL}, 2445 2446 {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2447 2448 {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2449 2450 {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2451 2452 {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS, 2453 D_ALL, 0, 8, NULL}, 2454 2455 {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2456 2457 {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2458 2459 {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR, 2460 R_RCS, D_ALL, 0, 8, NULL}, 2461 2462 {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0, 2463 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2464 2465 {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL, 2466 0, 8, NULL}, 2467 2468 {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS, 2469 D_ALL, ADDR_FIX_1(2), 8, NULL}, 2470 2471 {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET, 2472 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2473 2474 {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN, 2475 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2476 2477 {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS, 2478 D_ALL, 0, 8, NULL}, 2479 2480 {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS, 2481 D_ALL, 0, 8, NULL}, 2482 2483 {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS, 2484 D_ALL, 0, 8, NULL}, 2485 2486 {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1, 2487 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2488 2489 {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS, 2490 D_BDW_PLUS, 0, 8, NULL}, 2491 2492 {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS, 2493 D_ALL, ADDR_FIX_1(2), 8, NULL}, 2494 2495 {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR, 2496 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, 2497 2498 {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR, 2499 R_RCS, D_ALL, 0, 8, NULL}, 2500 2501 {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS, 2502 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2503 2504 {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS, 2505 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2506 2507 {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS, 2508 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2509 2510 {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS, 2511 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2512 2513 {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS, 2514 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2515 2516 {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR, 2517 R_RCS, D_ALL, 0, 8, NULL}, 2518 2519 {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS, 2520 D_ALL, 0, 9, NULL}, 2521 2522 {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 2523 ADDR_FIX_2(2, 4), 8, NULL}, 2524 2525 {"3DSTATE_BINDING_TABLE_POOL_ALLOC", 2526 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC, 2527 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, 2528 2529 {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC, 2530 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, 2531 2532 
{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC", 2533 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC, 2534 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL}, 2535 2536 {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS, 2537 D_BDW_PLUS, 0, 8, NULL}, 2538 2539 {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL, 2540 ADDR_FIX_1(2), 8, cmd_handler_pipe_control}, 2541 2542 {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2543 2544 {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0, 2545 1, NULL}, 2546 2547 {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL, 2548 ADDR_FIX_1(1), 8, NULL}, 2549 2550 {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2551 2552 {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 2553 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL}, 2554 2555 {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, 2556 ADDR_FIX_1(1), 8, NULL}, 2557 2558 {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, 2559 F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, 2560 2561 {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2562 2563 {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 2564 2565 {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 2566 0, 8, NULL}, 2567 2568 {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS, 2569 D_SKL_PLUS, 0, 8, NULL}, 2570 2571 {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD, 2572 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, 2573 2574 {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL, 2575 0, 16, NULL}, 2576 2577 {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL, 2578 0, 16, NULL}, 2579 2580 {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, 2581 0, 16, NULL}, 2582 2583 {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, 2584 2585 {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL, 2586 0, 16, NULL}, 2587 2588 {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL, 2589 0, 16, NULL}, 2590 2591 {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL, 2592 0, 16, NULL}, 2593 2594 {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL, 2595 0, 8, NULL}, 2596 2597 {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, 2598 NULL}, 2599 2600 {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45, 2601 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, 2602 2603 {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR, 2604 R_VCS, D_ALL, 0, 12, NULL}, 2605 2606 {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR, 2607 R_VCS, D_ALL, 0, 12, NULL}, 2608 2609 {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR, 2610 R_VCS, D_BDW_PLUS, 0, 12, NULL}, 2611 2612 {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE, 2613 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL}, 2614 2615 {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE, 2616 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL}, 2617 2618 {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL}, 2619 2620 {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR, 2621 R_VCS, D_ALL, 0, 12, NULL}, 2622 2623 {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR, 2624 R_VCS, D_ALL, 0, 12, NULL}, 2625 2626 {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR, 2627 R_VCS, D_ALL, 0, 12, NULL}, 2628 2629 {"MFX_PAK_INSERT_OBJECT", 
OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR, 2630 R_VCS, D_ALL, 0, 12, NULL}, 2631 2632 {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR, 2633 R_VCS, D_ALL, 0, 12, NULL}, 2634 2635 {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR, 2636 R_VCS, D_ALL, 0, 12, NULL}, 2637 2638 {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR, 2639 R_VCS, D_ALL, 0, 6, NULL}, 2640 2641 {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR, 2642 R_VCS, D_ALL, 0, 12, NULL}, 2643 2644 {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR, 2645 R_VCS, D_ALL, 0, 12, NULL}, 2646 2647 {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR, 2648 R_VCS, D_ALL, 0, 12, NULL}, 2649 2650 {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR, 2651 R_VCS, D_ALL, 0, 12, NULL}, 2652 2653 {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR, 2654 R_VCS, D_ALL, 0, 12, NULL}, 2655 2656 {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR, 2657 R_VCS, D_ALL, 0, 12, NULL}, 2658 2659 {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR, 2660 R_VCS, D_ALL, 0, 12, NULL}, 2661 {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR, 2662 R_VCS, D_ALL, 0, 12, NULL}, 2663 2664 {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR, 2665 R_VCS, D_ALL, 0, 12, NULL}, 2666 2667 {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR, 2668 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL}, 2669 2670 {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR, 2671 R_VCS, D_ALL, 0, 12, NULL}, 2672 2673 {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR, 2674 R_VCS, D_ALL, 0, 12, NULL}, 2675 2676 {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR, 2677 R_VCS, D_ALL, 0, 12, NULL}, 2678 2679 {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR, 2680 R_VCS, D_ALL, 0, 12, NULL}, 2681 2682 {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR, 2683 R_VCS, D_ALL, 0, 12, NULL}, 2684 2685 {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR, 2686 R_VCS, D_ALL, 0, 12, NULL}, 2687 2688 {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR, 2689 R_VCS, D_ALL, 0, 12, NULL}, 2690 2691 {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR, 2692 R_VCS, D_ALL, 0, 12, NULL}, 2693 2694 {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR, 2695 R_VCS, D_ALL, 0, 12, NULL}, 2696 2697 {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR, 2698 R_VCS, D_ALL, 0, 12, NULL}, 2699 2700 {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR, 2701 R_VCS, D_ALL, 0, 12, NULL}, 2702 2703 {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL, 2704 0, 16, NULL}, 2705 2706 {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL}, 2707 2708 {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL}, 2709 2710 {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR, 2711 R_VCS, D_ALL, 0, 12, NULL}, 2712 2713 {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR, 2714 R_VCS, D_ALL, 0, 12, NULL}, 2715 2716 {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR, 2717 R_VCS, D_ALL, 0, 12, NULL}, 2718 2719 {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL}, 2720 2721 {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL, 2722 0, 12, NULL}, 2723 2724 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS, 2725 0, 12, NULL}, 2726 }; 2727 2728 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e) 2729 { 2730 hash_add(gvt->cmd_table, &e->hlist, e->info->opcode); 2731 } 2732 2733 
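/*
 * A minimal sketch of the matching lookup (a hypothetical helper, shown
 * for illustration only and not called anywhere in this file):
 * add_cmd_entry() above files each entry in the hash bucket of its
 * opcode, so a lookup only walks that one bucket. The parser's real
 * lookup, reached via get_cmd_info(), additionally matches the entry's
 * rings mask against the engine; device-generation filtering has
 * already happened when the table was built in init_cmd_table() below.
 */
static __maybe_unused struct cmd_entry *
sketch_find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode)
{
	struct cmd_entry *e;

	/* walk only the bucket that hash_add() used for this opcode */
	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode)
		if (opcode == e->info->opcode)
			return e;

	return NULL;
}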
/* call the cmd handler, and advance ip */ 2734 static int cmd_parser_exec(struct parser_exec_state *s) 2735 { 2736 struct intel_vgpu *vgpu = s->vgpu; 2737 const struct cmd_info *info; 2738 u32 cmd; 2739 int ret = 0; 2740 2741 cmd = cmd_val(s, 0); 2742 2743 /* fastpath for MI_NOOP */ 2744 if (cmd == MI_NOOP) 2745 info = &cmd_info[mi_noop_index]; 2746 else 2747 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); 2748 2749 if (info == NULL) { 2750 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", 2751 cmd, get_opcode(cmd, s->engine), 2752 repr_addr_type(s->buf_addr_type), 2753 s->engine->name, s->workload); 2754 return -EBADRQC; 2755 } 2756 2757 s->info = info; 2758 2759 trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, 2760 cmd_length(s), s->buf_type, s->buf_addr_type, 2761 s->workload, info->name); 2762 2763 if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { 2764 ret = gvt_check_valid_cmd_length(cmd_length(s), 2765 info->valid_len); 2766 if (ret) 2767 return ret; 2768 } 2769 2770 if (info->handler) { 2771 ret = info->handler(s); 2772 if (ret < 0) { 2773 gvt_vgpu_err("%s handler error\n", info->name); 2774 return ret; 2775 } 2776 } 2777 2778 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2779 ret = cmd_advance_default(s); 2780 if (ret) { 2781 gvt_vgpu_err("%s IP advance error\n", info->name); 2782 return ret; 2783 } 2784 } 2785 return 0; 2786 } 2787 2788 static inline bool gma_out_of_range(unsigned long gma, 2789 unsigned long gma_head, unsigned int gma_tail) 2790 { 2791 if (gma_tail >= gma_head) 2792 return (gma < gma_head) || (gma > gma_tail); 2793 else 2794 return (gma > gma_tail) && (gma < gma_head); 2795 } 2796 2797 /* Keep a consistent return type, e.g. EBADRQC for an unknown 2798 * cmd, EFAULT for an invalid address and EPERM for a non-privileged 2799 * access; these values later serve as input to the VM health status. 2800 */ 2801 static int command_scan(struct parser_exec_state *s, 2802 unsigned long rb_head, unsigned long rb_tail, 2803 unsigned long rb_start, unsigned long rb_len) 2804 { 2805 2806 unsigned long gma_head, gma_tail, gma_bottom; 2807 int ret = 0; 2808 struct intel_vgpu *vgpu = s->vgpu; 2809 2810 gma_head = rb_start + rb_head; 2811 gma_tail = rb_start + rb_tail; 2812 gma_bottom = rb_start + rb_len; 2813 2814 while (s->ip_gma != gma_tail) { 2815 if (s->buf_type == RING_BUFFER_INSTRUCTION || 2816 s->buf_type == RING_BUFFER_CTX) { 2817 if (!(s->ip_gma >= rb_start) || 2818 !(s->ip_gma < gma_bottom)) { 2819 gvt_vgpu_err("ip_gma %lx out of ring scope. " 2820 "(base:0x%lx, bottom: 0x%lx)\n", 2821 s->ip_gma, rb_start, 2822 gma_bottom); 2823 parser_exec_state_dump(s); 2824 return -EFAULT; 2825 } 2826 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { 2827 gvt_vgpu_err("ip_gma %lx out of range. "
2828 "base 0x%lx head 0x%lx tail 0x%lx\n", 2829 s->ip_gma, rb_start, 2830 rb_head, rb_tail); 2831 parser_exec_state_dump(s); 2832 break; 2833 } 2834 } 2835 ret = cmd_parser_exec(s); 2836 if (ret) { 2837 gvt_vgpu_err("cmd parser error\n"); 2838 parser_exec_state_dump(s); 2839 break; 2840 } 2841 } 2842 2843 return ret; 2844 } 2845 2846 static int scan_workload(struct intel_vgpu_workload *workload) 2847 { 2848 unsigned long gma_head, gma_tail; 2849 struct parser_exec_state s; 2850 int ret = 0; 2851 2852 /* ring base is page aligned */ 2853 if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE))) 2854 return -EINVAL; 2855 2856 gma_head = workload->rb_start + workload->rb_head; 2857 gma_tail = workload->rb_start + workload->rb_tail; 2858 2859 s.buf_type = RING_BUFFER_INSTRUCTION; 2860 s.buf_addr_type = GTT_BUFFER; 2861 s.vgpu = workload->vgpu; 2862 s.engine = workload->engine; 2863 s.ring_start = workload->rb_start; 2864 s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); 2865 s.ring_head = gma_head; 2866 s.ring_tail = gma_tail; 2867 s.rb_va = workload->shadow_ring_buffer_va; 2868 s.workload = workload; 2869 s.is_ctx_wa = false; 2870 2871 if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail) 2872 return 0; 2873 2874 ret = ip_gma_set(&s, gma_head); 2875 if (ret) 2876 goto out; 2877 2878 ret = command_scan(&s, workload->rb_head, workload->rb_tail, 2879 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl)); 2880 2881 out: 2882 return ret; 2883 } 2884 2885 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2886 { 2887 2888 unsigned long gma_head, gma_tail, ring_size, ring_tail; 2889 struct parser_exec_state s; 2890 int ret = 0; 2891 struct intel_vgpu_workload *workload = container_of(wa_ctx, 2892 struct intel_vgpu_workload, 2893 wa_ctx); 2894 2895 /* ring base is page aligned */ 2896 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, 2897 I915_GTT_PAGE_SIZE))) 2898 return -EINVAL; 2899 2900 ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32); 2901 ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES, 2902 PAGE_SIZE); 2903 gma_head = wa_ctx->indirect_ctx.guest_gma; 2904 gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail; 2905 2906 s.buf_type = RING_BUFFER_INSTRUCTION; 2907 s.buf_addr_type = GTT_BUFFER; 2908 s.vgpu = workload->vgpu; 2909 s.engine = workload->engine; 2910 s.ring_start = wa_ctx->indirect_ctx.guest_gma; 2911 s.ring_size = ring_size; 2912 s.ring_head = gma_head; 2913 s.ring_tail = gma_tail; 2914 s.rb_va = wa_ctx->indirect_ctx.shadow_va; 2915 s.workload = workload; 2916 s.is_ctx_wa = true; 2917 2918 ret = ip_gma_set(&s, gma_head); 2919 if (ret) 2920 goto out; 2921 2922 ret = command_scan(&s, 0, ring_tail, 2923 wa_ctx->indirect_ctx.guest_gma, ring_size); 2924 out: 2925 return ret; 2926 } 2927 2928 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) 2929 { 2930 struct intel_vgpu *vgpu = workload->vgpu; 2931 struct intel_vgpu_submission *s = &vgpu->submission; 2932 unsigned long gma_head, gma_tail, gma_top, guest_rb_size; 2933 void *shadow_ring_buffer_va; 2934 int ret; 2935 2936 guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); 2937 2938 /* calculate workload ring buffer size */ 2939 workload->rb_len = (workload->rb_tail + guest_rb_size - 2940 workload->rb_head) % guest_rb_size; 2941 2942 gma_head = workload->rb_start + workload->rb_head; 2943 gma_tail = workload->rb_start + workload->rb_tail; 2944 gma_top = workload->rb_start + guest_rb_size; 2945 2946 if (workload->rb_len > 
s->ring_scan_buffer_size[workload->engine->id]) { 2947 void *p; 2948 2949 /* realloc the new ring buffer if needed */ 2950 p = krealloc(s->ring_scan_buffer[workload->engine->id], 2951 workload->rb_len, GFP_KERNEL); 2952 if (!p) { 2953 gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); 2954 return -ENOMEM; 2955 } 2956 s->ring_scan_buffer[workload->engine->id] = p; 2957 s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; 2958 } 2959 2960 shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; 2961 2962 /* get shadow ring buffer va */ 2963 workload->shadow_ring_buffer_va = shadow_ring_buffer_va; 2964 2965 /* head > tail --> copy head <-> top */ 2966 if (gma_head > gma_tail) { 2967 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, 2968 gma_head, gma_top, shadow_ring_buffer_va); 2969 if (ret < 0) { 2970 gvt_vgpu_err("fail to copy guest ring buffer\n"); 2971 return ret; 2972 } 2973 shadow_ring_buffer_va += ret; 2974 gma_head = workload->rb_start; 2975 } 2976 2977 /* copy head or start <-> tail */ 2978 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, 2979 shadow_ring_buffer_va); 2980 if (ret < 0) { 2981 gvt_vgpu_err("fail to copy guest ring buffer\n"); 2982 return ret; 2983 } 2984 return 0; 2985 } 2986 2987 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload) 2988 { 2989 int ret; 2990 struct intel_vgpu *vgpu = workload->vgpu; 2991 2992 ret = shadow_workload_ring_buffer(workload); 2993 if (ret) { 2994 gvt_vgpu_err("fail to shadow workload ring_buffer\n"); 2995 return ret; 2996 } 2997 2998 ret = scan_workload(workload); 2999 if (ret) { 3000 gvt_vgpu_err("scan workload error\n"); 3001 return ret; 3002 } 3003 return 0; 3004 } 3005 3006 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) 3007 { 3008 int ctx_size = wa_ctx->indirect_ctx.size; 3009 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 3010 struct intel_vgpu_workload *workload = container_of(wa_ctx, 3011 struct intel_vgpu_workload, 3012 wa_ctx); 3013 struct intel_vgpu *vgpu = workload->vgpu; 3014 struct drm_i915_gem_object *obj; 3015 int ret = 0; 3016 void *map; 3017 3018 obj = i915_gem_object_create_shmem(workload->engine->i915, 3019 roundup(ctx_size + CACHELINE_BYTES, 3020 PAGE_SIZE)); 3021 if (IS_ERR(obj)) 3022 return PTR_ERR(obj); 3023 3024 /* get the va of the shadow indirect ctx */ 3025 map = i915_gem_object_pin_map(obj, I915_MAP_WB); 3026 if (IS_ERR(map)) { 3027 gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); 3028 ret = PTR_ERR(map); 3029 goto put_obj; 3030 } 3031 3032 i915_gem_object_lock(obj, NULL); 3033 ret = i915_gem_object_set_to_cpu_domain(obj, false); 3034 i915_gem_object_unlock(obj); 3035 if (ret) { 3036 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); 3037 goto unmap_src; 3038 } 3039 3040 ret = copy_gma_to_hva(workload->vgpu, 3041 workload->vgpu->gtt.ggtt_mm, 3042 guest_gma, guest_gma + ctx_size, 3043 map); 3044 if (ret < 0) { 3045 gvt_vgpu_err("fail to copy guest indirect ctx\n"); 3046 goto unmap_src; 3047 } 3048 3049 wa_ctx->indirect_ctx.obj = obj; 3050 wa_ctx->indirect_ctx.shadow_va = map; 3051 return 0; 3052 3053 unmap_src: 3054 i915_gem_object_unpin_map(obj); 3055 put_obj: 3056 i915_gem_object_put(obj); 3057 return ret; 3058 } 3059 3060 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 3061 { 3062 u32 per_ctx_start[CACHELINE_DWORDS] = {}; 3063 unsigned char *bb_start_sva; 3064 3065 if (!wa_ctx->per_ctx.valid) 3066 return 0; 3067 3068 per_ctx_start[0] = 0x18800001; /* MI_BATCH_BUFFER_START (opcode 0x31), dword length 1 */ 3069 per_ctx_start[1] =
wa_ctx->per_ctx.guest_gma; 3070 3071 bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va + 3072 wa_ctx->indirect_ctx.size; 3073 3074 memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES); 3075 3076 return 0; 3077 } 3078 3079 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 3080 { 3081 int ret; 3082 struct intel_vgpu_workload *workload = container_of(wa_ctx, 3083 struct intel_vgpu_workload, 3084 wa_ctx); 3085 struct intel_vgpu *vgpu = workload->vgpu; 3086 3087 if (wa_ctx->indirect_ctx.size == 0) 3088 return 0; 3089 3090 ret = shadow_indirect_ctx(wa_ctx); 3091 if (ret) { 3092 gvt_vgpu_err("fail to shadow indirect ctx\n"); 3093 return ret; 3094 } 3095 3096 combine_wa_ctx(wa_ctx); 3097 3098 ret = scan_wa_ctx(wa_ctx); 3099 if (ret) { 3100 gvt_vgpu_err("scan wa ctx error\n"); 3101 return ret; 3102 } 3103 3104 return 0; 3105 } 3106 3107 /* generate dummy contexts by sending empty requests to HW, and let 3108 * the HW fill the engine contexts. These dummy contexts are used for 3109 * initialization only (updating the reg whitelist), so they are 3110 * referred to as init contexts here 3111 */ 3112 void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) 3113 { 3114 const unsigned long start = LRC_STATE_PN * PAGE_SIZE; 3115 struct intel_gvt *gvt = vgpu->gvt; 3116 struct intel_engine_cs *engine; 3117 enum intel_engine_id id; 3118 3119 if (gvt->is_reg_whitelist_updated) 3120 return; 3121 3122 /* scan init ctx to update cmd accessible list */ 3123 for_each_engine(engine, gvt->gt, id) { 3124 struct parser_exec_state s; 3125 void *vaddr; 3126 int ret; 3127 3128 if (!engine->default_state) 3129 continue; 3130 3131 vaddr = shmem_pin_map(engine->default_state); 3132 if (!vaddr) { 3133 gvt_err("failed to map %s->default state\n", 3134 engine->name); 3135 return; 3136 } 3137 3138 s.buf_type = RING_BUFFER_CTX; 3139 s.buf_addr_type = GTT_BUFFER; 3140 s.vgpu = vgpu; 3141 s.engine = engine; 3142 s.ring_start = 0; 3143 s.ring_size = engine->context_size - start; 3144 s.ring_head = 0; 3145 s.ring_tail = s.ring_size; 3146 s.rb_va = vaddr + start; 3147 s.workload = NULL; 3148 s.is_ctx_wa = false; 3149 s.is_init_ctx = true; 3150 3151 /* skipping the first RING_CTX_SIZE(0x50) dwords */ 3152 ret = ip_gma_set(&s, RING_CTX_SIZE); 3153 if (ret == 0) { 3154 ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); 3155 if (ret) 3156 gvt_err("Scan init ctx error\n"); 3157 } 3158 3159 shmem_unpin_map(engine->default_state, vaddr); 3160 if (ret) 3161 return; 3162 } 3163 3164 gvt->is_reg_whitelist_updated = true; 3165 } 3166 3167 int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) 3168 { 3169 struct intel_vgpu *vgpu = workload->vgpu; 3170 unsigned long gma_head, gma_tail, gma_start, ctx_size; 3171 struct parser_exec_state s; 3172 int ring_id = workload->engine->id; 3173 struct intel_context *ce = vgpu->submission.shadow[ring_id]; 3174 int ret; 3175 3176 GEM_BUG_ON(atomic_read(&ce->pin_count) < 0); 3177 3178 ctx_size = workload->engine->context_size - PAGE_SIZE; 3179 3180 /* Only the ring context is loaded to HW for an inhibit context, so 3181 * there is no need to scan the engine context 3182 */ 3183 if (is_inhibit_context(ce)) 3184 return 0; 3185 3186 gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE; 3187 gma_head = 0; 3188 gma_tail = ctx_size; 3189 3190 s.buf_type = RING_BUFFER_CTX; 3191 s.buf_addr_type = GTT_BUFFER; 3192 s.vgpu = workload->vgpu; 3193 s.engine = workload->engine; 3194 s.ring_start = gma_start; 3195 s.ring_size = ctx_size; 3196 s.ring_head = gma_start + gma_head; 3197
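/*
 * Note on the scan window being set up here: gma_head is 0 and gma_tail
 * covers the whole ctx_size, so command_scan() makes a single linear
 * pass over the engine context and, unlike a real ring buffer, never
 * wraps.
 */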
s.ring_tail = gma_start + gma_tail; 3198 s.rb_va = ce->lrc_reg_state; 3199 s.workload = workload; 3200 s.is_ctx_wa = false; 3201 s.is_init_ctx = false; 3202 3203 /* don't scan the first RING_CTX_SIZE(0x50) dwords, as it's ring 3204 * context 3205 */ 3206 ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); 3207 if (ret) 3208 goto out; 3209 3210 ret = command_scan(&s, gma_head, gma_tail, 3211 gma_start, ctx_size); 3212 out: 3213 if (ret) 3214 gvt_vgpu_err("scan shadow ctx error\n"); 3215 3216 return ret; 3217 } 3218 3219 static int init_cmd_table(struct intel_gvt *gvt) 3220 { 3221 unsigned int gen_type = intel_gvt_get_device_type(gvt); 3222 int i; 3223 3224 for (i = 0; i < ARRAY_SIZE(cmd_info); i++) { 3225 struct cmd_entry *e; 3226 3227 if (!(cmd_info[i].devices & gen_type)) 3228 continue; 3229 3230 e = kzalloc_obj(*e); 3231 if (!e) 3232 return -ENOMEM; 3233 3234 e->info = &cmd_info[i]; 3235 if (cmd_info[i].opcode == OP_MI_NOOP) 3236 mi_noop_index = i; 3237 3238 INIT_HLIST_NODE(&e->hlist); 3239 add_cmd_entry(gvt, e); 3240 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n", 3241 e->info->name, e->info->opcode, e->info->flag, 3242 e->info->devices, e->info->rings); 3243 } 3244 3245 return 0; 3246 } 3247 3248 static void clean_cmd_table(struct intel_gvt *gvt) 3249 { 3250 struct hlist_node *tmp; 3251 struct cmd_entry *e; 3252 int i; 3253 3254 hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist) 3255 kfree(e); 3256 3257 hash_init(gvt->cmd_table); 3258 } 3259 3260 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt) 3261 { 3262 clean_cmd_table(gvt); 3263 } 3264 3265 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt) 3266 { 3267 int ret; 3268 3269 ret = init_cmd_table(gvt); 3270 if (ret) { 3271 intel_gvt_clean_cmd_parser(gvt); 3272 return ret; 3273 } 3274 return 0; 3275 } 3276
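/*
 * Usage sketch (hypothetical call sites, shown for illustration only):
 * the expected lifecycle of the entry points above, assuming a struct
 * intel_gvt *gvt and a queued struct intel_vgpu_workload *workload.
 *
 *	once, at device init:
 *		ret = intel_gvt_init_cmd_parser(gvt);
 *
 *	per workload, before submission:
 *		ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 *		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 *
 *	once, at device teardown:
 *		intel_gvt_clean_cmd_parser(gvt);
 *
 * A non-zero return from either scan call means the guest commands
 * failed validation, and the workload should not be submitted to HW.
 */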