/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */
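/*
 * Illustrative example (editorial sketch, not from the hardware docs; the
 * register offset shown is hypothetical): a minimal batch the parser would
 * accept could program one whitelisted register via MI_LOAD_REGISTER_IMM and
 * then terminate:
 *
 *   0x11000001   MI_LOAD_REGISTER_IMM(1) header (DWord-count field = 1)
 *   0x0000xxxx   offset of a register present in the ring's whitelist
 *   0x00000000   value to write
 *   0x05000000   MI_BATCH_BUFFER_END
 *
 * The parser looks up each command header in the ring's tables, applies the
 * register and bitmask checks described above, and advances by the decoded
 * command length until it reaches MI_BATCH_BUFFER_END.
 */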

#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)                            \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
                .cmd = { (op), (opm) },                         \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER

/*            Command                           Mask   Fixed Len   Action
              ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
        CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
        CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
        CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
        CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
        CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
        CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
        CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
              .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
        CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_LOAD_REGISTER_MEM(1),          SMI,   !F,  0xFF,   W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MI_BATCH_BUFFER_START requires some special handling. It's not
         * really a 'skip' action but it doesn't seem like it's worth adding
         * a new action. See i915_parse_cmds().
         */
        CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};

static const struct drm_i915_cmd_descriptor render_cmds[] = {
        CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
        CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
        CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
        CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 1,
                        .mask = MI_REPORT_PERF_COUNT_GGTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
        CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
        CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
              .bits = {{
                        .offset = 2,
                        .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
                        .expected = 0,
              }},                                                      ),
        CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 1,
                        .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_STORE_DATA_INDEX),
                        .expected = 0,
                        .condition_offset = 1,
                        .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
              }},                                                      ),
};

static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
        CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
        CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor video_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MFX_WAIT doesn't fit the way we handle length for most commands.
         * It has a length field but it uses a non-standard length bias.
         * It is always 1 dword though, so just treat it as fixed length.
         */
        CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
};

static const struct drm_i915_cmd_descriptor blt_cmds[] = {
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
        CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M

static const struct drm_i915_cmd_table gen7_render_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
        { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
};
static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
        { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr. If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
        u32 addr;
        u32 mask;
        u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...)                                     \
        { .addr = address, __VA_ARGS__ }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(addr)                                             \
        REG32(addr), REG32(addr + sizeof(u32))

static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
        REG64(GPGPU_THREADS_DISPATCHED),
        REG64(HS_INVOCATION_COUNT),
        REG64(DS_INVOCATION_COUNT),
        REG64(IA_VERTICES_COUNT),
        REG64(IA_PRIMITIVES_COUNT),
        REG64(VS_INVOCATION_COUNT),
        REG64(GS_INVOCATION_COUNT),
        REG64(GS_PRIMITIVES_COUNT),
        REG64(CL_INVOCATION_COUNT),
        REG64(CL_PRIMITIVES_COUNT),
        REG64(PS_INVOCATION_COUNT),
        REG64(PS_DEPTH_COUNT),
        REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
        REG64(MI_PREDICATE_SRC0),
        REG64(MI_PREDICATE_SRC1),
        REG32(GEN7_3DPRIM_END_OFFSET),
        REG32(GEN7_3DPRIM_START_VERTEX),
        REG32(GEN7_3DPRIM_VERTEX_COUNT),
        REG32(GEN7_3DPRIM_INSTANCE_COUNT),
        REG32(GEN7_3DPRIM_START_INSTANCE),
        REG32(GEN7_3DPRIM_BASE_VERTEX),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
        REG32(GEN7_SO_WRITE_OFFSET(0)),
        REG32(GEN7_SO_WRITE_OFFSET(1)),
        REG32(GEN7_SO_WRITE_OFFSET(2)),
        REG32(GEN7_SO_WRITE_OFFSET(3)),
        REG32(GEN7_L3SQCREG1),
        REG32(GEN7_L3CNTLREG2),
        REG32(GEN7_L3CNTLREG3),
        REG32(HSW_SCRATCH1,
              .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
              .value = 0),
        REG32(HSW_ROW_CHICKEN3,
              .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
                        HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
              .value = 0),
};

static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
        REG32(BCS_SWCTRL),
};

static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};

static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
};

#undef REG64
#undef REG32

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT)
                        return 0xFFFF;
                else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
        u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT) {
                        if (op == 6)
                                return 0xFFFF;
                        else
                                return 0xFFF;
                } else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_BC_CLIENT)
                return 0xFF;

        DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static bool validate_cmds_sorted(struct intel_engine_cs *ring,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
{
        int i;
        bool ret = true;

        if (!cmd_tables || cmd_table_count == 0)
                return true;

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];
                u32 previous = 0;
                int j;

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        u32 curr = desc->cmd.value & desc->cmd.mask;

                        if (curr < previous) {
                                DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
                                          ring->id, i, j, curr, previous);
                                ret = false;
                        }

                        previous = curr;
                }
        }

        return ret;
}

static bool check_sorted(int ring_id,
                         const struct drm_i915_reg_descriptor *reg_table,
                         int reg_count)
{
        int i;
        u32 previous = 0;
        bool ret = true;

        for (i = 0; i < reg_count; i++) {
                u32 curr = reg_table[i].addr;

                if (curr < previous) {
                        DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
                                  ring_id, i, curr, previous);
                        ret = false;
                }

                previous = curr;
        }

        return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
        return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
                check_sorted(ring->id, ring->master_reg_table,
                             ring->master_reg_count);
}

struct cmd_node {
        const struct drm_i915_cmd_descriptor *desc;
        struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK

static int init_hash_table(struct intel_engine_cs *ring,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
{
        int i, j;

        hash_init(ring->cmd_hash);

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        struct cmd_node *desc_node =
                                kmalloc(sizeof(*desc_node), GFP_KERNEL);

                        if (!desc_node)
                                return -ENOMEM;

                        desc_node->desc = desc;
                        hash_add(ring->cmd_hash, &desc_node->node,
                                 desc->cmd.value & CMD_HASH_MASK);
                }
        }

        return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
{
        struct hlist_node *tmp;
        struct cmd_node *desc_node;
        int i;

        hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
                hash_del(&desc_node->node);
                kfree(desc_node);
        }
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
        int ret;

        if (!IS_GEN7(ring->dev))
                return 0;

        switch (ring->id) {
        case RCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
                } else {
                        cmd_tables = gen7_render_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }

                ring->reg_table = gen7_render_regs;
                ring->reg_count = ARRAY_SIZE(gen7_render_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
                break;
        case VCS:
                cmd_tables = gen7_video_cmds;
                cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
                        cmd_tables = gen7_blt_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }

                ring->reg_table = gen7_blt_regs;
                ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
                break;
        case VECS:
                cmd_tables = hsw_vebox_cmds;
                cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
                /* VECS can use the same length_mask function as VCS */
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        default:
                DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
                          ring->id);
                BUG();
        }

        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));

        WARN_ON(!hash_empty(ring->cmd_hash));

        ret = init_hash_table(ring, cmd_tables, cmd_table_count);
        if (ret) {
                DRM_ERROR("CMD: cmd_parser_init failed!\n");
                fini_hash_table(ring);
                return ret;
        }

        ring->needs_cmd_parser = true;

        return 0;
}

/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return;

        fini_hash_table(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
                  u32 cmd_header)
{
        struct cmd_node *desc_node;

        hash_for_each_possible(ring->cmd_hash, desc_node, node,
                               cmd_header & CMD_HASH_MASK) {
                const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
                u32 masked_cmd = desc->cmd.mask & cmd_header;
                u32 masked_value = desc->cmd.value & desc->cmd.mask;

                if (masked_cmd == masked_value)
                        return desc;
        }

        return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
{
        const struct drm_i915_cmd_descriptor *desc;
        u32 mask;

        desc = find_cmd_in_table(ring, cmd_header);
        if (desc)
                return desc;

        mask = ring->get_cmd_length_mask(cmd_header);
        if (!mask)
                return NULL;

        BUG_ON(!default_desc);
        default_desc->flags = CMD_DESC_SKIP;
        default_desc->length.mask = mask;

        return default_desc;
}

static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
         int count, u32 addr)
{
        if (table) {
                int i;

                for (i = 0; i < count; i++) {
                        if (table[i].addr == addr)
                                return &table[i];
                }
        }

        return NULL;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj,
                       unsigned start, unsigned len)
{
        int i;
        void *addr = NULL;
        struct sg_page_iter sg_iter;
        int first_page = start >> PAGE_SHIFT;
        int last_page = (len + start + 4095) >> PAGE_SHIFT;
        int npages = last_page - first_page;
        struct page **pages;

        pages = drm_malloc_ab(npages, sizeof(*pages));
        if (pages == NULL) {
                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
                goto finish;
        }

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
                pages[i++] = sg_page_iter_page(&sg_iter);
                if (i == npages)
                        break;
        }

        addr = vmap(pages, i, 0, PAGE_KERNEL);
        if (addr == NULL) {
                DRM_DEBUG_DRIVER("Failed to vmap pages\n");
                goto finish;
        }

finish:
        if (pages)
                drm_free_large(pages);
        return (u32*)addr;
}

/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                       struct drm_i915_gem_object *src_obj,
                       u32 batch_start_offset,
                       u32 batch_len)
{
        int needs_clflush = 0;
        void *src_base, *src;
        void *dst = NULL;
        int ret;

        if (batch_len > dest_obj->base.size ||
            batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);

        if (WARN_ON(dest_obj->pages_pin_count == 0))
                return ERR_PTR(-ENODEV);

        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
                return ERR_PTR(ret);
        }

        src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
        if (!src_base) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
                ret = -ENOMEM;
                goto unpin_src;
        }

        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
                goto unmap_src;
        }

        dst = vmap_batch(dest_obj, 0, batch_len);
        if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
                ret = -ENOMEM;
                goto unmap_src;
        }

        src = src_base + offset_in_page(batch_start_offset);
        if (needs_clflush)
                drm_clflush_virt_range(src, batch_len);

        memcpy(dst, src, batch_len);

unmap_src:
        vunmap(src_base);
unpin_src:
        i915_gem_object_unpin_pages(src_obj);

        return ret ? ERR_PTR(ret) : dst;
}

/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return false;

        if (!USES_PPGTT(ring->dev))
                return false;

        return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_engine_cs *ring,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd, u32 length,
                      const bool is_master,
                      bool *oacontrol_set)
{
        if (desc->flags & CMD_DESC_REJECT) {
                DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
                return false;
        }

        if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
                DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
                                 *cmd);
                return false;
        }

        if (desc->flags & CMD_DESC_REGISTER) {
                /*
                 * Get the distance between individual register offset
                 * fields if the command can perform more than one
                 * access at a time.
                 */
                const u32 step = desc->reg.step ? desc->reg.step : length;
                u32 offset;

                for (offset = desc->reg.offset; offset < length;
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
                                find_reg(ring->reg_table, ring->reg_count,
                                         reg_addr);

                        if (!reg && is_master)
                                reg = find_reg(ring->master_reg_table,
                                               ring->master_reg_count,
                                               reg_addr);

                        if (!reg) {
                                DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
                                                 reg_addr, *cmd, ring->id);
                                return false;
                        }

                        /*
                         * OACONTROL requires some special handling for
                         * writes. We want to make sure that any batch which
                         * enables OA also disables it before the end of the
                         * batch. The goal is to prevent one process from
                         * snooping on the perf data from another process. To do
                         * that, we need to check the value that will be written
                         * to the register. Hence, limit OACONTROL writes to
                         * only MI_LOAD_REGISTER_IMM commands.
                         */
                        if (reg_addr == OACONTROL) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                        *oacontrol_set = (cmd[offset + 1] != 0);
                        }

                        /*
                         * Check the value written to the register against the
                         * allowed mask/value pair given in the whitelist entry.
                         */
                        if (reg->mask) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
                                    (offset + 2 > length ||
                                     (cmd[offset + 1] & reg->mask) != reg->value)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }
                        }
                }
        }

        if (desc->flags & CMD_DESC_BITMASK) {
                int i;

                for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
                        u32 dword;

                        if (desc->bits[i].mask == 0)
                                break;

                        if (desc->bits[i].condition_mask != 0) {
                                u32 offset =
                                        desc->bits[i].condition_offset;
                                u32 condition = cmd[offset] &
                                        desc->bits[i].condition_mask;

                                if (condition == 0)
                                        continue;
                        }

                        dword = cmd[desc->bits[i].offset] &
                                desc->bits[i].mask;

                        if (dword != desc->bits[i].expected) {
                                DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
                                                 *cmd,
                                                 desc->bits[i].mask,
                                                 desc->bits[i].expected,
                                                 dword, ring->id);
                                return false;
                        }
                }
        }

        return true;
}

/*
 * The parser assumes that a command's DWord-count field encodes the total
 * command length in DWords minus this bias; see the length calculation in
 * i915_parse_cmds().
 */
#define LENGTH_BIAS 2

/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @shadow_batch_obj: copy of the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @batch_len: length of the commands in batch_obj
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
                    u32 batch_len,
                    bool is_master)
{
        u32 *cmd, *batch_base, *batch_end;
        struct drm_i915_cmd_descriptor default_desc = { 0 };
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
        int ret = 0;

        batch_base = copy_batch(shadow_batch_obj, batch_obj,
                                batch_start_offset, batch_len);
        if (IS_ERR(batch_base)) {
                DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
                return PTR_ERR(batch_base);
        }

        /*
         * We use the batch length as size because the shadow object is as
         * large or larger and copy_batch() will write MI_NOPs to the extra
         * space. Parsing should be faster in some cases this way.
         */
        batch_end = batch_base + (batch_len / sizeof(*batch_end));

        cmd = batch_base;
        while (cmd < batch_end) {
                const struct drm_i915_cmd_descriptor *desc;
                u32 length;

                if (*cmd == MI_BATCH_BUFFER_END)
                        break;

                desc = find_cmd(ring, *cmd, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
                        ret = -EINVAL;
                        break;
                }

                /*
                 * If the batch buffer contains a chained batch, return an
                 * error that tells the caller to abort and dispatch the
                 * workload as a non-secure batch.
                 */
                if (desc->cmd.value == MI_BATCH_BUFFER_START) {
                        ret = -EACCES;
                        break;
                }

                if (desc->flags & CMD_DESC_FIXED)
                        length = desc->length.fixed;
                else
                        length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

                if ((batch_end - cmd) < length) {
                        DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
                                         *cmd,
                                         length,
                                         batch_end - cmd);
                        ret = -EINVAL;
                        break;
                }

                if (!check_cmd(ring, desc, cmd, length, is_master,
                               &oacontrol_set)) {
                        ret = -EINVAL;
                        break;
                }

                cmd += length;
        }

        if (oacontrol_set) {
                DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
                ret = -EINVAL;
        }

        if (cmd >= batch_end) {
                DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
                ret = -EINVAL;
        }

        vunmap(batch_base);

        return ret;
}

/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
        /*
         * Command parser version history
         *
         * 1. Initial version. Checks batches and reports violations, but leaves
         *    hardware parsing enabled (so does not allow new use cases).
         * 2. Allow access to the MI_PREDICATE_SRC0 and
         *    MI_PREDICATE_SRC1 registers.
         * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
         */
        return 3;
}