/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "r600.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_nomm;

struct r600_cs_track {
	/* configuration we mirror so that we use same code btw kms/ums */
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	/* value we track */
	u32 sq_config;
	u32 log_nsamples;
	u32 nsamples;
	u32 cb_color_base_last[8];
	struct radeon_bo *cb_color_bo[8];
	u64 cb_color_bo_mc[8];
	u64 cb_color_bo_offset[8];
	struct radeon_bo *cb_color_frag_bo[8];
	u64 cb_color_frag_offset[8];
	struct radeon_bo *cb_color_tile_bo[8];
	u64 cb_color_tile_offset[8];
	u32 cb_color_mask[8];
	u32 cb_color_info[8];
	u32 cb_color_view[8];
	u32 cb_color_size_idx[8]; /* unused */
	u32 cb_target_mask;
	u32 cb_shader_mask; /* unused */
	bool is_resolve;
	u32 cb_color_size[8];
	u32 vgt_strmout_en;
	u32 vgt_strmout_buffer_en;
	struct radeon_bo *vgt_strmout_bo[4];
	u64 vgt_strmout_bo_mc[4]; /* unused */
	u32 vgt_strmout_bo_offset[4];
	u32 vgt_strmout_size[4];
	u32 db_depth_control;
	u32 db_depth_info;
	u32 db_depth_size_idx;
	u32 db_depth_view;
	u32 db_depth_size;
	u32 db_offset;
	struct radeon_bo *db_bo;
	u64 db_bo_mc;
	bool sx_misc_kill_all_prims;
	bool cb_dirty;
	bool db_dirty;
	bool streamout_dirty;
	struct radeon_bo *htile_bo;
	u64 htile_offset;
	u32 htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};

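/*
 * Quick reference for the table below (derived from struct gpu_formats):
 * each entry is { blockwidth, blockheight, blocksize in bytes, valid_color,
 * min_family }, so e.g. the FMT_32_BIT() entries describe 1x1 blocks of
 * 4 bytes that are renderable as color targets on R600 and newer.
 */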
static const struct gpu_formats color_formats_table[] = {
	/* 8-bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return DIV_ROUND_UP(w, bw);
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return DIV_ROUND_UP(h, bh);
}

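/*
 * Worked example for the helpers above, using values straight from
 * color_formats_table: V_038004_FMT_BC1 is a 4x4 block of 8 bytes, so a
 * 130x66 BC1 texture needs DIV_ROUND_UP(130, 4) = 33 blocks in x and
 * DIV_ROUND_UP(66, 4) = 17 blocks in y, i.e. 33 * 17 * 8 = 4488 bytes
 * per slice before any tiling alignment is applied.
 */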
struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
					 u32 *pitch_align,
					 u32 *height_align,
					 u32 *depth_align,
					 u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

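/*
 * Worked example for r600_get_array_mode_alignment() with illustrative
 * numbers (not taken from any real ASIC configuration):
 * ARRAY_2D_TILED_THIN1 with group_size = 256, nbanks = 4, npipes = 2,
 * blocksize = 4 and nsamples = 1 gives tile_bytes = 8 * 8 * 4 = 256 and
 * macro_tile_bytes = 4 * 2 * 256 = 2048, hence
 * pitch_align = max(4 * 8, (256 * 4) / (4 * 1 * 8)) = 32 pixels,
 * height_align = 2 * 8 = 16 pixels and
 * base_align = max(2048, 32 * 4 * 16 * 1) = 2048 bytes.
 */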
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;
	/* When resolve is used, the second colorbuffer has always 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			      __func__, __LINE__, format, i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			      G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			      track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			      G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			      track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			      __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			      __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn_once(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			      base_offset, base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn_once(p->dev,
				      "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				      __func__, i, array_mode,
				      track->cb_color_bo_offset[i], tmp,
				      radeon_bo_size(track->cb_color_bo[i]),
				      pitch, height, r600_fmt_get_nblocksx(format, pitch),
				      r600_fmt_get_nblocksy(format, height),
				      r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large "
					      "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					      __func__, tile_max, bytes,
					      track->cb_color_frag_offset[i],
					      radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		fallthrough;
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits,
		 * so (128*128) / (8*8) / 2 = 128 bytes per block. */
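		/*
		 * Worked example: block_max = 31 therefore covers
		 * (31 + 1) * 128 = 4096 bytes of CMASK, i.e. 32 blocks
		 * of 128x128 pixels each.
		 */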
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large "
				      "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				      __func__, block_max, bytes,
				      track->cb_color_tile_offset[i],
				      radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn_once(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;

	if (track->db_bo == NULL) {
		dev_warn_once(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn_once(p->dev, "z/stencil with invalid format %d\n",
			      G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn_once(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				      track->db_depth_size, bpe, track->db_offset,
				      radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		if (height > 8192)
			height = 8192;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
						  &pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				      G_028010_ARRAY_MODE(track->db_depth_info),
				      track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
				      G_028010_ARRAY_MODE(track->db_depth_info),
				      track->db_depth_info);
			return -EINVAL;
		}

		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
				      __func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
				      __func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
				      base_offset, base_align, array_mode);
			return -EINVAL;
		}

		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn_once(p->dev,
				      "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
				      array_mode, track->db_depth_size, ntiles, nviews, bpe,
				      tmp + track->db_offset, radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned long size;
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				      __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				      __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}

		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
		} else {
			/* always assume 8x8 htile */
			/* align is htile align * 8, htile align vary according to
			 * number of pipe and tile width and nby
			 */
			switch (track->npipes) {
			case 8:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
				break;
			case 4:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 2:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 1:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
				break;
			default:
				dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n",
					      __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute number of htile */
		nbx = nbx >> 3;
		nby = nby >> 3;
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
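		/*
		 * Worked example: a 1024x1024 depth buffer on a 2-pipe part
		 * rounds both nbx and nby up to 1024 pixels, i.e. 128x128
		 * htiles, so the raw size is 128 * 128 * 4 = 65536 bytes,
		 * already a multiple of the 2 * 2048 byte pipe alignment.
		 */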
		size += track->htile_offset;

		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				      __func__, __LINE__, radeon_bo_size(track->htile_bo),
				      size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						     (u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
							      i, offset,
							      radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve)
			tmp |= 0xff;

		for (i = 0; i < 8; i++) {
			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);

			if (format != V_0280A0_COLOR_INVALID &&
			    (tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						      __func__, __LINE__, track->cb_target_mask,
						      track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}

	return 0;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * This is an R600-specific function for parsing VLINE packets.
 * Real work is done by the r600_cs_common_vline_parse() function.
 * Here we just set up the ASIC-specific register table and call
 * the common implementation function.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
					      AVIVO_D2MODE_VLINE_START_END};
	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
					   AVIVO_D2MODE_VLINE_STATUS};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

/**
 * r600_cs_common_vline_parse() - common vline parser
 * @p: parser structure holding parsing context.
 * @vline_start_end: table of vline_start_end registers
 * @vline_status: table of vline_status registers
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case. This function is common for all ASICs that
 * are R600 and newer. The parsing algorithm is the same, and only
 * differs in which registers are used.
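 *
 * Sketch of the expected dword layout (offsets inferred from the
 * parsing below; h_idx is the PACKET0 header):
 *   h_idx + 0:  PACKET0 header for VLINE_START_END
 *   h_idx + 1:  start/end scanline value
 *   h_idx + 2:  PACKET3 WAIT_REG_MEM header (6 data dwords follow)
 *   h_idx + 9:  PACKET3 NOP header
 *   h_idx + 10: crtc_id carried by the NOP's reloc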
 *
 * Caller is the ASIC-specific function which passes the parser
 * context and ASIC-specific register table.
 */
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n");
		return -EINVAL;
	}
	/* bit 8 is me (0) or pfp (1) */
	if (wait_reg_mem_info & 0x100) {
		dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
		dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
		dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = R600_CP_PACKET0_GET_REG(header);

	crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
	if (!crtc) {
		dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (reg == vline_start_end[0]) {
		header &= ~R600_CP_PACKET0_REG_MASK;
		header |= vline_start_end[crtc_id] >> 2;
		ib[h_idx] = header;
		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
	} else {
		dev_warn_once(p->dev, "unknown crtc reloc\n");
		return -EINVAL;
	}
	return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
				      idx, reg);
			return r;
		}
		break;
	default:
		pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

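/*
 * A PACKET0 writes pkt->count + 1 consecutive registers starting at
 * pkt->reg, which is why the loop below advances reg by 4 for every
 * dword it validates.
 */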
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r)
			return r;
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if register is safe. If register is not flagged as safe this function
 * will test it against a list of registers needing special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_bo_list *reloc;
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib.ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		/*tmp = radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else {
			track->db_depth_info = radeon_get_ib_value(p, idx);
		}
		track->db_dirty = true;
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		track->cb_dirty = true;
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	/* These registers were added late; there is userspace which does
	 * provide relocations for them but sets a 0 offset. In order to
	 * avoid breaking old userspace we detect this and set the address
	 * to point to the last CB_COLOR0_BASE. Note that if userspace
	 * doesn't set CB_COLOR0_BASE before this register we will report
	 * an error. Old userspace always sets CB_COLOR0_BASE before any
	 * of these.
	 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp]))
			track->cb_dirty = true;
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp]))
			track->cb_dirty = true;
		break;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp]))
			track->cb_dirty = true;
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
		track->cb_dirty = true;
		break;
	case DB_DEPTH_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->gpu_offset;
		track->db_dirty = true;
		break;
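	/*
	 * Note: the BASE registers above and below hold 256-byte aligned
	 * addresses, hence the "radeon_get_ib_value(p, idx) << 8" when
	 * recovering a byte offset and the "gpu_offset >> 8" when patching
	 * the IB with the relocated address.
	 */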
	case DB_HTILE_DATA_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}

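/*
 * Example: for a 100-texel dimension r600_mip_minify() yields 100, 64,
 * 32, 16, ... for levels 0, 1, 2, 3, ... since every level except the
 * base is rounded up to a power of two.
 */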
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples,
			      unsigned format, unsigned block_align, unsigned height_align,
			      unsigned base_align, unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0; i < nlevels; i++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 * @base_offset: base offset (used for error checking)
 * @mip_offset: mip offset (used for error checking)
 * @tiling_flags: tiling flags
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
1473 */ 1474 static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1475 struct radeon_bo *texture, 1476 struct radeon_bo *mipmap, 1477 u64 base_offset, 1478 u64 mip_offset, 1479 u32 tiling_flags) 1480 { 1481 struct r600_cs_track *track = p->track; 1482 u32 dim, nfaces, llevel, blevel, w0, h0, d0; 1483 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; 1484 u32 height_align, pitch, pitch_align, depth_align; 1485 u32 barray, larray; 1486 u64 base_align; 1487 struct array_mode_checker array_check; 1488 u32 format; 1489 bool is_array; 1490 1491 /* on legacy kernel we don't perform advanced check */ 1492 if (p->rdev == NULL) 1493 return 0; 1494 1495 /* convert to bytes */ 1496 base_offset <<= 8; 1497 mip_offset <<= 8; 1498 1499 word0 = radeon_get_ib_value(p, idx + 0); 1500 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1501 if (tiling_flags & RADEON_TILING_MACRO) 1502 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1503 else if (tiling_flags & RADEON_TILING_MICRO) 1504 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1505 } 1506 word1 = radeon_get_ib_value(p, idx + 1); 1507 word2 = radeon_get_ib_value(p, idx + 2) << 8; 1508 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1509 word4 = radeon_get_ib_value(p, idx + 4); 1510 word5 = radeon_get_ib_value(p, idx + 5); 1511 dim = G_038000_DIM(word0); 1512 w0 = G_038000_TEX_WIDTH(word0) + 1; 1513 pitch = (G_038000_PITCH(word0) + 1) * 8; 1514 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1515 d0 = G_038004_TEX_DEPTH(word1); 1516 format = G_038004_DATA_FORMAT(word1); 1517 blevel = G_038010_BASE_LEVEL(word4); 1518 llevel = G_038014_LAST_LEVEL(word5); 1519 /* pitch in texels */ 1520 array_check.array_mode = G_038000_TILE_MODE(word0); 1521 array_check.group_size = track->group_size; 1522 array_check.nbanks = track->nbanks; 1523 array_check.npipes = track->npipes; 1524 array_check.nsamples = 1; 1525 array_check.blocksize = r600_fmt_get_blocksize(format); 1526 nfaces = 1; 1527 is_array = false; 1528 switch (dim) { 1529 case V_038000_SQ_TEX_DIM_1D: 1530 case V_038000_SQ_TEX_DIM_2D: 1531 case V_038000_SQ_TEX_DIM_3D: 1532 break; 1533 case V_038000_SQ_TEX_DIM_CUBEMAP: 1534 if (p->family >= CHIP_RV770) 1535 nfaces = 8; 1536 else 1537 nfaces = 6; 1538 break; 1539 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1540 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1541 is_array = true; 1542 break; 1543 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1544 is_array = true; 1545 fallthrough; 1546 case V_038000_SQ_TEX_DIM_2D_MSAA: 1547 array_check.nsamples = 1 << llevel; 1548 llevel = 0; 1549 break; 1550 default: 1551 dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1552 return -EINVAL; 1553 } 1554 if (!r600_fmt_is_valid_texture(format, p->family)) { 1555 dev_warn_once(p->dev, "%s:%d texture invalid format %d\n", 1556 __func__, __LINE__, format); 1557 return -EINVAL; 1558 } 1559 1560 if (r600_get_array_mode_alignment(&array_check, 1561 &pitch_align, &height_align, &depth_align, &base_align)) { 1562 dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n", 1563 __func__, __LINE__, G_038000_TILE_MODE(word0)); 1564 return -EINVAL; 1565 } 1566 1567 /* XXX check height as well... 
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			      __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			      __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			      __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn_once(p->dev, "texture blevel %d > llevel %d\n",
			      blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			      w0, h0, pitch_align, height_align,
			      array_check.array_mode, format, word2,
			      l0_size, radeon_bo_size(texture));
		dev_warn_once(p->dev, "alignments %d %d %d %lld\n",
			      pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			      w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			dev_warn_once(p->dev, "bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			dev_warn_once(p->dev, "bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			dev_warn_once(p->dev, "bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else if (idx_value & 0x100) {
			dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			dev_warn_once(p->dev, "bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			dev_warn_once(p->dev, "CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			dev_warn_once(p->dev, "bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			dev_warn_once(p->dev, "CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
					      tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			dev_warn_once(p->dev, "CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
					      tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			dev_warn_once(p->dev, "bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
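
	/*
	 * The SET_*_REG cases below all follow one validation pattern: the
	 * packet writes pkt->count consecutive dwords starting at start_reg,
	 * and both the first and the last register touched must fall inside
	 * that block's [OFFSET, END) window. A minimal sketch of the shared
	 * check (hypothetical helper, compiled out):
	 */
#if 0
	static bool r600_reg_range_ok(u32 start_reg, unsigned count,
				      u32 block_offset, u32 block_end)
	{
		u32 end_reg = start_reg + 4 * count - 4; /* last reg written */

		return start_reg >= block_offset &&
		       start_reg < block_end &&
		       end_reg < block_end;
	}
#endif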
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
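
	/*
	 * SET_RESOURCE carries one or more 7-dword resource records (hence
	 * the "pkt->count % 7" check below). Dword 6 of each record selects
	 * the resource type: textures take two relocs (base and mipmap
	 * chain) and are handed to r600_check_texture_resource(), while
	 * vertex buffers take a single reloc and have their size clamped to
	 * the backing BO.
	 */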
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			dev_warn_once(p->dev, "bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			dev_warn_once(p->dev, "bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					dev_warn_once(p->dev, "bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					dev_warn_once(p->dev, "bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					dev_warn_once(p->dev, "bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						      size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->gpu_offset + offset;
				/* resource records are 7 dwords on r6xx, so
				 * patch with the same i*7 stride used above
				 */
				ib[idx+1+(i*7)+0] = offset64;
				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				dev_warn_once(p->dev, "bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				dev_warn_once(p->dev, "bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			dev_warn_once(p->dev, "bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			dev_warn_once(p->dev, "bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
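
	/*
	 * STRMOUT_BASE_UPDATE introduces no new state: it must name the same
	 * streamout buffer that was already bound through the VGT_STRMOUT_*
	 * registers, so the handler below cross-checks both the BO and the
	 * offset against what the register tracker recorded.
	 */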
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				dev_warn_once(p->dev,
					      "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					      offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev,
					      "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					      offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev,
					      "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					      offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev,
					      "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					      offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			dev_warn_once(p->dev, "bad MEM_WRITE (address not qword aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				      offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
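
	/*
	 * COPY_DW moves a single dword. Bit 0 of the first body dword selects
	 * a memory source (reloc required) over a register source, and bit 1
	 * does the same for the destination; register operands must pass the
	 * r600_is_safe_reg() filter. A compiled-out sketch of how that
	 * control dword decomposes (hypothetical helper, not a real API):
	 */
#if 0
	static u32 r600_copy_dw_control(bool src_is_mem, bool dst_is_mem)
	{
		return (src_is_mem ? 0x1 : 0) | (dst_is_mem ? 0x2 : 0);
	}
#endif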
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					      offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					      offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
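
/**
 * r600_cs_parse() - parse the GFX IB from the CS ioctl
 * @p: parser structure holding parsing context.
 *
 * Walks the command stream packet by packet, dispatching type 0 packets to
 * the register checker and type 3 packets to r600_packet3_check(), and
 * frees the state tracker on every exit path. Returns 0 for success and an
 * error on failure. (R6xx-R7xx)
 */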
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			dev_warn_once(p->dev, "Unknown packet type %d!\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunk_ib->length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

/*
 * DMA
 */
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p: parser structure holding parsing context.
 * @cs_reloc: reloc information
 *
 * Returns the next reloc from the relocation chunk and advances the DMA
 * reloc index; the caller computes the GPU offset from it.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_bo_list **cs_reloc)
{
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs == NULL) {
		dev_warn_once(p->dev, "No relocation chunk!\n");
		return -EINVAL;
	}
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d!\n",
			      idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[idx];
	p->dma_reloc_idx++;
	return 0;
}

#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)

/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p: parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl, updates the GPU addresses based on
 * the reloc information, and checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_bo_list *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			dev_warn_once(p->dev, "Cannot parse packet at %d after CS end %d!\n",
				      p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
					      dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
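
		/*
		 * DMA_PACKET_COPY below comes in two encodings: a 7-dword
		 * tiled form, where bit 31 of dword 2 selects which side is
		 * the tiled one, and a shorter linear form whose layout
		 * differs between r6xx (4 dwords, both high address bytes
		 * packed into dword 3) and rv7xx (5 dwords).
		 */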
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
					      src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn_once(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
					      dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				dev_warn_once(p->dev, "Constant Fill is 7xx only!\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					      dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			dev_warn_once(p->dev, "Unknown packet type %d at %d!\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunk_ib->length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
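
/*
 * Illustrative sketch (compiled out): how the GET_DMA_* accessors above
 * take a r6xx DMA packet header apart. The header value is made up for
 * the example.
 */
#if 0
static void r600_dma_header_example(void)
{
	u32 header = 0x00800010;		/* made-up header value */
	u32 cmd = GET_DMA_CMD(header);		/* bits 31:28 -> 0x0 */
	u32 count = GET_DMA_COUNT(header);	/* bits 15:0  -> 0x10 dwords */
	u32 tiled = GET_DMA_T(header);		/* bit 23     -> 1 (tiled) */

	pr_info("cmd %u count %u tiled %u\n", cmd, count, tiled);
}
#endif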