/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_AMDGPU_GEM_CREATE		0x00
#define DRM_AMDGPU_GEM_MMAP		0x01
#define DRM_AMDGPU_CTX			0x02
#define DRM_AMDGPU_BO_LIST		0x03
#define DRM_AMDGPU_CS			0x04
#define DRM_AMDGPU_INFO			0x05
#define DRM_AMDGPU_GEM_METADATA		0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE	0x07
#define DRM_AMDGPU_GEM_VA		0x08
#define DRM_AMDGPU_WAIT_CS		0x09
#define DRM_AMDGPU_GEM_OP		0x10
#define DRM_AMDGPU_GEM_USERPTR		0x11

#define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
#define DRM_IOCTL_AMDGPU_CTX		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
#define DRM_IOCTL_AMDGPU_BO_LIST	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
#define DRM_IOCTL_AMDGPU_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
#define DRM_IOCTL_AMDGPU_INFO		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
#define DRM_IOCTL_AMDGPU_GEM_METADATA	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
#define DRM_IOCTL_AMDGPU_GEM_VA		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
#define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)

#define AMDGPU_GEM_DOMAIN_CPU		0x1
#define AMDGPU_GEM_DOMAIN_GTT		0x2
#define AMDGPU_GEM_DOMAIN_VRAM		0x4
#define AMDGPU_GEM_DOMAIN_GDS		0x8
#define AMDGPU_GEM_DOMAIN_GWS		0x10
#define AMDGPU_GEM_DOMAIN_OA		0x20

/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
/* Flag that CPU access will not work; this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
/* Flag to create a shadow BO (in GTT) while allocating the VRAM BO */
#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)

struct drm_amdgpu_gem_create_in {
	/** the requested memory size */
	__u64 bo_size;
	/** physical start_addr alignment in bytes for some HW requirements */
	__u64 alignment;
	/** the requested memory domains */
	__u64 domains;
	/** allocation flags */
	__u64 domain_flags;
};

struct drm_amdgpu_gem_create_out {
	/** returned GEM object handle */
	__u32 handle;
	__u32 _pad;
};

union drm_amdgpu_gem_create {
	struct drm_amdgpu_gem_create_in in;
	struct drm_amdgpu_gem_create_out out;
};
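/*
 * Illustrative sketch (not part of the UAPI, kept out of compilation):
 * allocating a buffer object with DRM_IOCTL_AMDGPU_GEM_CREATE. The plain
 * ioctl(2) call on an already-open render node fd is an assumption; real
 * user space normally goes through libdrm (drmIoctl()/amdgpu_bo_alloc()).
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int example_alloc_gtt_bo(int fd, __u32 *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = 4096;				/* one page */
	args.in.alignment = 4096;			/* page aligned */
	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;	/* GPU-mapped system memory */
	args.in.domain_flags = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) < 0)
		return -1;

	*handle = args.out.handle;			/* GEM handle for later ioctls */
	return 0;
}
#endif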
/** Opcode to create new residency list. */
#define AMDGPU_BO_LIST_OP_CREATE	0
/** Opcode to destroy previously created residency list */
#define AMDGPU_BO_LIST_OP_DESTROY	1
/** Opcode to update resource information in the list */
#define AMDGPU_BO_LIST_OP_UPDATE	2

struct drm_amdgpu_bo_list_in {
	/** Type of operation */
	__u32 operation;
	/** Handle of list or 0 if we want to create one */
	__u32 list_handle;
	/** Number of BOs in list */
	__u32 bo_number;
	/** Size of each element describing BO */
	__u32 bo_info_size;
	/** Pointer to array describing BOs */
	__u64 bo_info_ptr;
};

struct drm_amdgpu_bo_list_entry {
	/** Handle of BO */
	__u32 bo_handle;
	/** New (if specified) BO priority to be used during migration */
	__u32 bo_priority;
};

struct drm_amdgpu_bo_list_out {
	/** Handle of resource list */
	__u32 list_handle;
	__u32 _pad;
};

union drm_amdgpu_bo_list {
	struct drm_amdgpu_bo_list_in in;
	struct drm_amdgpu_bo_list_out out;
};

/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX		1
#define AMDGPU_CTX_OP_FREE_CTX		2
#define AMDGPU_CTX_OP_QUERY_STATE	3

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET		0
/* this context caused it */
#define AMDGPU_CTX_GUILTY_RESET		1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET	2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET	3

struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32 op;
	/** For future use, no flags defined so far */
	__u32 flags;
	__u32 ctx_id;
	__u32 _pad;
};

union drm_amdgpu_ctx_out {
	struct {
		__u32 ctx_id;
		__u32 _pad;
	} alloc;

	struct {
		/** For future use, no flags defined so far */
		__u64 flags;
		/** Number of resets caused by this context so far. */
		__u32 hangs;
		/** Reset status since the last call of the ioctl. */
		__u32 reset_status;
	} state;
};

union drm_amdgpu_ctx {
	struct drm_amdgpu_ctx_in in;
	union drm_amdgpu_ctx_out out;
};
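/*
 * Illustrative sketch (not part of the UAPI): allocating a context, querying
 * its reset status and freeing it again via DRM_IOCTL_AMDGPU_CTX. Error
 * handling is trimmed; the plain ioctl(2) calls are an assumption (libdrm
 * wraps this in amdgpu_cs_ctx_create()/amdgpu_cs_ctx_free()).
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int example_ctx_roundtrip(int fd)
{
	union drm_amdgpu_ctx args;
	__u32 ctx_id;

	/* allocate a context */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args) < 0)
		return -1;
	ctx_id = args.out.alloc.ctx_id;

	/* query reset status since the last query */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = ctx_id;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args) == 0 &&
	    args.out.state.reset_status != AMDGPU_CTX_NO_RESET) {
		/* a GPU reset happened; the context may need to be recreated */
	}

	/* free the context again */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = ctx_id;
	return ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
}
#endif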
/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons and have a fallback path that does not use userptr to
 * perform any operation.
 */
#define AMDGPU_GEM_USERPTR_READONLY	(1 << 0)
#define AMDGPU_GEM_USERPTR_ANONONLY	(1 << 1)
#define AMDGPU_GEM_USERPTR_VALIDATE	(1 << 2)
#define AMDGPU_GEM_USERPTR_REGISTER	(1 << 3)

struct drm_amdgpu_gem_userptr {
	__u64 addr;
	__u64 size;
	/* AMDGPU_GEM_USERPTR_* */
	__u32 flags;
	/* Resulting GEM handle */
	__u32 handle;
};

/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT			0
#define AMDGPU_TILING_ARRAY_MODE_MASK			0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT			4
#define AMDGPU_TILING_PIPE_CONFIG_MASK			0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT			9
#define AMDGPU_TILING_TILE_SPLIT_MASK			0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT		12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK		0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT			15
#define AMDGPU_TILING_BANK_WIDTH_MASK			0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT			17
#define AMDGPU_TILING_BANK_HEIGHT_MASK			0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT		19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK		0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT			21
#define AMDGPU_TILING_NUM_BANKS_MASK			0x3

#define AMDGPU_TILING_SET(field, value) \
	(((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
	(((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)

#define AMDGPU_GEM_METADATA_OP_SET_METADATA	1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA	2

/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
	/** GEM Object handle */
	__u32 handle;
	/** Whether we want to get or set metadata */
	__u32 op;
	struct {
		/** For future use, no flags defined so far */
		__u64 flags;
		/** family specific tiling info */
		__u64 tiling_info;
		__u32 data_size_bytes;
		__u32 data[64];
	} data;
};

struct drm_amdgpu_gem_mmap_in {
	/** the GEM object handle */
	__u32 handle;
	__u32 _pad;
};

struct drm_amdgpu_gem_mmap_out {
	/** mmap offset from the vma offset manager */
	__u64 addr_ptr;
};

union drm_amdgpu_gem_mmap {
	struct drm_amdgpu_gem_mmap_in in;
	struct drm_amdgpu_gem_mmap_out out;
};

struct drm_amdgpu_gem_wait_idle_in {
	/** GEM object handle */
	__u32 handle;
	/** For future use, no flags defined so far */
	__u32 flags;
	/** Absolute timeout to wait */
	__u64 timeout;
};

struct drm_amdgpu_gem_wait_idle_out {
	/** BO status: 0 - BO is idle, 1 - BO is busy */
	__u32 status;
	/** Returned current memory domain */
	__u32 domain;
};

union drm_amdgpu_gem_wait_idle {
	struct drm_amdgpu_gem_wait_idle_in in;
	struct drm_amdgpu_gem_wait_idle_out out;
};

struct drm_amdgpu_wait_cs_in {
	/** Command submission handle */
	__u64 handle;
	/** Absolute timeout to wait */
	__u64 timeout;
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
};

struct drm_amdgpu_wait_cs_out {
	/** CS status: 0 - CS completed, 1 - CS still busy */
	__u64 status;
};

union drm_amdgpu_wait_cs {
	struct drm_amdgpu_wait_cs_in in;
	struct drm_amdgpu_wait_cs_out out;
};
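/*
 * Illustrative sketch (not part of the UAPI): waiting for a previously
 * submitted command stream with DRM_IOCTL_AMDGPU_WAIT_CS. The handle is the
 * sequence number returned by the CS ioctl defined below; treating ~0ULL as
 * "wait indefinitely" mirrors common libdrm usage and is an assumption here.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int example_wait_cs(int fd, __u32 ctx_id, __u64 cs_handle)
{
	union drm_amdgpu_wait_cs args;

	memset(&args, 0, sizeof(args));
	args.in.handle = cs_handle;		/* value from drm_amdgpu_cs_out.handle */
	args.in.timeout = ~0ULL;		/* absolute timeout; ~0 waits indefinitely */
	args.in.ip_type = AMDGPU_HW_IP_GFX;	/* match the IP the CS was submitted to */
	args.in.ip_instance = 0;
	args.in.ring = 0;
	args.in.ctx_id = ctx_id;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args) < 0)
		return -1;

	return args.out.status ? 1 : 0;		/* 0: completed, 1: still busy */
}
#endif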
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
#define AMDGPU_GEM_OP_SET_PLACEMENT		1

/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
	/** GEM object handle */
	__u32 handle;
	/** AMDGPU_GEM_OP_* */
	__u32 op;
	/** Input or return value */
	__u64 value;
};

#define AMDGPU_VA_OP_MAP	1
#define AMDGPU_VA_OP_UNMAP	2

/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE		(1 << 0)

/* Mapping flags */
/* readable mapping */
#define AMDGPU_VM_PAGE_READABLE		(1 << 1)
/* writable mapping */
#define AMDGPU_VM_PAGE_WRITEABLE	(1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)

struct drm_amdgpu_gem_va {
	/** GEM object handle */
	__u32 handle;
	__u32 _pad;
	/** AMDGPU_VA_OP_* */
	__u32 operation;
	/** AMDGPU_VM_PAGE_* */
	__u32 flags;
	/** VA address to assign. Must be correctly aligned. */
	__u64 va_address;
	/** Specify offset inside of BO to assign. Must be correctly aligned. */
	__u64 offset_in_bo;
	/** Specify mapping size. Must be correctly aligned. */
	__u64 map_size;
};

#define AMDGPU_HW_IP_GFX	0
#define AMDGPU_HW_IP_COMPUTE	1
#define AMDGPU_HW_IP_DMA	2
#define AMDGPU_HW_IP_UVD	3
#define AMDGPU_HW_IP_VCE	4
#define AMDGPU_HW_IP_NUM	5

#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT	1

#define AMDGPU_CHUNK_ID_IB		0x01
#define AMDGPU_CHUNK_ID_FENCE		0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03

struct drm_amdgpu_cs_chunk {
	__u32 chunk_id;
	__u32 length_dw;
	__u64 chunk_data;
};

struct drm_amdgpu_cs_in {
	/** Rendering context id */
	__u32 ctx_id;
	/** Handle of resource list associated with CS */
	__u32 bo_list_handle;
	__u32 num_chunks;
	__u32 _pad;
	/** this points to an array of __u64, each of which points to a cs chunk */
	__u64 chunks;
};

struct drm_amdgpu_cs_out {
	__u64 handle;
};

union drm_amdgpu_cs {
	struct drm_amdgpu_cs_in in;
	struct drm_amdgpu_cs_out out;
};

/* Specify flags to be used for IB */

/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE	(1 << 0)

/* CE Preamble */
#define AMDGPU_IB_FLAG_PREAMBLE	(1 << 1)

struct drm_amdgpu_cs_chunk_ib {
	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */
	__u32 flags;
	/** Virtual address to begin IB execution */
	__u64 va_start;
	/** Size of submission */
	__u32 ib_bytes;
	/** HW IP to submit to */
	__u32 ip_type;
	/** HW IP index of the same type to submit to */
	__u32 ip_instance;
	/** Ring index to submit to */
	__u32 ring;
};

struct drm_amdgpu_cs_chunk_dep {
	__u32 ip_type;
	__u32 ip_instance;
	__u32 ring;
	__u32 ctx_id;
	__u64 handle;
};

struct drm_amdgpu_cs_chunk_fence {
	__u32 handle;
	__u32 offset;
};

struct drm_amdgpu_cs_chunk_data {
	union {
		struct drm_amdgpu_cs_chunk_ib ib_data;
		struct drm_amdgpu_cs_chunk_fence fence_data;
	};
};
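/*
 * Illustrative sketch (not part of the UAPI): submitting a single IB through
 * DRM_IOCTL_AMDGPU_CS. The context id, BO list handle, IB GPU virtual address
 * and IB size are assumed to have been set up with the ioctls above; libdrm's
 * amdgpu_cs_submit() builds the same chunk layout.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_submit_ib(int fd, __u32 ctx_id, __u32 bo_list_handle,
			     __u64 ib_gpu_va, __u32 ib_bytes, __u64 *seq)
{
	struct drm_amdgpu_cs_chunk_ib ib;
	struct drm_amdgpu_cs_chunk chunk;
	__u64 chunk_array[1];
	union drm_amdgpu_cs cs;

	memset(&ib, 0, sizeof(ib));
	ib.ip_type = AMDGPU_HW_IP_GFX;			/* submit to the GFX ring */
	ib.ring = 0;
	ib.va_start = ib_gpu_va;			/* VA mapped with the VA ioctl */
	ib.ib_bytes = ib_bytes;

	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
	chunk.length_dw = sizeof(ib) / 4;		/* chunk payload size in dwords */
	chunk.chunk_data = (__u64)(uintptr_t)&ib;

	chunk_array[0] = (__u64)(uintptr_t)&chunk;	/* array of pointers to chunks */

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list_handle;		/* residency list for this CS */
	cs.in.num_chunks = 1;
	cs.in.chunks = (__u64)(uintptr_t)chunk_array;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs) < 0)
		return -1;

	*seq = cs.out.handle;				/* use with DRM_IOCTL_AMDGPU_WAIT_CS */
	return 0;
}
#endif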
/**
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 *
 */
#define AMDGPU_IDS_FLAGS_FUSION		0x1

/* query whether hardware acceleration is working */
#define AMDGPU_INFO_ACCEL_WORKING		0x00
/* get the crtc_id from the mode object id */
#define AMDGPU_INFO_CRTC_FROM_ID		0x01
/* query hw IP info */
#define AMDGPU_INFO_HW_IP_INFO			0x02
/* query hw IP instance count for the specified type */
#define AMDGPU_INFO_HW_IP_COUNT			0x03
/* timestamp for GL_ARB_timer_query */
#define AMDGPU_INFO_TIMESTAMP			0x05
/* Query the firmware version */
#define AMDGPU_INFO_FW_VERSION			0x0e
	/* Subquery id: Query VCE firmware version */
	#define AMDGPU_INFO_FW_VCE		0x1
	/* Subquery id: Query UVD firmware version */
	#define AMDGPU_INFO_FW_UVD		0x2
	/* Subquery id: Query GMC firmware version */
	#define AMDGPU_INFO_FW_GMC		0x03
	/* Subquery id: Query GFX ME firmware version */
	#define AMDGPU_INFO_FW_GFX_ME		0x04
	/* Subquery id: Query GFX PFP firmware version */
	#define AMDGPU_INFO_FW_GFX_PFP		0x05
	/* Subquery id: Query GFX CE firmware version */
	#define AMDGPU_INFO_FW_GFX_CE		0x06
	/* Subquery id: Query GFX RLC firmware version */
	#define AMDGPU_INFO_FW_GFX_RLC		0x07
	/* Subquery id: Query GFX MEC firmware version */
	#define AMDGPU_INFO_FW_GFX_MEC		0x08
	/* Subquery id: Query SMC firmware version */
	#define AMDGPU_INFO_FW_SMC		0x0a
	/* Subquery id: Query SDMA firmware version */
	#define AMDGPU_INFO_FW_SDMA		0x0b
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
/* the used VRAM size */
#define AMDGPU_INFO_VRAM_USAGE			0x10
/* the used GTT size */
#define AMDGPU_INFO_GTT_USAGE			0x11
/* Information about GDS, etc. resource configuration */
#define AMDGPU_INFO_GDS_CONFIG			0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT			0x14
/* Query information about register in MMR address space */
#define AMDGPU_INFO_READ_MMR_REG		0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO			0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS		0x18

#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT	8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK	0xff

struct drm_amdgpu_query_fw {
	/** AMDGPU_INFO_FW_* */
	__u32 fw_type;
	/**
	 * Index of the IP if there are more IPs of
	 * the same type.
	 */
	__u32 ip_instance;
	/**
	 * Index of the engine. Whether this is used depends
	 * on the firmware type. (e.g. MEC, SDMA)
	 */
	__u32 index;
	__u32 _pad;
};

/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
	/* Where the return value will be stored */
	__u64 return_pointer;
	/* The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write. */
	__u32 return_size;
	/* The query request id. */
	__u32 query;

	union {
		struct {
			__u32 id;
			__u32 _pad;
		} mode_crtc;

		struct {
			/** AMDGPU_HW_IP_* */
			__u32 type;
			/**
			 * Index of the IP if there are more IPs of the same
			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
			 */
			__u32 ip_instance;
		} query_hw_ip;

		struct {
			__u32 dword_offset;
			/** number of registers to read */
			__u32 count;
			__u32 instance;
			/** For future use, no flags defined so far */
			__u32 flags;
		} read_mmr_reg;

		struct drm_amdgpu_query_fw query_fw;
	};
};
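/*
 * Illustrative sketch (not part of the UAPI): the general pattern for
 * DRM_IOCTL_AMDGPU_INFO. The kernel writes at most return_size bytes to the
 * buffer behind return_pointer; here that buffer is a single __u64 holding
 * the current VRAM usage. The same pattern applies to the struct-returning
 * queries defined below; interpreting the value as bytes is an assumption
 * based on typical usage.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_query_vram_usage(int fd, __u64 *vram_used)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (__u64)(uintptr_t)vram_used;
	request.return_size = sizeof(*vram_used);	/* upper bound for the copy */
	request.query = AMDGPU_INFO_VRAM_USAGE;

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}
#endif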
struct drm_amdgpu_info_gds {
	/** GDS GFX partition size */
	__u32 gds_gfx_partition_size;
	/** GDS compute partition size */
	__u32 compute_partition_size;
	/** total GDS memory size */
	__u32 gds_total_size;
	/** GWS size per GFX partition */
	__u32 gws_per_gfx_partition;
	/** GWS size per compute partition */
	__u32 gws_per_compute_partition;
	/** OA size per GFX partition */
	__u32 oa_per_gfx_partition;
	/** OA size per compute partition */
	__u32 oa_per_compute_partition;
	__u32 _pad;
};

struct drm_amdgpu_info_vram_gtt {
	__u64 vram_size;
	__u64 vram_cpu_accessible_size;
	__u64 gtt_size;
};

struct drm_amdgpu_info_firmware {
	__u32 ver;
	__u32 feature;
};

#define AMDGPU_VRAM_TYPE_UNKNOWN	0
#define AMDGPU_VRAM_TYPE_GDDR1		1
#define AMDGPU_VRAM_TYPE_DDR2		2
#define AMDGPU_VRAM_TYPE_GDDR3		3
#define AMDGPU_VRAM_TYPE_GDDR4		4
#define AMDGPU_VRAM_TYPE_GDDR5		5
#define AMDGPU_VRAM_TYPE_HBM		6
#define AMDGPU_VRAM_TYPE_DDR3		7

struct drm_amdgpu_info_device {
	/** PCI Device ID */
	__u32 device_id;
	/** Internal chip revision: A0, A1, etc. */
	__u32 chip_rev;
	__u32 external_rev;
	/** Revision id in PCI Config space */
	__u32 pci_rev;
	__u32 family;
	__u32 num_shader_engines;
	__u32 num_shader_arrays_per_engine;
	/* in KHz */
	__u32 gpu_counter_freq;
	__u64 max_engine_clock;
	__u64 max_memory_clock;
	/* cu information */
	__u32 cu_active_number;
	__u32 cu_ao_mask;
	__u32 cu_bitmap[4][4];
	/** Render backend pipe mask. One render backend is CB+DB. */
	__u32 enabled_rb_pipes_mask;
	__u32 num_rb_pipes;
	__u32 num_hw_gfx_contexts;
	__u32 _pad;
	__u64 ids_flags;
	/** Starting virtual address for UMDs. */
	__u64 virtual_address_offset;
	/** The maximum virtual address */
	__u64 virtual_address_max;
	/** Required alignment of virtual addresses. */
	__u32 virtual_address_alignment;
	/** Page table entry - fragment size */
	__u32 pte_fragment_size;
	__u32 gart_page_size;
	/** constant engine ram size */
	__u32 ce_ram_size;
	/** video memory type info */
	__u32 vram_type;
	/** video memory bit width */
	__u32 vram_bit_width;
	/* vce harvesting instance */
	__u32 vce_harvest_config;
};

struct drm_amdgpu_info_hw_ip {
	/** Version of h/w IP */
	__u32 hw_ip_version_major;
	__u32 hw_ip_version_minor;
	/** Capabilities */
	__u64 capabilities_flags;
	/** command buffer address start alignment */
	__u32 ib_start_alignment;
	/** command buffer size alignment */
	__u32 ib_size_alignment;
	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
	__u32 available_rings;
	__u32 _pad;
};
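/*
 * Illustrative sketch (not part of the UAPI): querying the GFX IP block with
 * AMDGPU_INFO_HW_IP_INFO and counting the rings advertised in
 * available_rings. The plain ioctl(2) call is an assumption; libdrm exposes
 * this as amdgpu_query_hw_ip_info().
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_count_gfx_rings(int fd)
{
	struct drm_amdgpu_info_hw_ip ip;
	struct drm_amdgpu_info request;
	__u32 mask;
	int rings = 0;

	memset(&ip, 0, sizeof(ip));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (__u64)(uintptr_t)&ip;
	request.return_size = sizeof(ip);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
	request.query_hw_ip.ip_instance = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) < 0)
		return -1;

	for (mask = ip.available_rings; mask; mask >>= 1)
		rings += mask & 1;		/* bit N set => ring N exists */

	return rings;
}
#endif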
/*
 * Supported GPU families
 */
#define AMDGPU_FAMILY_UNKNOWN		0
#define AMDGPU_FAMILY_SI		110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI		120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV		125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI		130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ		135 /* Carrizo, Stoney */

#if defined(__cplusplus)
}
#endif

#endif