/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
 * - 1.16 - Add contiguous VRAM allocation flag
 * - 1.17 - Add SDMA queue creation with target SDMA engine ID
 * - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 18

struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
        __u32 minor_version;    /* from KFD */
};

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE              0x0
#define KFD_IOC_QUEUE_TYPE_SDMA                 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL          0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI            0x3
#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID       0x4

#define KFD_MAX_QUEUE_PERCENTAGE        100
#define KFD_MAX_QUEUE_PRIORITY          15

#define KFD_MIN_QUEUE_RING_SIZE         1024

struct kfd_ioctl_create_queue_args {
        __u64 ring_base_address;        /* to KFD */
        __u64 write_pointer_address;    /* from KFD */
        __u64 read_pointer_address;     /* from KFD */
        __u64 doorbell_offset;          /* from KFD */

        __u32 ring_size;                /* to KFD */
        __u32 gpu_id;                   /* to KFD */
        __u32 queue_type;               /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
        __u32 queue_id;                 /* from KFD */

        __u64 eop_buffer_address;       /* to KFD */
        __u64 eop_buffer_size;          /* to KFD */
        __u64 ctx_save_restore_address; /* to KFD */
        __u32 ctx_save_restore_size;    /* to KFD */
        __u32 ctl_stack_size;           /* to KFD */
        __u32 sdma_engine_id;           /* to KFD */
        __u32 pad;
};
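
/*
 * Illustrative usage sketch (compiled out; not part of the uAPI): creating
 * an AQL compute queue with AMDKFD_IOC_CREATE_QUEUE. Assumes an open
 * /dev/kfd file descriptor and caller-allocated, GPU-accessible ring and
 * ctx save/restore buffers; "gpu_id", "ring_buf", "cwsr_buf" and
 * "cwsr_size" are placeholders, and includes are elided. Fields beyond
 * these vary by ASIC.
 */
#if 0
int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
struct kfd_ioctl_create_queue_args args = {0};

args.gpu_id = gpu_id;
args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
args.ring_base_address = (__u64)(uintptr_t)ring_buf;
args.ring_size = KFD_MIN_QUEUE_RING_SIZE;       /* power of two, >= 1024 */
args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
args.queue_priority = 7;                        /* 0..KFD_MAX_QUEUE_PRIORITY */
args.ctx_save_restore_address = (__u64)(uintptr_t)cwsr_buf;
args.ctx_save_restore_size = cwsr_size;

if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0) {
        /* KFD returns the queue id, the doorbell offset (to be mmapped)
         * and the ring read/write pointer addresses. */
}
#endif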

struct kfd_ioctl_destroy_queue_args {
        __u32 queue_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_update_queue_args {
        __u64 ring_base_address;        /* to KFD */

        __u32 queue_id;                 /* to KFD */
        __u32 ring_size;                /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_cu_mask;      /* to KFD */
        __u64 cu_mask_ptr;      /* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
        __u64 ctl_stack_address;        /* to KFD */
        __u32 ctl_stack_used_size;      /* from KFD */
        __u32 save_area_used_size;      /* from KFD */
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
        __u64 available;        /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_dbg_device_info_entry {
        __u64 exception_status;
        __u64 lds_base;
        __u64 lds_limit;
        __u64 scratch_base;
        __u64 scratch_limit;
        __u64 gpuvm_base;
        __u64 gpuvm_limit;
        __u32 gpu_id;
        __u32 location_id;
        __u32 vendor_id;
        __u32 device_id;
        __u32 revision_id;
        __u32 subsystem_vendor_id;
        __u32 subsystem_device_id;
        __u32 fw_version;
        __u32 gfx_target_version;
        __u32 simd_count;
        __u32 max_waves_per_simd;
        __u32 array_count;
        __u32 simd_arrays_per_engine;
        __u32 num_xcc;
        __u32 capability;
        __u32 debug_prop;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

/* Misc. per process flags */
#define KFD_PROC_FLAG_MFMA_HIGH_PRECISION (1 << 0)

struct kfd_ioctl_set_memory_policy_args {
        __u64 alternate_aperture_base;  /* to KFD */
        __u64 alternate_aperture_size;  /* to KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 default_policy;           /* to KFD */
        __u32 alternate_policy;         /* to KFD */
        __u32 misc_process_flag;        /* to KFD */
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counter should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
        __u64 gpu_clock_counter;        /* from KFD */
        __u64 cpu_clock_counter;        /* from KFD */
        __u64 system_clock_counter;     /* from KFD */
        __u64 system_clock_freq;        /* from KFD */

        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_process_device_apertures {
        __u64 lds_base;         /* from KFD */
        __u64 lds_limit;        /* from KFD */
        __u64 scratch_base;     /* from KFD */
        __u64 scratch_limit;    /* from KFD */
        __u64 gpuvm_base;       /* from KFD */
        __u64 gpuvm_limit;      /* from KFD */
        __u32 gpu_id;           /* from KFD */
        __u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
        struct kfd_process_device_apertures
                        process_apertures[NUM_OF_SUPPORTED_GPUS]; /* from KFD */

        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
        __u32 num_of_nodes;
        __u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
        /* User allocated. Pointer to struct kfd_process_device_apertures
         * filled in by Kernel
         */
        __u64 kfd_process_device_apertures_ptr;
        /* to KFD - indicates amount of memory present in
         *          kfd_process_device_apertures_ptr
         * from KFD - Number of entries filled by KFD.
         */
        __u32 num_of_nodes;
        __u32 pad;
};
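
/*
 * Illustrative sketch (compiled out): a common two-call pattern for
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW - query the node count first, then
 * allocate and fetch. Assumes the driver reports the available node count
 * when called with num_of_nodes == 0; "kfd_fd" is a placeholder and error
 * handling is elided.
 */
#if 0
struct kfd_ioctl_get_process_apertures_new_args args = {0};
struct kfd_process_device_apertures *apes;

ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
apes = calloc(args.num_of_nodes, sizeof(*apes));
args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)apes;
ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args); /* fills apes[] */
#endif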

#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128

struct kfd_ioctl_dbg_register_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

#define KFD_INVALID_FD 0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL                    0
#define KFD_IOC_EVENT_NODECHANGE                1
#define KFD_IOC_EVENT_DEVICESTATECHANGE         2
#define KFD_IOC_EVENT_HW_EXCEPTION              3
#define KFD_IOC_EVENT_SYSTEM_EVENT              4
#define KFD_IOC_EVENT_DEBUG_EVENT               5
#define KFD_IOC_EVENT_PROFILE_EVENT             6
#define KFD_IOC_EVENT_QUEUE_EVENT               7
#define KFD_IOC_EVENT_MEMORY                    8

#define KFD_IOC_WAIT_RESULT_COMPLETE            0
#define KFD_IOC_WAIT_RESULT_TIMEOUT             1
#define KFD_IOC_WAIT_RESULT_FAIL                2

#define KFD_SIGNAL_EVENT_LIMIT                  4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET        0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET       1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG       0
#define KFD_HW_EXCEPTION_ECC            1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS              0
#define KFD_MEM_ERR_SRAM_ECC            1
#define KFD_MEM_ERR_POISON_CONSUMED     2
#define KFD_MEM_ERR_GPU_HANG            3

struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
        __u32 event_type;               /* to KFD */
        __u32 auto_reset;               /* to KFD */
        __u32 node_id;                  /* to KFD - only valid for certain
                                           event types */
        __u32 event_id;                 /* from KFD */
        __u32 event_slot_index;         /* from KFD */
};

struct kfd_ioctl_destroy_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_set_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_reset_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly;         /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
        __u32 imprecise;        /* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
        __u32 gpu_id;
        __u32 ErrorType;        /* 0 = no RAS error,
                                 * 1 = ECC_SRAM,
                                 * 2 = Link_SYNFLOOD (poison),
                                 * 3 = GPU hang (not attributable to a specific cause),
                                 * other values reserved
                                 */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
        __u32 reset_type;
        __u32 reset_cause;
        __u32 memory_lost;
        __u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
        __u64 last_event_age;   /* to and from KFD */
};

/* Event data */
struct kfd_event_data {
        union {
                /* From KFD */
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
                /* To and From KFD */
                struct kfd_hsa_signal_event_data signal_event_data;
        };
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
        __u32 event_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_wait_events_args {
        __u64 events_ptr;       /* points to a struct kfd_event_data
                                   array, to KFD */
        __u32 num_events;       /* to KFD */
        __u32 wait_for_all;     /* to KFD */
        __u32 timeout;          /* to KFD */
        __u32 wait_result;      /* from KFD */
};
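
/*
 * Illustrative sketch (compiled out): creating a signal event and blocking
 * on it with AMDKFD_IOC_WAIT_EVENTS. "kfd_fd" is a placeholder; a real
 * consumer would also mmap the event page and check wait_result.
 */
#if 0
struct kfd_ioctl_create_event_args ev = { .event_type = KFD_IOC_EVENT_SIGNAL };
struct kfd_event_data data = {0};
struct kfd_ioctl_wait_events_args wait = {0};

ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);

data.event_id = ev.event_id;
wait.events_ptr = (__u64)(uintptr_t)&data;
wait.num_events = 1;
wait.wait_for_all = 1;
wait.timeout = 1000;    /* ms */
ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
/* wait.wait_result is KFD_IOC_WAIT_RESULT_COMPLETE / _TIMEOUT / _FAIL */
#endif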

struct kfd_ioctl_set_scratch_backing_va_args {
        __u64 va_addr;  /* to KFD */
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
        __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
        __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_macro_tile_configs;

        __u32 gpu_id;           /* to KFD */
        __u32 gb_addr_config;   /* from KFD */
        __u32 num_banks;        /* from KFD */
        __u32 num_ranks;        /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
};

struct kfd_ioctl_set_trap_handler_args {
        __u64 tba_addr;         /* to KFD */
        __u64 tma_addr;         /* to KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
        __u32 drm_fd;   /* to KFD */
        __u32 gpu_id;   /* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM            (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT             (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR         (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL        (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP      (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE        (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE      (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC          (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE   (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM   (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT        (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED        (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT    (1 << 24)
#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS      (1 << 23)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr: virtual address of the memory to be allocated
 *           all later mappings on all GPUs will use this address
 * @size: size in bytes
 * @handle: buffer handle returned to user mode, used to refer to
 *          this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id: device identifier
 * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
        __u64 va_addr;          /* to KFD */
        __u64 size;             /* to KFD */
        __u64 handle;           /* from KFD */
        __u64 mmap_offset;      /* to KFD (userptr), from KFD (mmap offset) */
        __u32 gpu_id;           /* to KFD */
        __u32 flags;
};

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
        __u64 handle;   /* to KFD */
};
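
/*
 * Illustrative sketch (compiled out): allocating writable VRAM at a chosen
 * GPU virtual address. "kfd_fd", "gpu_id" and "va" are placeholders; the
 * compute VM must have been acquired first (AMDKFD_IOC_ACQUIRE_VM).
 */
#if 0
struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
        .va_addr = va,
        .size = 2 * 1024 * 1024,
        .gpu_id = gpu_id,
        .flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
                 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
                 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE,
};

if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) == 0) {
        /* alloc.handle refers to the BO; alloc.mmap_offset can be used
         * to CPU-map the allocation through the render node. */
}
#endif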

/* Map memory to one or more GPUs
 *
 * @handle: memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices: number of devices in the array
 * @n_success: number of devices mapped successfully
 *
 * @n_success returns information to the caller on how many devices from
 * the start of the array have mapped the buffer successfully. It can
 * be passed into a subsequent retry call to skip those devices. For
 * the first call the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};

/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};
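
/*
 * Illustrative sketch (compiled out): mapping a BO to several GPUs and
 * retrying on partial success. n_success is preserved across retries so
 * already-mapped devices at the start of the array are skipped. "kfd_fd",
 * "gpu_ids" and "n_gpus" are placeholders.
 */
#if 0
struct kfd_ioctl_map_memory_to_gpu_args map = {
        .handle = alloc.handle,
        .device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids,
        .n_devices = n_gpus,
        .n_success = 0,         /* must be 0 on the first call */
};
int ret;

ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
if (ret && map.n_success < map.n_devices)
        /* retry; devices [0, n_success) are skipped */
        ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
#endif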

/* Allocate GWS for specific queue
 *
 * @queue_id:  queue's id that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             only contiguous GWS allocation is supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_gws;          /* to KFD */
        __u32 first_gws;        /* from KFD */
        __u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
        __u32 metadata_size;    /* to KFD (space allocated by user)
                                 * from KFD (actual metadata size)
                                 */
        __u32 gpu_id;           /* from KFD */
        __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
        __u64 va_addr;          /* to KFD */
        __u64 handle;           /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
        __u64 handle;           /* to KFD */
        __u32 flags;            /* to KFD */
        __u32 dmabuf_fd;        /* from KFD */
};

/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
        KFD_SMI_EVENT_NONE = 0,         /* not used */
        KFD_SMI_EVENT_VMFAULT = 1,      /* event start counting at 1 */
        KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
        KFD_SMI_EVENT_GPU_PRE_RESET = 3,
        KFD_SMI_EVENT_GPU_POST_RESET = 4,
        KFD_SMI_EVENT_MIGRATE_START = 5,
        KFD_SMI_EVENT_MIGRATE_END = 6,
        KFD_SMI_EVENT_PAGE_FAULT_START = 7,
        KFD_SMI_EVENT_PAGE_FAULT_END = 8,
        KFD_SMI_EVENT_QUEUE_EVICTION = 9,
        KFD_SMI_EVENT_QUEUE_RESTORE = 10,
        KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

        /*
         * Max event number. Used as a flag bit to request events from all
         * processes; this requires super user permission, otherwise no
         * events from any process will be received. Without this flag,
         * only events from the same process are received.
         */
        KFD_SMI_EVENT_ALL_PROCESS = 64
};

/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
        KFD_MIGRATE_TRIGGER_PREFETCH,           /* Prefetch to GPU VRAM or system memory */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,      /* GPU page fault recover */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,      /* CPU page fault recover */
        KFD_MIGRATE_TRIGGER_TTM_EVICTION        /* TTM eviction */
};

/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
        KFD_QUEUE_EVICTION_TRIGGER_SVM,         /* SVM buffer migration */
        KFD_QUEUE_EVICTION_TRIGGER_USERPTR,     /* userptr movement */
        KFD_QUEUE_EVICTION_TRIGGER_TTM,         /* TTM move buffer */
        KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,     /* GPU suspend */
        KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,     /* CRIU checkpoint */
        KFD_QUEUE_EVICTION_CRIU_RESTORE         /* CRIU restore */
};

/* The reason for the unmap-buffer-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,       /* MMU notifier CPU buffer movement */
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE, /* MMU notifier page migration */
        KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU    /* Unmap to free the buffer */
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE  96

struct kfd_ioctl_smi_events_args {
        __u32 gpuid;    /* to KFD */
        __u32 anon_fd;  /* from KFD */
};

/*
 * SVM event tracing via SMI system management interface
 *
 * Open event file descriptor
 *    use ioctl AMDKFD_IOC_SMI_EVENTS, pass in gpuid and get back an anonymous
 *    file descriptor to receive SMI events.
 *    If called with superuser permission, the file descriptor can be used to
 *    receive SVM events from all processes; otherwise only SVM events of the
 *    same process are received.
 *
 * To enable an SVM event
 *    Write the event file descriptor with a KFD_SMI_EVENT_MASK_FROM_INDEX(event)
 *    bitmap mask to start recording the event to the kfifo; combine bitmap
 *    masks for multiple events. A new event mask overwrites the previous
 *    event mask. The KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS)
 *    bit requires superuser permission to receive SVM events from all
 *    processes.
 *
 * To receive an event
 *    The application can poll the file descriptor to wait for events, then
 *    read events from the file into a buffer. Each event is a one-line string
 *    message, starting with the event id, followed by the event-specific
 *    information.
 *
 * To decode event information
 *    The following event format string macros can be used with sscanf to
 *    decode the specific event information.
 *    event triggers: the reason the event was generated, defined as enums for
 *              unmap, eviction and migrate events.
 *    node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system memory.
 *    addr: user mode address, in pages
 *    size: in pages
 *    pid: the process ID that generated the event
 *    ns: timestamp in nanosecond resolution; starts at system boot time but
 *        stops during suspend
 *    migrate_update: GPU page fault is recovered by 'M' for migrate, 'U' for update
 *    rw: 'W' for write page fault, 'R' for read page fault
 *    rescheduled: 'R' if the queue restore failed and was rescheduled to try again
 *    error_code: migrate failure error code, 0 if no error
 */
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
                "%x %s\n", (reset_seq_num), (reset_cause)

#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
                "%llx:%llx\n", (bitmask), (counter)

#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
                "%x:%s\n", (pid), (task_name)

#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)

#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)

#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
                preferred_loc, migrate_trigger)\
                "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
                (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)

#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger, error_code) \
                "%lld -%d @%lx(%lx) %x->%x %d %d\n", (ns), (pid), (start), (size),\
                (from), (to), (migrate_trigger), (error_code)

#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
                "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)

#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
                "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)

#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
                "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
                (node), (unmap_trigger)
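
/*
 * Illustrative sketch (compiled out): obtaining the SMI event fd, enabling
 * migrate events, and reading one event line. "kfd_fd" and "gpu_id" are
 * placeholders; this sketch assumes the mask is written as a raw __u64.
 * A real consumer would poll() the fd and parse lines with the format
 * macros above.
 */
#if 0
struct kfd_ioctl_smi_events_args smi = { .gpuid = gpu_id };
char buf[KFD_SMI_EVENT_MSG_SIZE];
__u64 mask;

ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &smi);

mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_START) |
       KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_END);
write(smi.anon_fd, &mask, sizeof(mask));        /* enable selected events */

read(smi.anon_fd, buf, sizeof(buf));            /* one line per event */
#endif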

/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses
 *    execution and evicts all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO.
 * The user application needs to perform an UNPAUSE operation after calling
 * PROCESS_INFO.
 */

enum kfd_criu_op {
        KFD_CRIU_OP_PROCESS_INFO,
        KFD_CRIU_OP_CHECKPOINT,
        KFD_CRIU_OP_UNPAUSE,
        KFD_CRIU_OP_RESTORE,
        KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
 * @devices:        [in/out] User pointer to memory location for devices information.
 *                  This is an array of type kfd_criu_device_bucket.
 * @bos:            [in/out] User pointer to memory location for BOs information.
 *                  This is an array of type kfd_criu_bo_bucket.
 * @priv_data:      [in/out] User pointer to memory location for private data
 * @priv_data_size: [in/out] Size of priv_data in bytes
 * @num_devices:    [in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:        [in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:    [in/out] Number of objects used by process. Objects are opaque to
 *                  user application.
 * @pid:            [in/out] PID of the process being checkpointed
 * @op:             [in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
        __u64 devices;          /* Used during ops: CHECKPOINT, RESTORE */
        __u64 bos;              /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data;        /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data_size;   /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_devices;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_bos;          /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_objects;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 pid;              /* Used during ops: PROCESS_INFO, RESUME */
        __u32 op;
};

struct kfd_criu_device_bucket {
        __u32 user_gpu_id;
        __u32 actual_gpu_id;
        __u32 drm_fd;
        __u32 pad;
};

struct kfd_criu_bo_bucket {
        __u64 addr;
        __u64 size;
        __u64 offset;
        __u64 restored_offset;  /* During restore, updated offset for BO */
        __u32 gpu_id;           /* This is the user_gpu_id */
        __u32 alloc_flags;
        __u32 dmabuf_fd;
        __u32 pad;
};

/* CRIU IOCTLs - END */
/**************************************************************************************************/
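
/*
 * Illustrative sketch (compiled out): the checkpoint-side CRIU sequence
 * described above. Buffer sizing uses the counts returned by PROCESS_INFO;
 * "kfd_fd" and "target_pid" are placeholders, includes and error paths are
 * elided.
 */
#if 0
struct kfd_ioctl_criu_args criu = { .pid = target_pid };

criu.op = KFD_CRIU_OP_PROCESS_INFO;     /* pauses process, evicts queues */
ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu);

criu.devices = (__u64)(uintptr_t)calloc(criu.num_devices,
                                        sizeof(struct kfd_criu_device_bucket));
criu.bos = (__u64)(uintptr_t)calloc(criu.num_bos,
                                    sizeof(struct kfd_criu_bo_bucket));
criu.priv_data = (__u64)(uintptr_t)malloc(criu.priv_data_size);

criu.op = KFD_CRIU_OP_CHECKPOINT;       /* dump BOs, queues, events, svm-ranges */
ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu);

criu.op = KFD_CRIU_OP_UNPAUSE;          /* un-evict the queues */
ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu);
#endif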

/* Register offset inside the remapped mmio page
 */
enum kfd_mmio_remap {
        KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
        KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT    0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL  0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO      0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC    0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
/* Fine grained coherency between all devices using device-scope atomics */
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
        KFD_IOCTL_SVM_OP_SET_ATTR,
        KFD_IOCTL_SVM_OP_GET_ATTR
};

/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * The definitions below are used for system memory or for leaving the
 * preferred location unspecified.
 */
enum kfd_ioctl_svm_location {
        KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
        KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
        KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
        KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
        KFD_IOCTL_SVM_ATTR_ACCESS,
        KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
        KFD_IOCTL_SVM_ATTR_NO_ACCESS,
        KFD_IOCTL_SVM_ATTR_SET_FLAGS,
        KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
        KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
        __u32 type;
        __u32 value;
};

/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means, a flag will be set in the
 * output, if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means, a flag will be set in the
 * output, if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
        __u64 start_addr;
        __u64 size;
        __u32 op;
        __u32 nattr;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[];
};
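
/*
 * Illustrative sketch (compiled out): prefetching an SVM range to a GPU
 * and marking it coherent. Because of the flexible attrs[] array, the args
 * struct is allocated dynamically. "kfd_fd", "gpu_id", "buf" and
 * "buf_size" are placeholders.
 */
#if 0
struct kfd_ioctl_svm_args *args;
size_t sz = sizeof(*args) + 2 * sizeof(struct kfd_ioctl_svm_attribute);

args = calloc(1, sz);
args->start_addr = (__u64)(uintptr_t)buf;       /* page aligned */
args->size = buf_size;                          /* page aligned */
args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
args->nattr = 2;
args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
args->attrs[0].value = gpu_id;                  /* triggers migration */
args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
args->attrs[1].value = KFD_IOCTL_SVM_FLAG_COHERENT;

ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
free(args);
#endif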

/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may be compiled for the wrong mode. And GPUs
 * that cannot change to the requested mode will prevent the XNACK
 * mode from occurring. All GPUs used by the process must be in the
 * same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
        __s32 xnack_enabled;
};
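
/*
 * Illustrative sketch (compiled out): querying the current XNACK mode
 * without changing it by passing a negative value, as documented above.
 * "kfd_fd" is a placeholder.
 */
#if 0
struct kfd_ioctl_set_xnack_mode_args xnack = { .xnack_enabled = -1 };

if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &xnack) == 0) {
        /* xnack.xnack_enabled is now 0 (disabled) or positive (enabled) */
}
#endif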

/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
        KFD_DBG_TRAP_OVERRIDE_OR = 0,
        KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
        KFD_DBG_TRAP_MASK_FP_INVALID = 1,
        KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
        KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
        KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
        KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
        KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
        KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
        KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
        KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
        KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
        KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
        EC_NONE = 0,
        /* per queue */
        EC_QUEUE_WAVE_ABORT = 1,
        EC_QUEUE_WAVE_TRAP = 2,
        EC_QUEUE_WAVE_MATH_ERROR = 3,
        EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
        EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
        EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
        EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
        EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
        EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
        EC_QUEUE_PACKET_RESERVED = 19,
        EC_QUEUE_PACKET_UNSUPPORTED = 20,
        EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
        EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
        EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
        EC_QUEUE_PREEMPTION_ERROR = 30,
        EC_QUEUE_NEW = 31,
        /* per device */
        EC_DEVICE_QUEUE_DELETE = 32,
        EC_DEVICE_MEMORY_VIOLATION = 33,
        EC_DEVICE_RAS_ERROR = 34,
        EC_DEVICE_FATAL_HALT = 35,
        EC_DEVICE_NEW = 36,
        /* per process */
        EC_PROCESS_RUNTIME = 48,
        EC_PROCESS_DEVICE_REMOVE = 49,
        EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)      (1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE       (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |     \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |      \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |        \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |       \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |  \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) |      \
                                 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) |       \
                                 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE      (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |  \
                                 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |     \
                                 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |    \
                                 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |      \
                                 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS     (KFD_EC_MASK(EC_PROCESS_RUNTIME) |      \
                                 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET      (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)                                 \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)                                \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)                               \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)                                \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))

/* Runtime enable states */
enum kfd_dbg_runtime_state {
        DEBUG_RUNTIME_STATE_DISABLED = 0,
        DEBUG_RUNTIME_STATE_ENABLED = 1,
        DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
        DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
        __u64 r_debug;
        __u32 runtime_state;
        __u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK     1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK  2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *      KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *      KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *        - EBUSY if runtime enable call already pending.
 *        - EEXIST if user queues already active prior to call.
 *          If process is debug enabled, runtime enable will enable debug devices and
 *          wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
 *          to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
        __u64 r_debug;
        __u32 mode_mask;
        __u32 capabilities_mask;
};

/* Queue information */
struct kfd_queue_snapshot_entry {
        __u64 exception_status;
        __u64 ring_base_address;
        __u64 write_pointer_address;
        __u64 read_pointer_address;
        __u64 ctx_save_restore_address;
        __u32 queue_id;
        __u32 gpu_id;
        __u32 ring_size;
        __u32 queue_type;
        __u32 ctx_save_restore_area_size;
        __u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT         30
#define KFD_DBG_QUEUE_INVALID_BIT       31
#define KFD_DBG_QUEUE_ERROR_MASK        (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK      (1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
        struct {
                __u32 control_stack_offset;
                __u32 control_stack_size;
                __u32 wave_state_offset;
                __u32 wave_state_size;
        } wave_state;
        __u32 debug_offset;
        __u32 debug_size;
        __u64 err_payload_addr;
        __u32 err_event_id;
        __u32 reserved1;
};

/*
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *          KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *          Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *         PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *         clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *          AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 */
enum kfd_dbg_trap_operations {
        KFD_IOC_DBG_TRAP_ENABLE = 0,
        KFD_IOC_DBG_TRAP_DISABLE = 1,
        KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
        KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,            /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,             /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,    /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
        KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
        KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
        KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
        KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};

/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 * kfd_ioctl_dbg_trap_args to disable debug session.
 *
 * @exception_mask (IN) - exceptions to raise to the debugger
 * @rinfo_ptr (IN)      - pointer to runtime info buffer (see kfd_runtime_info)
 * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
 * @dbg_fd (IN)         - fd the KFD will notify the debugger with of raised
 *                        exceptions set in exception_mask.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *          Size of kfd_runtime saved by the KFD returned to @rinfo_size.
 *        - EBADF if KFD cannot get a reference to dbg_fd.
 *        - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *        - EINVAL if target process is already debug enabled.
 */
struct kfd_ioctl_dbg_trap_enable_args {
        __u64 exception_mask;
        __u64 rinfo_ptr;
        __u32 rinfo_size;
        __u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 * Raises exceptions to runtime.
 *
 * @exception_mask (IN) - exceptions to raise to runtime
 * @gpu_id (IN)         - target device id
 * @queue_id (IN)       - target queue id
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *          If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *          AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *          All other exceptions are raised to runtime through err_payload_addr.
 *          See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 * Arguments for KFD_IOC_SET_EXCEPTIONS_ENABLED
 * Set new exceptions to be raised to the debugger.
 *
 * @exception_mask (IN) - new exceptions to raise to the debugger
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
        __u64 exception_mask;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 * Enable HW exceptions to raise trap.
 *
 * @override_mode (IN)  - see kfd_dbg_trap_override_mode
 * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *                         IN is the override modes requested to be enabled.
 *                         OUT is referenced in Return below.
 * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *                                  IN is the override modes requested for support check.
 *                                  OUT is referenced in Return below.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Previous enablement is returned in @enable_mask.
 *          Actual override support is returned in @support_request_mask.
 *        - EINVAL if override mode is not supported.
 *        - EACCES if trap support requested is not actually supported,
 *          i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *          Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
        __u32 override_mode;
        __u32 enable_mask;
        __u32 support_request_mask;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 * Set wave launch mode.
 *
 * @mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
        __u32 launch_mode;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 * Suspend queues.
 *
 * @exception_mask (IN)  - raised exceptions to clear
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to suspend
 * @num_queues (IN)      - number of queues to suspend in @queue_array_ptr
 * @grace_period (IN)    - wave time allowance before preemption
 *                         per 1K GPU clock cycle unit
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Destruction of a suspended queue is blocked until the queue is
 * resumed. This allows the debugger to access queue information and
 * its context save area without running into a race condition on
 * queue destruction.
 * Automatically copies per queue context save area header information
 * into the save area base
 * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 * Return - Number of queues suspended on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          suspend reason.
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *          is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
        __u64 exception_mask;
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 grace_period;
};
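
/*
 * Illustrative sketch (compiled out): checking per-queue status after a
 * SUSPEND_QUEUES call issued through kfd_ioctl_dbg_trap_args (defined
 * below). The queue id array is updated in place with error/invalid bits;
 * "queue_ids" and "n_queues" are placeholders.
 */
#if 0
unsigned int i;

for (i = 0; i < n_queues; i++) {
        if (queue_ids[i] & KFD_DBG_QUEUE_INVALID_MASK) {
                /* queue does not exist, is new, or is being destroyed */
        } else if (queue_ids[i] & KFD_DBG_QUEUE_ERROR_MASK) {
                /* HW failure while suspending this queue */
        }
}
#endif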

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 * Resume queues.
 *
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to resume
 * @num_queues (IN)      - number of queues to resume in @queue_array_ptr
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - Number of queues resumed on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          resume reason.
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 * Sets address watch for device.
 *
 * @address (IN) - watch address to set
 * @mode (IN)    - see kfd_dbg_trap_address_watch_mode
 * @mask (IN)    - watch address mask
 * @gpu_id (IN)  - target gpu to set watch point
 * @id (OUT)     - watch id allocated
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Allocated watch ID returned to @id.
 *        - ENODEV if gpu_id not found.
 *        - ENOMEM if no watch IDs can be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
        __u64 address;
        __u32 mode;
        __u32 mask;
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 * Clear address watch for device.
 *
 * @gpu_id (IN) - target device to clear watch point
 * @id (IN)     - allocated watch id to clear
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *        - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 * Sets flags for wave behaviour.
 *
 * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
        __u32 flags;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 * Find one or more raised exceptions. This function can return multiple
 * exceptions from a single queue or a single device with one call. To find
 * all raised exceptions, this function must be called repeatedly until it
 * returns -EAGAIN. Returned exceptions can optionally be cleared by
 * setting the corresponding bit in the @exception_mask input parameter.
 * However, clearing an exception prevents retrieving further information
 * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 * @exception_mask (IN/OUT) - exceptions to clear (IN) and raised (OUT)
 * @gpu_id (OUT)   - gpu id of exceptions raised
 * @queue_id (OUT) - queue id of exceptions raised
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on raised exception found
 *          Raised exceptions found are returned in @exception_mask
 *          with reported source id returned in @gpu_id or @queue_id.
 *        - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 * Get additional info on raised exception.
 *
 * @info_ptr (IN)        - pointer to exception info buffer to copy to
 * @info_size (IN/OUT)   - exception info buffer size (bytes)
 * @source_id (IN)       - target gpu or queue id
 * @exception_code (IN)  - target exception
 * @clear_exception (IN) - clear raised @exception_code exception
 *                         (0 = false, 1 = true)
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *          bytes of memory exception data to @info_ptr.
 *          If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *          kfd_runtime_info to @info_ptr.
 *          Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
        __u64 info_ptr;
        __u32 info_size;
        __u32 source_id;
        __u32 exception_code;
        __u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 * Get queue information.
 *
 * @exception_mask (IN)   - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 * @num_queues (IN/OUT)   - number of queue snapshot entries
 *         The debugger specifies the size of the array allocated in @num_queues.
 *         KFD returns the number of queues that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated per
 *         entry. The debugger should use KFD_IOCTL_MINOR_VERSION to determine
 *         which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *         growing the ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot
 *         buffer in the event that it's larger than the actual
 *         kfd_queue_snapshot_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *          Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_queues;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 * Get device information.
 *
 * @exception_mask (IN)   - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 * @num_devices (IN/OUT)  - number of debug devices to snapshot
 *         The debugger specifies the size of the array allocated in @num_devices.
 *         KFD returns the number of devices that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated. The
 *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
 *         in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *         ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot
 *         buffer in the event that it's larger than the actual
 *         kfd_dbg_device_info_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *          Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_devices;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 * @pid - target process to debug
 * @op  - debug operation (see kfd_dbg_trap_operations)
 *
 * @op determines which union struct args to use.
 * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
        __u32 pid;
        __u32 op;

        union {
                struct kfd_ioctl_dbg_trap_enable_args enable;
                struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
                struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
                struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
                struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
                struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
                struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
                struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
                struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
                struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
                struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
                struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
                struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
                struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
        };
};
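
/*
 * Illustrative sketch (compiled out): a debugger enabling a debug session
 * on a target and querying one debug event. "kfd_fd", "target_pid" and
 * "pipe_fd" are placeholders; the target must be PTRACE_ATTACHED and must
 * have completed AMDKFD_IOC_RUNTIME_ENABLE before hardware debug ops.
 */
#if 0
struct kfd_runtime_info rinfo = {0};
struct kfd_ioctl_dbg_trap_args dbg = { .pid = target_pid };

dbg.op = KFD_IOC_DBG_TRAP_ENABLE;
dbg.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);
dbg.enable.rinfo_ptr = (__u64)(uintptr_t)&rinfo;
dbg.enable.rinfo_size = sizeof(rinfo);
dbg.enable.dbg_fd = pipe_fd;    /* notified when exceptions are raised */
ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &dbg);

dbg.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
dbg.query_debug_event.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);
if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &dbg) == 0) {
        /* dbg.query_debug_event.gpu_id / queue_id identify the source;
         * -EAGAIN means no raised exception was found. */
}
#endif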

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)                   _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)            _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)            _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)           _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION                  \
                AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE                 \
                AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE                \
                AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY            \
                AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS           \
                AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES        \
                AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE                 \
                AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT                 \
                AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT                \
                AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT                    \
                AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT                  \
                AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS                  \
                AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED      \
                AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED    \
                AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
                AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED  \
                AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA       \
                AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG              \
                AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER             \
                AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW    \
                AMDKFD_IOWR(0x14,               \
                        struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM                   \
                AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU          \
                AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU           \
                AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU            \
                AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU        \
                AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK                  \
                AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE         \
                AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO              \
                AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF                \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS              \
                AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS                   \
                AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM  AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE               \
                AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP                      \
                AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY             \
                AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF                \
                AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE               \
                AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP                     \
                AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_COMMAND_START            0x01
#define AMDKFD_COMMAND_END              0x27

#endif