xref: /linux/include/uapi/linux/kfd_ioctl.h (revision a30a7a29c35ef9d90bdec86d3051c32f47d6041f)
1b7facbaeSOded Gabbay /*
2b7facbaeSOded Gabbay  * Copyright 2014 Advanced Micro Devices, Inc.
3b7facbaeSOded Gabbay  *
4b7facbaeSOded Gabbay  * Permission is hereby granted, free of charge, to any person obtaining a
5b7facbaeSOded Gabbay  * copy of this software and associated documentation files (the "Software"),
6b7facbaeSOded Gabbay  * to deal in the Software without restriction, including without limitation
7b7facbaeSOded Gabbay  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b7facbaeSOded Gabbay  * and/or sell copies of the Software, and to permit persons to whom the
9b7facbaeSOded Gabbay  * Software is furnished to do so, subject to the following conditions:
10b7facbaeSOded Gabbay  *
11b7facbaeSOded Gabbay  * The above copyright notice and this permission notice shall be included in
12b7facbaeSOded Gabbay  * all copies or substantial portions of the Software.
13b7facbaeSOded Gabbay  *
14b7facbaeSOded Gabbay  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15b7facbaeSOded Gabbay  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16b7facbaeSOded Gabbay  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17b7facbaeSOded Gabbay  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18b7facbaeSOded Gabbay  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19b7facbaeSOded Gabbay  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20b7facbaeSOded Gabbay  * OTHER DEALINGS IN THE SOFTWARE.
21b7facbaeSOded Gabbay  */
22b7facbaeSOded Gabbay 
23b7facbaeSOded Gabbay #ifndef KFD_IOCTL_H_INCLUDED
24b7facbaeSOded Gabbay #define KFD_IOCTL_H_INCLUDED
25b7facbaeSOded Gabbay 
26472b46c3SMikko Rapeli #include <drm/drm.h>
27b7facbaeSOded Gabbay #include <linux/ioctl.h>
28b7facbaeSOded Gabbay 
2991e2c191SAmber Lin /*
3091e2c191SAmber Lin  * - 1.1 - initial version
3191e2c191SAmber Lin  * - 1.3 - Add SMI events support
328c44390dSFelix Kuehling  * - 1.4 - Indicate new SRAM EDC bit in device properties
3340ce74d1SPhilip Yang  * - 1.5 - Add SVM API
34a43e2a0eSFelix Kuehling  * - 1.6 - Query clear flags in SVM get_attr API
35692996f2SRajneesh Bhardwaj  * - 1.7 - Checkpoint Restore (CRIU) API
3665722ff6SDavid Yat Sin  * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
379731dd4cSDaniel Phillips  * - 1.9 - Add available memory ioctl
383db7f894SPhilip Yang  * - 1.10 - Add SMI profiler event log
3950e8ca0bSEric Huang  * - 1.11 - Add unified memory for ctx save/restore area
40fd234e75SFelix Kuehling  * - 1.12 - Add DMA buf export ioctl
41a159afdaSJonathan Kim  * - 1.13 - Add debugger API
42d297eedfSJames Zhu  * - 1.14 - Update kfd_event_data
434cabb217SFelix Kuehling  * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
4491e2c191SAmber Lin  */
45b7facbaeSOded Gabbay #define KFD_IOCTL_MAJOR_VERSION 1
464cabb217SFelix Kuehling #define KFD_IOCTL_MINOR_VERSION 15
47b7facbaeSOded Gabbay 
48b7facbaeSOded Gabbay struct kfd_ioctl_get_version_args {
49472b46c3SMikko Rapeli 	__u32 major_version;	/* from KFD */
50472b46c3SMikko Rapeli 	__u32 minor_version;	/* from KFD */
51b7facbaeSOded Gabbay };
52b7facbaeSOded Gabbay 
53b7facbaeSOded Gabbay /* For kfd_ioctl_create_queue_args.queue_type. */
541b4670f6SOak Zeng #define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
551b4670f6SOak Zeng #define KFD_IOC_QUEUE_TYPE_SDMA			0x1
561b4670f6SOak Zeng #define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
571b4670f6SOak Zeng #define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3
58b7facbaeSOded Gabbay 
59b7facbaeSOded Gabbay #define KFD_MAX_QUEUE_PERCENTAGE	100
60b7facbaeSOded Gabbay #define KFD_MAX_QUEUE_PRIORITY		15
61b7facbaeSOded Gabbay 
62b7facbaeSOded Gabbay struct kfd_ioctl_create_queue_args {
63472b46c3SMikko Rapeli 	__u64 ring_base_address;	/* to KFD */
64472b46c3SMikko Rapeli 	__u64 write_pointer_address;	/* from KFD */
65472b46c3SMikko Rapeli 	__u64 read_pointer_address;	/* from KFD */
66472b46c3SMikko Rapeli 	__u64 doorbell_offset;	/* from KFD */
67b7facbaeSOded Gabbay 
68472b46c3SMikko Rapeli 	__u32 ring_size;		/* to KFD */
69472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
70472b46c3SMikko Rapeli 	__u32 queue_type;		/* to KFD */
71472b46c3SMikko Rapeli 	__u32 queue_percentage;	/* to KFD */
72472b46c3SMikko Rapeli 	__u32 queue_priority;	/* to KFD */
73472b46c3SMikko Rapeli 	__u32 queue_id;		/* from KFD */
74b7facbaeSOded Gabbay 
75472b46c3SMikko Rapeli 	__u64 eop_buffer_address;	/* to KFD */
76472b46c3SMikko Rapeli 	__u64 eop_buffer_size;	/* to KFD */
77472b46c3SMikko Rapeli 	__u64 ctx_save_restore_address; /* to KFD */
78373d7080SFelix Kuehling 	__u32 ctx_save_restore_size;	/* to KFD */
79373d7080SFelix Kuehling 	__u32 ctl_stack_size;		/* to KFD */
80b7facbaeSOded Gabbay };
81b7facbaeSOded Gabbay 
82b7facbaeSOded Gabbay struct kfd_ioctl_destroy_queue_args {
83472b46c3SMikko Rapeli 	__u32 queue_id;		/* to KFD */
84472b46c3SMikko Rapeli 	__u32 pad;
85b7facbaeSOded Gabbay };
86b7facbaeSOded Gabbay 
87b7facbaeSOded Gabbay struct kfd_ioctl_update_queue_args {
88472b46c3SMikko Rapeli 	__u64 ring_base_address;	/* to KFD */
89b7facbaeSOded Gabbay 
90472b46c3SMikko Rapeli 	__u32 queue_id;		/* to KFD */
91472b46c3SMikko Rapeli 	__u32 ring_size;		/* to KFD */
92472b46c3SMikko Rapeli 	__u32 queue_percentage;	/* to KFD */
93472b46c3SMikko Rapeli 	__u32 queue_priority;	/* to KFD */
94b7facbaeSOded Gabbay };
95b7facbaeSOded Gabbay 
96a7fe68a1SFelix Kuehling struct kfd_ioctl_set_cu_mask_args {
97a7fe68a1SFelix Kuehling 	__u32 queue_id;		/* to KFD */
98a7fe68a1SFelix Kuehling 	__u32 num_cu_mask;		/* to KFD */
99a7fe68a1SFelix Kuehling 	__u64 cu_mask_ptr;		/* to KFD */
100a7fe68a1SFelix Kuehling };
101a7fe68a1SFelix Kuehling 
1025df099e8SJay Cornwall struct kfd_ioctl_get_queue_wave_state_args {
1038e7f9171SDmitry V. Levin 	__u64 ctl_stack_address;	/* to KFD */
1048e7f9171SDmitry V. Levin 	__u32 ctl_stack_used_size;	/* from KFD */
1058e7f9171SDmitry V. Levin 	__u32 save_area_used_size;	/* from KFD */
1068e7f9171SDmitry V. Levin 	__u32 queue_id;			/* to KFD */
1078e7f9171SDmitry V. Levin 	__u32 pad;
1085df099e8SJay Cornwall };
1095df099e8SJay Cornwall 
1109731dd4cSDaniel Phillips struct kfd_ioctl_get_available_memory_args {
1119731dd4cSDaniel Phillips 	__u64 available;	/* from KFD */
1129731dd4cSDaniel Phillips 	__u32 gpu_id;		/* to KFD */
1139731dd4cSDaniel Phillips 	__u32 pad;
1149731dd4cSDaniel Phillips };
1159731dd4cSDaniel Phillips 
1164f98cf2bSJonathan Kim struct kfd_dbg_device_info_entry {
1174f98cf2bSJonathan Kim 	__u64 exception_status;
1184f98cf2bSJonathan Kim 	__u64 lds_base;
1194f98cf2bSJonathan Kim 	__u64 lds_limit;
1204f98cf2bSJonathan Kim 	__u64 scratch_base;
1214f98cf2bSJonathan Kim 	__u64 scratch_limit;
1224f98cf2bSJonathan Kim 	__u64 gpuvm_base;
1234f98cf2bSJonathan Kim 	__u64 gpuvm_limit;
1244f98cf2bSJonathan Kim 	__u32 gpu_id;
1254f98cf2bSJonathan Kim 	__u32 location_id;
1264f98cf2bSJonathan Kim 	__u32 vendor_id;
1274f98cf2bSJonathan Kim 	__u32 device_id;
1284f98cf2bSJonathan Kim 	__u32 revision_id;
1294f98cf2bSJonathan Kim 	__u32 subsystem_vendor_id;
1304f98cf2bSJonathan Kim 	__u32 subsystem_device_id;
1314f98cf2bSJonathan Kim 	__u32 fw_version;
1324f98cf2bSJonathan Kim 	__u32 gfx_target_version;
1334f98cf2bSJonathan Kim 	__u32 simd_count;
1344f98cf2bSJonathan Kim 	__u32 max_waves_per_simd;
1354f98cf2bSJonathan Kim 	__u32 array_count;
1364f98cf2bSJonathan Kim 	__u32 simd_arrays_per_engine;
1374f98cf2bSJonathan Kim 	__u32 num_xcc;
1384f98cf2bSJonathan Kim 	__u32 capability;
1394f98cf2bSJonathan Kim 	__u32 debug_prop;
1404f98cf2bSJonathan Kim };
1414f98cf2bSJonathan Kim 
142b7facbaeSOded Gabbay /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
143b7facbaeSOded Gabbay #define KFD_IOC_CACHE_POLICY_COHERENT 0
144b7facbaeSOded Gabbay #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
145b7facbaeSOded Gabbay 
146b7facbaeSOded Gabbay struct kfd_ioctl_set_memory_policy_args {
147472b46c3SMikko Rapeli 	__u64 alternate_aperture_base;	/* to KFD */
148472b46c3SMikko Rapeli 	__u64 alternate_aperture_size;	/* to KFD */
149b7facbaeSOded Gabbay 
150472b46c3SMikko Rapeli 	__u32 gpu_id;			/* to KFD */
151472b46c3SMikko Rapeli 	__u32 default_policy;		/* to KFD */
152472b46c3SMikko Rapeli 	__u32 alternate_policy;		/* to KFD */
153472b46c3SMikko Rapeli 	__u32 pad;
154b7facbaeSOded Gabbay };
155b7facbaeSOded Gabbay 
156b7facbaeSOded Gabbay /*
157b7facbaeSOded Gabbay  * All counters are monotonic. They are used for profiling of compute jobs.
158b7facbaeSOded Gabbay  * The profiling is done by userspace.
159b7facbaeSOded Gabbay  *
160b7facbaeSOded Gabbay  * In case of GPU reset, the counter should not be affected.
161b7facbaeSOded Gabbay  */
162b7facbaeSOded Gabbay 
163b7facbaeSOded Gabbay struct kfd_ioctl_get_clock_counters_args {
164472b46c3SMikko Rapeli 	__u64 gpu_clock_counter;	/* from KFD */
165472b46c3SMikko Rapeli 	__u64 cpu_clock_counter;	/* from KFD */
166472b46c3SMikko Rapeli 	__u64 system_clock_counter;	/* from KFD */
167472b46c3SMikko Rapeli 	__u64 system_clock_freq;	/* from KFD */
168b7facbaeSOded Gabbay 
169472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
170472b46c3SMikko Rapeli 	__u32 pad;
171b7facbaeSOded Gabbay };
172b7facbaeSOded Gabbay 
173b7facbaeSOded Gabbay struct kfd_process_device_apertures {
174472b46c3SMikko Rapeli 	__u64 lds_base;		/* from KFD */
175472b46c3SMikko Rapeli 	__u64 lds_limit;		/* from KFD */
176472b46c3SMikko Rapeli 	__u64 scratch_base;		/* from KFD */
177472b46c3SMikko Rapeli 	__u64 scratch_limit;		/* from KFD */
178472b46c3SMikko Rapeli 	__u64 gpuvm_base;		/* from KFD */
179472b46c3SMikko Rapeli 	__u64 gpuvm_limit;		/* from KFD */
180472b46c3SMikko Rapeli 	__u32 gpu_id;		/* from KFD */
181472b46c3SMikko Rapeli 	__u32 pad;
182b7facbaeSOded Gabbay };
183b7facbaeSOded Gabbay 
184c7bcbfa4SFelix Kuehling /*
185c7bcbfa4SFelix Kuehling  * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
186c7bcbfa4SFelix Kuehling  * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
187c7bcbfa4SFelix Kuehling  * unlimited number of GPUs.
188c7bcbfa4SFelix Kuehling  */
189c7bcbfa4SFelix Kuehling #define NUM_OF_SUPPORTED_GPUS 7
190b7facbaeSOded Gabbay struct kfd_ioctl_get_process_apertures_args {
191b7facbaeSOded Gabbay 	struct kfd_process_device_apertures
192b7facbaeSOded Gabbay 			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
193b7facbaeSOded Gabbay 
194b7facbaeSOded Gabbay 	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
195472b46c3SMikko Rapeli 	__u32 num_of_nodes;
196472b46c3SMikko Rapeli 	__u32 pad;
197b7facbaeSOded Gabbay };
198b7facbaeSOded Gabbay 
199c7bcbfa4SFelix Kuehling struct kfd_ioctl_get_process_apertures_new_args {
200c7bcbfa4SFelix Kuehling 	/* User allocated. Pointer to struct kfd_process_device_apertures
201c7bcbfa4SFelix Kuehling 	 * filled in by Kernel
202c7bcbfa4SFelix Kuehling 	 */
203c7bcbfa4SFelix Kuehling 	__u64 kfd_process_device_apertures_ptr;
204c7bcbfa4SFelix Kuehling 	/* to KFD - indicates amount of memory present in
205c7bcbfa4SFelix Kuehling 	 *  kfd_process_device_apertures_ptr
206c7bcbfa4SFelix Kuehling 	 * from KFD - Number of entries filled by KFD.
207c7bcbfa4SFelix Kuehling 	 */
208c7bcbfa4SFelix Kuehling 	__u32 num_of_nodes;
209c7bcbfa4SFelix Kuehling 	__u32 pad;
210c7bcbfa4SFelix Kuehling };
211c7bcbfa4SFelix Kuehling 
212aef11009SYair Shachar #define MAX_ALLOWED_NUM_POINTS    100
213aef11009SYair Shachar #define MAX_ALLOWED_AW_BUFF_SIZE 4096
214aef11009SYair Shachar #define MAX_ALLOWED_WAC_BUFF_SIZE  128
215aef11009SYair Shachar 
216aef11009SYair Shachar struct kfd_ioctl_dbg_register_args {
217472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
218472b46c3SMikko Rapeli 	__u32 pad;
219aef11009SYair Shachar };
220aef11009SYair Shachar 
221aef11009SYair Shachar struct kfd_ioctl_dbg_unregister_args {
222472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
223472b46c3SMikko Rapeli 	__u32 pad;
224aef11009SYair Shachar };
225aef11009SYair Shachar 
226aef11009SYair Shachar struct kfd_ioctl_dbg_address_watch_args {
227472b46c3SMikko Rapeli 	__u64 content_ptr;		/* a pointer to the actual content */
228472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
229472b46c3SMikko Rapeli 	__u32 buf_size_in_bytes;	/*including gpu_id and buf_size */
230aef11009SYair Shachar };
231aef11009SYair Shachar 
232aef11009SYair Shachar struct kfd_ioctl_dbg_wave_control_args {
233472b46c3SMikko Rapeli 	__u64 content_ptr;		/* a pointer to the actual content */
234472b46c3SMikko Rapeli 	__u32 gpu_id;		/* to KFD */
235472b46c3SMikko Rapeli 	__u32 buf_size_in_bytes;	/*including gpu_id and buf_size */
236aef11009SYair Shachar };
237aef11009SYair Shachar 
23865722ff6SDavid Yat Sin #define KFD_INVALID_FD     0xffffffff
23965722ff6SDavid Yat Sin 
24029a5d3ebSAndrew Lewycky /* Matching HSA_EVENTTYPE */
24129a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_SIGNAL			0
24229a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_NODECHANGE		1
24329a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_DEVICESTATECHANGE		2
24429a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_HW_EXCEPTION		3
24529a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_SYSTEM_EVENT		4
24629a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_DEBUG_EVENT		5
24729a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_PROFILE_EVENT		6
24829a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_QUEUE_EVENT		7
24929a5d3ebSAndrew Lewycky #define KFD_IOC_EVENT_MEMORY			8
25029a5d3ebSAndrew Lewycky 
25129a5d3ebSAndrew Lewycky #define KFD_IOC_WAIT_RESULT_COMPLETE		0
25229a5d3ebSAndrew Lewycky #define KFD_IOC_WAIT_RESULT_TIMEOUT		1
25329a5d3ebSAndrew Lewycky #define KFD_IOC_WAIT_RESULT_FAIL		2
25429a5d3ebSAndrew Lewycky 
2557e86a365SOded Gabbay #define KFD_SIGNAL_EVENT_LIMIT			4096
25629a5d3ebSAndrew Lewycky 
2570c119abaSShaoyun Liu /* For kfd_event_data.hw_exception_data.reset_type. */
2580c119abaSShaoyun Liu #define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
2590c119abaSShaoyun Liu #define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1
2600c119abaSShaoyun Liu 
2610c119abaSShaoyun Liu /* For kfd_event_data.hw_exception_data.reset_cause. */
2620c119abaSShaoyun Liu #define KFD_HW_EXCEPTION_GPU_HANG	0
2630c119abaSShaoyun Liu #define KFD_HW_EXCEPTION_ECC		1
2640c119abaSShaoyun Liu 
2659b54d201SEric Huang /* For kfd_hsa_memory_exception_data.ErrorType */
2669b54d201SEric Huang #define KFD_MEM_ERR_NO_RAS		0
2679b54d201SEric Huang #define KFD_MEM_ERR_SRAM_ECC		1
2689b54d201SEric Huang #define KFD_MEM_ERR_POISON_CONSUMED	2
2699b54d201SEric Huang #define KFD_MEM_ERR_GPU_HANG		3
2700c119abaSShaoyun Liu 
27129a5d3ebSAndrew Lewycky struct kfd_ioctl_create_event_args {
272472b46c3SMikko Rapeli 	__u64 event_page_offset;	/* from KFD */
273472b46c3SMikko Rapeli 	__u32 event_trigger_data;	/* from KFD - signal events only */
274472b46c3SMikko Rapeli 	__u32 event_type;		/* to KFD */
275472b46c3SMikko Rapeli 	__u32 auto_reset;		/* to KFD */
276472b46c3SMikko Rapeli 	__u32 node_id;		/* to KFD - only valid for certain
27729a5d3ebSAndrew Lewycky 							event types */
278472b46c3SMikko Rapeli 	__u32 event_id;		/* from KFD */
279472b46c3SMikko Rapeli 	__u32 event_slot_index;	/* from KFD */
28029a5d3ebSAndrew Lewycky };
28129a5d3ebSAndrew Lewycky 
28229a5d3ebSAndrew Lewycky struct kfd_ioctl_destroy_event_args {
283472b46c3SMikko Rapeli 	__u32 event_id;		/* to KFD */
284472b46c3SMikko Rapeli 	__u32 pad;
28529a5d3ebSAndrew Lewycky };
28629a5d3ebSAndrew Lewycky 
28729a5d3ebSAndrew Lewycky struct kfd_ioctl_set_event_args {
288472b46c3SMikko Rapeli 	__u32 event_id;		/* to KFD */
289472b46c3SMikko Rapeli 	__u32 pad;
29029a5d3ebSAndrew Lewycky };
29129a5d3ebSAndrew Lewycky 
29229a5d3ebSAndrew Lewycky struct kfd_ioctl_reset_event_args {
293472b46c3SMikko Rapeli 	__u32 event_id;		/* to KFD */
294472b46c3SMikko Rapeli 	__u32 pad;
29529a5d3ebSAndrew Lewycky };
29629a5d3ebSAndrew Lewycky 
29729a5d3ebSAndrew Lewycky struct kfd_memory_exception_failure {
298472b46c3SMikko Rapeli 	__u32 NotPresent;	/* Page not present or supervisor privilege */
299472b46c3SMikko Rapeli 	__u32 ReadOnly;	/* Write access to a read-only page */
300472b46c3SMikko Rapeli 	__u32 NoExecute;	/* Execute access to a page marked NX */
3012640c3faSshaoyunl 	__u32 imprecise;	/* Can't determine the	exact fault address */
30229a5d3ebSAndrew Lewycky };
30329a5d3ebSAndrew Lewycky 
30429a5d3ebSAndrew Lewycky /* memory exception data */
30529a5d3ebSAndrew Lewycky struct kfd_hsa_memory_exception_data {
30629a5d3ebSAndrew Lewycky 	struct kfd_memory_exception_failure failure;
307472b46c3SMikko Rapeli 	__u64 va;
308472b46c3SMikko Rapeli 	__u32 gpu_id;
3099b54d201SEric Huang 	__u32 ErrorType; /* 0 = no RAS error,
3109b54d201SEric Huang 			  * 1 = ECC_SRAM,
3119b54d201SEric Huang 			  * 2 = Link_SYNFLOOD (poison),
3129b54d201SEric Huang 			  * 3 = GPU hang (not attributable to a specific cause),
3139b54d201SEric Huang 			  * other values reserved
3149b54d201SEric Huang 			  */
31529a5d3ebSAndrew Lewycky };
31629a5d3ebSAndrew Lewycky 
3170c119abaSShaoyun Liu /* hw exception data */
3180c119abaSShaoyun Liu struct kfd_hsa_hw_exception_data {
319aba11838SDmitry V. Levin 	__u32 reset_type;
320aba11838SDmitry V. Levin 	__u32 reset_cause;
321aba11838SDmitry V. Levin 	__u32 memory_lost;
322aba11838SDmitry V. Levin 	__u32 gpu_id;
3230c119abaSShaoyun Liu };
3240c119abaSShaoyun Liu 
3256f582513SJames Zhu /* hsa signal event data */
3266f582513SJames Zhu struct kfd_hsa_signal_event_data {
3276f582513SJames Zhu 	__u64 last_event_age;	/* to and from KFD */
3286f582513SJames Zhu };
3296f582513SJames Zhu 
33029a5d3ebSAndrew Lewycky /* Event data */
33129a5d3ebSAndrew Lewycky struct kfd_event_data {
33229a5d3ebSAndrew Lewycky 	union {
3336f582513SJames Zhu 		/* From KFD */
33429a5d3ebSAndrew Lewycky 		struct kfd_hsa_memory_exception_data memory_exception_data;
3350c119abaSShaoyun Liu 		struct kfd_hsa_hw_exception_data hw_exception_data;
3366f582513SJames Zhu 		/* To and From KFD */
3376f582513SJames Zhu 		struct kfd_hsa_signal_event_data signal_event_data;
3386f582513SJames Zhu 	};
339472b46c3SMikko Rapeli 	__u64 kfd_event_data_ext;	/* pointer to an extension structure
34029a5d3ebSAndrew Lewycky 					   for future exception types */
341472b46c3SMikko Rapeli 	__u32 event_id;		/* to KFD */
342472b46c3SMikko Rapeli 	__u32 pad;
34329a5d3ebSAndrew Lewycky };
34429a5d3ebSAndrew Lewycky 
34529a5d3ebSAndrew Lewycky struct kfd_ioctl_wait_events_args {
346472b46c3SMikko Rapeli 	__u64 events_ptr;		/* pointed to struct
347aef11009SYair Shachar 					   kfd_event_data array, to KFD */
348472b46c3SMikko Rapeli 	__u32 num_events;		/* to KFD */
349472b46c3SMikko Rapeli 	__u32 wait_for_all;		/* to KFD */
350472b46c3SMikko Rapeli 	__u32 timeout;		/* to KFD */
351472b46c3SMikko Rapeli 	__u32 wait_result;		/* from KFD */
35229a5d3ebSAndrew Lewycky };
35329a5d3ebSAndrew Lewycky 
3546a1c9510SMoses Reuben struct kfd_ioctl_set_scratch_backing_va_args {
355b4d08520SDmitry V. Levin 	__u64 va_addr;	/* to KFD */
356b4d08520SDmitry V. Levin 	__u32 gpu_id;	/* to KFD */
357b4d08520SDmitry V. Levin 	__u32 pad;
3586a1c9510SMoses Reuben };
3596a1c9510SMoses Reuben 
3605d71dbc3SYong Zhao struct kfd_ioctl_get_tile_config_args {
3615d71dbc3SYong Zhao 	/* to KFD: pointer to tile array */
362b4d08520SDmitry V. Levin 	__u64 tile_config_ptr;
3635d71dbc3SYong Zhao 	/* to KFD: pointer to macro tile array */
364b4d08520SDmitry V. Levin 	__u64 macro_tile_config_ptr;
3655d71dbc3SYong Zhao 	/* to KFD: array size allocated by user mode
3665d71dbc3SYong Zhao 	 * from KFD: array size filled by kernel
3675d71dbc3SYong Zhao 	 */
368b4d08520SDmitry V. Levin 	__u32 num_tile_configs;
3695d71dbc3SYong Zhao 	/* to KFD: array size allocated by user mode
3705d71dbc3SYong Zhao 	 * from KFD: array size filled by kernel
3715d71dbc3SYong Zhao 	 */
372b4d08520SDmitry V. Levin 	__u32 num_macro_tile_configs;
3735d71dbc3SYong Zhao 
374b4d08520SDmitry V. Levin 	__u32 gpu_id;		/* to KFD */
375b4d08520SDmitry V. Levin 	__u32 gb_addr_config;	/* from KFD */
376b4d08520SDmitry V. Levin 	__u32 num_banks;		/* from KFD */
377b4d08520SDmitry V. Levin 	__u32 num_ranks;		/* from KFD */
3785d71dbc3SYong Zhao 	/* struct size can be extended later if needed
3795d71dbc3SYong Zhao 	 * without breaking ABI compatibility
3805d71dbc3SYong Zhao 	 */
3815d71dbc3SYong Zhao };
3825d71dbc3SYong Zhao 
383d7b9bd22SFelix Kuehling struct kfd_ioctl_set_trap_handler_args {
384a1102445SFelix Kuehling 	__u64 tba_addr;		/* to KFD */
385a1102445SFelix Kuehling 	__u64 tma_addr;		/* to KFD */
386a1102445SFelix Kuehling 	__u32 gpu_id;		/* to KFD */
387a1102445SFelix Kuehling 	__u32 pad;
388d7b9bd22SFelix Kuehling };
389d7b9bd22SFelix Kuehling 
3905ec7e028SFelix Kuehling struct kfd_ioctl_acquire_vm_args {
3915ec7e028SFelix Kuehling 	__u32 drm_fd;	/* to KFD */
3925ec7e028SFelix Kuehling 	__u32 gpu_id;	/* to KFD */
3935ec7e028SFelix Kuehling };
3945ec7e028SFelix Kuehling 
3955ec7e028SFelix Kuehling /* Allocation flags: memory types */
3965ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
3975ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
3985ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
3995ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
400d8e408a8SOak Zeng #define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
4015ec7e028SFelix Kuehling /* Allocation flags: attributes/access options */
4025ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
4035ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
4045ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
4055ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
4065ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
4075ec7e028SFelix Kuehling #define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
40898291d6dSEric Huang #define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)
4095f248462SDavid Francis #define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT	(1 << 24)
4105ec7e028SFelix Kuehling 
4115ec7e028SFelix Kuehling /* Allocate memory for later SVM (shared virtual memory) mapping.
4125ec7e028SFelix Kuehling  *
4135ec7e028SFelix Kuehling  * @va_addr:     virtual address of the memory to be allocated
4145ec7e028SFelix Kuehling  *               all later mappings on all GPUs will use this address
4155ec7e028SFelix Kuehling  * @size:        size in bytes
4165ec7e028SFelix Kuehling  * @handle:      buffer handle returned to user mode, used to refer to
4175ec7e028SFelix Kuehling  *               this allocation for mapping, unmapping and freeing
4185ec7e028SFelix Kuehling  * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
4195ec7e028SFelix Kuehling  *               for userptrs this is overloaded to specify the CPU address
4205ec7e028SFelix Kuehling  * @gpu_id:      device identifier
4215ec7e028SFelix Kuehling  * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
4225ec7e028SFelix Kuehling  */
4235ec7e028SFelix Kuehling struct kfd_ioctl_alloc_memory_of_gpu_args {
4245ec7e028SFelix Kuehling 	__u64 va_addr;		/* to KFD */
4255ec7e028SFelix Kuehling 	__u64 size;		/* to KFD */
4265ec7e028SFelix Kuehling 	__u64 handle;		/* from KFD */
4275ec7e028SFelix Kuehling 	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
4285ec7e028SFelix Kuehling 	__u32 gpu_id;		/* to KFD */
4295ec7e028SFelix Kuehling 	__u32 flags;
4305ec7e028SFelix Kuehling };
4315ec7e028SFelix Kuehling 
4325ec7e028SFelix Kuehling /* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
4335ec7e028SFelix Kuehling  *
4345ec7e028SFelix Kuehling  * @handle: memory handle returned by alloc
4355ec7e028SFelix Kuehling  */
4365ec7e028SFelix Kuehling struct kfd_ioctl_free_memory_of_gpu_args {
4375ec7e028SFelix Kuehling 	__u64 handle;		/* to KFD */
4385ec7e028SFelix Kuehling };
4395ec7e028SFelix Kuehling 
4405ec7e028SFelix Kuehling /* Map memory to one or more GPUs
4415ec7e028SFelix Kuehling  *
4425ec7e028SFelix Kuehling  * @handle:                memory handle returned by alloc
4435ec7e028SFelix Kuehling  * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
4445ec7e028SFelix Kuehling  * @n_devices:             number of devices in the array
4455ec7e028SFelix Kuehling  * @n_success:             number of devices mapped successfully
4465ec7e028SFelix Kuehling  *
4475ec7e028SFelix Kuehling  * @n_success returns information to the caller how many devices from
4485ec7e028SFelix Kuehling  * the start of the array have mapped the buffer successfully. It can
4495ec7e028SFelix Kuehling  * be passed into a subsequent retry call to skip those devices. For
4505ec7e028SFelix Kuehling  * the first call the caller should initialize it to 0.
4515ec7e028SFelix Kuehling  *
4525ec7e028SFelix Kuehling  * If the ioctl completes with return code 0 (success), n_success ==
4535ec7e028SFelix Kuehling  * n_devices.
4545ec7e028SFelix Kuehling  */
4555ec7e028SFelix Kuehling struct kfd_ioctl_map_memory_to_gpu_args {
4565ec7e028SFelix Kuehling 	__u64 handle;			/* to KFD */
4575ec7e028SFelix Kuehling 	__u64 device_ids_array_ptr;	/* to KFD */
4585ec7e028SFelix Kuehling 	__u32 n_devices;		/* to KFD */
4595ec7e028SFelix Kuehling 	__u32 n_success;		/* to/from KFD */
4605ec7e028SFelix Kuehling };
4615ec7e028SFelix Kuehling 
4625ec7e028SFelix Kuehling /* Unmap memory from one or more GPUs
4635ec7e028SFelix Kuehling  *
4645ec7e028SFelix Kuehling  * same arguments as for mapping
4655ec7e028SFelix Kuehling  */
4665ec7e028SFelix Kuehling struct kfd_ioctl_unmap_memory_from_gpu_args {
4675ec7e028SFelix Kuehling 	__u64 handle;			/* to KFD */
4685ec7e028SFelix Kuehling 	__u64 device_ids_array_ptr;	/* to KFD */
4695ec7e028SFelix Kuehling 	__u32 n_devices;		/* to KFD */
4705ec7e028SFelix Kuehling 	__u32 n_success;		/* to/from KFD */
4715ec7e028SFelix Kuehling };
4725ec7e028SFelix Kuehling 
4735bb4b78bSOak Zeng /* Allocate GWS for specific queue
4745bb4b78bSOak Zeng  *
4755bb4b78bSOak Zeng  * @queue_id:    queue's id that GWS is allocated for
4765bb4b78bSOak Zeng  * @num_gws:     how many GWS to allocate
4775bb4b78bSOak Zeng  * @first_gws:   index of the first GWS allocated.
4785bb4b78bSOak Zeng  *               only support contiguous GWS allocation
4795bb4b78bSOak Zeng  */
4805bb4b78bSOak Zeng struct kfd_ioctl_alloc_queue_gws_args {
4815bb4b78bSOak Zeng 	__u32 queue_id;		/* to KFD */
4825bb4b78bSOak Zeng 	__u32 num_gws;		/* to KFD */
4835bb4b78bSOak Zeng 	__u32 first_gws;	/* from KFD */
4845bb4b78bSOak Zeng 	__u32 pad;
4855bb4b78bSOak Zeng };
4865bb4b78bSOak Zeng 
4871dde0ea9SFelix Kuehling struct kfd_ioctl_get_dmabuf_info_args {
4881dde0ea9SFelix Kuehling 	__u64 size;		/* from KFD */
4891dde0ea9SFelix Kuehling 	__u64 metadata_ptr;	/* to KFD */
4901dde0ea9SFelix Kuehling 	__u32 metadata_size;	/* to KFD (space allocated by user)
4911dde0ea9SFelix Kuehling 				 * from KFD (actual metadata size)
4921dde0ea9SFelix Kuehling 				 */
4931dde0ea9SFelix Kuehling 	__u32 gpu_id;	/* from KFD */
4941dde0ea9SFelix Kuehling 	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
4951dde0ea9SFelix Kuehling 	__u32 dmabuf_fd;	/* to KFD */
4961dde0ea9SFelix Kuehling };
4971dde0ea9SFelix Kuehling 
4981dde0ea9SFelix Kuehling struct kfd_ioctl_import_dmabuf_args {
4991dde0ea9SFelix Kuehling 	__u64 va_addr;	/* to KFD */
5001dde0ea9SFelix Kuehling 	__u64 handle;	/* from KFD */
5011dde0ea9SFelix Kuehling 	__u32 gpu_id;	/* to KFD */
5021dde0ea9SFelix Kuehling 	__u32 dmabuf_fd;	/* to KFD */
5031dde0ea9SFelix Kuehling };
5041dde0ea9SFelix Kuehling 
505fd234e75SFelix Kuehling struct kfd_ioctl_export_dmabuf_args {
506fd234e75SFelix Kuehling 	__u64 handle;		/* to KFD */
507fd234e75SFelix Kuehling 	__u32 flags;		/* to KFD */
508fd234e75SFelix Kuehling 	__u32 dmabuf_fd;	/* from KFD */
509fd234e75SFelix Kuehling };
510fd234e75SFelix Kuehling 
511938a0650SAmber Lin /*
512938a0650SAmber Lin  * KFD SMI(System Management Interface) events
513938a0650SAmber Lin  */
514522ec6e0SMukul Joshi enum kfd_smi_event {
515522ec6e0SMukul Joshi 	KFD_SMI_EVENT_NONE = 0, /* not used */
516522ec6e0SMukul Joshi 	KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */
517522ec6e0SMukul Joshi 	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
51855977744SMukul Joshi 	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
51955977744SMukul Joshi 	KFD_SMI_EVENT_GPU_POST_RESET = 4,
520d7cfea33SPhilip Yang 	KFD_SMI_EVENT_MIGRATE_START = 5,
521d7cfea33SPhilip Yang 	KFD_SMI_EVENT_MIGRATE_END = 6,
522d7cfea33SPhilip Yang 	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
523d7cfea33SPhilip Yang 	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
524d7cfea33SPhilip Yang 	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
525d7cfea33SPhilip Yang 	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
526d7cfea33SPhilip Yang 	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
527d7cfea33SPhilip Yang 
528d7cfea33SPhilip Yang 	/*
529d7cfea33SPhilip Yang 	 * max event number, as a flag bit to get events from all processes,
530d7cfea33SPhilip Yang 	 * this requires super user permission, otherwise will not be able to
531d7cfea33SPhilip Yang 	 * receive event from any process. Without this flag to receive events
532d7cfea33SPhilip Yang 	 * from same process.
533d7cfea33SPhilip Yang 	 */
534d7cfea33SPhilip Yang 	KFD_SMI_EVENT_ALL_PROCESS = 64
535d7cfea33SPhilip Yang };
536d7cfea33SPhilip Yang 
537d7cfea33SPhilip Yang enum KFD_MIGRATE_TRIGGERS {
538d7cfea33SPhilip Yang 	KFD_MIGRATE_TRIGGER_PREFETCH,
539d7cfea33SPhilip Yang 	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
540d7cfea33SPhilip Yang 	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
541d7cfea33SPhilip Yang 	KFD_MIGRATE_TRIGGER_TTM_EVICTION
542d7cfea33SPhilip Yang };
543d7cfea33SPhilip Yang 
544d7cfea33SPhilip Yang enum KFD_QUEUE_EVICTION_TRIGGERS {
545d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_TRIGGER_SVM,
546d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
547d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_TRIGGER_TTM,
548d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
549d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
550d7cfea33SPhilip Yang 	KFD_QUEUE_EVICTION_CRIU_RESTORE
551d7cfea33SPhilip Yang };
552d7cfea33SPhilip Yang 
553d7cfea33SPhilip Yang enum KFD_SVM_UNMAP_TRIGGERS {
554d7cfea33SPhilip Yang 	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
555d7cfea33SPhilip Yang 	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
556d7cfea33SPhilip Yang 	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
557522ec6e0SMukul Joshi };
558522ec6e0SMukul Joshi 
559522ec6e0SMukul Joshi #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
560d58b8a99SPhilip Yang #define KFD_SMI_EVENT_MSG_SIZE	96
561938a0650SAmber Lin 
562938a0650SAmber Lin struct kfd_ioctl_smi_events_args {
563938a0650SAmber Lin 	__u32 gpuid;	/* to KFD */
564938a0650SAmber Lin 	__u32 anon_fd;	/* from KFD */
565938a0650SAmber Lin };
566938a0650SAmber Lin 
56736988070SRajneesh Bhardwaj /**************************************************************************************************
56836988070SRajneesh Bhardwaj  * CRIU IOCTLs (Checkpoint Restore In Userspace)
56936988070SRajneesh Bhardwaj  *
57036988070SRajneesh Bhardwaj  * When checkpointing a process, the userspace application will perform:
57136988070SRajneesh Bhardwaj  * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
57236988070SRajneesh Bhardwaj  *    all the queues.
57336988070SRajneesh Bhardwaj  * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
57436988070SRajneesh Bhardwaj  * 3. UNPAUSE op to un-evict all the queues
57536988070SRajneesh Bhardwaj  *
57636988070SRajneesh Bhardwaj  * When restoring a process, the CRIU userspace application will perform:
57736988070SRajneesh Bhardwaj  *
57836988070SRajneesh Bhardwaj  * 1. RESTORE op to restore process contents
57936988070SRajneesh Bhardwaj  * 2. RESUME op to start the process
58036988070SRajneesh Bhardwaj  *
58136988070SRajneesh Bhardwaj  * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
58236988070SRajneesh Bhardwaj  * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
58336988070SRajneesh Bhardwaj  */
58436988070SRajneesh Bhardwaj 
58536988070SRajneesh Bhardwaj enum kfd_criu_op {
58636988070SRajneesh Bhardwaj 	KFD_CRIU_OP_PROCESS_INFO,
58736988070SRajneesh Bhardwaj 	KFD_CRIU_OP_CHECKPOINT,
58836988070SRajneesh Bhardwaj 	KFD_CRIU_OP_UNPAUSE,
58936988070SRajneesh Bhardwaj 	KFD_CRIU_OP_RESTORE,
59036988070SRajneesh Bhardwaj 	KFD_CRIU_OP_RESUME,
59136988070SRajneesh Bhardwaj };
59236988070SRajneesh Bhardwaj 
59336988070SRajneesh Bhardwaj /**
59436988070SRajneesh Bhardwaj  * kfd_ioctl_criu_args - Arguments perform CRIU operation
59536988070SRajneesh Bhardwaj  * @devices:		[in/out] User pointer to memory location for devices information.
59636988070SRajneesh Bhardwaj  * 			This is an array of type kfd_criu_device_bucket.
59736988070SRajneesh Bhardwaj  * @bos:		[in/out] User pointer to memory location for BOs information
59836988070SRajneesh Bhardwaj  * 			This is an array of type kfd_criu_bo_bucket.
59936988070SRajneesh Bhardwaj  * @priv_data:		[in/out] User pointer to memory location for private data
60036988070SRajneesh Bhardwaj  * @priv_data_size:	[in/out] Size of priv_data in bytes
60136988070SRajneesh Bhardwaj  * @num_devices:	[in/out] Number of GPUs used by process. Size of @devices array.
60236988070SRajneesh Bhardwaj  * @num_bos		[in/out] Number of BOs used by process. Size of @bos array.
60336988070SRajneesh Bhardwaj  * @num_objects:	[in/out] Number of objects used by process. Objects are opaque to
60436988070SRajneesh Bhardwaj  *				 user application.
60536988070SRajneesh Bhardwaj  * @pid:		[in/out] PID of the process being checkpointed
60636988070SRajneesh Bhardwaj  * @op			[in] Type of operation (kfd_criu_op)
60736988070SRajneesh Bhardwaj  *
60836988070SRajneesh Bhardwaj  * Return: 0 on success, -errno on failure
60936988070SRajneesh Bhardwaj  */
61036988070SRajneesh Bhardwaj struct kfd_ioctl_criu_args {
61136988070SRajneesh Bhardwaj 	__u64 devices;		/* Used during ops: CHECKPOINT, RESTORE */
61236988070SRajneesh Bhardwaj 	__u64 bos;		/* Used during ops: CHECKPOINT, RESTORE */
61336988070SRajneesh Bhardwaj 	__u64 priv_data;	/* Used during ops: CHECKPOINT, RESTORE */
61436988070SRajneesh Bhardwaj 	__u64 priv_data_size;	/* Used during ops: PROCESS_INFO, RESTORE */
61536988070SRajneesh Bhardwaj 	__u32 num_devices;	/* Used during ops: PROCESS_INFO, RESTORE */
61636988070SRajneesh Bhardwaj 	__u32 num_bos;		/* Used during ops: PROCESS_INFO, RESTORE */
61736988070SRajneesh Bhardwaj 	__u32 num_objects;	/* Used during ops: PROCESS_INFO, RESTORE */
61836988070SRajneesh Bhardwaj 	__u32 pid;		/* Used during ops: PROCESS_INFO, RESUME */
61936988070SRajneesh Bhardwaj 	__u32 op;
62036988070SRajneesh Bhardwaj };
62136988070SRajneesh Bhardwaj 
62236988070SRajneesh Bhardwaj struct kfd_criu_device_bucket {
62336988070SRajneesh Bhardwaj 	__u32 user_gpu_id;
62436988070SRajneesh Bhardwaj 	__u32 actual_gpu_id;
62536988070SRajneesh Bhardwaj 	__u32 drm_fd;
62636988070SRajneesh Bhardwaj 	__u32 pad;
62736988070SRajneesh Bhardwaj };
62836988070SRajneesh Bhardwaj 
62936988070SRajneesh Bhardwaj struct kfd_criu_bo_bucket {
63036988070SRajneesh Bhardwaj 	__u64 addr;
63136988070SRajneesh Bhardwaj 	__u64 size;
63236988070SRajneesh Bhardwaj 	__u64 offset;
63336988070SRajneesh Bhardwaj 	__u64 restored_offset;    /* During restore, updated offset for BO */
63436988070SRajneesh Bhardwaj 	__u32 gpu_id;             /* This is the user_gpu_id */
63536988070SRajneesh Bhardwaj 	__u32 alloc_flags;
63636988070SRajneesh Bhardwaj 	__u32 dmabuf_fd;
63736988070SRajneesh Bhardwaj 	__u32 pad;
63836988070SRajneesh Bhardwaj };
63936988070SRajneesh Bhardwaj 
64036988070SRajneesh Bhardwaj /* CRIU IOCTLs - END */
64136988070SRajneesh Bhardwaj /**************************************************************************************************/
64236988070SRajneesh Bhardwaj 
64388807dc8SOak Zeng /* Register offset inside the remapped mmio page
64488807dc8SOak Zeng  */
64588807dc8SOak Zeng enum kfd_mmio_remap {
64688807dc8SOak Zeng 	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
64788807dc8SOak Zeng 	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
64888807dc8SOak Zeng };
64988807dc8SOak Zeng 
65040ce74d1SPhilip Yang /* Guarantee host access to memory */
65140ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
65240ce74d1SPhilip Yang /* Fine grained coherency between all devices with access */
65340ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_COHERENT    0x00000002
65440ce74d1SPhilip Yang /* Use any GPU in same hive as preferred device */
65540ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL  0x00000004
65640ce74d1SPhilip Yang /* GPUs only read, allows replication */
65740ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_GPU_RO      0x00000008
65840ce74d1SPhilip Yang /* Allow execution on GPU */
65940ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_GPU_EXEC    0x00000010
66040ce74d1SPhilip Yang /* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
66140ce74d1SPhilip Yang #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY     0x00000020
662fce7bf30SEric Huang /* Keep GPU memory mapping always valid as if XNACK is disable */
663fce7bf30SEric Huang #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED   0x00000040
6645f248462SDavid Francis /* Fine grained coherency between all devices using device-scope atomics */
6655f248462SDavid Francis #define KFD_IOCTL_SVM_FLAG_EXT_COHERENT        0x00000080
66640ce74d1SPhilip Yang 
66740ce74d1SPhilip Yang /**
66840ce74d1SPhilip Yang  * kfd_ioctl_svm_op - SVM ioctl operations
66940ce74d1SPhilip Yang  *
67040ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
67140ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
67240ce74d1SPhilip Yang  */
67340ce74d1SPhilip Yang enum kfd_ioctl_svm_op {
67440ce74d1SPhilip Yang 	KFD_IOCTL_SVM_OP_SET_ATTR,
67540ce74d1SPhilip Yang 	KFD_IOCTL_SVM_OP_GET_ATTR
67640ce74d1SPhilip Yang };
67740ce74d1SPhilip Yang 
67840ce74d1SPhilip Yang /** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
67940ce74d1SPhilip Yang  *
68040ce74d1SPhilip Yang  * GPU IDs are used to specify GPUs as preferred and prefetch locations.
68140ce74d1SPhilip Yang  * Below definitions are used for system memory or for leaving the preferred
68240ce74d1SPhilip Yang  * location unspecified.
68340ce74d1SPhilip Yang  */
68440ce74d1SPhilip Yang enum kfd_ioctl_svm_location {
68540ce74d1SPhilip Yang 	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
68640ce74d1SPhilip Yang 	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
68740ce74d1SPhilip Yang };
68840ce74d1SPhilip Yang 
68940ce74d1SPhilip Yang /**
69040ce74d1SPhilip Yang  * kfd_ioctl_svm_attr_type - SVM attribute types
69140ce74d1SPhilip Yang  *
69240ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
69340ce74d1SPhilip Yang  *                                    system memory
69440ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
69540ce74d1SPhilip Yang  *                                   system memory. Setting this triggers an
69640ce74d1SPhilip Yang  *                                   immediate prefetch (migration).
69740ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_ACCESS:
69840ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
69940ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
70040ce74d1SPhilip Yang  *                                by the attribute value
70140ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
70240ce74d1SPhilip Yang  *                                KFD_IOCTL_SVM_FLAG_...)
70340ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
70440ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
70540ce74d1SPhilip Yang  *                                  (log2 num pages)
70640ce74d1SPhilip Yang  */
70740ce74d1SPhilip Yang enum kfd_ioctl_svm_attr_type {
70840ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
70940ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
71040ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_ACCESS,
71140ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
71240ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
71340ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
71440ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
71540ce74d1SPhilip Yang 	KFD_IOCTL_SVM_ATTR_GRANULARITY
71640ce74d1SPhilip Yang };
71740ce74d1SPhilip Yang 
71840ce74d1SPhilip Yang /**
71940ce74d1SPhilip Yang  * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
72040ce74d1SPhilip Yang  *
72140ce74d1SPhilip Yang  * The meaning of the @value depends on the attribute type.
72240ce74d1SPhilip Yang  *
72340ce74d1SPhilip Yang  * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
72440ce74d1SPhilip Yang  * @value: attribute value
72540ce74d1SPhilip Yang  */
72640ce74d1SPhilip Yang struct kfd_ioctl_svm_attribute {
72740ce74d1SPhilip Yang 	__u32 type;
72840ce74d1SPhilip Yang 	__u32 value;
72940ce74d1SPhilip Yang };
73040ce74d1SPhilip Yang 
73140ce74d1SPhilip Yang /**
73240ce74d1SPhilip Yang  * kfd_ioctl_svm_args - Arguments for SVM ioctl
73340ce74d1SPhilip Yang  *
73440ce74d1SPhilip Yang  * @op specifies the operation to perform (see enum
73540ce74d1SPhilip Yang  * @kfd_ioctl_svm_op).  @start_addr and @size are common for all
73640ce74d1SPhilip Yang  * operations.
73740ce74d1SPhilip Yang  *
73840ce74d1SPhilip Yang  * A variable number of attributes can be given in @attrs.
73940ce74d1SPhilip Yang  * @nattr specifies the number of attributes. New attributes can be
74040ce74d1SPhilip Yang  * added in the future without breaking the ABI. If unknown attributes
74140ce74d1SPhilip Yang  * are given, the function returns -EINVAL.
74240ce74d1SPhilip Yang  *
74340ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
74440ce74d1SPhilip Yang  * range. It may overlap existing virtual address ranges. If it does,
74540ce74d1SPhilip Yang  * the existing ranges will be split such that the attribute changes
74640ce74d1SPhilip Yang  * only apply to the specified address range.
74740ce74d1SPhilip Yang  *
74840ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
74940ce74d1SPhilip Yang  * over all memory in the given range and returns the result as the
75040ce74d1SPhilip Yang  * attribute value. If different pages have different preferred or
75140ce74d1SPhilip Yang  * prefetch locations, 0xffffffff will be returned for
75240ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
75340ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC resepctively. For
75440ce74d1SPhilip Yang  * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
755a43e2a0eSFelix Kuehling  * aggregated by bitwise AND. That means, a flag will be set in the
756a43e2a0eSFelix Kuehling  * output, if that flag is set for all pages in the range. For
757a43e2a0eSFelix Kuehling  * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
758a43e2a0eSFelix Kuehling  * aggregated by bitwise NOR. That means, a flag will be set in the
759a43e2a0eSFelix Kuehling  * output, if that flag is clear for all pages in the range.
760a43e2a0eSFelix Kuehling  * The minimum migration granularity throughout the range will be
761a43e2a0eSFelix Kuehling  * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
76240ce74d1SPhilip Yang  *
76340ce74d1SPhilip Yang  * Querying of accessibility attributes works by initializing the
76440ce74d1SPhilip Yang  * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
76540ce74d1SPhilip Yang  * GPUID being queried. Multiple attributes can be given to allow
76640ce74d1SPhilip Yang  * querying multiple GPUIDs. The ioctl function overwrites the
76740ce74d1SPhilip Yang  * attribute type to indicate the access for the specified GPU.
76840ce74d1SPhilip Yang  */
76940ce74d1SPhilip Yang struct kfd_ioctl_svm_args {
77040ce74d1SPhilip Yang 	__u64 start_addr;
77140ce74d1SPhilip Yang 	__u64 size;
77240ce74d1SPhilip Yang 	__u32 op;
77340ce74d1SPhilip Yang 	__u32 nattr;
77440ce74d1SPhilip Yang 	/* Variable length array of attributes */
77579aa0367SFelix Kuehling 	struct kfd_ioctl_svm_attribute attrs[];
77640ce74d1SPhilip Yang };
77740ce74d1SPhilip Yang 
7780f7b5c44SAlex Sierra /**
7790f7b5c44SAlex Sierra  * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
7800f7b5c44SAlex Sierra  *
7810f7b5c44SAlex Sierra  * @xnack_enabled:       [in/out] Whether to enable XNACK mode for this process
7820f7b5c44SAlex Sierra  *
7830f7b5c44SAlex Sierra  * @xnack_enabled indicates whether recoverable page faults should be
7840f7b5c44SAlex Sierra  * enabled for the current process. 0 means disabled, positive means
7850f7b5c44SAlex Sierra  * enabled, negative means leave unchanged. If enabled, virtual address
7860f7b5c44SAlex Sierra  * translations on GFXv9 and later AMD GPUs can return XNACK and retry
7870f7b5c44SAlex Sierra  * the access until a valid PTE is available. This is used to implement
7880f7b5c44SAlex Sierra  * device page faults.
7890f7b5c44SAlex Sierra  *
7900f7b5c44SAlex Sierra  * On output, @xnack_enabled returns the (new) current mode (0 or
7910f7b5c44SAlex Sierra  * positive). Therefore, a negative input value can be used to query
7920f7b5c44SAlex Sierra  * the current mode without changing it.
7930f7b5c44SAlex Sierra  *
7940f7b5c44SAlex Sierra  * The XNACK mode fundamentally changes the way SVM managed memory works
7950f7b5c44SAlex Sierra  * in the driver, with subtle effects on application performance and
7960f7b5c44SAlex Sierra  * functionality.
7970f7b5c44SAlex Sierra  *
7980f7b5c44SAlex Sierra  * Enabling XNACK mode requires shader programs to be compiled
7990f7b5c44SAlex Sierra  * differently. Furthermore, not all GPUs support changing the mode
8000f7b5c44SAlex Sierra  * per-process. Therefore changing the mode is only allowed while no
8010f7b5c44SAlex Sierra  * user mode queues exist in the process. This ensure that no shader
8020f7b5c44SAlex Sierra  * code is running that may be compiled for the wrong mode. And GPUs
8030f7b5c44SAlex Sierra  * that cannot change to the requested mode will prevent the XNACK
8040f7b5c44SAlex Sierra  * mode from occurring. All GPUs used by the process must be in the
8050f7b5c44SAlex Sierra  * same XNACK mode.
8060f7b5c44SAlex Sierra  *
8070f7b5c44SAlex Sierra  * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
8080f7b5c44SAlex Sierra  * Therefore those GPUs are not considered for the XNACK mode switch.
8090f7b5c44SAlex Sierra  *
8100f7b5c44SAlex Sierra  * Return: 0 on success, -errno on failure
8110f7b5c44SAlex Sierra  */
8120f7b5c44SAlex Sierra struct kfd_ioctl_set_xnack_mode_args {
8130f7b5c44SAlex Sierra 	__s32 xnack_enabled;
8140f7b5c44SAlex Sierra };
8150f7b5c44SAlex Sierra 
8164f98cf2bSJonathan Kim /* Wave launch override modes */
8174f98cf2bSJonathan Kim enum kfd_dbg_trap_override_mode {
8184f98cf2bSJonathan Kim 	KFD_DBG_TRAP_OVERRIDE_OR = 0,
8194f98cf2bSJonathan Kim 	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
8204f98cf2bSJonathan Kim };
8214f98cf2bSJonathan Kim 
8224f98cf2bSJonathan Kim /* Wave launch overrides */
8234f98cf2bSJonathan Kim enum kfd_dbg_trap_mask {
8244f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_INVALID = 1,
8254f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
8264f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
8274f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
8284f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
8294f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
8304f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
8314f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
8324f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
8334f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
8344f98cf2bSJonathan Kim 	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
8354f98cf2bSJonathan Kim };
8364f98cf2bSJonathan Kim 
8374f98cf2bSJonathan Kim /* Wave launch modes */
8384f98cf2bSJonathan Kim enum kfd_dbg_trap_wave_launch_mode {
8394f98cf2bSJonathan Kim 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
8404f98cf2bSJonathan Kim 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
8414f98cf2bSJonathan Kim 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
8424f98cf2bSJonathan Kim };
8434f98cf2bSJonathan Kim 
8444f98cf2bSJonathan Kim /* Address watch modes */
8454f98cf2bSJonathan Kim enum kfd_dbg_trap_address_watch_mode {
8464f98cf2bSJonathan Kim 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
8474f98cf2bSJonathan Kim 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
8484f98cf2bSJonathan Kim 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
8494f98cf2bSJonathan Kim 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
8504f98cf2bSJonathan Kim };
8514f98cf2bSJonathan Kim 
8524f98cf2bSJonathan Kim /* Additional wave settings */
8534f98cf2bSJonathan Kim enum kfd_dbg_trap_flags {
8544f98cf2bSJonathan Kim 	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
8554f98cf2bSJonathan Kim };
8564f98cf2bSJonathan Kim 
8574f98cf2bSJonathan Kim /* Trap exceptions */
8584f98cf2bSJonathan Kim enum kfd_dbg_trap_exception_code {
8594f98cf2bSJonathan Kim 	EC_NONE = 0,
8604f98cf2bSJonathan Kim 	/* per queue */
8614f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_ABORT = 1,
8624f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_TRAP = 2,
8634f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_MATH_ERROR = 3,
8644f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
8654f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
8664f98cf2bSJonathan Kim 	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
8674f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
8684f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
8694f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
8704f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_RESERVED = 19,
8714f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_UNSUPPORTED = 20,
8724f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
8734f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
8744f98cf2bSJonathan Kim 	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
8754f98cf2bSJonathan Kim 	EC_QUEUE_PREEMPTION_ERROR = 30,
8764f98cf2bSJonathan Kim 	EC_QUEUE_NEW = 31,
8774f98cf2bSJonathan Kim 	/* per device */
8784f98cf2bSJonathan Kim 	EC_DEVICE_QUEUE_DELETE = 32,
8794f98cf2bSJonathan Kim 	EC_DEVICE_MEMORY_VIOLATION = 33,
8804f98cf2bSJonathan Kim 	EC_DEVICE_RAS_ERROR = 34,
8814f98cf2bSJonathan Kim 	EC_DEVICE_FATAL_HALT = 35,
8824f98cf2bSJonathan Kim 	EC_DEVICE_NEW = 36,
8834f98cf2bSJonathan Kim 	/* per process */
8844f98cf2bSJonathan Kim 	EC_PROCESS_RUNTIME = 48,
8854f98cf2bSJonathan Kim 	EC_PROCESS_DEVICE_REMOVE = 49,
8864f98cf2bSJonathan Kim 	EC_MAX
8874f98cf2bSJonathan Kim };
8884f98cf2bSJonathan Kim 
8894f98cf2bSJonathan Kim /* Mask generated by ecode in kfd_dbg_trap_exception_code */
8904f98cf2bSJonathan Kim #define KFD_EC_MASK(ecode)	(1ULL << (ecode - 1))
8914f98cf2bSJonathan Kim 
8924f98cf2bSJonathan Kim /* Masks for exception code type checks below */
8934f98cf2bSJonathan Kim #define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\
8944f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\
8954f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\
8964f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\
8974f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\
8984f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\
8994f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
9004f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
9014f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
9024f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
9034f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
9044f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
9054f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
9064f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)	|	\
9074f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR)	|	\
9084f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_NEW))
9094f98cf2bSJonathan Kim #define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |		\
9104f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |		\
9114f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |		\
9124f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\
9134f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_DEVICE_NEW))
9144f98cf2bSJonathan Kim #define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
9154f98cf2bSJonathan Kim 				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
916*0cac183bSJonathan Kim #define KFD_EC_MASK_PACKET	(KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
917*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
918*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
919*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
920*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
921*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
922*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
923*0cac183bSJonathan Kim 				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
9244f98cf2bSJonathan Kim 
9254f98cf2bSJonathan Kim /* Checks for exception code types for KFD search */
926*0cac183bSJonathan Kim #define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
9274f98cf2bSJonathan Kim #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
928*0cac183bSJonathan Kim 			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
9294f98cf2bSJonathan Kim #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
930*0cac183bSJonathan Kim 			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
9314f98cf2bSJonathan Kim #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
932*0cac183bSJonathan Kim 			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
933*0cac183bSJonathan Kim #define KFD_DBG_EC_TYPE_IS_PACKET(ecode)				\
934*0cac183bSJonathan Kim 			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
9354f98cf2bSJonathan Kim 
9364f98cf2bSJonathan Kim 
9374f98cf2bSJonathan Kim /* Runtime enable states */
9384f98cf2bSJonathan Kim enum kfd_dbg_runtime_state {
9394f98cf2bSJonathan Kim 	DEBUG_RUNTIME_STATE_DISABLED = 0,
9404f98cf2bSJonathan Kim 	DEBUG_RUNTIME_STATE_ENABLED = 1,
9414f98cf2bSJonathan Kim 	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
9424f98cf2bSJonathan Kim 	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
9434f98cf2bSJonathan Kim };
9444f98cf2bSJonathan Kim 
9454f98cf2bSJonathan Kim /* Runtime enable status */
9464f98cf2bSJonathan Kim struct kfd_runtime_info {
9474f98cf2bSJonathan Kim 	__u64 r_debug;
9484f98cf2bSJonathan Kim 	__u32 runtime_state;
9494f98cf2bSJonathan Kim 	__u32 ttmp_setup;
9504f98cf2bSJonathan Kim };
9514f98cf2bSJonathan Kim 
9524f98cf2bSJonathan Kim /* Enable modes for runtime enable */
9534f98cf2bSJonathan Kim #define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
9544f98cf2bSJonathan Kim #define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2
9554f98cf2bSJonathan Kim 
9564f98cf2bSJonathan Kim /**
9574f98cf2bSJonathan Kim  * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
9584f98cf2bSJonathan Kim  *
9594f98cf2bSJonathan Kim  * Coordinates debug exception signalling and debug device enablement with runtime.
9604f98cf2bSJonathan Kim  *
9614f98cf2bSJonathan Kim  * @r_debug - pointer to user struct for sharing information between ROCr and the debuggger
9624f98cf2bSJonathan Kim  * @mode_mask - mask to set mode
9634f98cf2bSJonathan Kim  *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
9644f98cf2bSJonathan Kim  *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
9654f98cf2bSJonathan Kim  * @capabilities_mask - mask to notify runtime on what KFD supports
9664f98cf2bSJonathan Kim  *
9674f98cf2bSJonathan Kim  * Return - 0 on SUCCESS.
9684f98cf2bSJonathan Kim  *	  - EBUSY if runtime enable call already pending.
9694f98cf2bSJonathan Kim  *	  - EEXIST if user queues already active prior to call.
9704f98cf2bSJonathan Kim  *	    If process is debug enabled, runtime enable will enable debug devices and
9714f98cf2bSJonathan Kim  *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
9724f98cf2bSJonathan Kim  *	    to unblock - see kfd_ioctl_dbg_trap_args.
9734f98cf2bSJonathan Kim  *
9744f98cf2bSJonathan Kim  */
9754f98cf2bSJonathan Kim struct kfd_ioctl_runtime_enable_args {
9764f98cf2bSJonathan Kim 	__u64 r_debug;
9774f98cf2bSJonathan Kim 	__u32 mode_mask;
9784f98cf2bSJonathan Kim 	__u32 capabilities_mask;
9794f98cf2bSJonathan Kim };
9804f98cf2bSJonathan Kim 
9814f98cf2bSJonathan Kim /* Queue information */
9824f98cf2bSJonathan Kim struct kfd_queue_snapshot_entry {
9834f98cf2bSJonathan Kim 	__u64 exception_status;
9844f98cf2bSJonathan Kim 	__u64 ring_base_address;
9854f98cf2bSJonathan Kim 	__u64 write_pointer_address;
9864f98cf2bSJonathan Kim 	__u64 read_pointer_address;
9874f98cf2bSJonathan Kim 	__u64 ctx_save_restore_address;
9884f98cf2bSJonathan Kim 	__u32 queue_id;
9894f98cf2bSJonathan Kim 	__u32 gpu_id;
9904f98cf2bSJonathan Kim 	__u32 ring_size;
9914f98cf2bSJonathan Kim 	__u32 queue_type;
9924f98cf2bSJonathan Kim 	__u32 ctx_save_restore_area_size;
9934f98cf2bSJonathan Kim 	__u32 reserved;
9944f98cf2bSJonathan Kim };
9954f98cf2bSJonathan Kim 
9964f98cf2bSJonathan Kim /* Queue status return for suspend/resume */
9974f98cf2bSJonathan Kim #define KFD_DBG_QUEUE_ERROR_BIT		30
9984f98cf2bSJonathan Kim #define KFD_DBG_QUEUE_INVALID_BIT	31
9994f98cf2bSJonathan Kim #define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
10004f98cf2bSJonathan Kim #define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)
10014f98cf2bSJonathan Kim 
10024f98cf2bSJonathan Kim /* Context save area header information */
10034f98cf2bSJonathan Kim struct kfd_context_save_area_header {
10044f98cf2bSJonathan Kim 	struct {
10054f98cf2bSJonathan Kim 		__u32 control_stack_offset;
10064f98cf2bSJonathan Kim 		__u32 control_stack_size;
10074f98cf2bSJonathan Kim 		__u32 wave_state_offset;
10084f98cf2bSJonathan Kim 		__u32 wave_state_size;
10094f98cf2bSJonathan Kim 	} wave_state;
10104f98cf2bSJonathan Kim 	__u32 debug_offset;
10114f98cf2bSJonathan Kim 	__u32 debug_size;
10124f98cf2bSJonathan Kim 	__u64 err_payload_addr;
10134f98cf2bSJonathan Kim 	__u32 err_event_id;
10144f98cf2bSJonathan Kim 	__u32 reserved1;
10154f98cf2bSJonathan Kim };
10164f98cf2bSJonathan Kim 
10174f98cf2bSJonathan Kim /*
10184f98cf2bSJonathan Kim  * Debug operations
10194f98cf2bSJonathan Kim  *
10204f98cf2bSJonathan Kim  * For specifics on usage and return values, see documentation per operation
10214f98cf2bSJonathan Kim  * below.  Otherwise, generic error returns apply:
10224f98cf2bSJonathan Kim  *	- ESRCH if the process to debug does not exist.
10234f98cf2bSJonathan Kim  *
10244f98cf2bSJonathan Kim  *	- EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
10254f98cf2bSJonathan Kim  *		 KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
10264f98cf2bSJonathan Kim  *		 Also returns this error if GPU hardware scheduling is not supported.
10274f98cf2bSJonathan Kim  *
10284f98cf2bSJonathan Kim  *	- EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
10294f98cf2bSJonathan Kim  *		 PTRACE_ATTACHED.  KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
10304f98cf2bSJonathan Kim  *		 clean up of debug mode as long as process is debug enabled.
10314f98cf2bSJonathan Kim  *
10324f98cf2bSJonathan Kim  *	- EACCES if any DBG_HW_OP (debug hardware operation) is requested when
10334f98cf2bSJonathan Kim  *		 AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
10344f98cf2bSJonathan Kim  *
10354f98cf2bSJonathan Kim  *	- ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
10364f98cf2bSJonathan Kim  *
10374f98cf2bSJonathan Kim  *	- Other errors may be returned when a DBG_HW_OP occurs while the GPU
10384f98cf2bSJonathan Kim  *	  is in a fatal state.
10394f98cf2bSJonathan Kim  *
10404f98cf2bSJonathan Kim  */
10414f98cf2bSJonathan Kim enum kfd_dbg_trap_operations {
10424f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_ENABLE = 0,
10434f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_DISABLE = 1,
10444f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
10454f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
10464f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
10474f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
10484f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */
10494f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */
10504f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */
10514f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */
10524f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
10534f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
10544f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
10554f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
10564f98cf2bSJonathan Kim 	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
10574f98cf2bSJonathan Kim };
10584f98cf2bSJonathan Kim 
10594f98cf2bSJonathan Kim /**
10604f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_enable_args
10614f98cf2bSJonathan Kim  *
10624f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_ENABLE.
10634f98cf2bSJonathan Kim  *
10644f98cf2bSJonathan Kim  *     Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
10654f98cf2bSJonathan Kim  *     kfd_ioctl_dbg_trap_args to disable debug session.
10664f98cf2bSJonathan Kim  *
10674f98cf2bSJonathan Kim  *     @exception_mask (IN)	- exceptions to raise to the debugger
10684f98cf2bSJonathan Kim  *     @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
10694f98cf2bSJonathan Kim  *     @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes
10704f98cf2bSJonathan Kim  *     @dbg_fd	       (IN)	- fd the KFD will nofify the debugger with of raised
10714f98cf2bSJonathan Kim  *				  exceptions set in exception_mask.
10724f98cf2bSJonathan Kim  *
10734f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
10744f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
10754f98cf2bSJonathan Kim  *		Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
10764f98cf2bSJonathan Kim  *		Size of kfd_runtime saved by the KFD returned to @rinfo_size.
10774f98cf2bSJonathan Kim  *            - EBADF if KFD cannot get a reference to dbg_fd.
10784f98cf2bSJonathan Kim  *            - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
10794f98cf2bSJonathan Kim  *            - EINVAL if target process is already debug enabled.
10804f98cf2bSJonathan Kim  *
10814f98cf2bSJonathan Kim  */
10824f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_enable_args {
10834f98cf2bSJonathan Kim 	__u64 exception_mask;
10844f98cf2bSJonathan Kim 	__u64 rinfo_ptr;
10854f98cf2bSJonathan Kim 	__u32 rinfo_size;
10864f98cf2bSJonathan Kim 	__u32 dbg_fd;
10874f98cf2bSJonathan Kim };
10884f98cf2bSJonathan Kim 
10894f98cf2bSJonathan Kim /**
10904f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_send_runtime_event_args
10914f98cf2bSJonathan Kim  *
10924f98cf2bSJonathan Kim  *
10934f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
10944f98cf2bSJonathan Kim  *     Raises exceptions to runtime.
10954f98cf2bSJonathan Kim  *
10964f98cf2bSJonathan Kim  *     @exception_mask (IN) - exceptions to raise to runtime
10974f98cf2bSJonathan Kim  *     @gpu_id	       (IN) - target device id
10984f98cf2bSJonathan Kim  *     @queue_id       (IN) - target queue id
10994f98cf2bSJonathan Kim  *
11004f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
11014f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
11024f98cf2bSJonathan Kim  *	      - ENODEV if gpu_id not found.
11034f98cf2bSJonathan Kim  *		If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
11044f98cf2bSJonathan Kim  *		AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
11054f98cf2bSJonathan Kim  *		All other exceptions are raised to runtime through err_payload_addr.
11064f98cf2bSJonathan Kim  *		See kfd_context_save_area_header.
11074f98cf2bSJonathan Kim  */
11084f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_send_runtime_event_args {
11094f98cf2bSJonathan Kim 	__u64 exception_mask;
11104f98cf2bSJonathan Kim 	__u32 gpu_id;
11114f98cf2bSJonathan Kim 	__u32 queue_id;
11124f98cf2bSJonathan Kim };
11134f98cf2bSJonathan Kim 
11144f98cf2bSJonathan Kim /**
11154f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
11164f98cf2bSJonathan Kim  *
11174f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_SET_EXCEPTIONS_ENABLED
11184f98cf2bSJonathan Kim  *     Set new exceptions to be raised to the debugger.
11194f98cf2bSJonathan Kim  *
11204f98cf2bSJonathan Kim  *     @exception_mask (IN) - new exceptions to raise the debugger
11214f98cf2bSJonathan Kim  *
11224f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
11234f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
11244f98cf2bSJonathan Kim  */
11254f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
11264f98cf2bSJonathan Kim 	__u64 exception_mask;
11274f98cf2bSJonathan Kim };
11284f98cf2bSJonathan Kim 
11294f98cf2bSJonathan Kim /**
11304f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_set_wave_launch_override_args
11314f98cf2bSJonathan Kim  *
11324f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
11334f98cf2bSJonathan Kim  *     Enable HW exceptions to raise trap.
11344f98cf2bSJonathan Kim  *
11354f98cf2bSJonathan Kim  *     @override_mode	     (IN)     - see kfd_dbg_trap_override_mode
11364f98cf2bSJonathan Kim  *     @enable_mask	     (IN/OUT) - reference kfd_dbg_trap_mask.
11374f98cf2bSJonathan Kim  *					IN is the override modes requested to be enabled.
11384f98cf2bSJonathan Kim  *					OUT is referenced in Return below.
11394f98cf2bSJonathan Kim  *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
11404f98cf2bSJonathan Kim  *					IN is the override modes requested for support check.
11414f98cf2bSJonathan Kim  *					OUT is referenced in Return below.
11424f98cf2bSJonathan Kim  *
11434f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
11444f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
11454f98cf2bSJonathan Kim  *		Previous enablement is returned in @enable_mask.
11464f98cf2bSJonathan Kim  *		Actual override support is returned in @support_request_mask.
11474f98cf2bSJonathan Kim  *	      - EINVAL if override mode is not supported.
11484f98cf2bSJonathan Kim  *	      - EACCES if trap support requested is not actually supported.
11494f98cf2bSJonathan Kim  *		i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
11504f98cf2bSJonathan Kim  *		Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
11514f98cf2bSJonathan Kim  */
11524f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
11534f98cf2bSJonathan Kim 	__u32 override_mode;
11544f98cf2bSJonathan Kim 	__u32 enable_mask;
11554f98cf2bSJonathan Kim 	__u32 support_request_mask;
11564f98cf2bSJonathan Kim 	__u32 pad;
11574f98cf2bSJonathan Kim };
11584f98cf2bSJonathan Kim 
11594f98cf2bSJonathan Kim /**
11604f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
11614f98cf2bSJonathan Kim  *
11624f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
11634f98cf2bSJonathan Kim  *     Set wave launch mode.
11644f98cf2bSJonathan Kim  *
11654f98cf2bSJonathan Kim  *     @mode (IN) - see kfd_dbg_trap_wave_launch_mode
11664f98cf2bSJonathan Kim  *
11674f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
11684f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
11694f98cf2bSJonathan Kim  */
11704f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
11714f98cf2bSJonathan Kim 	__u32 launch_mode;
11724f98cf2bSJonathan Kim 	__u32 pad;
11734f98cf2bSJonathan Kim };
11744f98cf2bSJonathan Kim 
11754f98cf2bSJonathan Kim /**
11764f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_suspend_queues_ags
11774f98cf2bSJonathan Kim  *
11784f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
11794f98cf2bSJonathan Kim  *     Suspend queues.
11804f98cf2bSJonathan Kim  *
11814f98cf2bSJonathan Kim  *     @exception_mask	(IN) - raised exceptions to clear
11824f98cf2bSJonathan Kim  *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
11834f98cf2bSJonathan Kim  *			       to suspend
11844f98cf2bSJonathan Kim  *     @num_queues	(IN) - number of queues to suspend in @queue_array_ptr
11854f98cf2bSJonathan Kim  *     @grace_period	(IN) - wave time allowance before preemption
11864f98cf2bSJonathan Kim  *			       per 1K GPU clock cycle unit
11874f98cf2bSJonathan Kim  *
11884f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
11894f98cf2bSJonathan Kim  *     Destruction of a suspended queue is blocked until the queue is
11904f98cf2bSJonathan Kim  *     resumed.  This allows the debugger to access queue information and
11914f98cf2bSJonathan Kim  *     the its context save area without running into a race condition on
11924f98cf2bSJonathan Kim  *     queue destruction.
11934f98cf2bSJonathan Kim  *     Automatically copies per queue context save area header information
11944f98cf2bSJonathan Kim  *     into the save area base
11954f98cf2bSJonathan Kim  *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
11964f98cf2bSJonathan Kim  *
11974f98cf2bSJonathan Kim  *     Return - Number of queues suspended on SUCCESS.
11984f98cf2bSJonathan Kim  *	.	KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
11994f98cf2bSJonathan Kim  *		for each queue id in @queue_array_ptr array reports unsuccessful
12004f98cf2bSJonathan Kim  *		suspend reason.
12014f98cf2bSJonathan Kim  *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
12024f98cf2bSJonathan Kim  *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
12034f98cf2bSJonathan Kim  *		is being destroyed.
12044f98cf2bSJonathan Kim  */
12054f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_suspend_queues_args {
12064f98cf2bSJonathan Kim 	__u64 exception_mask;
12074f98cf2bSJonathan Kim 	__u64 queue_array_ptr;
12084f98cf2bSJonathan Kim 	__u32 num_queues;
12094f98cf2bSJonathan Kim 	__u32 grace_period;
12104f98cf2bSJonathan Kim };
12114f98cf2bSJonathan Kim 
12124f98cf2bSJonathan Kim /**
12134f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_resume_queues_args
12144f98cf2bSJonathan Kim  *
12154f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
12164f98cf2bSJonathan Kim  *     Resume queues.
12174f98cf2bSJonathan Kim  *
12184f98cf2bSJonathan Kim  *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
12194f98cf2bSJonathan Kim  *			       to resume
12204f98cf2bSJonathan Kim  *     @num_queues	(IN) - number of queues to resume in @queue_array_ptr
12214f98cf2bSJonathan Kim  *
12224f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
12234f98cf2bSJonathan Kim  *     Return - Number of queues resumed on SUCCESS.
12244f98cf2bSJonathan Kim  *		KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
12254f98cf2bSJonathan Kim  *		for each queue id in @queue_array_ptr array reports unsuccessful
12264f98cf2bSJonathan Kim  *		resume reason.
12274f98cf2bSJonathan Kim  *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
12284f98cf2bSJonathan Kim  *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
12294f98cf2bSJonathan Kim  */
12304f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_resume_queues_args {
12314f98cf2bSJonathan Kim 	__u64 queue_array_ptr;
12324f98cf2bSJonathan Kim 	__u32 num_queues;
12334f98cf2bSJonathan Kim 	__u32 pad;
12344f98cf2bSJonathan Kim };
12354f98cf2bSJonathan Kim 
12364f98cf2bSJonathan Kim /**
12374f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_set_node_address_watch_args
12384f98cf2bSJonathan Kim  *
12394f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
12404f98cf2bSJonathan Kim  *     Sets address watch for device.
12414f98cf2bSJonathan Kim  *
12424f98cf2bSJonathan Kim  *     @address	(IN)  - watch address to set
12434f98cf2bSJonathan Kim  *     @mode    (IN)  - see kfd_dbg_trap_address_watch_mode
12444f98cf2bSJonathan Kim  *     @mask    (IN)  - watch address mask
12454f98cf2bSJonathan Kim  *     @gpu_id  (IN)  - target gpu to set watch point
12464f98cf2bSJonathan Kim  *     @id      (OUT) - watch id allocated
12474f98cf2bSJonathan Kim  *
12484f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
12494f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
12504f98cf2bSJonathan Kim  *		Allocated watch ID returned to @id.
12514f98cf2bSJonathan Kim  *	      - ENODEV if gpu_id not found.
12524f98cf2bSJonathan Kim  *	      - ENOMEM if watch IDs can be allocated
12534f98cf2bSJonathan Kim  */
12544f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
12554f98cf2bSJonathan Kim 	__u64 address;
12564f98cf2bSJonathan Kim 	__u32 mode;
12574f98cf2bSJonathan Kim 	__u32 mask;
12584f98cf2bSJonathan Kim 	__u32 gpu_id;
12594f98cf2bSJonathan Kim 	__u32 id;
12604f98cf2bSJonathan Kim };
12614f98cf2bSJonathan Kim 
12624f98cf2bSJonathan Kim /**
12634f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_clear_node_address_watch_args
12644f98cf2bSJonathan Kim  *
12654f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
12664f98cf2bSJonathan Kim  *     Clear address watch for device.
12674f98cf2bSJonathan Kim  *
12684f98cf2bSJonathan Kim  *     @gpu_id  (IN)  - target device to clear watch point
12694f98cf2bSJonathan Kim  *     @id      (IN) - allocated watch id to clear
12704f98cf2bSJonathan Kim  *
12714f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
12724f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
12734f98cf2bSJonathan Kim  *	      - ENODEV if gpu_id not found.
12744f98cf2bSJonathan Kim  *	      - EINVAL if watch ID has not been allocated.
12754f98cf2bSJonathan Kim  */
12764f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
12774f98cf2bSJonathan Kim 	__u32 gpu_id;
12784f98cf2bSJonathan Kim 	__u32 id;
12794f98cf2bSJonathan Kim };
12804f98cf2bSJonathan Kim 
12814f98cf2bSJonathan Kim /**
12824f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_set_flags_args
12834f98cf2bSJonathan Kim  *
12844f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
12854f98cf2bSJonathan Kim  *     Sets flags for wave behaviour.
12864f98cf2bSJonathan Kim  *
12874f98cf2bSJonathan Kim  *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
12884f98cf2bSJonathan Kim  *
12894f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
12904f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
12914f98cf2bSJonathan Kim  *	      - EACCESS if any debug device does not allow flag options.
12924f98cf2bSJonathan Kim  */
12934f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_set_flags_args {
12944f98cf2bSJonathan Kim 	__u32 flags;
12954f98cf2bSJonathan Kim 	__u32 pad;
12964f98cf2bSJonathan Kim };
12974f98cf2bSJonathan Kim 
12984f98cf2bSJonathan Kim /**
12994f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_query_debug_event_args
13004f98cf2bSJonathan Kim  *
13014f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
13024f98cf2bSJonathan Kim  *
13034f98cf2bSJonathan Kim  *     Find one or more raised exceptions. This function can return multiple
13044f98cf2bSJonathan Kim  *     exceptions from a single queue or a single device with one call. To find
13054f98cf2bSJonathan Kim  *     all raised exceptions, this function must be called repeatedly until it
13064f98cf2bSJonathan Kim  *     returns -EAGAIN. Returned exceptions can optionally be cleared by
13074f98cf2bSJonathan Kim  *     setting the corresponding bit in the @exception_mask input parameter.
13084f98cf2bSJonathan Kim  *     However, clearing an exception prevents retrieving further information
13094f98cf2bSJonathan Kim  *     about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
13104f98cf2bSJonathan Kim  *
13114f98cf2bSJonathan Kim  *     @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
13124f98cf2bSJonathan Kim  *     @gpu_id	       (OUT)    - gpu id of exceptions raised
13134f98cf2bSJonathan Kim  *     @queue_id       (OUT)    - queue id of exceptions raised
13144f98cf2bSJonathan Kim  *
13154f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
13164f98cf2bSJonathan Kim  *     Return - 0 on raised exception found
13174f98cf2bSJonathan Kim  *              Raised exceptions found are returned in @exception mask
13184f98cf2bSJonathan Kim  *              with reported source id returned in @gpu_id or @queue_id.
13194f98cf2bSJonathan Kim  *            - EAGAIN if no raised exception has been found
13204f98cf2bSJonathan Kim  */
13214f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_query_debug_event_args {
13224f98cf2bSJonathan Kim 	__u64 exception_mask;
13234f98cf2bSJonathan Kim 	__u32 gpu_id;
13244f98cf2bSJonathan Kim 	__u32 queue_id;
13254f98cf2bSJonathan Kim };
13264f98cf2bSJonathan Kim 
13274f98cf2bSJonathan Kim /**
13284f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_query_exception_info_args
13294f98cf2bSJonathan Kim  *
13304f98cf2bSJonathan Kim  *     Arguments KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
13314f98cf2bSJonathan Kim  *     Get additional info on raised exception.
13324f98cf2bSJonathan Kim  *
13334f98cf2bSJonathan Kim  *     @info_ptr	(IN)	 - pointer to exception info buffer to copy to
13344f98cf2bSJonathan Kim  *     @info_size	(IN/OUT) - exception info buffer size (bytes)
13354f98cf2bSJonathan Kim  *     @source_id	(IN)     - target gpu or queue id
13364f98cf2bSJonathan Kim  *     @exception_code	(IN)     - target exception
13374f98cf2bSJonathan Kim  *     @clear_exception	(IN)     - clear raised @exception_code exception
13384f98cf2bSJonathan Kim  *				   (0 = false, 1 = true)
13394f98cf2bSJonathan Kim  *
13404f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
13414f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
13424f98cf2bSJonathan Kim  *              If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
13434f98cf2bSJonathan Kim  *		bytes of memory exception data to @info_ptr.
13444f98cf2bSJonathan Kim  *              If @exception_code is EC_PROCESS_RUNTIME, copy saved
13454f98cf2bSJonathan Kim  *              kfd_runtime_info to @info_ptr.
13464f98cf2bSJonathan Kim  *              Actual required @info_ptr size (bytes) is returned in @info_size.
13474f98cf2bSJonathan Kim  */
13484f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_query_exception_info_args {
13494f98cf2bSJonathan Kim 	__u64 info_ptr;
13504f98cf2bSJonathan Kim 	__u32 info_size;
13514f98cf2bSJonathan Kim 	__u32 source_id;
13524f98cf2bSJonathan Kim 	__u32 exception_code;
13534f98cf2bSJonathan Kim 	__u32 clear_exception;
13544f98cf2bSJonathan Kim };
13554f98cf2bSJonathan Kim 
13564f98cf2bSJonathan Kim /**
13574f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_get_queue_snapshot_args
13584f98cf2bSJonathan Kim  *
13594f98cf2bSJonathan Kim  *     Arguments KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
13604f98cf2bSJonathan Kim  *     Get queue information.
13614f98cf2bSJonathan Kim  *
13624f98cf2bSJonathan Kim  *     @exception_mask	 (IN)	  - exceptions raised to clear
13634f98cf2bSJonathan Kim  *     @snapshot_buf_ptr (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
13644f98cf2bSJonathan Kim  *     @num_queues	 (IN/OUT) - number of queue snapshot entries
13654f98cf2bSJonathan Kim  *         The debugger specifies the size of the array allocated in @num_queues.
13664f98cf2bSJonathan Kim  *         KFD returns the number of queues that actually existed. If this is
13674f98cf2bSJonathan Kim  *         larger than the size specified by the debugger, KFD will not overflow
13684f98cf2bSJonathan Kim  *         the array allocated by the debugger.
13694f98cf2bSJonathan Kim  *
13704f98cf2bSJonathan Kim  *     @entry_size	 (IN/OUT) - size per entry in bytes
13714f98cf2bSJonathan Kim  *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
13724f98cf2bSJonathan Kim  *         @entry_size. KFD returns the number of bytes actually populated per
13734f98cf2bSJonathan Kim  *         entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine,
13744f98cf2bSJonathan Kim  *         which fields in struct kfd_queue_snapshot_entry are valid. This allows
13754f98cf2bSJonathan Kim  *         growing the ABI in a backwards compatible manner.
13764f98cf2bSJonathan Kim  *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
13774f98cf2bSJonathan Kim  *         event that it's larger than actual kfd_queue_snapshot_entry.
13784f98cf2bSJonathan Kim  *
13794f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
13804f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
13814f98cf2bSJonathan Kim  *              Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
13824f98cf2bSJonathan Kim  *              into @snapshot_buf_ptr if @num_queues(IN) > 0.
13834f98cf2bSJonathan Kim  *              Otherwise return @num_queues(OUT) queue snapshot entries that exist.
13844f98cf2bSJonathan Kim  */
13854f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_queue_snapshot_args {
13864f98cf2bSJonathan Kim 	__u64 exception_mask;
13874f98cf2bSJonathan Kim 	__u64 snapshot_buf_ptr;
13884f98cf2bSJonathan Kim 	__u32 num_queues;
13894f98cf2bSJonathan Kim 	__u32 entry_size;
13904f98cf2bSJonathan Kim };
13914f98cf2bSJonathan Kim 
13924f98cf2bSJonathan Kim /**
13934f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_get_device_snapshot_args
13944f98cf2bSJonathan Kim  *
13954f98cf2bSJonathan Kim  *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
13964f98cf2bSJonathan Kim  *     Get device information.
13974f98cf2bSJonathan Kim  *
13984f98cf2bSJonathan Kim  *     @exception_mask	 (IN)	  - exceptions raised to clear
13994f98cf2bSJonathan Kim  *     @snapshot_buf_ptr (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
14004f98cf2bSJonathan Kim  *     @num_devices	 (IN/OUT) - number of debug devices to snapshot
14014f98cf2bSJonathan Kim  *         The debugger specifies the size of the array allocated in @num_devices.
14024f98cf2bSJonathan Kim  *         KFD returns the number of devices that actually existed. If this is
14034f98cf2bSJonathan Kim  *         larger than the size specified by the debugger, KFD will not overflow
14044f98cf2bSJonathan Kim  *         the array allocated by the debugger.
14054f98cf2bSJonathan Kim  *
14064f98cf2bSJonathan Kim  *     @entry_size	 (IN/OUT) - size per entry in bytes
14074f98cf2bSJonathan Kim  *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
14084f98cf2bSJonathan Kim  *         @entry_size. KFD returns the number of bytes actually populated. The
14094f98cf2bSJonathan Kim  *         debugger should use KFD_IOCTL_MINOR_VERSION to determine, which fields
14104f98cf2bSJonathan Kim  *         in struct kfd_dbg_device_info_entry are valid. This allows growing the
14114f98cf2bSJonathan Kim  *         ABI in a backwards compatible manner.
14124f98cf2bSJonathan Kim  *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
14134f98cf2bSJonathan Kim  *         event that it's larger than actual kfd_dbg_device_info_entry.
14144f98cf2bSJonathan Kim  *
14154f98cf2bSJonathan Kim  *     Generic errors apply (see kfd_dbg_trap_operations).
14164f98cf2bSJonathan Kim  *     Return - 0 on SUCCESS.
14174f98cf2bSJonathan Kim  *              Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
14184f98cf2bSJonathan Kim  *              into @snapshot_buf_ptr if @num_devices(IN) > 0.
14194f98cf2bSJonathan Kim  *              Otherwise return @num_devices(OUT) queue snapshot entries that exist.
14204f98cf2bSJonathan Kim  */
14214f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_device_snapshot_args {
14224f98cf2bSJonathan Kim 	__u64 exception_mask;
14234f98cf2bSJonathan Kim 	__u64 snapshot_buf_ptr;
14244f98cf2bSJonathan Kim 	__u32 num_devices;
14254f98cf2bSJonathan Kim 	__u32 entry_size;
14264f98cf2bSJonathan Kim };
14274f98cf2bSJonathan Kim 
14284f98cf2bSJonathan Kim /**
14294f98cf2bSJonathan Kim  * kfd_ioctl_dbg_trap_args
14304f98cf2bSJonathan Kim  *
14314f98cf2bSJonathan Kim  * Arguments to debug target process.
14324f98cf2bSJonathan Kim  *
14334f98cf2bSJonathan Kim  *     @pid - target process to debug
14344f98cf2bSJonathan Kim  *     @op  - debug operation (see kfd_dbg_trap_operations)
14354f98cf2bSJonathan Kim  *
14364f98cf2bSJonathan Kim  *     @op determines which union struct args to use.
14374f98cf2bSJonathan Kim  *     Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
14384f98cf2bSJonathan Kim  */
14394f98cf2bSJonathan Kim struct kfd_ioctl_dbg_trap_args {
14404f98cf2bSJonathan Kim 	__u32 pid;
14414f98cf2bSJonathan Kim 	__u32 op;
14424f98cf2bSJonathan Kim 
14434f98cf2bSJonathan Kim 	union {
14444f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_enable_args enable;
14454f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
14464f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
14474f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
14484f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
14494f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
14504f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
14514f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
14524f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
14534f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
14544f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
14554f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
14564f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
14574f98cf2bSJonathan Kim 		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
14584f98cf2bSJonathan Kim 	};
14594f98cf2bSJonathan Kim };
14604f98cf2bSJonathan Kim 
1461b81c55dbSOded Gabbay #define AMDKFD_IOCTL_BASE 'K'
1462b81c55dbSOded Gabbay #define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
1463b81c55dbSOded Gabbay #define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
1464b81c55dbSOded Gabbay #define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
1465b81c55dbSOded Gabbay #define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)
1466b7facbaeSOded Gabbay 
1467b81c55dbSOded Gabbay #define AMDKFD_IOC_GET_VERSION			\
1468b81c55dbSOded Gabbay 		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
1469b7facbaeSOded Gabbay 
1470b81c55dbSOded Gabbay #define AMDKFD_IOC_CREATE_QUEUE			\
1471b81c55dbSOded Gabbay 		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
1472b7facbaeSOded Gabbay 
1473b81c55dbSOded Gabbay #define AMDKFD_IOC_DESTROY_QUEUE		\
1474b81c55dbSOded Gabbay 		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
1475b7facbaeSOded Gabbay 
1476b81c55dbSOded Gabbay #define AMDKFD_IOC_SET_MEMORY_POLICY		\
1477b81c55dbSOded Gabbay 		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
1478b7facbaeSOded Gabbay 
1479b81c55dbSOded Gabbay #define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
1480b81c55dbSOded Gabbay 		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
1481b7facbaeSOded Gabbay 
1482b81c55dbSOded Gabbay #define AMDKFD_IOC_GET_PROCESS_APERTURES	\
1483b81c55dbSOded Gabbay 		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
1484b7facbaeSOded Gabbay 
1485b81c55dbSOded Gabbay #define AMDKFD_IOC_UPDATE_QUEUE			\
1486b81c55dbSOded Gabbay 		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
1487b81c55dbSOded Gabbay 
148829a5d3ebSAndrew Lewycky #define AMDKFD_IOC_CREATE_EVENT			\
148929a5d3ebSAndrew Lewycky 		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
149029a5d3ebSAndrew Lewycky 
149129a5d3ebSAndrew Lewycky #define AMDKFD_IOC_DESTROY_EVENT		\
149229a5d3ebSAndrew Lewycky 		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
149329a5d3ebSAndrew Lewycky 
149429a5d3ebSAndrew Lewycky #define AMDKFD_IOC_SET_EVENT			\
149529a5d3ebSAndrew Lewycky 		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
149629a5d3ebSAndrew Lewycky 
149729a5d3ebSAndrew Lewycky #define AMDKFD_IOC_RESET_EVENT			\
149829a5d3ebSAndrew Lewycky 		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
149929a5d3ebSAndrew Lewycky 
150029a5d3ebSAndrew Lewycky #define AMDKFD_IOC_WAIT_EVENTS			\
150129a5d3ebSAndrew Lewycky 		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
150229a5d3ebSAndrew Lewycky 
15035bdd3eb2SMukul Joshi #define AMDKFD_IOC_DBG_REGISTER_DEPRECATED	\
1504aef11009SYair Shachar 		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
1505aef11009SYair Shachar 
15065bdd3eb2SMukul Joshi #define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED	\
1507aef11009SYair Shachar 		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
1508aef11009SYair Shachar 
15095bdd3eb2SMukul Joshi #define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED	\
1510aef11009SYair Shachar 		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
1511aef11009SYair Shachar 
15125bdd3eb2SMukul Joshi #define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED	\
1513aef11009SYair Shachar 		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
1514aef11009SYair Shachar 
15156a1c9510SMoses Reuben #define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
15166a1c9510SMoses Reuben 		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
15176a1c9510SMoses Reuben 
15185d71dbc3SYong Zhao #define AMDKFD_IOC_GET_TILE_CONFIG                                      \
15195d71dbc3SYong Zhao 		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
15205d71dbc3SYong Zhao 
1521d7b9bd22SFelix Kuehling #define AMDKFD_IOC_SET_TRAP_HANDLER		\
1522d7b9bd22SFelix Kuehling 		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
1523d7b9bd22SFelix Kuehling 
1524c7bcbfa4SFelix Kuehling #define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
1525c7bcbfa4SFelix Kuehling 		AMDKFD_IOWR(0x14,		\
1526c7bcbfa4SFelix Kuehling 			struct kfd_ioctl_get_process_apertures_new_args)
1527c7bcbfa4SFelix Kuehling 
15285ec7e028SFelix Kuehling #define AMDKFD_IOC_ACQUIRE_VM			\
15295ec7e028SFelix Kuehling 		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
15305ec7e028SFelix Kuehling 
15315ec7e028SFelix Kuehling #define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
15325ec7e028SFelix Kuehling 		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
15335ec7e028SFelix Kuehling 
15345ec7e028SFelix Kuehling #define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
15355ec7e028SFelix Kuehling 		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
15365ec7e028SFelix Kuehling 
15375ec7e028SFelix Kuehling #define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
15385ec7e028SFelix Kuehling 		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
15395ec7e028SFelix Kuehling 
15405ec7e028SFelix Kuehling #define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
15415ec7e028SFelix Kuehling 		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
15425ec7e028SFelix Kuehling 
1543a7fe68a1SFelix Kuehling #define AMDKFD_IOC_SET_CU_MASK		\
1544a7fe68a1SFelix Kuehling 		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
1545a7fe68a1SFelix Kuehling 
15465df099e8SJay Cornwall #define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
15475df099e8SJay Cornwall 		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
15485df099e8SJay Cornwall 
15491dde0ea9SFelix Kuehling #define AMDKFD_IOC_GET_DMABUF_INFO		\
15501dde0ea9SFelix Kuehling 		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
15511dde0ea9SFelix Kuehling 
15521dde0ea9SFelix Kuehling #define AMDKFD_IOC_IMPORT_DMABUF		\
15531dde0ea9SFelix Kuehling 		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
15541dde0ea9SFelix Kuehling 
15555bb4b78bSOak Zeng #define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
15565bb4b78bSOak Zeng 		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
15575bb4b78bSOak Zeng 
1558938a0650SAmber Lin #define AMDKFD_IOC_SMI_EVENTS			\
1559938a0650SAmber Lin 		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
1560938a0650SAmber Lin 
156140ce74d1SPhilip Yang #define AMDKFD_IOC_SVM	AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
156240ce74d1SPhilip Yang 
15630f7b5c44SAlex Sierra #define AMDKFD_IOC_SET_XNACK_MODE		\
15640f7b5c44SAlex Sierra 		AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
15650f7b5c44SAlex Sierra 
156636988070SRajneesh Bhardwaj #define AMDKFD_IOC_CRIU_OP			\
156736988070SRajneesh Bhardwaj 		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
156836988070SRajneesh Bhardwaj 
15699731dd4cSDaniel Phillips #define AMDKFD_IOC_AVAILABLE_MEMORY		\
15709731dd4cSDaniel Phillips 		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
15719731dd4cSDaniel Phillips 
1572fd234e75SFelix Kuehling #define AMDKFD_IOC_EXPORT_DMABUF		\
1573fd234e75SFelix Kuehling 		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
1574fd234e75SFelix Kuehling 
15754f98cf2bSJonathan Kim #define AMDKFD_IOC_RUNTIME_ENABLE		\
15764f98cf2bSJonathan Kim 		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
15774f98cf2bSJonathan Kim 
15784f98cf2bSJonathan Kim #define AMDKFD_IOC_DBG_TRAP			\
15794f98cf2bSJonathan Kim 		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
15804f98cf2bSJonathan Kim 
1581b81c55dbSOded Gabbay #define AMDKFD_COMMAND_START		0x01
15824f98cf2bSJonathan Kim #define AMDKFD_COMMAND_END		0x27
1583b7facbaeSOded Gabbay 
1584b7facbaeSOded Gabbay #endif
1585