/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 16
struct kfd_ioctl_get_version_args {
  __u32 major_version;
  __u32 minor_version;
};
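/*
 * Illustrative usage (not part of the generated header): a minimal sketch
 * that queries the KFD interface version through the /dev/kfd character
 * device. Error handling is reduced to the essentials.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/kfd_ioctl.h>
 *
 *   int main(void) {
 *     int kfd = open("/dev/kfd", O_RDWR);            // amdkfd device node
 *     if (kfd < 0) return 1;
 *     struct kfd_ioctl_get_version_args args = {0};  // filled by the kernel
 *     if (ioctl(kfd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *       printf("KFD ioctl interface %u.%u\n",
 *              args.major_version, args.minor_version);
 *     close(kfd);
 *     return 0;
 *   }
 */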
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 doorbell_offset;
  __u32 ring_size;
  __u32 gpu_id;
  __u32 queue_type;
  __u32 queue_percentage;
  __u32 queue_priority;
  __u32 queue_id;
  __u64 eop_buffer_address;
  __u64 eop_buffer_size;
  __u64 ctx_save_restore_address;
  __u32 ctx_save_restore_size;
  __u32 ctl_stack_size;
};
struct kfd_ioctl_destroy_queue_args {
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_update_queue_args {
  __u64 ring_base_address;
  __u32 queue_id;
  __u32 ring_size;
  __u32 queue_percentage;
  __u32 queue_priority;
};
struct kfd_ioctl_set_cu_mask_args {
  __u32 queue_id;
  __u32 num_cu_mask;
  __u64 cu_mask_ptr;
};
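/*
 * Illustrative sketch: restricting a queue to a subset of compute units.
 * Assumptions flagged: num_cu_mask is taken here to count the bits in the
 * mask (one bit per CU), and cu_mask_ptr holds a user pointer cast to
 * __u64, as is conventional for the pointer fields in this header. kfd
 * and queue_id are placeholders from earlier calls; <stdint.h> supplies
 * uintptr_t.
 *
 *   __u32 cu_mask[2] = { 0xffffffff, 0x0000ffff };  // first 48 CUs enabled
 *   struct kfd_ioctl_set_cu_mask_args args = {
 *     .queue_id = queue_id,                          // from create_queue
 *     .num_cu_mask = 64,                             // assumed: bits in cu_mask[]
 *     .cu_mask_ptr = (__u64)(uintptr_t)cu_mask,
 *   };
 *   ioctl(kfd, AMDKFD_IOC_SET_CU_MASK, &args);
 */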
struct kfd_ioctl_get_queue_wave_state_args {
  __u64 ctl_stack_address;
  __u32 ctl_stack_used_size;
  __u32 save_area_used_size;
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
  __u64 available;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_dbg_device_info_entry {
  __u64 exception_status;
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 location_id;
  __u32 vendor_id;
  __u32 device_id;
  __u32 revision_id;
  __u32 subsystem_vendor_id;
  __u32 subsystem_device_id;
  __u32 fw_version;
  __u32 gfx_target_version;
  __u32 simd_count;
  __u32 max_waves_per_simd;
  __u32 array_count;
  __u32 simd_arrays_per_engine;
  __u32 num_xcc;
  __u32 capability;
  __u32 debug_prop;
};
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
  __u64 alternate_aperture_base;
  __u64 alternate_aperture_size;
  __u32 gpu_id;
  __u32 default_policy;
  __u32 alternate_policy;
  __u32 pad;
};
struct kfd_ioctl_get_clock_counters_args {
  __u64 gpu_clock_counter;
  __u64 cpu_clock_counter;
  __u64 system_clock_counter;
  __u64 system_clock_freq;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_process_device_apertures {
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
  struct kfd_process_device_apertures process_apertures[NUM_OF_SUPPORTED_GPUS];
  __u32 num_of_nodes;
  __u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
  __u64 kfd_process_device_apertures_ptr;
  __u32 num_of_nodes;
  __u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
struct kfd_ioctl_dbg_wave_control_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
#define KFD_INVALID_FD 0xffffffff
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
#define KFD_SIGNAL_EVENT_LIMIT 4096
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
  __u64 event_page_offset;
  __u32 event_trigger_data;
  __u32 event_type;
  __u32 auto_reset;
  __u32 node_id;
  __u32 event_id;
  __u32 event_slot_index;
};
struct kfd_ioctl_destroy_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_set_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_reset_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_memory_exception_failure {
  __u32 NotPresent;
  __u32 ReadOnly;
  __u32 NoExecute;
  __u32 imprecise;
};
struct kfd_hsa_memory_exception_data {
  struct kfd_memory_exception_failure failure;
  __u64 va;
  __u32 gpu_id;
  __u32 ErrorType;
};
struct kfd_hsa_hw_exception_data {
  __u32 reset_type;
  __u32 reset_cause;
  __u32 memory_lost;
  __u32 gpu_id;
};
struct kfd_hsa_signal_event_data {
  __u64 last_event_age;
};
struct kfd_event_data {
  union {
    struct kfd_hsa_memory_exception_data memory_exception_data;
    struct kfd_hsa_hw_exception_data hw_exception_data;
    struct kfd_hsa_signal_event_data signal_event_data;
  };
  __u64 kfd_event_data_ext;
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_wait_events_args {
  __u64 events_ptr;
  __u32 num_events;
  __u32 wait_for_all;
  __u32 timeout;
  __u32 wait_result;
};
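/*
 * Illustrative sketch: blocking on a previously created event. events_ptr
 * carries a user pointer to an array of struct kfd_event_data cast to
 * __u64; the timeout is assumed here to be in milliseconds. wait_result
 * is filled in by the kernel with one of the KFD_IOC_WAIT_RESULT_* codes.
 * kfd and event_id are placeholders from earlier calls.
 *
 *   struct kfd_event_data ev = { .event_id = event_id };
 *   struct kfd_ioctl_wait_events_args wait = {
 *     .events_ptr = (__u64)(uintptr_t)&ev,
 *     .num_events = 1,
 *     .wait_for_all = 1,
 *     .timeout = 1000,                       // assumed milliseconds
 *   };
 *   if (ioctl(kfd, AMDKFD_IOC_WAIT_EVENTS, &wait) == 0 &&
 *       wait.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
 *     // the event was signaled within the timeout
 *   }
 */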
struct kfd_ioctl_set_scratch_backing_va_args {
  __u64 va_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
  __u64 tile_config_ptr;
  __u64 macro_tile_config_ptr;
  __u32 num_tile_configs;
  __u32 num_macro_tile_configs;
  __u32 gpu_id;
  __u32 gb_addr_config;
  __u32 num_banks;
  __u32 num_ranks;
};
struct kfd_ioctl_set_trap_handler_args {
  __u64 tba_addr;
  __u64 tma_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
  __u32 drm_fd;
  __u32 gpu_id;
};
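/*
 * Illustrative sketch: handing a DRM render-node file descriptor to KFD so
 * the process GPUVM is shared between the two drivers. The render-node
 * path below is a placeholder; real code discovers the node matching a
 * gpu_id through the KFD topology in sysfs.
 *
 *   int drm = open("/dev/dri/renderD128", O_RDWR);  // placeholder node
 *   struct kfd_ioctl_acquire_vm_args acquire = {
 *     .drm_fd = (__u32)drm,
 *     .gpu_id = gpu_id,                             // from topology
 *   };
 *   ioctl(kfd, AMDKFD_IOC_ACQUIRE_VM, &acquire);
 */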
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS (1 << 23)
struct kfd_ioctl_alloc_memory_of_gpu_args {
  __u64 va_addr;
  __u64 size;
  __u64 handle;
  __u64 mmap_offset;
  __u32 gpu_id;
  __u32 flags;
};
struct kfd_ioctl_free_memory_of_gpu_args {
  __u64 handle;
};
struct kfd_ioctl_map_memory_to_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_unmap_memory_from_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
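/*
 * Illustrative sketch of the allocate-then-map flow. handle and
 * mmap_offset are returned by the allocation; the map call takes a user
 * array of gpu_ids, with n_success reporting how many devices were mapped
 * (useful for rollback on partial failure). kfd, va, and gpu_id are
 * placeholders.
 *
 *   struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
 *     .va_addr = va,                           // chosen GPUVM address
 *     .size = 4096,
 *     .gpu_id = gpu_id,
 *     .flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *              KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
 *   };
 *   if (ioctl(kfd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) == 0) {
 *     __u32 devices[1] = { gpu_id };
 *     struct kfd_ioctl_map_memory_to_gpu_args map = {
 *       .handle = alloc.handle,
 *       .device_ids_array_ptr = (__u64)(uintptr_t)devices,
 *       .n_devices = 1,
 *     };
 *     ioctl(kfd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 *   }
 */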
struct kfd_ioctl_alloc_queue_gws_args {
  __u32 queue_id;
  __u32 num_gws;
  __u32 first_gws;
  __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
  __u64 size;
  __u64 metadata_ptr;
  __u32 metadata_size;
  __u32 gpu_id;
  __u32 flags;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_import_dmabuf_args {
  __u64 va_addr;
  __u64 handle;
  __u32 gpu_id;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_export_dmabuf_args {
  __u64 handle;
  __u32 flags;
  __u32 dmabuf_fd;
};
enum kfd_smi_event {
  KFD_SMI_EVENT_NONE = 0,
  KFD_SMI_EVENT_VMFAULT = 1,
  KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
  KFD_SMI_EVENT_GPU_PRE_RESET = 3,
  KFD_SMI_EVENT_GPU_POST_RESET = 4,
  KFD_SMI_EVENT_MIGRATE_START = 5,
  KFD_SMI_EVENT_MIGRATE_END = 6,
  KFD_SMI_EVENT_PAGE_FAULT_START = 7,
  KFD_SMI_EVENT_PAGE_FAULT_END = 8,
  KFD_SMI_EVENT_QUEUE_EVICTION = 9,
  KFD_SMI_EVENT_QUEUE_RESTORE = 10,
  KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
  KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
  KFD_MIGRATE_TRIGGER_PREFETCH,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
  KFD_MIGRATE_TRIGGER_TTM_EVICTION
};
enum KFD_QUEUE_EVICTION_TRIGGERS {
  KFD_QUEUE_EVICTION_TRIGGER_SVM,
  KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
  KFD_QUEUE_EVICTION_TRIGGER_TTM,
  KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
  KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
  KFD_QUEUE_EVICTION_CRIU_RESTORE
};
enum KFD_SVM_UNMAP_TRIGGERS {
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
  KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
  __u32 gpuid;
  __u32 anon_fd;
};
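/*
 * Illustrative sketch: obtaining the SMI event file descriptor. The ioctl
 * fills anon_fd with an anonymous fd from which system-management events
 * are read; KFD_SMI_EVENT_MASK_FROM_INDEX builds the per-event bit used
 * when subscribing, e.g.
 * KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT) == 1ULL << 0.
 * kfd and gpu_id are placeholders; the exact subscription and record
 * format on anon_fd is defined by the kernel, not this header.
 *
 *   struct kfd_ioctl_smi_events_args smi = { .gpuid = gpu_id };
 *   if (ioctl(kfd, AMDKFD_IOC_SMI_EVENTS, &smi) == 0) {
 *     // read(smi.anon_fd, ...) yields event records bounded by
 *     // KFD_SMI_EVENT_MSG_SIZE.
 *   }
 */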
enum kfd_criu_op {
  KFD_CRIU_OP_PROCESS_INFO,
  KFD_CRIU_OP_CHECKPOINT,
  KFD_CRIU_OP_UNPAUSE,
  KFD_CRIU_OP_RESTORE,
  KFD_CRIU_OP_RESUME,
};
struct kfd_ioctl_criu_args {
  __u64 devices;
  __u64 bos;
  __u64 priv_data;
  __u64 priv_data_size;
  __u32 num_devices;
  __u32 num_bos;
  __u32 num_objects;
  __u32 pid;
  __u32 op;
};
struct kfd_criu_device_bucket {
  __u32 user_gpu_id;
  __u32 actual_gpu_id;
  __u32 drm_fd;
  __u32 pad;
};
struct kfd_criu_bo_bucket {
  __u64 addr;
  __u64 size;
  __u64 offset;
  __u64 restored_offset;
  __u32 gpu_id;
  __u32 alloc_flags;
  __u32 dmabuf_fd;
  __u32 pad;
};
enum kfd_mmio_remap {
  KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
  KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
enum kfd_ioctl_svm_op {
  KFD_IOCTL_SVM_OP_SET_ATTR,
  KFD_IOCTL_SVM_OP_GET_ATTR
};
enum kfd_ioctl_svm_location {
  KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
  KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
enum kfd_ioctl_svm_attr_type {
  KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
  KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
  KFD_IOCTL_SVM_ATTR_ACCESS,
  KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
  KFD_IOCTL_SVM_ATTR_NO_ACCESS,
  KFD_IOCTL_SVM_ATTR_SET_FLAGS,
  KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
  KFD_IOCTL_SVM_ATTR_GRANULARITY
};
struct kfd_ioctl_svm_attribute {
  __u32 type;
  __u32 value;
};
struct kfd_ioctl_svm_args {
  __u64 start_addr;
  __u64 size;
  __u32 op;
  __u32 nattr;
  struct kfd_ioctl_svm_attribute attrs[];
};
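/*
 * Illustrative sketch: struct kfd_ioctl_svm_args ends in a flexible array
 * member, so the caller allocates the header plus nattr attribute slots
 * in one block and passes the whole thing to the ioctl. kfd and va are
 * placeholders; <stdlib.h> supplies malloc/free.
 *
 *   size_t n = 1;
 *   struct kfd_ioctl_svm_args *svm =
 *       malloc(sizeof(*svm) + n * sizeof(struct kfd_ioctl_svm_attribute));
 *   svm->start_addr = va;                     // page-aligned range start
 *   svm->size = 2 * 1024 * 1024;
 *   svm->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *   svm->nattr = n;
 *   svm->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *   svm->attrs[0].value = KFD_IOCTL_SVM_FLAG_HOST_ACCESS;
 *   ioctl(kfd, AMDKFD_IOC_SVM, svm);
 *   free(svm);
 */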
struct kfd_ioctl_set_xnack_mode_args {
  __s32 xnack_enabled;
};
enum kfd_dbg_trap_override_mode {
  KFD_DBG_TRAP_OVERRIDE_OR = 0,
  KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};
enum kfd_dbg_trap_mask {
  KFD_DBG_TRAP_MASK_FP_INVALID = 1,
  KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
  KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
  KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
  KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
  KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
  KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
  KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
  KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};
enum kfd_dbg_trap_wave_launch_mode {
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};
enum kfd_dbg_trap_address_watch_mode {
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};
enum kfd_dbg_trap_flags {
  KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
  KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
};
enum kfd_dbg_trap_exception_code {
  EC_NONE = 0,
  EC_QUEUE_WAVE_ABORT = 1,
  EC_QUEUE_WAVE_TRAP = 2,
  EC_QUEUE_WAVE_MATH_ERROR = 3,
  EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
  EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
  EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
  EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
  EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
  EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
  EC_QUEUE_PACKET_RESERVED = 19,
  EC_QUEUE_PACKET_UNSUPPORTED = 20,
  EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
  EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
  EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
  EC_QUEUE_PREEMPTION_ERROR = 30,
  EC_QUEUE_NEW = 31,
  EC_DEVICE_QUEUE_DELETE = 32,
  EC_DEVICE_MEMORY_VIOLATION = 33,
  EC_DEVICE_RAS_ERROR = 34,
  EC_DEVICE_FATAL_HALT = 35,
  EC_DEVICE_NEW = 36,
  EC_PROCESS_RUNTIME = 48,
  EC_PROCESS_DEVICE_REMOVE = 49,
  EC_MAX
};
#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
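/*
 * Worked example of the exception-code mask arithmetic: each code n
 * occupies bit (n - 1), so KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) is
 * 1ULL << 1 == 0x2, a bit that is set in KFD_EC_MASK_QUEUE. Hence
 * KFD_DBG_EC_TYPE_IS_QUEUE(EC_QUEUE_WAVE_TRAP) evaluates to 1, while
 * KFD_DBG_EC_TYPE_IS_DEVICE(EC_QUEUE_WAVE_TRAP) evaluates to 0.
 */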
enum kfd_dbg_runtime_state {
  DEBUG_RUNTIME_STATE_DISABLED = 0,
  DEBUG_RUNTIME_STATE_ENABLED = 1,
  DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
  DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};
struct kfd_runtime_info {
  __u64 r_debug;
  __u32 runtime_state;
  __u32 ttmp_setup;
};
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
struct kfd_ioctl_runtime_enable_args {
  __u64 r_debug;
  __u32 mode_mask;
  __u32 capabilities_mask;
};
struct kfd_queue_snapshot_entry {
  __u64 exception_status;
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 ctx_save_restore_address;
  __u32 queue_id;
  __u32 gpu_id;
  __u32 ring_size;
  __u32 queue_type;
  __u32 ctx_save_restore_area_size;
  __u32 reserved;
};
#define KFD_DBG_QUEUE_ERROR_BIT 30
#define KFD_DBG_QUEUE_INVALID_BIT 31
#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
struct kfd_context_save_area_header {
  struct {
    __u32 control_stack_offset;
    __u32 control_stack_size;
    __u32 wave_state_offset;
    __u32 wave_state_size;
  } wave_state;
  __u32 debug_offset;
  __u32 debug_size;
  __u64 err_payload_addr;
  __u32 err_event_id;
  __u32 reserved1;
};
enum kfd_dbg_trap_operations {
  KFD_IOC_DBG_TRAP_ENABLE = 0,
  KFD_IOC_DBG_TRAP_DISABLE = 1,
  KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
  KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,
  KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,
  KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,
  KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,
  KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,
  KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
  KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
  KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
  KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
  KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
struct kfd_ioctl_dbg_trap_enable_args {
  __u64 exception_mask;
  __u64 rinfo_ptr;
  __u32 rinfo_size;
  __u32 dbg_fd;
};
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
  __u64 exception_mask;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
  __u32 override_mode;
  __u32 enable_mask;
  __u32 support_request_mask;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
  __u32 launch_mode;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_suspend_queues_args {
  __u64 exception_mask;
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 grace_period;
};
struct kfd_ioctl_dbg_trap_resume_queues_args {
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
  __u64 address;
  __u32 mode;
  __u32 mask;
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_set_flags_args {
  __u32 flags;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_query_debug_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_query_exception_info_args {
  __u64 info_ptr;
  __u32 info_size;
  __u32 source_id;
  __u32 exception_code;
  __u32 clear_exception;
};
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_queues;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_device_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_devices;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_args {
  __u32 pid;
  __u32 op;
  union {
    struct kfd_ioctl_dbg_trap_enable_args enable;
    struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
    struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
    struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
    struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
    struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
    struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
    struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
    struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
    struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
    struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
    struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
    struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
    struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
  };
};
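/*
 * Illustrative sketch: the debug-trap ioctl multiplexes every operation
 * through one tagged union; op selects which member the kernel reads.
 * kfd, target_pid, and dbg_fd are placeholders (dbg_fd being a file
 * descriptor the debugger polls for events).
 *
 *   struct kfd_runtime_info rinfo = {0};
 *   struct kfd_ioctl_dbg_trap_args trap = {
 *     .pid = target_pid,                      // process being debugged
 *     .op = KFD_IOC_DBG_TRAP_ENABLE,
 *     .enable = {
 *       .exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP),
 *       .rinfo_ptr = (__u64)(uintptr_t)&rinfo,
 *       .rinfo_size = sizeof(rinfo),
 *       .dbg_fd = (__u32)dbg_fd,
 *     },
 *   };
 *   ioctl(kfd, AMDKFD_IOC_DBG_TRAP, &trap);
 */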
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr,type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr,type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr,type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
#define AMDKFD_IOC_CREATE_QUEUE AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
#define AMDKFD_IOC_DESTROY_QUEUE AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
#define AMDKFD_IOC_SET_MEMORY_POLICY AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
#define AMDKFD_IOC_GET_CLOCK_COUNTERS AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
#define AMDKFD_IOC_UPDATE_QUEUE AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
#define AMDKFD_IOC_CREATE_EVENT AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
#define AMDKFD_IOC_DESTROY_EVENT AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
#define AMDKFD_IOC_SET_EVENT AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
#define AMDKFD_IOC_RESET_EVENT AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
#define AMDKFD_IOC_WAIT_EVENTS AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_IOC_SET_TRAP_HANDLER AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
#define AMDKFD_IOC_FREE_MEMORY_OF_GPU AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
#define AMDKFD_IOC_MAP_MEMORY_TO_GPU AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
#define AMDKFD_IOC_SET_CU_MASK AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
#define AMDKFD_IOC_GET_DMABUF_INFO AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
#define AMDKFD_IOC_IMPORT_DMABUF AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
#define AMDKFD_IOC_ALLOC_QUEUE_GWS AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
#define AMDKFD_IOC_SMI_EVENTS AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
#define AMDKFD_IOC_SET_XNACK_MODE AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
#define AMDKFD_IOC_CRIU_OP AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
#define AMDKFD_IOC_AVAILABLE_MEMORY AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_IOC_EXPORT_DMABUF AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
#define AMDKFD_IOC_RUNTIME_ENABLE AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
#define AMDKFD_IOC_DBG_TRAP AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x27
#endif