Lines matching "wait", "monitoring", and "ns" in the Xe DRM uAPI header (include/uapi/drm/xe_drm.h)
1 /* SPDX-License-Identifier: MIT */
17 * subject to backwards-compatibility constraints.
28 * The diagram below represents a high-level simplification of a discrete
68 * Xe-related uAPI such as uevents and PMU (Performance Monitoring Unit) related
72 * - &DRM_IOCTL_XE_DEVICE_QUERY
73 * - &DRM_IOCTL_XE_GEM_CREATE
74 * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
75 * - &DRM_IOCTL_XE_VM_CREATE
76 * - &DRM_IOCTL_XE_VM_DESTROY
77 * - &DRM_IOCTL_XE_VM_BIND
78 * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
79 * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
80 * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
81 * - &DRM_IOCTL_XE_EXEC
82 * - &DRM_IOCTL_XE_WAIT_USER_FENCE
83 * - &DRM_IOCTL_XE_OBSERVATION
106 /* Must be kept compact -- no holes */
141 * .. code-block:: C
162 * struct drm_xe_user_extension - Base class for defining a chain of extensions
192 * struct drm_xe_ext_set_property - Generic set property extension
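Every extensible IOCTL in this file uses the same chaining scheme: a struct drm_xe_user_extension carries a @name selecting the extension type and a @next_extension pointer to the next node, and the chain head is passed through the @extensions field of the IOCTL argument. A minimal sketch of building a one-element chain with the generic set-property extension follows; the name, property and value arguments are caller-supplied placeholders, since the valid IDs are defined per IOCTL elsewhere in this header.

.. code-block:: C

    #include <stdint.h>
    #include <string.h>
    #include <drm/xe_drm.h>

    /*
     * Build a one-element extension chain. @name, @property_id and
     * @property_value are caller-supplied placeholders; the valid values are
     * defined per IOCTL in xe_drm.h.
     */
    static uint64_t build_ext_chain(struct drm_xe_ext_set_property *ext,
                                    uint32_t name, uint32_t property_id,
                                    uint64_t property_value)
    {
        memset(ext, 0, sizeof(*ext));
        ext->base.next_extension = 0;   /* last (and only) node in the chain */
        ext->base.name = name;          /* which extension this node is */
        ext->property = property_id;
        ext->value = property_value;

        /* The returned value goes into the @extensions field of the IOCTL struct. */
        return (uint64_t)(uintptr_t)ext;
    }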
215 * struct drm_xe_engine_class_instance - instance of an engine class
222 * - %DRM_XE_ENGINE_CLASS_RENDER
223 * - %DRM_XE_ENGINE_CLASS_COPY
224 * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
225 * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
226 * - %DRM_XE_ENGINE_CLASS_COMPUTE
227 * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
249 * struct drm_xe_engine - describe hardware engine
260 * struct drm_xe_query_engines - describe engines
276 * enum drm_xe_memory_class - Supported memory classes.
290 * struct drm_xe_mem_region - Describes some region as known to
307 * @min_page_size: Min page-size in bytes for this region.
316 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
366 * struct drm_xe_query_mem_regions - describe memory regions
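The memory-region list is obtained with &DRM_IOCTL_XE_DEVICE_QUERY (see further down) and is typically used to pick placements and page sizes for buffer allocation. A hedged sketch is shown below; it assumes the query returns a struct drm_xe_query_mem_regions with @num_mem_regions entries and that the placement mask used at buffer creation carries one bit per region @instance.

.. code-block:: C

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /*
     * Walk an already-filled drm_xe_query_mem_regions blob and build a
     * placement bitmask covering every VRAM instance (assumption: one bit
     * per @instance, as consumed by drm_xe_gem_create.placement).
     */
    static uint32_t vram_placement_mask(const struct drm_xe_query_mem_regions *regions)
    {
        uint32_t mask = 0;

        for (uint32_t i = 0; i < regions->num_mem_regions; i++) {
            const struct drm_xe_mem_region *r = &regions->mem_regions[i];

            if (r->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)
                mask |= 1u << r->instance;
        }
        return mask;
    }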
382 * struct drm_xe_query_config - describe the device configuration
389 * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
391 * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
394 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
396 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
398 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
399 * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
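A hedged sketch of consuming the returned configuration follows; it assumes the values are returned in a u64 info[] array indexed by the %DRM_XE_QUERY_CONFIG_* keys listed above.

.. code-block:: C

    #include <stdbool.h>
    #include <stdint.h>
    #include <drm/xe_drm.h>

    /* Pull a few values out of an already-filled drm_xe_query_config blob. */
    static void read_config(const struct drm_xe_query_config *config,
                            uint16_t *device_id, bool *has_vram, uint32_t *va_bits)
    {
        *device_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
        *has_vram  = config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
                     DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
        *va_bits   = config->info[DRM_XE_QUERY_CONFIG_VA_BITS];
    }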
420 * struct drm_xe_gt - describe an individual GT.
428 * - %DRM_XE_QUERY_GT_TYPE_MAIN
429 * - %DRM_XE_QUERY_GT_TYPE_MEDIA
479 * struct drm_xe_query_gt_list - A list with GT description items.
495 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
505 * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
510 * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
515 * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type
518 * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
524 * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
546 /** @mask: little-endian mask of @num_bytes */
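The topology query returns a sequence of variable-length records, each a struct drm_xe_query_topology_mask immediately followed by @num_bytes of mask data. A hedged sketch that counts the enabled geometry DSS on GT 0 under that assumed layout:

.. code-block:: C

    #include <stddef.h>
    #include <stdint.h>
    #include <drm/xe_drm.h>

    /*
     * Count set bits in the DSS_GEOMETRY mask of GT 0. @data/@len describe the
     * blob returned for DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, assumed to be a
     * sequence of drm_xe_query_topology_mask records, each followed by
     * @num_bytes of little-endian mask bytes.
     */
    static int count_geometry_dss(const uint8_t *data, size_t len)
    {
        int count = 0;

        while (len >= sizeof(struct drm_xe_query_topology_mask)) {
            const struct drm_xe_query_topology_mask *topo = (const void *)data;
            size_t rec = sizeof(*topo) + topo->num_bytes;

            if (rec > len)
                break;

            if (topo->gt_id == 0 && topo->type == DRM_XE_TOPO_DSS_GEOMETRY)
                for (uint32_t i = 0; i < topo->num_bytes; i++)
                    count += __builtin_popcount(topo->mask[i]);

            data += rec;
            len -= rec;
        }
        return count;
    }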
551 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
588 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
595 * @cpu_delta: Time delta in ns captured around reading the lower dword
602 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
605 * of the micro-controller firmware.
608 /** @uc_type: The micro-controller type to query firmware version */
633 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
641 * - %DRM_XE_DEVICE_QUERY_ENGINES
642 * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
643 * - %DRM_XE_DEVICE_QUERY_CONFIG
644 * - %DRM_XE_DEVICE_QUERY_GT_LIST
645 * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
649 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
650 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
656 * IOCTL call returns -EINVAL.
661 * .. code-block:: C
674 * for (int i = 0; i < engines->num_engines; i++) {
676 * engines->engines[i].instance.engine_class ==
678 * engines->engines[i].instance.engine_class ==
680 * engines->engines[i].instance.engine_class ==
682 * engines->engines[i].instance.engine_class ==
684 * engines->engines[i].instance.engine_class ==
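All query types above share the same two-call convention: the first &DRM_IOCTL_XE_DEVICE_QUERY call with @size set to 0 reports the required buffer size, the caller allocates, and the second call with @data pointing at the buffer fills it in. A minimal sketch of that pattern for the engine list (which the example above then iterates), assuming an already-opened render node fd:

.. code-block:: C

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Two-call query pattern: the first call sizes the buffer, the second fills it. */
    static struct drm_xe_query_engines *query_engines(int fd)
    {
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_ENGINES,
            .size = 0,              /* first call: ask for the required size */
            .data = 0,
        };
        struct drm_xe_query_engines *engines;

        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return NULL;

        engines = malloc(query.size);
        if (!engines)
            return NULL;

        query.data = (uint64_t)(uintptr_t)engines;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(engines);
            return NULL;
        }
        return engines;     /* engines->num_engines entries, iterated as above */
    }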
717 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
721 * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
722 * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
723 * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
726 * for small-bar systems (on full-bar systems this gets turned into a
733 * Note2: For clear-color CCS surfaces the kernel needs to read the
734 * clear-color value stored in the buffer, and on discrete platforms we
737 * small-bar systems.
740 * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
743 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
800 * between the GPU and CPU is guaranteed. The caching mode of
801 * existing CPU-mappings will be updated transparently to
802 * user-space clients.
813 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
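Putting the two GEM IOCTLs together: &DRM_IOCTL_XE_GEM_CREATE returns a handle, &DRM_IOCTL_XE_GEM_MMAP_OFFSET turns that handle into a fake offset, and a plain mmap() on the DRM fd with that offset maps the pages. A hedged sketch, assuming a system-memory placement mask (write-back CPU caching is only meaningful for such placements):

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/xe_drm.h>

    /* Create a BO in @placement with WB CPU caching and map it for the CPU. */
    static void *create_and_map_bo(int fd, uint64_t size, uint32_t placement,
                                   uint32_t *handle_out)
    {
        struct drm_xe_gem_create create = {
            .size = size,           /* must respect the region's min_page_size */
            .placement = placement, /* bitmask of memory region instances */
            .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
        };
        void *ptr;

        if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
            return NULL;

        struct drm_xe_gem_mmap_offset mmo = { .handle = create.handle };

        if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
            return NULL;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmo.offset);
        if (ptr == MAP_FAILED)
            return NULL;

        *handle_out = create.handle;
        return ptr;
    }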
833 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
836 * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
837 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
842 * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
843 * LR VMs can be created in recoverable page-fault mode using
846 * different per-VM overcommit semantics that are enabled by
849 * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
851 * demand when accessed, and also allows per-VM overcommit of memory.
873 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
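VM creation and destruction are the simplest calls of the group; a minimal sketch, assuming the struct fields documented here:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Create a VM; pass e.g. DRM_XE_VM_CREATE_FLAG_LR_MODE in @flags if needed. */
    static int xe_vm_create(int fd, uint32_t flags, uint32_t *vm_id)
    {
        struct drm_xe_vm_create create = { .flags = flags };

        if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
            return -1;

        *vm_id = create.vm_id;
        return 0;
    }

    static void xe_vm_destroy(int fd, uint32_t vm_id)
    {
        struct drm_xe_vm_destroy destroy = { .vm_id = vm_id };

        ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
    }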
887 * struct drm_xe_vm_bind_op - run bind operations
890 * - %DRM_XE_VM_BIND_OP_MAP
891 * - %DRM_XE_VM_BIND_OP_UNMAP
892 * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
893 * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
894 * - %DRM_XE_VM_BIND_OP_PREFETCH
897 * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
899 * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
901 * fault handler. This is implied on a non-faulting VM as there is no
903 * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
930 * there is a mismatch (see note below for pre-MTL platforms).
932 * Note: On pre-MTL platforms there is only a caching mode and no
934 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
939 * levels into the following coherency buckets on all pre-MTL platforms:
941 * ppGTT UC -> COH_NONE
942 * ppGTT WC -> COH_NONE
943 * ppGTT WT -> COH_NONE
944 * ppGTT WB -> COH_AT_LEAST_1WAY
947 * such platforms (or perhaps in general for dma-buf if shared with
950 * have a shared-LLC. On MTL+ this completely changes and the HW
954 * Note: For userptr and externally imported dma-buf the kernel expects
1017 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
1024 * .. code-block:: C
1077 /** @num_syncs: number of syncs to wait on */
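For a single operation the bind can be passed inline in the @bind member; an array pointed to by @vector_of_binds is only needed for num_binds > 1. A hedged sketch mapping one BO at a caller-chosen GPU virtual address follows; the pat_index is a device-specific PAT entry and is left as a caller-supplied placeholder, and no out-syncs are attached.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /*
     * Map @bo_handle at GPU VA @addr for @range bytes. @pat_index is a
     * device-specific PAT entry chosen by the caller (placeholder here).
     */
    static int xe_vm_bind_map(int fd, uint32_t vm_id, uint32_t bo_handle,
                              uint64_t addr, uint64_t range, uint16_t pat_index)
    {
        struct drm_xe_vm_bind bind = {
            .vm_id = vm_id,
            .num_binds = 1,
            .bind = {
                .op = DRM_XE_VM_BIND_OP_MAP,
                .obj = bo_handle,
                .obj_offset = 0,
                .addr = addr,
                .range = range,
                .pat_index = pat_index,
            },
            .num_syncs = 0,     /* no out-syncs requested for this bind */
        };

        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
    }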
1088 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
1094 * .. code-block:: C
1133 * @instances: user pointer to a 2-d array of struct
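A hedged sketch of creating a one-wide exec queue on a single engine instance; @instances points at width x num_placements entries of struct drm_xe_engine_class_instance (1 x 1 here), typically copied from the engine list returned by %DRM_XE_DEVICE_QUERY_ENGINES.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Create a 1-wide exec queue bound to @vm_id on the given engine instance. */
    static int xe_exec_queue_create(int fd, uint32_t vm_id,
                                    const struct drm_xe_engine_class_instance *eci,
                                    uint32_t *exec_queue_id)
    {
        struct drm_xe_exec_queue_create create = {
            .width = 1,
            .num_placements = 1,
            .vm_id = vm_id,
            .instances = (uint64_t)(uintptr_t)eci,
        };

        if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
            return -1;

        *exec_queue_id = create.exec_queue_id;
        return 0;
    }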
1146 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
1160 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
1163 * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
1184 * struct drm_xe_sync - sync object
1187 * - %DRM_XE_SYNC_TYPE_SYNCOBJ
1188 * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
1189 * - %DRM_XE_SYNC_TYPE_USER_FENCE
1192 * - %DRM_XE_SYNC_FLAG_SIGNAL
1196 * .. code-block:: C
1208 * struct drm_syncobj_wait wait = {
1216 * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
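The three sync types use different members of the struct: both syncobj types take a @handle (the timeline variant additionally a point via @timeline_value), while a user fence takes a qword-aligned @addr that is written when the fence signals. A hedged sketch of filling in two common out-sync variants; the assumption that the user-fence value travels in @timeline_value is noted in the comments.

.. code-block:: C

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /* Out-syncobj: the kernel signals @syncobj_handle when the operation completes. */
    static struct drm_xe_sync out_syncobj(uint32_t syncobj_handle)
    {
        struct drm_xe_sync sync = {
            .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
            .flags = DRM_XE_SYNC_FLAG_SIGNAL,
            .handle = syncobj_handle,
        };
        return sync;
    }

    /*
     * Out user fence: the kernel writes a value to the qword at @fence_addr on
     * signal (the value is assumed to be carried in @timeline_value).
     */
    static struct drm_xe_sync out_user_fence(uint64_t *fence_addr, uint64_t value)
    {
        struct drm_xe_sync sync = {
            .type = DRM_XE_SYNC_TYPE_USER_FENCE,
            .flags = DRM_XE_SYNC_FLAG_SIGNAL,
            .addr = (uint64_t)(uintptr_t)fence_addr,
            .timeline_value = value,
        };
        return sync;
    }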
1258 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
1265 * .. code-block:: C
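Submission itself only needs the exec queue, the GPU virtual address of the batch (as previously mapped with &DRM_IOCTL_XE_VM_BIND) and any syncs. A hedged sketch submitting a single batch buffer with one out-sync:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Submit one batch buffer GPU VA on @exec_queue_id with a single out-sync. */
    static int xe_exec_one(int fd, uint32_t exec_queue_id, uint64_t batch_addr,
                           struct drm_xe_sync *sync)
    {
        struct drm_xe_exec exec = {
            .exec_queue_id = exec_queue_id,
            .address = batch_addr,
            .num_batch_buffer = 1,  /* must match the exec queue width */
            .num_syncs = 1,
            .syncs = (uint64_t)(uintptr_t)sync,
        };

        return ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
    }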
1310 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
1312 * Wait on a user fence. Xe will wake up on every HW engine interrupt in the
1320 * - %DRM_XE_UFENCE_WAIT_OP_EQ
1321 * - %DRM_XE_UFENCE_WAIT_OP_NEQ
1322 * - %DRM_XE_UFENCE_WAIT_OP_GT
1323 * - %DRM_XE_UFENCE_WAIT_OP_GTE
1324 * - %DRM_XE_UFENCE_WAIT_OP_LT
1325 * - %DRM_XE_UFENCE_WAIT_OP_LTE
1328 * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
1329 * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
1332 * - 0xffu for u8
1333 * - 0xffffu for u16
1334 * - 0xffffffffu for u32
1335 * - 0xffffffffffffffffu for u64
1342 * @addr: user pointer address to wait on, must be qword aligned
1352 /** @op: wait operation (type of comparison) */
1356 /** @flags: wait flags */
1369 * @timeout: how long to wait before bailing, value in nanoseconds.
1371 * it contains the timeout expressed in nanoseconds to wait (fence will
1373 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout), wait
1375 * Passing a negative timeout leads to a never-ending wait.
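A hedged sketch of waiting for a user fence previously attached as an out-sync: the kernel compares the qword at @addr against @value under @mask using @op and returns once the condition holds or the timeout expires. A relative timeout in nanoseconds is used here (no %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME).

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Block until *fence_addr == expected, or until @timeout_ns elapses. */
    static int xe_wait_user_fence_eq(int fd, uint32_t exec_queue_id,
                                     const uint64_t *fence_addr,
                                     uint64_t expected, int64_t timeout_ns)
    {
        struct drm_xe_wait_user_fence wait = {
            .addr = (uint64_t)(uintptr_t)fence_addr,    /* must be qword aligned */
            .op = DRM_XE_UFENCE_WAIT_OP_EQ,
            .value = expected,
            .mask = 0xffffffffffffffffull,              /* full u64 comparison */
            .timeout = timeout_ns,                      /* relative, in ns */
            .exec_queue_id = exec_queue_id,
        };

        return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }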
1395 * enum drm_xe_observation_type - Observation stream types
1403 * enum drm_xe_observation_op - Observation stream ops
1417 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
1436 * enum drm_xe_observation_ioctls - Observation stream fd ioctls
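Observation streams are opened through &DRM_IOCTL_XE_OBSERVATION: the param struct selects the stream type and operation, and a successful stream-open returns a new file descriptor on which the stream fd ioctls above are then issued. The sketch below is an assumption-heavy outline: the %DRM_XE_OBSERVATION_TYPE_OA and %DRM_XE_OBSERVATION_OP_STREAM_OPEN enumerators and the use of @param as a pointer to a chain of OA set-property extensions follow the OA documentation later in this file.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /*
     * Open an OA observation stream. @oa_props is assumed to point at a chain
     * of OA set-property extensions (see the OA property ids further down).
     * On success the ioctl returns a stream fd for the fd ioctls listed above.
     */
    static int xe_oa_stream_open(int fd, uint64_t oa_props)
    {
        struct drm_xe_observation_param param = {
            .observation_type = DRM_XE_OBSERVATION_TYPE_OA,
            .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
            .param = oa_props,
        };

        return ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);
    }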
1459 * enum drm_xe_oa_unit_type - OA unit types
1464 * sub-types of OAG. For OAR/OAC, use OAG.
1473 * struct drm_xe_oa_unit - describe OA unit
1485 /** @capabilities: OA capabilities bit-mask */
1504 * struct drm_xe_query_oa_units - describe OA units
1513 * .. code-block:: C
1520 * poau = (u8 *)&qoa->oa_units[0];
1521 * for (int i = 0; i < qoa->num_oa_units; i++) {
1524 * poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
1543 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
1562 * enum drm_xe_oa_property_id - OA stream property ids
1648 * configuration will wait till input fences signal. Output fences
1657 * struct drm_xe_oa_config - OA metric configuration
1667 /** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
1681 * struct drm_xe_oa_stream_status - OA stream status returned from
1702 * struct drm_xe_oa_stream_info - OA stream info returned from