Lines Matching +full:enum +full:- +full:as +full:- +full:flags

1 /* SPDX-License-Identifier: MIT */
17 * subject to backwards-compatibility constraints.
18 * Sections in this file are organized as follows:
28 * The diagram below represents a high-level simplification of a discrete
30 * are necessary to understand this API, as well as how their relations
68 * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
72 * - &DRM_IOCTL_XE_DEVICE_QUERY
73 * - &DRM_IOCTL_XE_GEM_CREATE
74 * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
75 * - &DRM_IOCTL_XE_VM_CREATE
76 * - &DRM_IOCTL_XE_VM_DESTROY
77 * - &DRM_IOCTL_XE_VM_BIND
78 * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
79 * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
80 * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
81 * - &DRM_IOCTL_XE_EXEC
82 * - &DRM_IOCTL_XE_WAIT_USER_FENCE
83 * - &DRM_IOCTL_XE_OBSERVATION
84 * - &DRM_IOCTL_XE_MADVISE
85 * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
92 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
110 /* Must be kept compact -- no holes */
135 * as demonstrated by Vulkan's approach to providing extensions for forward
147 * .. code-block:: C
168 * struct drm_xe_user_extension - Base class for defining a chain of extensions
198 * struct drm_xe_ext_set_property - Generic set property extension
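
Chaining works by pointing each extension's @next_extension at the base of the following struct. Below is a minimal sketch of a two-link chain for an exec-queue create call, assuming the drm_xe_user_extension/drm_xe_ext_set_property layouts (next_extension, name, property, value) and the extension name DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY; the property/value pairs are illustrative only:

.. code-block:: C

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /*
     * Build a chain of two generic set-property extensions. The values for
     * .base.name and .property are illustrative assumptions; the chaining
     * itself (next_extension -> next struct's base) is what the
     * drm_xe_user_extension mechanism defines.
     */
    static void build_property_chain(struct drm_xe_ext_set_property ext[2])
    {
        /* Second link: terminates the chain with next_extension = 0 */
        ext[1] = (struct drm_xe_ext_set_property) {
            .base.next_extension = 0,
            .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY, /* assumed name */
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
            .value = 100,    /* illustrative timeslice value */
        };

        /* First link: points at the second link's base class */
        ext[0] = (struct drm_xe_ext_set_property) {
            .base.next_extension = (uintptr_t)&ext[1].base,
            .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY, /* assumed name */
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
            .value = 0,      /* illustrative priority value */
        };

        /* The ioctl's own 'extensions' field is then set to
         * (uintptr_t)&ext[0].base. */
    }
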
221 * struct drm_xe_engine_class_instance - instance of an engine class
223 * It is returned as part of the @drm_xe_engine, but it is also used as
228 * - %DRM_XE_ENGINE_CLASS_RENDER
229 * - %DRM_XE_ENGINE_CLASS_COPY
230 * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
231 * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
232 * - %DRM_XE_ENGINE_CLASS_COMPUTE
233 * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
255 * struct drm_xe_engine - describe hardware engine
266 * struct drm_xe_query_engines - describe engines
282 * enum drm_xe_memory_class - Supported memory classes.
284 enum drm_xe_memory_class {
296 * struct drm_xe_mem_region - Describes some region as known to
303 * See enum drm_xe_memory_class for supported values.
307 * @instance: The unique ID for this region, which serves as the
308 * index in the placement bitmask used as argument for
313 * @min_page_size: Min page-size in bytes for this region.
322 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
344 * is smaller than @total_size then this is referred to as a
372 * struct drm_xe_query_mem_regions - describe memory regions
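
A sketch of fetching and walking the region list with the usual two-call query pattern; the field names (num_mem_regions, mem_regions[], mem_class, instance, min_page_size, total_size) are assumed from the header and should be checked against xe_drm.h:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int print_mem_regions(int fd)
    {
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
            .size = 0,            /* first call: ask the kernel for the size */
        };
        struct drm_xe_query_mem_regions *regions;

        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return -1;

        regions = malloc(query.size);
        if (!regions)
            return -1;

        query.data = (uintptr_t)regions;  /* second call: fill the buffer */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(regions);
            return -1;
        }

        for (unsigned int i = 0; i < regions->num_mem_regions; i++) {
            struct drm_xe_mem_region *r = &regions->mem_regions[i];

            printf("region %u: class=%u instance=%u min_page_size=%u total=%llu\n",
                   i, r->mem_class, r->instance, r->min_page_size,
                   (unsigned long long)r->total_size);
        }

        free(regions);
        return 0;
    }
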
388 * struct drm_xe_query_config - describe the device configuration
395 * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
397 * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
400 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
402 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
404 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
406 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
408 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
409 * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
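
A sketch of reading the configuration array, assuming a drm_xe_query_config layout with num_params followed by a u64 info[] array indexed by the DRM_XE_QUERY_CONFIG_* keys above:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int print_config(int fd)
    {
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_CONFIG,
        };
        struct drm_xe_query_config *config;

        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return -1;

        config = malloc(query.size);
        if (!config)
            return -1;

        query.data = (uintptr_t)config;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(config);
            return -1;
        }

        /* info[] is indexed directly by the DRM_XE_QUERY_CONFIG_* keys */
        printf("device id: 0x%llx\n",
               (unsigned long long)(config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff));
        printf("va bits:   %llu\n",
               (unsigned long long)config->info[DRM_XE_QUERY_CONFIG_VA_BITS]);
        printf("has vram:  %s\n",
               (config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
                DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM) ? "yes" : "no");

        free(config);
        return 0;
    }
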
432 * struct drm_xe_gt - describe an individual GT.
440 * - %DRM_XE_QUERY_GT_TYPE_MAIN
441 * - %DRM_XE_QUERY_GT_TYPE_MEDIA
491 * struct drm_xe_query_gt_list - A list with GT description items.
507 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
517 * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
522 * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
527 * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type
530 * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
536 * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
558 /** @mask: little-endian mask of @num_bytes */
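
The topology query returns a packed series of variable-sized drm_xe_query_topology_mask entries (one per GT and mask type), so the buffer is walked entry by entry. A sketch, assuming the gt_id/type/num_bytes/mask[] layout:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int print_topology(int fd)
    {
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_GT_TOPOLOGY,
        };
        uint8_t *buf, *p;

        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return -1;

        buf = malloc(query.size);
        if (!buf)
            return -1;

        query.data = (uintptr_t)buf;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(buf);
            return -1;
        }

        /* The reply is a series of variable-sized topology entries */
        for (p = buf; p < buf + query.size; ) {
            struct drm_xe_query_topology_mask *topo = (void *)p;

            printf("gt %u, mask type %u, %u bytes of mask\n",
                   topo->gt_id, topo->type, topo->num_bytes);
            p += sizeof(*topo) + topo->num_bytes;
        }

        free(buf);
        return 0;
    }
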
563 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
594 * @engine_cycles: Engine cycles as read from its register
614 * struct drm_xe_query_uc_fw_version - query a micro-controller firmware version
617 * of the micro-controller firmware.
620 /** @uc_type: The micro-controller type to query firmware version */
645 * struct drm_xe_query_pxp_status - query if PXP is ready
654 * -ENODEV: PXP not supported or disabled;
655 * -EIO: fatal error occurred during init, so PXP will never be enabled;
656 * -EINVAL: incorrect value provided as part of the query;
657 * -EFAULT: error copying the memory between kernel and userspace.
660 * everything works as expected, the status will transition to init complete in
665 * enum drm_xe_pxp_session_type. TYPE_NONE is always supported and therefore
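
A sketch of the readiness query; the error cases above come back as ioctl failures, while the readiness value lands in the returned struct. The field names status and supported_session_types are assumptions to verify against the header:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int query_pxp_status(int fd)
    {
        struct drm_xe_query_pxp_status pxp = {};
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
            .size = sizeof(pxp),        /* fixed-size query, skip size discovery */
            .data = (uintptr_t)&pxp,
        };

        /* -ENODEV/-EIO/-EINVAL/-EFAULT are reported as ioctl failures */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return -1;

        printf("pxp status %u, supported session types 0x%x\n",
               pxp.status, pxp.supported_session_types);
        return 0;
    }
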
678 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
686 * - %DRM_XE_DEVICE_QUERY_ENGINES
687 * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
688 * - %DRM_XE_DEVICE_QUERY_CONFIG
689 * - %DRM_XE_DEVICE_QUERY_GT_LIST
690 * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
691 * configuration of the device such as information on slices, memory,
692 * caches, and so on. It is provided as a table of key / value
694 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
695 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
696 * - %DRM_XE_DEVICE_QUERY_PXP_STATUS
702 * IOCTL call returns -EINVAL.
707 * .. code-block:: C
720 * for (int i = 0; i < engines->num_engines; i++) {
722 * engines->engines[i].instance.engine_class ==
724 * engines->engines[i].instance.engine_class ==
726 * engines->engines[i].instance.engine_class ==
728 * engines->engines[i].instance.engine_class ==
730 * engines->engines[i].instance.engine_class ==
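
Only slices of the engine-listing example match the search above, so here is a self-contained sketch of the standard two-call pattern (size discovery, then data fetch), assuming the drm_xe_device_query and drm_xe_query_engines layouts from the header:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static struct drm_xe_query_engines *query_engines(int fd)
    {
        struct drm_xe_device_query query = {
            .extensions = 0,
            .query = DRM_XE_DEVICE_QUERY_ENGINES,
            .size = 0,        /* first call: kernel fills in the needed size */
            .data = 0,
        };
        struct drm_xe_query_engines *engines;

        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return NULL;

        engines = malloc(query.size);
        if (!engines)
            return NULL;

        query.data = (uintptr_t)engines;   /* second call: copy the data out */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(engines);
            return NULL;
        }

        for (unsigned int i = 0; i < engines->num_engines; i++) {
            struct drm_xe_engine_class_instance *eci = &engines->engines[i].instance;

            printf("engine %u: class=%u instance=%u gt=%u\n",
                   i, eci->engine_class, eci->engine_instance, eci->gt_id);
        }

        return engines;   /* caller frees */
    }

The same two-call pattern applies to every variable-size query type listed above.
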
765 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
768 * The @flags can be:
769 * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
772 * VM_BIND or accessed by the CPU. As a result, no backing memory is
774 * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
775 * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
778 * for small-bar systems (on full-bar systems this gets turned into a
780 * Note1: System memory can be used as an extra placement if the kernel
783 * behaviour as the i915 interface, see
785 * Note2: For clear-color CCS surfaces the kernel needs to read the
786 * clear-color value stored in the buffer, and on discrete platforms we
789 * small-bar systems.
792 * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
795 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
803 * - %DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
804 * this object will be used with. Valid values are listed in enum
807 * %DRM_XE_PXP_TYPE_HWDRM will be marked as invalid if a PXP invalidation
837 * @flags: Flags for the GEM object; a mask of the %DRM_XE_GEM_CREATE_FLAG_* values listed above
840 __u32 flags;
848 * 2. Cannot be exported as a PRIME fd.
867 * between GPU and CPU is guaranteed. The caching mode of
868 * existing CPU-mappings will be updated transparently to
869 * user-space clients.
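
A sketch tying @flags, @placement and @cpu_caching together for a plain system-memory allocation; the placement bit (instance 0 for system memory) is an assumption and should really be derived from DRM_XE_DEVICE_QUERY_MEM_REGIONS:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int create_sysmem_bo(int fd, uint64_t size, uint32_t *handle)
    {
        struct drm_xe_gem_create create = {
            .size = size,               /* must respect the region's min_page_size */
            /* Assumption: instance 0 is the system memory region on this
             * device; the placement bitmask should be built from a
             * DRM_XE_DEVICE_QUERY_MEM_REGIONS query. */
            .placement = 1 << 0,
            .flags = 0,                 /* e.g. DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING */
            .vm_id = 0,                 /* 0: not tied to a single VM */
            .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
        };

        if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
            return -1;

        *handle = create.handle;        /* output: GEM handle of the new BO */
        return 0;
    }
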
880 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
882 * The @flags can be:
883 * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset
885 * PCI memory barrier with low overhead (avoiding IOCTL call as well as writing
889 * Note: The mmap size can be at most 4K, due to HW limitations. As a result
895 * Roughly the usage would be as follows:
897 * .. code-block:: C
901 * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
916 /** @flags: Flags */
917 __u32 flags;
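
A sketch of the usual CPU-mapping flow built on this ioctl: fetch the fake offset, then hand it to mmap() on the same DRM fd (struct fields handle/flags/offset assumed from the header):

.. code-block:: C

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/xe_drm.h>

    static void *map_bo(int fd, uint32_t handle, size_t size)
    {
        struct drm_xe_gem_mmap_offset mmo = {
            .handle = handle,
            .flags = 0,   /* or DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER for the barrier page */
        };

        if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
            return MAP_FAILED;

        /* The returned offset is only meaningful for mmap() on this fd */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmo.offset);
    }
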
927 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
929 * The @flags can be:
930 * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
935 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
939 * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
941 * LR VMs can be created in recoverable page-fault mode using
944 * different per-VM overcommit semantics that are enabled by
947 * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
949 * demand when accessed, and also allows per-VM overcommit of memory.
960 /** @flags: Flags */
961 __u32 flags;
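
A sketch of creating and destroying a VM; @flags is 0 for a plain VM, or a combination such as DRM_XE_VM_CREATE_FLAG_LR_MODE (optionally with DRM_XE_VM_CREATE_FLAG_FAULT_MODE) for the long-running cases described above:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int create_vm(int fd, uint32_t *vm_id)
    {
        struct drm_xe_vm_create create = {
            .flags = 0,   /* e.g. DRM_XE_VM_CREATE_FLAG_LR_MODE |
                           *      DRM_XE_VM_CREATE_FLAG_FAULT_MODE */
        };

        if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
            return -1;

        *vm_id = create.vm_id;   /* output: id used by bind/exec-queue ioctls */
        return 0;
    }

    static void destroy_vm(int fd, uint32_t vm_id)
    {
        struct drm_xe_vm_destroy destroy = { .vm_id = vm_id };

        ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
    }
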
971 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
985 * struct drm_xe_vm_bind_op - run bind operations
988 * - %DRM_XE_VM_BIND_OP_MAP
989 * - %DRM_XE_VM_BIND_OP_UNMAP
990 * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
991 * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
992 * - %DRM_XE_VM_BIND_OP_PREFETCH
994 * and the @flags can be:
995 * - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
997 * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
999 * fault handler. This is implied on a non-faulting VM as there is no
1001 * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
1003 * dropped and all reads return zero. In the future, the NULL flag
1007 * - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
1009 * flag has no effect on BOs that are not marked as using PXP.
1010 * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
1018 * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
1041 * there is a mismatch (see note below for pre-MTL platforms).
1043 * Note: On pre-MTL platforms there is only a caching mode and no
1045 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
1046 * CPU caches even with the caching mode set as uncached. It's only the
1048 * is always mapped as WC on the CPU). However to keep the uapi somewhat
1050 * levels into the following coherency buckets on all pre-MTL platforms:
1052 * ppGTT UC -> COH_NONE
1053 * ppGTT WC -> COH_NONE
1054 * ppGTT WT -> COH_NONE
1055 * ppGTT WB -> COH_AT_LEAST_1WAY
1058 * such platforms (or perhaps in general for dma-buf if shared with
1061 * have a shared-LLC. On MTL+ this completely changes and the HW
1062 * defines the coherency mode as part of the @pat_index, where
1065 * Note: For userptr and externally imported dma-buf the kernel expects
1071 * attributes likely do not apply. Simply leaving as zero is one
1073 * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings as for such mapping
1122 /** @flags: Bind flags */
1123 __u32 flags;
1125 #define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
1141 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
1148 * .. code-block:: C
1159 * .bind.flags = 0,
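
Only fragments of the header's bind example match the search, so here is a hedged restatement of a single MAP operation; the pat_index of 0 is purely illustrative (it must be a PAT index valid for the device), and real code would usually attach an out-sync to learn when the bind completes:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int bind_bo(int fd, uint32_t vm_id, uint32_t bo_handle,
                       uint64_t gpu_addr, uint64_t size)
    {
        struct drm_xe_vm_bind bind = {
            .vm_id = vm_id,
            .num_binds = 1,                    /* single op: use the inline .bind */
            .bind.obj = bo_handle,
            .bind.obj_offset = 0,
            .bind.range = size,                /* must respect min_page_size */
            .bind.addr = gpu_addr,
            .bind.op = DRM_XE_VM_BIND_OP_MAP,
            .bind.flags = 0,                   /* e.g. DRM_XE_VM_BIND_FLAG_READONLY */
            .bind.pat_index = 0,               /* illustrative only: must be a PAT
                                                * index valid for this device */
            .num_syncs = 0,                    /* no in/out fences in this sketch */
        };

        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
    }
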
1212 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
1218 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
1220 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
1222 * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
1223 * this queue will be used with. Valid values are listed in enum
1231 * before PXP is ready, the ioctl will return -EBUSY if init is still in
1232 * progress or -EIO if init failed.
1233 * Given that going into a power-saving state kills PXP HWDRM sessions,
1241 * .. code-block:: C
1257 * indicate a low-latency hint with a flag while creating the exec queue, as
1261 * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
1289 /** @flags: flags to use for this exec queue */
1290 __u32 flags;
1296 * @instances: user pointer to a 2-d array of struct
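
A sketch of creating a simple 1-wide exec queue on a single engine, following the width x num_placements description of @instances above; field names are assumed from the header:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int create_exec_queue(int fd, uint32_t vm_id,
                                 struct drm_xe_engine_class_instance *eci,
                                 uint32_t *exec_queue_id)
    {
        struct drm_xe_exec_queue_create create = {
            .extensions = 0,       /* optionally a drm_xe_ext_set_property chain */
            .width = 1,            /* engines submitted to in parallel */
            .num_placements = 1,   /* possible placements per submission slot */
            .vm_id = vm_id,
            .flags = 0,            /* e.g. DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT */
            /* instances points at a width x num_placements array of
             * drm_xe_engine_class_instance; 1x1 here, so a single entry. */
            .instances = (uintptr_t)eci,
        };

        if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
            return -1;

        *exec_queue_id = create.exec_queue_id;
        return 0;
    }
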
1309 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
1323 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
1326 * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
1347 * struct drm_xe_sync - sync object
1350 * - %DRM_XE_SYNC_TYPE_SYNCOBJ
1351 * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
1352 * - %DRM_XE_SYNC_TYPE_USER_FENCE
1354 * and the @flags can be:
1355 * - %DRM_XE_SYNC_FLAG_SIGNAL
1359 * .. code-block:: C
1362 * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
1375 * .flags = 0,
1392 /** @flags: Sync Flags */
1393 __u32 flags;
1421 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
1428 * .. code-block:: C
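
A sketch (not the header's own example) of submitting one batch buffer with a syncobj out-fence created through the core DRM syncobj ioctls, then waiting on it; struct fields are assumed from the header:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int submit_and_wait(int fd, uint32_t exec_queue_id, uint64_t batch_addr)
    {
        struct drm_syncobj_create so_create = {};    /* core DRM syncobj */
        struct drm_xe_sync sync;
        struct drm_xe_exec exec;
        struct drm_syncobj_wait wait;

        if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &so_create))
            return -1;

        sync = (struct drm_xe_sync) {
            .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
            .flags = DRM_XE_SYNC_FLAG_SIGNAL,       /* out-fence */
            .handle = so_create.handle,
        };

        exec = (struct drm_xe_exec) {
            .exec_queue_id = exec_queue_id,
            .num_syncs = 1,
            .syncs = (uintptr_t)&sync,
            .address = batch_addr,                  /* GPU VA of the batch buffer */
            .num_batch_buffer = 1,                  /* must match the queue width */
        };
        if (ioctl(fd, DRM_IOCTL_XE_EXEC, &exec))
            return -1;

        wait = (struct drm_syncobj_wait) {
            .handles = (uintptr_t)&so_create.handle,
            .count_handles = 1,
            .timeout_nsec = INT64_MAX,              /* absolute timeout: "forever" */
        };
        return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    }
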
1473 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
1475 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
1483 * - %DRM_XE_UFENCE_WAIT_OP_EQ
1484 * - %DRM_XE_UFENCE_WAIT_OP_NEQ
1485 * - %DRM_XE_UFENCE_WAIT_OP_GT
1486 * - %DRM_XE_UFENCE_WAIT_OP_GTE
1487 * - %DRM_XE_UFENCE_WAIT_OP_LT
1488 * - %DRM_XE_UFENCE_WAIT_OP_LTE
1490 * and the @flags can be:
1491 * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
1492 * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
1495 * - 0xffu for u8
1496 * - 0xffffu for u16
1497 * - 0xffffffffu for u32
1498 * - 0xffffffffffffffffu for u64
1519 /** @flags: wait flags */
1520 __u16 flags;
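
A sketch of waiting for a 64-bit user fence to become equal to an expected value, using the op and mask conventions above; the timeout is a relative nanosecond value because DRM_XE_UFENCE_WAIT_FLAG_ABSTIME is not set, and the field names are assumed from the header:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int wait_fence_eq(int fd, uint64_t *fence_cpu_addr, uint64_t expected,
                             int64_t timeout_ns)
    {
        struct drm_xe_wait_user_fence wait = {
            .addr = (uintptr_t)fence_cpu_addr,   /* qword-aligned fence location */
            .op = DRM_XE_UFENCE_WAIT_OP_EQ,
            .flags = 0,                          /* relative timeout; see ABSTIME */
            .value = expected,
            .mask = 0xffffffffffffffffull,       /* full u64 compare, per the list above */
            .timeout = timeout_ns,
        };

        return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }
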
1558 * enum drm_xe_observation_type - Observation stream types
1560 enum drm_xe_observation_type {
1568 * enum drm_xe_observation_op - Observation stream ops
1570 enum drm_xe_observation_op {
1582 * struct drm_xe_observation_param - Input of &DRM_XE_OBSERVATION
1592 /** @observation_type: observation stream type, from enum @drm_xe_observation_type */
1594 /** @observation_op: observation stream op, from enum @drm_xe_observation_op */
1601 * enum drm_xe_observation_ioctls - Observation stream fd ioctls
1606 enum drm_xe_observation_ioctls {
1624 * enum drm_xe_oa_unit_type - OA unit types
1626 enum drm_xe_oa_unit_type {
1629 * sub-types of OAG. For OAR/OAC, use OAG.
1641 * struct drm_xe_oa_unit - describe OA unit
1653 /** @capabilities: OA capabilities bit-mask */
1675 * struct drm_xe_query_oa_units - describe OA units
1682 * such as the one below:
1684 * .. code-block:: C
1691 * poau = (u8 *)&qoa->oa_units[0];
1692 * for (int i = 0; i < qoa->num_oa_units; i++) {
1695 * poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
1707 * Written below as a u64 array to avoid problems with nested flexible
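
Since only parts of the walking example match the search, here is a completed sketch of the same idea: each drm_xe_oa_unit ends with a flexible engine array, so its size depends on num_engines and the u64 array has to be walked with a running byte pointer. The buffer is assumed to come from a DRM_XE_DEVICE_QUERY_OA_UNITS device query, and the oa_unit_type/capabilities/num_engines/eci field names are assumptions:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <drm/xe_drm.h>

    static void walk_oa_units(struct drm_xe_query_oa_units *qoa)
    {
        uint8_t *poau = (uint8_t *)&qoa->oa_units[0];

        for (uint32_t i = 0; i < qoa->num_oa_units; i++) {
            struct drm_xe_oa_unit *oau = (struct drm_xe_oa_unit *)poau;

            printf("OA unit %u: type=%u capabilities=0x%llx engines=%llu\n",
                   i, oau->oa_unit_type,
                   (unsigned long long)oau->capabilities,
                   (unsigned long long)oau->num_engines);

            /* advance past this unit's header and its engine array */
            poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
        }
    }
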
1714 * enum drm_xe_oa_format_type - OA format types as specified in PRM/Bspec
1717 enum drm_xe_oa_format_type {
1733 * enum drm_xe_oa_property_id - OA stream property ids
1735 * Stream params are specified as a chain of @drm_xe_ext_set_property
1736 * struct's, with @property values from enum @drm_xe_oa_property_id and
1745 enum drm_xe_oa_property_id {
1770 * OA_FORMATs are specified the same way as in PRM/Bspec 52198/60942,
1771 * in terms of the following quantities: a. enum @drm_xe_oa_format_type
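
A heavily hedged sketch of feeding such a property chain to the observation ioctl to open an OA stream; the names DRM_XE_OA_EXTENSION_SET_PROPERTY, DRM_XE_OA_PROPERTY_OA_METRIC_SET and DRM_XE_OA_PROPERTY_OA_FORMAT are assumptions to check against the header, and only the chained drm_xe_ext_set_property / drm_xe_observation_param mechanism is the point:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Returns an OA stream fd on success, -1 on failure. */
    static int open_oa_stream(int fd, uint64_t metric_set, uint64_t oa_format)
    {
        struct drm_xe_ext_set_property props[2] = {};
        struct drm_xe_observation_param param = {};

        /* Assumed extension/property names; check enum drm_xe_oa_property_id. */
        props[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
        props[0].base.next_extension = (uintptr_t)&props[1].base;
        props[0].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
        props[0].value = metric_set;

        props[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
        props[1].property = DRM_XE_OA_PROPERTY_OA_FORMAT;
        props[1].value = oa_format;

        param.observation_type = DRM_XE_OBSERVATION_TYPE_OA;
        param.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN;
        param.param = (uintptr_t)&props[0];

        /* On success the ioctl returns a new fd for the observation stream. */
        return ioctl(fd, DRM_IOCTL_XE_OBSERVATION, &param);
    }
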
1842 * struct drm_xe_oa_config - OA metric configuration
1852 /** @uuid: String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x" */
1866 * struct drm_xe_oa_stream_status - OA stream status returned from
1887 * struct drm_xe_oa_stream_info - OA stream info returned from
1902 * enum drm_xe_pxp_session_type - Supported PXP session types.
1908 enum drm_xe_pxp_session_type {
1922 * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
1924 * These properties are passed to the driver at open as a chain of
1935 enum drm_xe_eu_stall_property_id {
1958 * struct drm_xe_query_eu_stall - Information about EU stall sampling.
1968 /** @capabilities: EU stall capabilities bit-mask */
1993 * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
2000 * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
2001 * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
2002 * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
2006 * .. code-block:: C
2045 * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set the VRAM of the faulting tile as the preferred location
2046 * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set system memory as the preferred location
2049 * - DRM_XE_MIGRATE_ALL_PAGES
2050 * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
2054 #define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
2076 * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
2079 * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
2080 * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
2081 * - DRM_XE_ATOMIC_CPU: Support CPU atomic only, no GPU atomics supported.
2120 * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
2126 * The structure includes information such as atomic access policy,
2178 * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
2191 * If the second call fails with -ENOSPC, it means the memory ranges changed between
2198 * .. code-block:: C