/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_PT_TYPES_H_
#define _XE_PT_TYPES_H_

#include <linux/llist.h>
#include <linux/types.h>

#include "xe_pt_walk.h"

struct xe_bo;
struct xe_device;
struct xe_exec_queue;
struct xe_vma;

enum xe_cache_level {
	XE_CACHE_NONE,
	XE_CACHE_WT,
	XE_CACHE_WB,
	XE_CACHE_NONE_COMPRESSION, /* UC + COH_NONE + COMPRESSION */
	__XE_CACHE_LEVEL_COUNT,
};
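
/*
 * Illustrative sketch (assumes the xe->pat.idx[] table populated by xe_pat,
 * which is not defined in this header): before a PTE is encoded, a cache
 * level is typically translated to a hardware PAT index via a per-device
 * lookup, along the lines of:
 *
 *	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 *
 * That pat_index is what the xe_pt_ops encode callbacks below consume.
 */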

#define XE_VM_MAX_LEVEL 4

struct xe_pt {
	/** @base: Page-table walk node this PT embeds */
	struct xe_ptw base;
	/** @bo: Buffer object holding this page table's memory */
	struct xe_bo *bo;
	/** @level: Level of this PT in the page-table tree, 0 being a leaf */
	unsigned int level;
	/** @num_live: Number of live (present) entries in this PT */
	unsigned int num_live;
	/** @rebind: This PT was live in a previous bind and is being rebound */
	bool rebind;
	/** @is_compact: This PT uses the compact 64K page-table layout */
	bool is_compact;
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
	/** @addr: Virtual address start address of the PT. */
	u64 addr;
#endif
};
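
/*
 * Since struct xe_ptw is embedded as @base, walk callbacks that are handed a
 * struct xe_ptw * can recover the containing xe_pt with container_of(), for
 * example (illustrative, not defined here):
 *
 *	struct xe_pt *pt = container_of(ptw, struct xe_pt, base);
 */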

/** struct xe_pt_ops - Per-platform hooks for encoding PTEs and PDEs */
struct xe_pt_ops {
	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
			     u16 pat_index, u32 pt_level);
	u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
			      u16 pat_index, u32 pt_level);
	u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
			       u16 pat_index,
			       u32 pt_level, bool devmem, u64 flags);
	u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
			     u16 pat_index);
};
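
/*
 * Illustrative sketch (assumes the vm->pt_ops pointer from xe_vm_types.h):
 * callers pick the encode hook matching what they map. To encode a leaf PTE
 * pointing at @bo_offset bytes into a BO, something like:
 *
 *	u64 pte = vm->pt_ops->pte_encode_bo(bo, bo_offset, pat_index, level);
 *
 * pde_encode_bo() instead builds a directory entry pointing at a child
 * page-table BO.
 */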

struct xe_pt_entry {
	/** @pt: Child page table this entry points at, if any */
	struct xe_pt *pt;
	/** @pte: Raw page-table entry value */
	u64 pte;
};

struct xe_vm_pgtable_update {
	/** @pt_bo: page table bo to write to */
	struct xe_bo *pt_bo;

	/** @ofs: offset inside the page table bo to begin writing to (in qwords) */
	u32 ofs;

	/** @qwords: number of PTEs to write */
	u32 qwords;

	/** @pt: opaque pointer useful for the caller of xe_migrate_update_pgtables */
	struct xe_pt *pt;

	/** @pt_entries: Newly added pagetable entries */
	struct xe_pt_entry *pt_entries;

	/** @flags: Target flags */
	u32 flags;
};
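
/*
 * Illustrative sketch (assumes xe_map_wr() from xe_map.h and the bo->vmap
 * iosys map): applying one update with the CPU amounts to writing @qwords
 * u64 entries into @pt_bo, starting @ofs qwords in:
 *
 *	for (i = 0; i < update->qwords; i++)
 *		xe_map_wr(xe, &update->pt_bo->vmap,
 *			  (update->ofs + i) * sizeof(u64), u64,
 *			  update->pt_entries[i].pte);
 *
 * The GPU path hands the same description to xe_migrate_update_pgtables(),
 * which emits the writes from the migration engine instead.
 */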

/** struct xe_vm_pgtable_update_op - Page table update operation */
struct xe_vm_pgtable_update_op {
	/**
	 * @entries: entries to update for this operation (worst case: two
	 * page tables per level plus the root)
	 */
	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
	/** @vma: VMA for operation, operation not valid if NULL */
	struct xe_vma *vma;
	/** @num_entries: number of entries for this update operation */
	u32 num_entries;
	/** @bind: operation is a bind */
	bool bind;
	/** @rebind: operation is a rebind */
	bool rebind;
};
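
/*
 * Illustrative sketch: an operation slot is only valid when @vma is set, so
 * consumers of the ops array below typically skip empty slots:
 *
 *	for (i = 0; i < pt_update_ops->num_ops; i++) {
 *		struct xe_vm_pgtable_update_op *op = &pt_update_ops->ops[i];
 *
 *		if (!op->vma)
 *			continue;
 *		...apply op->entries[0..op->num_entries)...
 *	}
 */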

/** struct xe_vm_pgtable_update_ops - Page table update operations */
struct xe_vm_pgtable_update_ops {
	/** @ops: operations */
	struct xe_vm_pgtable_update_op *ops;
	/** @deferred: deferred list to destroy PT entries */
	struct llist_head deferred;
	/** @q: exec queue for PT operations */
	struct xe_exec_queue *q;
	/** @start: start address of ops */
	u64 start;
	/** @last: last address of ops */
	u64 last;
	/** @num_ops: number of operations */
	u32 num_ops;
	/** @current_op: current operation */
	u32 current_op;
	/** @needs_userptr_lock: Needs userptr lock */
	bool needs_userptr_lock;
	/** @needs_invalidation: Needs TLB invalidation */
	bool needs_invalidation;
	/**
	 * @wait_vm_bookkeep: PT operations need to wait until VM is idle
	 * (bookkeep dma-resv slots are idle) and stage all future VM activity
	 * behind these operations (install PT operations into VM kernel
	 * dma-resv slot).
	 */
	bool wait_vm_bookkeep;
	/**
	 * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
	 * slots are idle.
	 */
	bool wait_vm_kernel;
};
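
/*
 * Illustrative sketch (assumes the DMA_RESV_USAGE_* classes from
 * <linux/dma-resv.h>): the two wait flags above map onto dma-resv usage
 * classes when the update job's dependencies are gathered, e.g.:
 *
 *	if (pt_update_ops->wait_vm_bookkeep)
 *		usage = DMA_RESV_USAGE_BOOKKEEP;
 *	else if (pt_update_ops->wait_vm_kernel)
 *		usage = DMA_RESV_USAGE_KERNEL;
 */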

#endif