/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

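/*
 * Illustration only, not part of the driver interface: amdgpu_vm_block_size
 * is a module parameter, so the page table geometry is configurable at load
 * time.  Assuming a block size of 9 bits and 4KB GPU pages, a single page
 * table holds AMDGPU_VM_PTE_COUNT = 1 << 9 = 512 entries and therefore
 * covers 512 * 4KB = 2MB of virtual address space per page directory entry.
 */
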
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768

/* LOG2 number of contiguous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1f) << 7)

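/*
 * Example (illustrative only): the PTE flags for a read/write, snooped
 * mapping of system memory could be assembled as
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
 *
 * The FRAG field encodes the log2 of the number of physically contiguous
 * pages an entry belongs to, a hint that lets the hardware cache the
 * translation more efficiently.
 */
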
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root		va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;
	struct dma_fence	*page_directory_fence;
	uint64_t		last_eviction_counter;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id */
	u64			client_id;
	/* each VM maps the CSA */
	struct amdgpu_bo_va	*csa_bo_va;
};

struct amdgpu_vm_id {
	struct list_head	list;
	struct dma_fence	*first;
	struct amdgpu_sync	active;
	struct dma_fence	*last_flush;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence	*flushed_updates;

	uint32_t		current_gpu_reset_count;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex				lock;
	unsigned				num_ids;
	struct list_head			ids_lru;
	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint32_t				max_pfn;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* is vm enabled? */
	bool					enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
	/* client id counter */
	atomic64_t				client_counter;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
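
/*
 * Typical lifecycle (sketch based on the declarations above): the driver
 * calls amdgpu_vm_manager_init() once during device initialization and
 * amdgpu_vm_manager_fini() on teardown, while each client address space is
 * created with amdgpu_vm_init() and destroyed with amdgpu_vm_fini(), e.g.
 *
 *	struct amdgpu_vm vm;
 *	int r = amdgpu_vm_init(adev, &vm);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &vm);
 */
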
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

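/*
 * Putting the BO/VA interface together (sketch using only the functions
 * declared above): a buffer object is attached to a VM with
 * amdgpu_vm_bo_add(), given an address range with amdgpu_vm_bo_map(), and
 * has its page table entries written by amdgpu_vm_bo_update(); teardown goes
 * through amdgpu_vm_bo_unmap() and finally amdgpu_vm_bo_rmv(), e.g.
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	int r = amdgpu_vm_bo_map(adev, bo_va, addr, 0, size,
 *				 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 *	...
 *	amdgpu_vm_bo_unmap(adev, bo_va, addr);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */
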
#endif