/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

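/* Bounds on the IB size estimate for one page table update job, in dwords */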
#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
	return amdgpu_ttm_alloc_gart(&table->bo.tbo);
}

/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
				    unsigned int count, u64 k_job_id)
{
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
		: &p->vm->delayed;
	unsigned int ndw;
	int r;

	/* estimate how many dw we need */
	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
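	/*
	 * With a GART mapping table each PTE is built on the CPU and staged
	 * at the tail of the IB as a 64-bit value, i.e. two extra dwords per
	 * entry on top of the copy commands themselves.
	 */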
	if (p->pages_addr)
		ndw += count * 2;
	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
				     ndw * 4, pool, &p->job, k_job_id);
	if (r)
		return r;

	p->num_dw_left = ndw;
	return 0;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @sync: amdgpu_sync object with fences to wait for
 * @k_job_id: identifier of the job, for tracing purposes
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  struct amdgpu_sync *sync, u64 k_job_id)
{
	int r;

	r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
	if (r)
		return r;

	if (!sync)
		return 0;

	r = amdgpu_sync_push_to_job(sync, p->job);
	if (r) {
		p->num_dw_left = 0;
		amdgpu_job_free(p->job);
	}
	return r;
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct amdgpu_ring *ring;
	struct dma_fence *f;

	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
			    sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);

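	/*
	 * Bumping tlb_seq marks the TLB contents for this VM as stale;
	 * consumers are expected to flush before using the new mappings.
	 */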
	if (p->needs_flush)
		atomic64_inc(&p->vm->tlb_seq);

	WARN_ON(ib->length_dw > p->num_dw_left);
	f = amdgpu_job_submit(p->job);

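	/*
	 * Unlocked updates run without the root PD reservation held, so the
	 * fence is remembered in vm->last_unlocked instead of being added to
	 * the reservation object.
	 */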
	if (p->unlocked) {
		struct dma_fence *tmp = dma_fence_get(f);

		swap(p->vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	} else {
		dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
				   DMA_RESV_USAGE_BOOKKEEP);
	}

	if (fence && !p->immediate) {
		/*
		 * Most hw generations now have a separate queue for page table
		 * updates, but when the queue is shared with userspace we need
		 * the extra CPU round trip to correctly flush the TLB.
		 */
		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
		swap(*fence, f);
	}
	dma_fence_put(f);
	return 0;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PTE, relative to start of PDB/PTB
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

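	/* The PTE values were staged at the tail of this IB, num_dw_left dwords in */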
	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to set up the page table using DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset_no_check(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
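	/*
	 * For fewer than three entries, writing the values directly into the
	 * IB is cheaper than emitting a full set_pte_pde packet.
	 */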
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @vmbo: PD/PT to update
 * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, set up the mapping buffer on demand and write the
 * commands to the IB.
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	struct amdgpu_bo *bo = &vmbo->bo;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&p->job->base, fence);
		if (r) {
			dma_fence_put(fence);
			dma_resv_iter_end(&cursor);
			return r;
		}
	}
	dma_resv_iter_end(&cursor);

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

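		/* Not enough room left in the IB: submit it and start a new job */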
		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			r = amdgpu_vm_sdma_alloc_job(p, count,
						     AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
			if (r)
				return r;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;

		/* for padding */
		ndw -= 7;

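		/* Each staged PTE takes two dwords at the IB tail, hence ndw / 2 */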
		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}


const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
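
/*
 * This vtable is picked at VM init time; roughly (a sketch of the selection
 * logic in amdgpu_vm.c, not verbatim):
 *
 *	if (vm->use_cpu_for_update)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 */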