xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 /*
2  * Copyright 2017 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #define NVKM_VMM_LEVELS_MAX 6
23 #include "vmm.h"
24 
25 #include <subdev/fb.h>
26 
27 static void
28 nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
29 {
30 	struct nvkm_vmm_pt *pgt = *ppgt;
31 	if (pgt) {
32 		kvfree(pgt->pde);
33 		kfree(pgt);
34 		*ppgt = NULL;
35 	}
36 }
37 
38 
39 static struct nvkm_vmm_pt *
40 nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
41 		const struct nvkm_vmm_page *page)
42 {
43 	const u32 pten = 1 << desc->bits;
44 	struct nvkm_vmm_pt *pgt;
45 	u32 lpte = 0;
46 
47 	if (desc->type > PGT) {
48 		if (desc->type == SPT) {
49 			const struct nvkm_vmm_desc *pair = page[-1].desc;
50 			lpte = pten >> (desc->bits - pair->bits);
51 		} else {
52 			lpte = pten;
53 		}
54 	}
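	/* Annotation: lpte is the number of large-page PTEs spanning the same
	 * virtual range as this table; for an SPT, pten >> (desc->bits -
	 * pair->bits) reduces to 1 << pair->bits.  Hypothetical numbers for
	 * illustration: desc->bits = 8 (256 SPTEs) and pair->bits = 3 give
	 * lpte = 256 >> 5 = 8, i.e. one tracker per LPTE.  The pte[] array
	 * sized below holds this per-LPTE bookkeeping for the dual-PT logic.
	 */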
55 
56 	if (!(pgt = kzalloc(sizeof(*pgt) + (sizeof(pgt->pte[0]) * lpte), GFP_KERNEL)))
57 		return NULL;
58 	pgt->page = page ? page->shift : 0;
59 	pgt->sparse = sparse;
60 
61 	if (desc->type == PGD) {
62 		pgt->pde = kvzalloc_objs(*pgt->pde, pten);
63 		if (!pgt->pde) {
64 			kfree(pgt);
65 			return NULL;
66 		}
67 	}
68 
69 	return pgt;
70 }
71 
72 struct nvkm_vmm_iter {
73 	const struct nvkm_vmm_page *page;
74 	const struct nvkm_vmm_desc *desc;
75 	struct nvkm_vmm *vmm;
76 	u64 cnt;
77 	u16 max, lvl;
78 	u32 pte[NVKM_VMM_LEVELS_MAX];
79 	struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
80 	int flush;
81 };
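/* Annotation: the iterator carries one entry per page-table level, with
 * lvl 0 being the leaf and lvl == max the root PD.  pte[lvl] is the current
 * index within that level's table, pt[lvl] the software shadow of the table,
 * cnt the number of leaf PTEs still to visit, and flush the shallowest
 * depth-from-root touched so far (see nvkm_vmm_flush_mark()).
 */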
82 
83 #ifdef CONFIG_NOUVEAU_DEBUG_MMU
84 static const char *
85 nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
86 {
87 	switch (desc->type) {
88 	case PGD: return "PGD";
89 	case PGT: return "PGT";
90 	case SPT: return "SPT";
91 	case LPT: return "LPT";
92 	default:
93 		return "UNKNOWN";
94 	}
95 }
96 
97 static void
98 nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
99 {
100 	int lvl;
101 	for (lvl = it->max; lvl >= 0; lvl--) {
102 		if (lvl >= it->lvl)
103 			buf += sprintf(buf,  "%05x:", it->pte[lvl]);
104 		else
105 			buf += sprintf(buf, "xxxxx:");
106 	}
107 }
108 
109 #define TRA(i,f,a...) do {                                                     \
110 	char _buf[NVKM_VMM_LEVELS_MAX * 7];                                    \
111 	struct nvkm_vmm_iter *_it = (i);                                       \
112 	nvkm_vmm_trace(_it, _buf);                                             \
113 	VMM_TRACE(_it->vmm, "%s "f, _buf, ##a);                                \
114 } while(0)
115 #else
116 #define TRA(i,f,a...)
117 #endif
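/* Annotation: each level contributes six characters ("%05x:" or "xxxxx:") to
 * the trace buffer, so NVKM_VMM_LEVELS_MAX * 7 bytes leaves room for up to
 * six levels plus the terminating NUL written by sprintf().
 */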
118 
119 static inline void
120 nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
121 {
122 	it->flush = min(it->flush, it->max - it->lvl);
123 }
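/* Annotation: it->max - it->lvl is the depth below the root at which a write
 * happened (0 for the root PD itself).  Keeping the minimum means the eventual
 * flush covers the shallowest level modified; how the backend interprets that
 * depth is implementation-specific (see the per-GPU func->flush hooks).
 */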
124 
125 static inline void
126 nvkm_vmm_flush(struct nvkm_vmm_iter *it)
127 {
128 	if (it->flush != NVKM_VMM_LEVELS_MAX) {
129 		if (it->vmm->func->flush) {
130 			TRA(it, "flush: %d", it->flush);
131 			it->vmm->func->flush(it->vmm, it->flush);
132 		}
133 		it->flush = NVKM_VMM_LEVELS_MAX;
134 	}
135 }
136 
137 static void
138 nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
139 {
140 	const struct nvkm_vmm_desc *desc = it->desc;
141 	const int type = desc[it->lvl].type == SPT;
142 	struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
143 	struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
144 	struct nvkm_mmu_pt *pt = pgt->pt[type];
145 	struct nvkm_vmm *vmm = it->vmm;
146 	u32 pdei = it->pte[it->lvl + 1];
147 
148 	/* Recurse up the tree, unreferencing/destroying unneeded PDs. */
149 	it->lvl++;
150 	if (--pgd->refs[0]) {
151 		const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
152 		/* PD has other valid PDEs, so we need a proper update. */
153 		TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
154 		pgt->pt[type] = NULL;
155 		if (!pgt->refs[!type]) {
156 			/* PDE no longer required. */
157 			if (pgd->pt[0]) {
158 				if (pgt->sparse) {
159 					func->sparse(vmm, pgd->pt[0], pdei, 1);
160 					pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
161 				} else {
162 					func->unmap(vmm, pgd->pt[0], pdei, 1);
163 					pgd->pde[pdei] = NULL;
164 				}
165 			} else {
166 				/* Special handling for Tesla-class GPUs,
167 				 * where there's no central PD, but each
168 				 * instance has its own embedded PD.
169 				 */
170 				func->pde(vmm, pgd, pdei);
171 				pgd->pde[pdei] = NULL;
172 			}
173 		} else {
174 			/* PDE was pointing at dual-PTs and we're removing
175 			 * one of them, leaving the other in place.
176 			 */
177 			func->pde(vmm, pgd, pdei);
178 		}
179 
180 		/* GPU may have cached the PTs, flush before freeing. */
181 		nvkm_vmm_flush_mark(it);
182 		nvkm_vmm_flush(it);
183 	} else {
184 		/* PD has no valid PDEs left, so we can just destroy it. */
185 		nvkm_vmm_unref_pdes(it);
186 	}
187 
188 	/* Destroy PD/PT. */
189 	TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
190 	nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
191 	if (!pgt->refs[!type])
192 		nvkm_vmm_pt_del(&pgt);
193 	it->lvl--;
194 }
195 
196 static void
197 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
198 		     const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
199 {
200 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
201 	const u32 sptb = desc->bits - pair->bits;
202 	const u32 sptn = 1 << sptb;
203 	struct nvkm_vmm *vmm = it->vmm;
204 	u32 spti = ptei & (sptn - 1), lpti, pteb;
205 
206 	/* Determine how many SPTEs are being touched under each LPTE,
207 	 * and drop reference counts.
208 	 */
209 	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
210 		const u32 pten = min(sptn - spti, ptes);
211 		pgt->pte[lpti].s.sptes -= pten;
212 		ptes -= pten;
213 	}
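	/* Annotation: sptb is log2(SPTEs per LPTE), so the loop above walks the
	 * SPTE range in LPTE-sized chunks.  Worked example with hypothetical
	 * values: sptb = 5 (sptn = 32), ptei = 40, ptes = 60 -> lpti starts at
	 * 1 with spti = 8, dropping 24 refs from LPTE 1, 32 from LPTE 2 and 4
	 * from LPTE 3.
	 */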
214 
215 	/* We're done here if there's no corresponding LPT. */
216 	if (!pgt->refs[0])
217 		return;
218 
219 	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
220 		/* Skip over any LPTEs that still have valid SPTEs. */
221 		if (pgt->pte[pteb].s.sptes) {
222 			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
223 				if (!(pgt->pte[ptei].s.sptes))
224 					break;
225 			}
226 			continue;
227 		}
228 
229 		/* As there are no more non-UNMAPPED SPTEs left in the range
230 		 * covered by a number of LPTEs, the LPTEs once again take
231 		 * control over their address range.
232 		 *
233 		 * Determine how many LPTEs need to transition state.
234 		 */
235 		pgt->pte[ptei].s.spte_valid = false;
236 		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
237 			if (pgt->pte[ptei].s.sptes)
238 				break;
239 			pgt->pte[ptei].s.spte_valid = false;
240 		}
241 
242 		if (pgt->pte[pteb].s.sparse) {
243 			TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
244 			pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
245 		} else if (!pgt->pte[pteb].s.lpte_valid) {
246 			if (pair->func->invalid) {
247 				/* If the MMU supports it, restore the LPTE to the
248 				 * INVALID state to tell the MMU there is no point
249 				 * trying to fetch the corresponding SPTEs.
250 				 */
251 				TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
252 				pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
253 			}
254 		} else {
255 			TRA(it, "LPTE %05x: V %d PTEs", pteb, ptes);
256 		}
257 	}
258 }
259 
260 static bool
261 nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
262 {
263 	const struct nvkm_vmm_desc *desc = it->desc;
264 	const int type = desc->type == SPT;
265 	struct nvkm_vmm_pt *pgt = it->pt[0];
266 	bool dma;
267 
268 	if (pfn) {
269 		/* Need to clear PTE valid bits before we dma_unmap_page(). */
270 		dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
271 		if (dma) {
272 			/* GPU may have cached the PT, flush before unmap. */
273 			nvkm_vmm_flush_mark(it);
274 			nvkm_vmm_flush(it);
275 			desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
276 		}
277 	}
278 
279 	/* Drop PTE references. */
280 	pgt->refs[type] -= ptes;
281 
282 	/* Dual-PTs need special handling, unless the PDE is becoming invalid. */
283 	if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
284 		nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
285 
286 	if (desc->type == LPT && (pgt->refs[0] || pgt->refs[1])) {
287 		for (u32 lpti = ptei; ptes; lpti++) {
288 			pgt->pte[lpti].s.lptes--;
289 			if (pgt->pte[lpti].s.lptes == 0)
290 				pgt->pte[lpti].s.lpte_valid = false;
291 			ptes--;
292 		}
293 	}
294 
295 	/* PT no longer needed? Destroy it. */
296 	if (!pgt->refs[type]) {
297 		it->lvl++;
298 		TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
299 		it->lvl--;
300 		nvkm_vmm_unref_pdes(it);
301 		return false; /* PTE writes for unmap() not necessary. */
302 	}
303 
304 	return true;
305 }
306 
307 static void
308 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
309 		   const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
310 {
311 	const struct nvkm_vmm_desc *pair = it->page[-1].desc;
312 	const u32 sptb = desc->bits - pair->bits;
313 	const u32 sptn = 1 << sptb;
314 	struct nvkm_vmm *vmm = it->vmm;
315 	u32 spti = ptei & (sptn - 1), lpti, pteb;
316 
317 	/* Determine how many SPTEs are being touched under each LPTE,
318 	 * and increase reference counts.
319 	 */
320 	for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
321 		const u32 pten = min(sptn - spti, ptes);
322 		pgt->pte[lpti].s.sptes += pten;
323 		ptes -= pten;
324 	}
325 
326 	/* We're done here if there's no corresponding LPT. */
327 	if (!pgt->refs[0])
328 		return;
329 
330 	for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
331 		/* Skip over any LPTEs that already have valid SPTEs. */
332 		if (pgt->pte[pteb].s.spte_valid) {
333 			for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
334 				if (!pgt->pte[ptei].s.spte_valid)
335 					break;
336 			}
337 			continue;
338 		}
339 
340 		/* As there are now non-UNMAPPED SPTEs in the range covered
341 		 * by a number of LPTEs, we need to transfer control of the
342 		 * address range to the SPTEs.
343 		 *
344 		 * Determine how many LPTEs need to transition state.
345 		 */
346 		pgt->pte[ptei].s.spte_valid = true;
347 		pgt->pte[ptei].s.lpte_valid = false;
348 		for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
349 			if (pgt->pte[ptei].s.spte_valid)
350 				break;
351 			pgt->pte[ptei].s.spte_valid = true;
352 			pgt->pte[ptei].s.lpte_valid = false;
353 		}
354 
355 		if (pgt->pte[pteb].s.sparse) {
356 			const u32 spti = pteb * sptn;
357 			const u32 sptc = ptes * sptn;
358 			/* The entire LPTE is marked as sparse, so we need
359 			 * to make sure that the SPTEs are too.
360 			 */
361 			TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
362 			desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
363 			/* Sparse LPTEs prevent SPTEs from being accessed. */
364 			TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
365 			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
366 		} else
367 		if (pair->func->invalid) {
368 			/* MMU supports blocking SPTEs by marking an LPTE
369 			 * as INVALID.  We need to reverse that here.
370 			 */
371 			TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
372 			pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
373 		}
374 	}
375 }
376 
377 static bool
378 nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
379 {
380 	const struct nvkm_vmm_desc *desc = it->desc;
381 	const int type = desc->type == SPT;
382 	struct nvkm_vmm_pt *pgt = it->pt[0];
383 
384 	/* Take PTE references. */
385 	pgt->refs[type] += ptes;
386 
387 	/* Dual-PTs need special handling. */
388 	if (desc->type == SPT)
389 		nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
390 
391 	if (desc->type == LPT) {
392 		for (u32 lpti = ptei; ptes; lpti++) {
393 			pgt->pte[lpti].s.spte_valid = false;
394 			pgt->pte[lpti].s.lpte_valid = true;
395 			pgt->pte[lpti].s.lptes++;
396 			ptes--;
397 		}
398 	}
399 
400 	return true;
401 }
402 
403 static void
404 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
405 		     struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
406 {
407 	if (desc->type == PGD) {
408 		while (ptes--)
409 			pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
410 	} else
411 	if (desc->type == LPT) {
412 		union nvkm_pte_tracker sparse = { .s.sparse = 1 };
413 		memset32(&pgt->pte[ptei].u, sparse.u, ptes);
414 	}
415 }
416 
417 static bool
418 nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
419 {
420 	struct nvkm_vmm_pt *pt = it->pt[0];
421 	if (it->desc->type == PGD)
422 		memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
423 	else
424 	if (it->desc->type == LPT)
425 		memset32(&pt->pte[ptei].u, 0x00, ptes);
426 	return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
427 }
428 
429 static bool
430 nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
431 {
432 	nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
433 	return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
434 }
435 
436 static bool
437 nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
438 {
439 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
440 	const int type = desc->type == SPT;
441 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
442 	const bool zero = !pgt->sparse && !desc->func->invalid;
443 	struct nvkm_vmm *vmm = it->vmm;
444 	struct nvkm_mmu *mmu = vmm->mmu;
445 	struct nvkm_mmu_pt *pt;
446 	u32 pten = 1 << desc->bits;
447 	u32 pteb, ptei, ptes;
448 	u32 size = desc->size * pten;
449 
450 	pgd->refs[0]++;
451 
452 	pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
453 	if (!pgt->pt[type]) {
454 		it->lvl--;
455 		nvkm_vmm_unref_pdes(it);
456 		return false;
457 	}
458 
459 	if (zero)
460 		goto done;
461 
462 	pt = pgt->pt[type];
463 
464 	if (desc->type == LPT && pgt->refs[1]) {
465 		/* SPT already exists covering the same range as this LPT,
466 		 * which means we need to be careful that any LPTEs which
467 		 * overlap valid SPTEs are unmapped as opposed to invalid
468 		 * or sparse, which would prevent the MMU from looking at
469 		 * the SPTEs on some GPUs.
470 		 */
471 		for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
472 			bool spte = !!pgt->pte[ptei].s.sptes;
473 			for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
474 				bool next = !!pgt->pte[ptei].s.sptes;
475 				if (spte != next)
476 					break;
477 			}
478 
479 			if (!spte) {
480 				if (pgt->sparse)
481 					desc->func->sparse(vmm, pt, pteb, ptes);
482 				else
483 					desc->func->invalid(vmm, pt, pteb, ptes);
484 				memset32(&pgt->pte[pteb].u, 0x00, ptes);
485 			} else {
486 				desc->func->unmap(vmm, pt, pteb, ptes);
487 				while (ptes--)
488 					pgt->pte[pteb++].s.spte_valid = true;
489 			}
490 		}
491 	} else {
492 		if (pgt->sparse) {
493 			nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
494 			desc->func->sparse(vmm, pt, 0, pten);
495 		} else {
496 			desc->func->invalid(vmm, pt, 0, pten);
497 		}
498 	}
499 
500 done:
501 	TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
502 	it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
503 	nvkm_vmm_flush_mark(it);
504 	return true;
505 }
506 
507 static bool
508 nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
509 {
510 	const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
511 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
512 
513 	pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
514 	if (!pgt) {
515 		if (!pgd->refs[0])
516 			nvkm_vmm_unref_pdes(it);
517 		return false;
518 	}
519 
520 	pgd->pde[pdei] = pgt;
521 	return true;
522 }
523 
524 static inline u64
525 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
526 	      u64 addr, u64 size, const char *name, bool ref, bool pfn,
527 	      bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
528 	      nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
529 	      nvkm_vmm_pxe_func CLR_PTES)
530 {
531 	const struct nvkm_vmm_desc *desc = page->desc;
532 	struct nvkm_vmm_iter it;
533 	u64 bits = addr >> page->shift;
534 
535 	it.page = page;
536 	it.desc = desc;
537 	it.vmm = vmm;
538 	it.cnt = size >> page->shift;
539 	it.flush = NVKM_VMM_LEVELS_MAX;
540 
541 	/* Deconstruct address into PTE indices for each mapping level. */
542 	for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
543 		it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
544 		bits >>= desc[it.lvl].bits;
545 	}
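	/* Annotation: the loop above is a plain radix decomposition of the
	 * page-aligned address.  With a hypothetical layout of page->shift = 16
	 * and desc[].bits = {8, 10, 10}, addr = 0x12345670000 gives
	 * bits = 0x1234567, then pte[0] = 0x67, pte[1] = 0x345, pte[2] = 0x48.
	 */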
546 	it.max = --it.lvl;
547 	it.pt[it.max] = vmm->pd;
548 
549 	it.lvl = 0;
550 	TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
551 	         addr, size, page->shift, it.cnt);
552 	it.lvl = it.max;
553 
554 	/* Depth-first traversal of page tables. */
555 	while (it.cnt) {
556 		struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
557 		const int type = desc->type == SPT;
558 		const u32 pten = 1 << desc->bits;
559 		const u32 ptei = it.pte[0];
560 		const u32 ptes = min_t(u64, it.cnt, pten - ptei);
561 
562 		/* Walk down the tree, finding page tables for each level. */
563 		for (; it.lvl; it.lvl--) {
564 			const u32 pdei = it.pte[it.lvl];
565 			struct nvkm_vmm_pt *pgd = pgt;
566 
567 			/* Software PT. */
568 			if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
569 				if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
570 					goto fail;
571 			}
572 			it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
573 
574 			/* Hardware PT.
575 			 *
576 			 * This is a separate step from above due to GF100 and
577 			 * newer having dual page tables at some levels, which
578 			 * are refcounted independently.
579 			 */
580 			if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
581 				if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
582 					goto fail;
583 			}
584 		}
585 
586 		/* Handle PTE updates. */
587 		if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
588 			struct nvkm_mmu_pt *pt = pgt->pt[type];
589 			if (MAP_PTES || CLR_PTES) {
590 				if (MAP_PTES)
591 					MAP_PTES(vmm, pt, ptei, ptes, map);
592 				else
593 					CLR_PTES(vmm, pt, ptei, ptes);
594 				nvkm_vmm_flush_mark(&it);
595 			}
596 		}
597 
598 		/* Walk back up the tree to the next position. */
599 		it.pte[it.lvl] += ptes;
600 		it.cnt -= ptes;
601 		if (it.cnt) {
602 			while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
603 				it.pte[it.lvl++] = 0;
604 				it.pte[it.lvl]++;
605 			}
606 		}
607 	}
608 
609 	nvkm_vmm_flush(&it);
610 	return ~0ULL;
611 
612 fail:
613 	/* Reconstruct the failure address so the caller is able to
614 	 * reverse any partially completed operations.
615 	 */
616 	addr = it.pte[it.max--];
617 	do {
618 		addr  = addr << desc[it.max].bits;
619 		addr |= it.pte[it.max];
620 	} while (it.max--);
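	/* Annotation: this is the inverse of the decomposition at the top of the
	 * function -- the per-level indices are shifted back together and then
	 * scaled by the page size, yielding the address at which the iteration
	 * failed so the caller can unwind everything before it.
	 */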
621 
622 	return addr << page->shift;
623 }
624 
625 static void
626 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
627 			 u64 addr, u64 size)
628 {
629 	nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
630 		      nvkm_vmm_sparse_unref_ptes, NULL, NULL,
631 		      page->desc->func->invalid ?
632 		      page->desc->func->invalid : page->desc->func->unmap);
633 }
634 
635 static int
636 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
637 			 u64 addr, u64 size)
638 {
639 	if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
640 		u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
641 					 true, false, nvkm_vmm_sparse_ref_ptes,
642 					 NULL, NULL, page->desc->func->sparse);
643 		if (fail != ~0ULL) {
644 			if ((size = fail - addr))
645 				nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
646 			return -ENOMEM;
647 		}
648 		return 0;
649 	}
650 	return -EINVAL;
651 }
652 
653 static int
654 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
655 {
656 	const struct nvkm_vmm_page *page = vmm->func->page;
657 	int m = 0, i;
658 	u64 start = addr;
659 	u64 block;
660 
661 	while (size) {
662 		/* Limit maximum page size based on remaining size. */
663 		while (size < (1ULL << page[m].shift))
664 			m++;
665 		i = m;
666 
667 		/* Find largest page size suitable for alignment. */
668 		while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
669 			i++;
670 
671 		/* Determine number of PTEs at this page size. */
672 		if (i != m) {
673 			/* Limited to alignment boundary of next page size. */
674 			u64 next = 1ULL << page[i - 1].shift;
675 			u64 part = ALIGN(addr, next) - addr;
676 			if (size - part >= next)
677 				block = (part >> page[i].shift) << page[i].shift;
678 			else
679 				block = (size >> page[i].shift) << page[i].shift;
680 		} else {
681 			block = (size >> page[i].shift) << page[i].shift;
682 		}
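		/* Annotation: page[] is ordered from largest to smallest shift, so
		 * 'm' is the largest size that still fits the remaining length and
		 * 'i' the largest of those the current address is aligned to.
		 * Hypothetical example with shifts {21, 16, 12}: addr = 0x11000,
		 * size = 0x200000 -> m = 0, i = 2 (4KiB) and block = 0xf000, i.e.
		 * just enough 4KiB pages to reach the next 64KiB boundary, after
		 * which larger pages take over on later iterations.
		 */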
683 
684 		/* Perform operation. */
685 		if (ref) {
686 			int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
687 			if (ret) {
688 				if ((size = addr - start))
689 					nvkm_vmm_ptes_sparse(vmm, start, size, false);
690 				return ret;
691 			}
692 		} else {
693 			nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
694 		}
695 
696 		size -= block;
697 		addr += block;
698 	}
699 
700 	return 0;
701 }
702 
703 static void
704 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
705 		    u64 addr, u64 size, bool sparse, bool pfn)
706 {
707 	const struct nvkm_vmm_desc_func *func = page->desc->func;
708 
709 	mutex_lock(&vmm->mutex.map);
710 	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
711 		      NULL, NULL, NULL,
712 		      sparse ? func->sparse : func->invalid ? func->invalid :
713 							      func->unmap);
714 	mutex_unlock(&vmm->mutex.map);
715 }
716 
717 static void
718 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
719 		  u64 addr, u64 size, struct nvkm_vmm_map *map,
720 		  nvkm_vmm_pte_func func)
721 {
722 	mutex_lock(&vmm->mutex.map);
723 	nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
724 		      NULL, func, map, NULL);
725 	mutex_unlock(&vmm->mutex.map);
726 }
727 
728 static void
729 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
730 			 u64 addr, u64 size)
731 {
732 	nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
733 		      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
734 }
735 
736 static void
737 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
738 		  u64 addr, u64 size)
739 {
740 	mutex_lock(&vmm->mutex.ref);
741 	nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
742 	mutex_unlock(&vmm->mutex.ref);
743 }
744 
745 static int
746 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
747 		  u64 addr, u64 size)
748 {
749 	u64 fail;
750 
751 	mutex_lock(&vmm->mutex.ref);
752 	fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
753 			     nvkm_vmm_ref_ptes, NULL, NULL, NULL);
754 	if (fail != ~0ULL) {
755 		if (fail != addr)
756 			nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
757 		mutex_unlock(&vmm->mutex.ref);
758 		return -ENOMEM;
759 	}
760 	mutex_unlock(&vmm->mutex.ref);
761 	return 0;
762 }
763 
764 static void
765 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
766 			  u64 addr, u64 size, bool sparse, bool pfn)
767 {
768 	const struct nvkm_vmm_desc_func *func = page->desc->func;
769 
770 	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
771 		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
772 		      sparse ? func->sparse : func->invalid ? func->invalid :
773 							      func->unmap);
774 }
775 
776 static void
777 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
778 			u64 addr, u64 size, bool sparse, bool pfn)
779 {
780 	if (vmm->managed.raw) {
781 		nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
782 		nvkm_vmm_ptes_put(vmm, page, addr, size);
783 	} else {
784 		__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
785 	}
786 }
787 
788 static int
789 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
790 			u64 addr, u64 size, struct nvkm_vmm_map *map,
791 			nvkm_vmm_pte_func func)
792 {
793 	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
794 				 false, nvkm_vmm_ref_ptes, func, map, NULL);
795 	if (fail != ~0ULL) {
796 		if ((size = fail - addr))
797 			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
798 		return -ENOMEM;
799 	}
800 	return 0;
801 }
802 
803 static int
804 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
805 		      u64 addr, u64 size, struct nvkm_vmm_map *map,
806 		      nvkm_vmm_pte_func func)
807 {
808 	int ret;
809 
810 	if (vmm->managed.raw) {
811 		ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
812 		if (ret)
813 			return ret;
814 
815 		nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
816 
817 		return 0;
818 	} else {
819 		return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
820 	}
821 }
822 
823 struct nvkm_vma *
824 nvkm_vma_new(u64 addr, u64 size)
825 {
826 	struct nvkm_vma *vma = kzalloc_obj(*vma);
827 	if (vma) {
828 		vma->addr = addr;
829 		vma->size = size;
830 		vma->page = NVKM_VMA_PAGE_NONE;
831 		vma->refd = NVKM_VMA_PAGE_NONE;
832 	}
833 	return vma;
834 }
835 
836 struct nvkm_vma *
837 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
838 {
839 	struct nvkm_vma *new;
840 
841 	BUG_ON(vma->size == tail);
842 
843 	if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
844 		return NULL;
845 	vma->size -= tail;
846 
847 	new->mapref = vma->mapref;
848 	new->sparse = vma->sparse;
849 	new->page = vma->page;
850 	new->refd = vma->refd;
851 	new->used = vma->used;
852 	new->part = vma->part;
853 	new->busy = vma->busy;
854 	new->mapped = vma->mapped;
855 	list_add(&new->head, &vma->head);
856 	return new;
857 }
858 
859 static inline void
860 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
861 {
862 	rb_erase(&vma->tree, &vmm->free);
863 }
864 
865 static inline void
866 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
867 {
868 	nvkm_vmm_free_remove(vmm, vma);
869 	list_del(&vma->head);
870 	kfree(vma);
871 }
872 
873 static void
874 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
875 {
876 	struct rb_node **ptr = &vmm->free.rb_node;
877 	struct rb_node *parent = NULL;
878 
879 	while (*ptr) {
880 		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
881 		parent = *ptr;
882 		if (vma->size < this->size)
883 			ptr = &parent->rb_left;
884 		else
885 		if (vma->size > this->size)
886 			ptr = &parent->rb_right;
887 		else
888 		if (vma->addr < this->addr)
889 			ptr = &parent->rb_left;
890 		else
891 		if (vma->addr > this->addr)
892 			ptr = &parent->rb_right;
893 		else
894 			BUG();
895 	}
896 
897 	rb_link_node(&vma->tree, parent, ptr);
898 	rb_insert_color(&vma->tree, &vmm->free);
899 }
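/* Annotation: the free tree is keyed by size first and address second, which
 * is what lets nvkm_vmm_get_locked() do a best-fit search further down -- the
 * leftmost node with size >= the request is the smallest suitable block.
 */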
900 
901 static inline void
902 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
903 {
904 	rb_erase(&vma->tree, &vmm->root);
905 }
906 
907 static inline void
908 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
909 {
910 	nvkm_vmm_node_remove(vmm, vma);
911 	list_del(&vma->head);
912 	kfree(vma);
913 }
914 
915 static void
916 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
917 {
918 	struct rb_node **ptr = &vmm->root.rb_node;
919 	struct rb_node *parent = NULL;
920 
921 	while (*ptr) {
922 		struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
923 		parent = *ptr;
924 		if (vma->addr < this->addr)
925 			ptr = &parent->rb_left;
926 		else
927 		if (vma->addr > this->addr)
928 			ptr = &parent->rb_right;
929 		else
930 			BUG();
931 	}
932 
933 	rb_link_node(&vma->tree, parent, ptr);
934 	rb_insert_color(&vma->tree, &vmm->root);
935 }
936 
937 struct nvkm_vma *
938 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
939 {
940 	struct rb_node *node = vmm->root.rb_node;
941 	while (node) {
942 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
943 		if (addr < vma->addr)
944 			node = node->rb_left;
945 		else
946 		if (addr >= vma->addr + vma->size)
947 			node = node->rb_right;
948 		else
949 			return vma;
950 	}
951 	return NULL;
952 }
953 
954 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :             \
955 	list_entry((root)->head.dir, struct nvkm_vma, head))
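/* Annotation: node(vma, prev) / node(vma, next) fetch the neighbouring VMA in
 * address order from vmm->list, or NULL when vma sits at either end of the
 * list.
 */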
956 
957 static struct nvkm_vma *
958 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
959 		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
960 {
961 	if (next) {
962 		if (vma->size == size) {
963 			vma->size += next->size;
964 			nvkm_vmm_node_delete(vmm, next);
965 			if (prev) {
966 				prev->size += vma->size;
967 				nvkm_vmm_node_delete(vmm, vma);
968 				return prev;
969 			}
970 			return vma;
971 		}
972 		BUG_ON(prev);
973 
974 		nvkm_vmm_node_remove(vmm, next);
975 		vma->size -= size;
976 		next->addr -= size;
977 		next->size += size;
978 		nvkm_vmm_node_insert(vmm, next);
979 		return next;
980 	}
981 
982 	if (prev) {
983 		if (vma->size != size) {
984 			nvkm_vmm_node_remove(vmm, vma);
985 			prev->size += size;
986 			vma->addr += size;
987 			vma->size -= size;
988 			nvkm_vmm_node_insert(vmm, vma);
989 		} else {
990 			prev->size += vma->size;
991 			nvkm_vmm_node_delete(vmm, vma);
992 		}
993 		return prev;
994 	}
995 
996 	return vma;
997 }
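/* Annotation: nvkm_vmm_node_merge() merges a 'size'-byte sub-region of 'vma'
 * with its compatible neighbours.  When the region spans all of 'vma' the
 * adjacent nodes are collapsed into one; otherwise only the leading bytes
 * migrate into 'prev', or the trailing bytes into 'next' (never both, hence
 * the BUG_ON).  The VMA that ends up covering the region is returned.
 */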
998 
999 struct nvkm_vma *
1000 nvkm_vmm_node_split(struct nvkm_vmm *vmm,
1001 		    struct nvkm_vma *vma, u64 addr, u64 size)
1002 {
1003 	struct nvkm_vma *prev = NULL;
1004 
1005 	if (vma->addr != addr) {
1006 		prev = vma;
1007 		if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
1008 			return NULL;
1009 		vma->part = true;
1010 		nvkm_vmm_node_insert(vmm, vma);
1011 	}
1012 
1013 	if (vma->size != size) {
1014 		struct nvkm_vma *tmp;
1015 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1016 			nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
1017 			return NULL;
1018 		}
1019 		tmp->part = true;
1020 		nvkm_vmm_node_insert(vmm, tmp);
1021 	}
1022 
1023 	return vma;
1024 }
1025 
1026 static void
1027 nvkm_vma_dump(struct nvkm_vma *vma)
1028 {
1029 	printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
1030 	       vma->addr, (u64)vma->size,
1031 	       vma->used ? '-' : 'F',
1032 	       vma->mapref ? 'R' : '-',
1033 	       vma->sparse ? 'S' : '-',
1034 	       vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
1035 	       vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
1036 	       vma->part ? 'P' : '-',
1037 	       vma->busy ? 'B' : '-',
1038 	       vma->mapped ? 'M' : '-',
1039 	       vma->memory);
1040 }
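/* Annotation: the flag columns are, in order, used ('-' when used, 'F' when
 * free), mapref 'R', sparse 'S', the page and refd size indices as digits,
 * part 'P', busy 'B' and mapped 'M', with '-' wherever a flag is clear.
 */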
1041 
1042 static void
1043 nvkm_vmm_dump(struct nvkm_vmm *vmm)
1044 {
1045 	struct nvkm_vma *vma;
1046 	list_for_each_entry(vma, &vmm->list, head) {
1047 		nvkm_vma_dump(vma);
1048 	}
1049 }
1050 
1051 static void
1052 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
1053 {
1054 	struct nvkm_vma *vma;
1055 	struct rb_node *node;
1056 
1057 	if (vmm->rm.client.gsp)
1058 		r535_mmu_vaspace_del(vmm);
1059 
1060 	if (0)
1061 		nvkm_vmm_dump(vmm);
1062 
1063 	while ((node = rb_first(&vmm->root))) {
1064 		struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
1065 		nvkm_vmm_put(vmm, &vma);
1066 	}
1067 
1068 	if (vmm->bootstrapped) {
1069 		const struct nvkm_vmm_page *page = vmm->func->page;
1070 		const u64 limit = vmm->limit - vmm->start;
1071 
1072 		while (page[1].shift)
1073 			page++;
1074 
1075 		nvkm_mmu_ptc_dump(vmm->mmu);
1076 		nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
1077 	}
1078 
1079 	vma = list_first_entry(&vmm->list, typeof(*vma), head);
1080 	list_del(&vma->head);
1081 	kfree(vma);
1082 	WARN_ON(!list_empty(&vmm->list));
1083 
1084 	if (vmm->nullp) {
1085 		dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1086 				  vmm->nullp, vmm->null);
1087 	}
1088 
1089 	if (vmm->pd) {
1090 		nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1091 		nvkm_vmm_pt_del(&vmm->pd);
1092 	}
1093 }
1094 
1095 static int
1096 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1097 {
1098 	struct nvkm_vma *vma;
1099 	if (!(vma = nvkm_vma_new(addr, size)))
1100 		return -ENOMEM;
1101 	vma->mapref = true;
1102 	vma->sparse = false;
1103 	vma->used = true;
1104 	nvkm_vmm_node_insert(vmm, vma);
1105 	list_add_tail(&vma->head, &vmm->list);
1106 	return 0;
1107 }
1108 
1109 static int
1110 nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1111 	      u32 pd_header, bool managed, u64 addr, u64 size,
1112 	      struct lock_class_key *key, const char *name,
1113 	      struct nvkm_vmm *vmm)
1114 {
1115 	static struct lock_class_key _key;
1116 	const struct nvkm_vmm_page *page = func->page;
1117 	const struct nvkm_vmm_desc *desc;
1118 	struct nvkm_vma *vma;
1119 	int levels, bits = 0, ret;
1120 
1121 	vmm->func = func;
1122 	vmm->mmu = mmu;
1123 	vmm->name = name;
1124 	vmm->debug = mmu->subdev.debug;
1125 	kref_init(&vmm->kref);
1126 
1127 	__mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
1128 	mutex_init(&vmm->mutex.ref);
1129 	mutex_init(&vmm->mutex.map);
1130 
1131 	/* Locate the smallest page size supported by the backend, it will
1132 	 * have the deepest nesting of page tables.
1133 	 */
1134 	while (page[1].shift)
1135 		page++;
1136 
1137 	/* Locate the structure that describes the layout of the top-level
1138 	 * page table, and determine the number of valid bits in a virtual
1139 	 * address.
1140 	 */
1141 	for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
1142 		bits += desc->bits;
1143 	bits += page->shift;
1144 	desc--;
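	/* Annotation: bits accumulates the total width of the virtual address
	 * space -- the index bits of every level plus the smallest page shift.
	 * For illustration only, a hypothetical layout of shift 12 with levels
	 * of {9, 9, 9, 9, 2} bits would give a 50-bit address space.  desc is
	 * left pointing at the top-level (root) descriptor for the PD below.
	 */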
1145 
1146 	if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
1147 		return -EINVAL;
1148 
1149 	/* Allocate top-level page table. */
1150 	vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1151 	if (!vmm->pd)
1152 		return -ENOMEM;
1153 	vmm->pd->refs[0] = 1;
1154 	INIT_LIST_HEAD(&vmm->join);
1155 
1156 	/* ... and the GPU storage for it, except on Tesla-class GPUs that
1157 	 * have the PD embedded in the instance structure.
1158 	 */
1159 	if (desc->size) {
1160 		const u32 size = pd_header + desc->size * (1 << desc->bits);
1161 		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1162 		if (!vmm->pd->pt[0])
1163 			return -ENOMEM;
1164 	}
1165 
1166 	/* Initialise address-space MM. */
1167 	INIT_LIST_HEAD(&vmm->list);
1168 	vmm->free = RB_ROOT;
1169 	vmm->root = RB_ROOT;
1170 
1171 	if (managed) {
1172 		/* Address-space will be managed by the client for the most
1173 		 * part, except for a specified area where NVKM allocations
1174 		 * are allowed to be placed.
1175 		 */
1176 		vmm->start = 0;
1177 		vmm->limit = 1ULL << bits;
1178 		if (addr + size < addr || addr + size > vmm->limit)
1179 			return -EINVAL;
1180 
1181 		/* Client-managed area before the NVKM-managed area. */
1182 		if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1183 			return ret;
1184 
1185 		vmm->managed.p.addr = 0;
1186 		vmm->managed.p.size = addr;
1187 
1188 		/* NVKM-managed area. */
1189 		if (size) {
1190 			if (!(vma = nvkm_vma_new(addr, size)))
1191 				return -ENOMEM;
1192 			nvkm_vmm_free_insert(vmm, vma);
1193 			list_add_tail(&vma->head, &vmm->list);
1194 		}
1195 
1196 		/* Client-managed area after the NVKM-managed area. */
1197 		addr = addr + size;
1198 		size = vmm->limit - addr;
1199 		if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1200 			return ret;
1201 
1202 		vmm->managed.n.addr = addr;
1203 		vmm->managed.n.size = size;
1204 	} else {
1205 		/* Address-space fully managed by NVKM, requiring calls to
1206 		 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
1207 		 */
1208 		vmm->start = addr;
1209 		vmm->limit = size ? (addr + size) : (1ULL << bits);
1210 		if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1211 			return -EINVAL;
1212 
1213 		if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1214 			return -ENOMEM;
1215 
1216 		nvkm_vmm_free_insert(vmm, vma);
1217 		list_add(&vma->head, &vmm->list);
1218 	}
1219 
1220 	return 0;
1221 }
1222 
1223 int
1224 nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
1225 	      u32 hdr, bool managed, u64 addr, u64 size,
1226 	      struct lock_class_key *key, const char *name,
1227 	      struct nvkm_vmm **pvmm)
1228 {
1229 	if (!(*pvmm = kzalloc_obj(**pvmm)))
1230 		return -ENOMEM;
1231 	return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
1232 }
1233 
1234 static struct nvkm_vma *
1235 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1236 			 u64 addr, u64 size, u8 page, bool map)
1237 {
1238 	struct nvkm_vma *prev = NULL;
1239 	struct nvkm_vma *next = NULL;
1240 
1241 	if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1242 		if (prev->memory || prev->mapped != map)
1243 			prev = NULL;
1244 	}
1245 
1246 	if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1247 		if (!next->part ||
1248 		    next->memory || next->mapped != map)
1249 			next = NULL;
1250 	}
1251 
1252 	if (prev || next)
1253 		return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1254 	return nvkm_vmm_node_split(vmm, vma, addr, size);
1255 }
1256 
1257 int
1258 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1259 {
1260 	struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1261 	struct nvkm_vma *next;
1262 	u64 limit = addr + size;
1263 	u64 start = addr;
1264 
1265 	if (!vma)
1266 		return -EINVAL;
1267 
1268 	do {
1269 		if (!vma->mapped || vma->memory)
1270 			continue;
1271 
1272 		size = min(limit - start, vma->size - (start - vma->addr));
1273 
1274 		nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1275 					start, size, false, true);
1276 
1277 		next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1278 		if (!WARN_ON(!next)) {
1279 			vma = next;
1280 			vma->refd = NVKM_VMA_PAGE_NONE;
1281 			vma->mapped = false;
1282 		}
1283 	} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
1284 
1285 	return 0;
1286 }
1287 
1288 /*TODO:
1289  * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
1290  *   with inside HMM, which would be a lot nicer for us to deal with.
1291  * - Support for systems without a 4KiB page size.
1292  */
1293 int
1294 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1295 {
1296 	const struct nvkm_vmm_page *page = vmm->func->page;
1297 	struct nvkm_vma *vma, *tmp;
1298 	u64 limit = addr + size;
1299 	u64 start = addr;
1300 	int pm = size >> shift;
1301 	int pi = 0;
1302 
1303 	/* Only support mapping where the page size of the incoming page
1304 	 * array matches a page size available for direct mapping.
1305 	 */
1306 	while (page->shift && (page->shift != shift ||
1307 	       page->desc->func->pfn == NULL))
1308 		page++;
1309 
1310 	if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
1311 			    !IS_ALIGNED(size, 1ULL << shift) ||
1312 	    addr + size < addr || addr + size > vmm->limit) {
1313 		VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
1314 			  shift, page->shift, addr, size);
1315 		return -EINVAL;
1316 	}
1317 
1318 	if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1319 		return -ENOENT;
1320 
1321 	do {
1322 		bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
1323 		bool mapped = vma->mapped;
1324 		u64 size = limit - start;
1325 		u64 addr = start;
1326 		int pn, ret = 0;
1327 
1328 		/* Narrow the operation window to cover a single action (page
1329 		 * should be mapped or not) within a single VMA.
1330 		 */
1331 		for (pn = 0; pi + pn < pm; pn++) {
1332 			if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
1333 				break;
1334 		}
1335 		size = min_t(u64, size, pn << page->shift);
1336 		size = min_t(u64, size, vma->size + vma->addr - addr);
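		/* Annotation: pn counts how many consecutive PFN entries share the
		 * same valid bit as pfn[pi], so a hypothetical array starting
		 * {V, V, V, 0, 0, ...} at pi = 0 yields pn = 3 and this pass only
		 * touches those three pages; size is further clamped to the end of
		 * the current VMA.
		 */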
1337 
1338 		/* Reject any operation to unmanaged regions, and areas that
1339 		 * have nvkm_memory objects mapped in them already.
1340 		 */
1341 		if (!vma->mapref || vma->memory) {
1342 			ret = -EINVAL;
1343 			goto next;
1344 		}
1345 
1346 		/* In order to both properly refcount GPU page tables, and
1347 		 * prevent "normal" mappings and these direct mappings from
1348 		 * interfering with each other, we need to track contiguous
1349 		 * ranges that have been mapped with this interface.
1350 		 *
1351 		 * Here we attempt to either split an existing VMA so we're
1352 		 * able to flag the region as either unmapped/mapped, or to
1353 		 * merge with adjacent VMAs that are already compatible.
1354 		 *
1355 		 * If the region is already compatible, nothing is required.
1356 		 */
1357 		if (map != mapped) {
1358 			tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1359 						       page -
1360 						       vmm->func->page, map);
1361 			if (WARN_ON(!tmp)) {
1362 				ret = -ENOMEM;
1363 				goto next;
1364 			}
1365 
1366 			if ((tmp->mapped = map))
1367 				tmp->refd = page - vmm->func->page;
1368 			else
1369 				tmp->refd = NVKM_VMA_PAGE_NONE;
1370 			vma = tmp;
1371 		}
1372 
1373 		/* Update HW page tables. */
1374 		if (map) {
1375 			struct nvkm_vmm_map args;
1376 			args.page = page;
1377 			args.pfn = &pfn[pi];
1378 
1379 			if (!mapped) {
1380 				ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1381 							    size, &args, page->
1382 							    desc->func->pfn);
1383 			} else {
1384 				nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1385 						  page->desc->func->pfn);
1386 			}
1387 		} else {
1388 			if (mapped) {
1389 				nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1390 							false, true);
1391 			}
1392 		}
1393 
1394 next:
1395 		/* Iterate to next operation. */
1396 		if (vma->addr + vma->size == addr + size)
1397 			vma = node(vma, next);
1398 		start += size;
1399 
1400 		if (ret) {
1401 			/* Failure is signalled by clearing the valid bit on
1402 			 * any PFN that couldn't be modified as requested.
1403 			 */
1404 			while (size) {
1405 				pfn[pi++] = NVKM_VMM_PFN_NONE;
1406 				size -= 1 << page->shift;
1407 			}
1408 		} else {
1409 			pi += size >> page->shift;
1410 		}
1411 	} while (vma && start < limit);
1412 
1413 	return 0;
1414 }
1415 
1416 void
1417 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1418 {
1419 	struct nvkm_vma *prev = NULL;
1420 	struct nvkm_vma *next;
1421 
1422 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1423 	nvkm_memory_unref(&vma->memory);
1424 	vma->mapped = false;
1425 
1426 	if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1427 		prev = NULL;
1428 	if ((next = node(vma, next)) && (!next->part || next->mapped))
1429 		next = NULL;
1430 	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1431 }
1432 
1433 void
1434 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1435 {
1436 	const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1437 
1438 	if (vma->mapref) {
1439 		nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1440 		vma->refd = NVKM_VMA_PAGE_NONE;
1441 	} else {
1442 		nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1443 	}
1444 
1445 	nvkm_vmm_unmap_region(vmm, vma);
1446 }
1447 
1448 void
1449 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1450 {
1451 	if (vma->memory) {
1452 		mutex_lock(&vmm->mutex.vmm);
1453 		nvkm_vmm_unmap_locked(vmm, vma, false);
1454 		mutex_unlock(&vmm->mutex.vmm);
1455 	}
1456 }
1457 
1458 static int
1459 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1460 		   void *argv, u32 argc, struct nvkm_vmm_map *map)
1461 {
1462 	switch (nvkm_memory_target(map->memory)) {
1463 	case NVKM_MEM_TARGET_VRAM:
1464 		if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
1465 			VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1466 			return -EINVAL;
1467 		}
1468 		break;
1469 	case NVKM_MEM_TARGET_HOST:
1470 	case NVKM_MEM_TARGET_NCOH:
1471 		if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
1472 			VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1473 			return -EINVAL;
1474 		}
1475 		break;
1476 	default:
1477 		WARN_ON(1);
1478 		return -ENOSYS;
1479 	}
1480 
1481 	if (!IS_ALIGNED(     vma->addr, 1ULL << map->page->shift) ||
1482 	    !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1483 	    !IS_ALIGNED(   map->offset, 1ULL << map->page->shift) ||
1484 	    nvkm_memory_page(map->memory) < map->page->shift) {
1485 		VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
1486 		    vma->addr, (u64)vma->size, map->offset, map->page->shift,
1487 		    nvkm_memory_page(map->memory));
1488 		return -EINVAL;
1489 	}
1490 
1491 	return vmm->func->valid(vmm, argv, argc, map);
1492 }
1493 
1494 static int
1495 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1496 		    void *argv, u32 argc, struct nvkm_vmm_map *map)
1497 {
1498 	for (map->page = vmm->func->page; map->page->shift; map->page++) {
1499 		VMM_DEBUG(vmm, "trying %d", map->page->shift);
1500 		if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1501 			return 0;
1502 	}
1503 	return -EINVAL;
1504 }
1505 
1506 static int
1507 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
1508 		    void *argv, u32 argc, struct nvkm_vmm_map *map)
1509 {
1510 	nvkm_vmm_pte_func func;
1511 	int ret;
1512 
1513 	map->no_comp = vma->no_comp;
1514 
1515 	/* Make sure we won't overrun the end of the memory object. */
1516 	if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1517 		VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
1518 			  nvkm_memory_size(map->memory),
1519 			  map->offset, (u64)vma->size);
1520 		return -EINVAL;
1521 	}
1522 
1523 	/* Check remaining arguments for validity. */
1524 	if (vma->page == NVKM_VMA_PAGE_NONE &&
1525 	    vma->refd == NVKM_VMA_PAGE_NONE) {
1526 		/* Find the largest page size we can perform the mapping at. */
1527 		const u32 debug = vmm->debug;
1528 		vmm->debug = 0;
1529 		ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1530 		vmm->debug = debug;
1531 		if (ret) {
1532 			VMM_DEBUG(vmm, "invalid at any page size");
1533 			nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1534 			return -EINVAL;
1535 		}
1536 	} else {
1537 		/* Page size of the VMA is already pre-determined. */
1538 		if (vma->refd != NVKM_VMA_PAGE_NONE)
1539 			map->page = &vmm->func->page[vma->refd];
1540 		else
1541 			map->page = &vmm->func->page[vma->page];
1542 
1543 		ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1544 		if (ret) {
1545 			VMM_DEBUG(vmm, "invalid %d\n", ret);
1546 			return ret;
1547 		}
1548 	}
1549 
1550 	/* Deal with the 'offset' argument, and fetch the backend function. */
1551 	map->off = map->offset;
1552 	if (map->mem) {
1553 		for (; map->off; map->mem = map->mem->next) {
1554 			u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
1555 			if (size > map->off)
1556 				break;
1557 			map->off -= size;
1558 		}
1559 		func = map->page->desc->func->mem;
1560 	} else
1561 	if (map->sgl) {
1562 		for (; map->off; map->sgl = sg_next(map->sgl)) {
1563 			u64 size = sg_dma_len(map->sgl);
1564 			if (size > map->off)
1565 				break;
1566 			map->off -= size;
1567 		}
1568 		func = map->page->desc->func->sgl;
1569 	} else {
1570 		map->dma += map->offset >> PAGE_SHIFT;
1571 		map->off  = map->offset & PAGE_MASK;
1572 		func = map->page->desc->func->dma;
1573 	}
1574 
1575 	/* Perform the map. */
1576 	if (vma->refd == NVKM_VMA_PAGE_NONE) {
1577 		ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1578 		if (ret)
1579 			return ret;
1580 
1581 		vma->refd = map->page - vmm->func->page;
1582 	} else {
1583 		nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1584 	}
1585 
1586 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1587 	nvkm_memory_unref(&vma->memory);
1588 	vma->memory = nvkm_memory_ref(map->memory);
1589 	vma->mapped = true;
1590 	vma->tags = map->tags;
1591 	return 0;
1592 }
1593 
1594 int
1595 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
1596 	     struct nvkm_vmm_map *map)
1597 {
1598 	int ret;
1599 
1600 	if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
1601 	    vmm->managed.raw)
1602 		return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1603 
1604 	mutex_lock(&vmm->mutex.vmm);
1605 	ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1606 	vma->busy = false;
1607 	mutex_unlock(&vmm->mutex.vmm);
1608 	return ret;
1609 }
1610 
1611 static void
1612 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1613 {
1614 	struct nvkm_vma *prev, *next;
1615 
1616 	if ((prev = node(vma, prev)) && !prev->used) {
1617 		vma->addr  = prev->addr;
1618 		vma->size += prev->size;
1619 		nvkm_vmm_free_delete(vmm, prev);
1620 	}
1621 
1622 	if ((next = node(vma, next)) && !next->used) {
1623 		vma->size += next->size;
1624 		nvkm_vmm_free_delete(vmm, next);
1625 	}
1626 
1627 	nvkm_vmm_free_insert(vmm, vma);
1628 }
1629 
1630 void
1631 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1632 {
1633 	const struct nvkm_vmm_page *page = vmm->func->page;
1634 	struct nvkm_vma *next = vma;
1635 
1636 	BUG_ON(vma->part);
1637 
1638 	if (vma->mapref || !vma->sparse) {
1639 		do {
1640 			const bool mem = next->memory != NULL;
1641 			const bool map = next->mapped;
1642 			const u8  refd = next->refd;
1643 			const u64 addr = next->addr;
1644 			u64 size = next->size;
1645 
1646 			/* Merge regions that are in the same state. */
1647 			while ((next = node(next, next)) && next->part &&
1648 			       (next->mapped == map) &&
1649 			       (next->memory != NULL) == mem &&
1650 			       (next->refd == refd))
1651 				size += next->size;
1652 
1653 			if (map) {
1654 				/* Region(s) are mapped, merge the unmap
1655 				 * and dereference into a single walk of
1656 				 * the page tree.
1657 				 */
1658 				nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1659 							size, vma->sparse,
1660 							!mem);
1661 			} else
1662 			if (refd != NVKM_VMA_PAGE_NONE) {
1663 				/* Drop allocation-time PTE references. */
1664 				nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1665 			}
1666 		} while (next && next->part);
1667 	}
1668 
1669 	/* Merge any mapped regions that were split from the initial
1670 	 * address-space allocation back into the allocated VMA, and
1671 	 * release memory/compression resources.
1672 	 */
1673 	next = vma;
1674 	do {
1675 		if (next->mapped)
1676 			nvkm_vmm_unmap_region(vmm, next);
1677 	} while ((next = node(vma, next)) && next->part);
1678 
1679 	if (vma->sparse && !vma->mapref) {
1680 		/* Sparse region that was allocated with a fixed page size,
1681 		 * meaning all relevant PTEs were referenced once when the
1682 		 * region was allocated, and remained that way, regardless
1683 		 * of whether memory was mapped into it afterwards.
1684 		 *
1685 		 * The process of unmapping, unsparsing, and dereferencing
1686 		 * PTEs can be done in a single page tree walk.
1687 		 */
1688 		nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1689 	} else
1690 	if (vma->sparse) {
1691 		/* Sparse region that wasn't allocated with a fixed page size,
1692 		 * PTE references were taken both at allocation time (to make
1693 		 * the GPU see the region as sparse), and when mapping memory
1694 		 * into the region.
1695 		 *
1696 		 * The latter was handled above, and the remaining references
1697 		 * are dealt with here.
1698 		 */
1699 		nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1700 	}
1701 
1702 	/* Remove VMA from the list of allocated nodes. */
1703 	nvkm_vmm_node_remove(vmm, vma);
1704 
1705 	/* Merge VMA back into the free list. */
1706 	vma->page = NVKM_VMA_PAGE_NONE;
1707 	vma->refd = NVKM_VMA_PAGE_NONE;
1708 	vma->used = false;
1709 	nvkm_vmm_put_region(vmm, vma);
1710 }
1711 
1712 void
1713 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1714 {
1715 	struct nvkm_vma *vma = *pvma;
1716 	if (vma) {
1717 		mutex_lock(&vmm->mutex.vmm);
1718 		nvkm_vmm_put_locked(vmm, vma);
1719 		mutex_unlock(&vmm->mutex.vmm);
1720 		*pvma = NULL;
1721 	}
1722 }
1723 
1724 int
1725 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1726 		    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
1727 {
1728 	const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
1729 	struct rb_node *node = NULL, *temp;
1730 	struct nvkm_vma *vma = NULL, *tmp;
1731 	u64 addr, tail;
1732 	int ret;
1733 
1734 	VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1735 		       "shift: %d align: %d size: %016llx",
1736 		  getref, mapref, sparse, shift, align, size);
1737 
1738 	/* Zero-sized VMAs, or lazily-allocated sparse VMAs, make no sense. */
1739 	if (unlikely(!size || (!getref && !mapref && sparse))) {
1740 		VMM_DEBUG(vmm, "args %016llx %d %d %d",
1741 			  size, getref, mapref, sparse);
1742 		return -EINVAL;
1743 	}
1744 
1745 	/* Tesla-class GPUs can only select page size per-PDE, which means
1746 	 * we're required to know the mapping granularity up-front to find
1747 	 * a suitable region of address-space.
1748 	 *
1749 	 * The same goes if we're requesting up-front allocation of PTEs.
1750 	 */
1751 	if (unlikely((getref || vmm->func->page_block) && !shift)) {
1752 		VMM_DEBUG(vmm, "page size required: %d %016llx",
1753 			  getref, vmm->func->page_block);
1754 		return -EINVAL;
1755 	}
1756 
1757 	/* If a specific page size was requested, determine its index and
1758 	 * make sure the requested size is a multiple of the page size.
1759 	 */
1760 	if (shift) {
1761 		for (page = vmm->func->page; page->shift; page++) {
1762 			if (shift == page->shift)
1763 				break;
1764 		}
1765 
1766 		if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
1767 			VMM_DEBUG(vmm, "page %d %016llx", shift, size);
1768 			return -EINVAL;
1769 		}
1770 		align = max_t(u8, align, shift);
1771 	} else {
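		/* No page size requested: fall back to (at least) 4KiB
		 * (1 << 12) alignment.
		 */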
1772 		align = max_t(u8, align, 12);
1773 	}
1774 
1775 	/* Locate smallest block that can possibly satisfy the allocation. */
1776 	temp = vmm->free.rb_node;
1777 	while (temp) {
1778 		struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
1779 		if (this->size < size) {
1780 			temp = temp->rb_right;
1781 		} else {
1782 			node = temp;
1783 			temp = temp->rb_left;
1784 		}
1785 	}
1786 
1787 	if (unlikely(!node))
1788 		return -ENOSPC;
1789 
1790 	/* Take into account alignment restrictions, trying larger blocks
1791 	 * in turn until we find a suitable free block.
1792 	 */
1793 	do {
1794 		struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
1795 		struct nvkm_vma *prev = node(this, prev);
1796 		struct nvkm_vma *next = node(this, next);
1797 		const int p = page - vmm->func->page;
1798 
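		/* On GPUs with page_block restrictions the page size is chosen
		 * per-PDE, so trim the candidate region where its neighbours
		 * use a different page size.
		 */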
1799 		addr = this->addr;
1800 		if (vmm->func->page_block && prev && prev->page != p)
1801 			addr = ALIGN(addr, vmm->func->page_block);
1802 		addr = ALIGN(addr, 1ULL << align);
1803 
1804 		tail = this->addr + this->size;
1805 		if (vmm->func->page_block && next && next->page != p)
1806 			tail = ALIGN_DOWN(tail, vmm->func->page_block);
1807 
1808 		if (addr <= tail && tail - addr >= size) {
1809 			nvkm_vmm_free_remove(vmm, this);
1810 			vma = this;
1811 			break;
1812 		}
1813 	} while ((node = rb_next(node)));
1814 
1815 	if (unlikely(!vma))
1816 		return -ENOSPC;
1817 
1818 	/* If the VMA we found isn't already exactly the requested size,
1819 	 * it needs to be split, and the remaining free blocks returned.
1820 	 */
1821 	if (addr != vma->addr) {
1822 		if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1823 			nvkm_vmm_put_region(vmm, vma);
1824 			return -ENOMEM;
1825 		}
1826 		nvkm_vmm_free_insert(vmm, vma);
1827 		vma = tmp;
1828 	}
1829 
1830 	if (size != vma->size) {
1831 		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1832 			nvkm_vmm_put_region(vmm, vma);
1833 			return -ENOMEM;
1834 		}
1835 		nvkm_vmm_free_insert(vmm, tmp);
1836 	}
1837 
1838 	/* Pre-allocate page tables and/or setup sparse mappings. */
1839 	if (sparse && getref)
1840 		ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1841 	else if (sparse)
1842 		ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1843 	else if (getref)
1844 		ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1845 	else
1846 		ret = 0;
1847 	if (ret) {
1848 		nvkm_vmm_put_region(vmm, vma);
1849 		return ret;
1850 	}
1851 
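	/* With up-front PTE references (getref), per-mapping reference
	 * counting would be redundant, so mapref is only honoured when
	 * getref is false.
	 */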
1852 	vma->mapref = mapref && !getref;
1853 	vma->sparse = sparse;
1854 	vma->page = page - vmm->func->page;
1855 	vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
1856 	vma->used = true;
1857 	nvkm_vmm_node_insert(vmm, vma);
1858 	*pvma = vma;
1859 	return 0;
1860 }
1861 
1862 int
1863 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1864 {
1865 	int ret;
1866 	mutex_lock(&vmm->mutex.vmm);
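	/* Defaults for this entry point: no up-front PTE allocation
	 * (getref = false), per-mapping references (mapref = true),
	 * not sparse, no extra alignment.
	 */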
1867 	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1868 	mutex_unlock(&vmm->mutex.vmm);
1869 	return ret;
1870 }
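
/* Hypothetical usage sketch (not from the driver): allocate 16KiB of
 * address-space with 4KiB pages, then release it again.
 *
 *	struct nvkm_vma *vma;
 *	int ret = nvkm_vmm_get(vmm, 12, 0x4000, &vma);
 *	if (ret == 0) {
 *		... map memory into the region, use it ...
 *		nvkm_vmm_put(vmm, &vma);
 *	}
 */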
1871 
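/* The nvkm_vmm_raw_*() helpers operate on externally managed (raw) ranges.
 * Apart from the sparse variant, which serializes on mutex.ref, they take no
 * locks here; callers are assumed to provide their own serialization.
 */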
1872 void
1873 nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
1874 		   bool sparse, u8 refd)
1875 {
1876 	const struct nvkm_vmm_page *page = &vmm->func->page[refd];
1877 
1878 	nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
1879 }
1880 
1881 void
1882 nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1883 {
1884 	const struct nvkm_vmm_page *page = vmm->func->page;
1885 
1886 	nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1887 }
1888 
1889 int
1890 nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
1891 {
1892 	const struct nvkm_vmm_page *page = vmm->func->page;
1893 
1894 	if (unlikely(!size))
1895 		return -EINVAL;
1896 
1897 	return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
1898 }
1899 
1900 int
1901 nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
1902 {
1903 	int ret;
1904 
1905 	mutex_lock(&vmm->mutex.ref);
1906 	ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
1907 	mutex_unlock(&vmm->mutex.ref);
1908 
1909 	return ret;
1910 }
1911 
1912 void
1913 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1914 {
1915 	if (inst && vmm && vmm->func->part) {
1916 		mutex_lock(&vmm->mutex.vmm);
1917 		vmm->func->part(vmm, inst);
1918 		mutex_unlock(&vmm->mutex.vmm);
1919 	}
1920 }
1921 
1922 int
1923 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1924 {
1925 	int ret = 0;
1926 	if (vmm->func->join) {
1927 		mutex_lock(&vmm->mutex.vmm);
1928 		ret = vmm->func->join(vmm, inst);
1929 		mutex_unlock(&vmm->mutex.vmm);
1930 	}
1931 	return ret;
1932 }
1933 
1934 static bool
1935 nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
1936 {
1937 	const struct nvkm_vmm_desc *desc = it->desc;
1938 	const int type = desc->type == SPT;
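	/* Small-page tables (SPT) are kept in pt[1]; other table types use
	 * pt[0].
	 */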
1939 	nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1940 	return false;
1941 }
1942 
1943 int
1944 nvkm_vmm_boot(struct nvkm_vmm *vmm)
1945 {
1946 	const struct nvkm_vmm_page *page = vmm->func->page;
1947 	const u64 limit = vmm->limit - vmm->start;
1948 	int ret;
1949 
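	/* Walk to the last (typically the smallest) supported page size and
	 * reference PTEs across the entire address-space, so the backing
	 * page-table memory can be booted below.
	 */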
1950 	while (page[1].shift)
1951 		page++;
1952 
1953 	ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1954 	if (ret)
1955 		return ret;
1956 
1957 	nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1958 		      nvkm_vmm_boot_ptes, NULL, NULL, NULL);
1959 	vmm->bootstrapped = true;
1960 	return 0;
1961 }
1962 
1963 static void
1964 nvkm_vmm_del(struct kref *kref)
1965 {
1966 	struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1967 	nvkm_vmm_dtor(vmm);
1968 	kfree(vmm);
1969 }
1970 
1971 void
1972 nvkm_vmm_unref(struct nvkm_vmm **pvmm)
1973 {
1974 	struct nvkm_vmm *vmm = *pvmm;
1975 	if (vmm) {
1976 		kref_put(&vmm->kref, nvkm_vmm_del);
1977 		*pvmm = NULL;
1978 	}
1979 }
1980 
1981 struct nvkm_vmm *
1982 nvkm_vmm_ref(struct nvkm_vmm *vmm)
1983 {
1984 	if (vmm)
1985 		kref_get(&vmm->kref);
1986 	return vmm;
1987 }
1988 
1989 int
1990 nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
1991 	     u32 argc, struct lock_class_key *key, const char *name,
1992 	     struct nvkm_vmm **pvmm)
1993 {
1994 	struct nvkm_mmu *mmu = device->mmu;
1995 	struct nvkm_vmm *vmm = NULL;
1996 	int ret;
1997 	ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
1998 				  key, name, &vmm);
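	/* The constructor may have allocated *vmm even on failure, so drop
	 * the reference before propagating the error.
	 */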
1999 	if (ret)
2000 		nvkm_vmm_unref(&vmm);
2001 	*pvmm = vmm;
2002 	return ret;
2003 }
2004