// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "lsdc_drv.h"
#include "lsdc_ttm.h"

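/*
 * Human readable names for the TTM memory types and the driver's GEM
 * domains, used by the debug and error messages below.
 */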
const char *lsdc_mem_type_to_str(uint32_t mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return "VRAM";
	case TTM_PL_TT:
		return "GTT";
	case TTM_PL_SYSTEM:
		return "SYSTEM";
	default:
		break;
	}

	return "Unknown";
}

const char *lsdc_domain_to_str(u32 domain)
{
	switch (domain) {
	case LSDC_GEM_DOMAIN_VRAM:
		return "VRAM";
	case LSDC_GEM_DOMAIN_GTT:
		return "GTT";
	case LSDC_GEM_DOMAIN_SYSTEM:
		return "SYSTEM";
	default:
		break;
	}

	return "Unknown";
}

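/*
 * Translate a GEM domain mask into a TTM placement list.  Multiple domain
 * bits yield multiple candidate placements, in VRAM -> GTT -> SYSTEM order,
 * and an empty mask falls back to SYSTEM.  Single-page BOs are placed
 * top-down, presumably to keep small allocations from fragmenting the
 * bottom of the heap.  fpfn/lpfn stay zero: no address range restriction.
 */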
static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
{
	u32 c = 0;
	u32 pflags = 0;
	u32 i;

	if (lbo->tbo.base.size <= PAGE_SIZE)
		pflags |= TTM_PL_FLAG_TOPDOWN;

	lbo->placement.placement = lbo->placements;

	if (domain & LSDC_GEM_DOMAIN_VRAM) {
		lbo->placements[c].mem_type = TTM_PL_VRAM;
		lbo->placements[c++].flags = pflags;
	}

	if (domain & LSDC_GEM_DOMAIN_GTT) {
		lbo->placements[c].mem_type = TTM_PL_TT;
		lbo->placements[c++].flags = pflags;
	}

	if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
		lbo->placements[c].mem_type = TTM_PL_SYSTEM;
		lbo->placements[c++].flags = 0;
	}

	if (!c) {
		lbo->placements[c].mem_type = TTM_PL_SYSTEM;
		lbo->placements[c++].flags = 0;
	}

	lbo->placement.num_placement = c;

	for (i = 0; i < c; ++i) {
		lbo->placements[i].fpfn = 0;
		lbo->placements[i].lpfn = 0;
	}
}

static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

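/*
 * Create the ttm_tt backing for a BO.  ttm_sg_tt_init() is used (rather
 * than plain ttm_tt_init()) so that both ordinary device BOs and imported
 * scatter-gather (prime) BOs are covered; CPU mappings are cached.
 */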
static struct ttm_tt *
lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
	if (ret < 0) {
		kfree(tt);
		return NULL;
	}

	return tt;
}

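/*
 * Populate/unpopulate the pages behind a ttm_tt.  Imported (external)
 * buffers already own their pages, so only the DMA address array is
 * derived from the sg table; everything else is served by the TTM pool.
 */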
static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg,
					       ttm->dma_address,
					       ttm->num_pages);

		return 0;
	}

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
				   struct ttm_tt *ttm)
{
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave)
		return;

	ttm_pool_free(&bdev->pool, ttm);
}

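/*
 * Eviction policy: VRAM contents are evicted to GTT, everything else
 * (including GTT) falls back to SYSTEM memory.
 */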
static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
				struct ttm_placement *tplacement)
{
	struct ttm_resource *resource = tbo->resource;
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);

	switch (resource->mem_type) {
	case TTM_PL_VRAM:
		lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
		break;
	}

	*tplacement = lbo->placement;
}

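/*
 * Move a BO between memory types.  No hardware copy engine is used here:
 * SYSTEM <-> GTT transitions are null moves (the backing pages do not
 * change), and anything involving VRAM goes through a CPU memcpy.
 * Pinned BOs must never move.
 */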
static int lsdc_bo_move(struct ttm_buffer_object *tbo,
			bool evict,
			struct ttm_operation_ctx *ctx,
			struct ttm_resource *new_mem,
			struct ttm_place *hop)
{
	struct drm_device *ddev = tbo->base.dev;
	struct ttm_resource *old_mem = tbo->resource;
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
	int ret;

	if (unlikely(tbo->pin_count > 0)) {
		drm_warn(ddev, "Can't move a pinned BO\n");
		return -EINVAL;
	}

	ret = ttm_bo_wait_ctx(tbo, ctx);
	if (ret)
		return ret;

	if (!old_mem) {
		drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
			lbo, lsdc_mem_type_to_str(new_mem->mem_type),
			lsdc_bo_size(lbo));
		ttm_bo_move_null(tbo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
		ttm_bo_move_null(tbo, new_mem);
		drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		ttm_bo_move_null(tbo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
			lbo, lsdc_bo_size(lbo));
		ttm_resource_free(tbo, &tbo->resource);
		ttm_bo_assign_mem(tbo, new_mem);
		return 0;
	}

	drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
		lbo,
		lsdc_mem_type_to_str(old_mem->mem_type),
		lsdc_mem_type_to_str(new_mem->mem_type),
		lsdc_bo_size(lbo));

	return ttm_bo_move_memcpy(tbo, ctx, new_mem);
}

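/*
 * Tell TTM how the CPU can reach a resource: VRAM is accessed through an
 * aperture starting at ldev->vram_base and is mapped write-combined,
 * while SYSTEM and GTT pages need no I/O remapping.
 */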
static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
				  struct ttm_resource *mem)
{
	struct lsdc_device *ldev = tdev_to_ldev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		break;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct ttm_device_funcs lsdc_bo_driver = {
	.ttm_tt_create = lsdc_ttm_tt_create,
	.ttm_tt_populate = lsdc_ttm_tt_populate,
	.ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
	.ttm_tt_destroy = lsdc_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = lsdc_bo_evict_flags,
	.move = lsdc_bo_move,
	.io_mem_reserve = lsdc_bo_reserve_io_mem,
};

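/*
 * Return the offset of a BO inside its aperture (VRAM or GTT).  Only
 * meaningful while the BO is pinned; unpinned or SYSTEM-placed BOs yield
 * 0, so a return of 0 is not by itself proof of a valid address.
 */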
u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct drm_device *ddev = tbo->base.dev;
	struct ttm_resource *resource = tbo->resource;

	if (unlikely(!tbo->pin_count)) {
		drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
		return 0;
	}

	if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
		return 0;

	return resource->start << PAGE_SHIFT;
}

size_t lsdc_bo_size(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;

	return tbo->base.size;
}

int lsdc_bo_reserve(struct lsdc_bo *lbo)
{
	return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
}

void lsdc_bo_unreserve(struct lsdc_bo *lbo)
{
	ttm_bo_unreserve(&lbo->tbo);
}

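/*
 * Pin a BO into @domain (or wherever it currently resides when @domain is
 * zero) and optionally report the resulting GPU offset.  The caller must
 * hold the reservation.  Exported (shared) BOs are refused a VRAM pin,
 * presumably because importers cannot access VRAM pages.  The per-type
 * pinned-size counters are simple bookkeeping for memory statistics.
 */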
int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
	int ret;

	if (tbo->pin_count)
		goto bo_pinned;

	if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
		return -EINVAL;

	if (domain)
		lsdc_bo_set_placement(lbo, domain);

	ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
	if (unlikely(ret)) {
		drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
		return ret;
	}

	if (domain == LSDC_GEM_DOMAIN_VRAM)
		ldev->vram_pinned_size += lsdc_bo_size(lbo);
	else if (domain == LSDC_GEM_DOMAIN_GTT)
		ldev->gtt_pinned_size += lsdc_bo_size(lbo);

bo_pinned:
	ttm_bo_pin(tbo);

	if (gpu_addr)
		*gpu_addr = lsdc_bo_gpu_offset(lbo);

	return 0;
}

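/*
 * Drop one pin reference; the pinned-size accounting is only rolled back
 * when the last pin reference goes away.
 */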
void lsdc_bo_unpin(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);

	if (unlikely(!tbo->pin_count)) {
		drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
		return;
	}

	ttm_bo_unpin(tbo);

	if (!tbo->pin_count) {
		if (tbo->resource->mem_type == TTM_PL_VRAM)
			ldev->vram_pinned_size -= lsdc_bo_size(lbo);
		else if (tbo->resource->mem_type == TTM_PL_TT)
			ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
	}
}

void lsdc_bo_ref(struct lsdc_bo *lbo)
{
	drm_gem_object_get(&lbo->tbo.base);
}

void lsdc_bo_unref(struct lsdc_bo *lbo)
{
	drm_gem_object_put(&lbo->tbo.base);
}

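/*
 * Map a BO into the kernel address space.  All kernel-usage fences on the
 * reservation object are waited upon first; the whole BO is then mapped
 * and the kernel pointer cached in lbo->kptr, so subsequent calls only
 * repeat the fence wait.
 */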
int lsdc_bo_kmap(struct lsdc_bo *lbo)
{
	struct ttm_buffer_object *tbo = &lbo->tbo;
	struct drm_gem_object *gem = &tbo->base;
	struct drm_device *ddev = gem->dev;
	long ret;
	int err;

	ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
				    MAX_SCHEDULE_TIMEOUT);
	if (ret < 0) {
		/* A negative value is an error, not a timeout */
		drm_warn(ddev, "wait fence failed: %ld\n", ret);
		return ret;
	}

	if (lbo->kptr)
		return 0;

	err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
	if (err) {
		drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
		return err;
	}

	lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);

	return 0;
}

void lsdc_bo_kunmap(struct lsdc_bo *lbo)
{
	if (!lbo->kptr)
		return;

	lbo->kptr = NULL;
	ttm_bo_kunmap(&lbo->kmap);
}

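/*
 * Zero the whole BO through a temporary kernel mapping, using memset_io()
 * when the mapping ended up in I/O memory (i.e. VRAM).
 */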
void lsdc_bo_clear(struct lsdc_bo *lbo)
{
	/* Bail out on mapping failure, lbo->kptr may be NULL */
	if (lsdc_bo_kmap(lbo))
		return;

	if (lbo->is_iomem)
		memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
	else
		memset(lbo->kptr, 0, lbo->size);

	lsdc_bo_kunmap(lbo);
}

int lsdc_bo_evict_vram(struct drm_device *ddev)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct ttm_device *bdev = &ldev->bdev;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (unlikely(!man))
		return 0;

	return ttm_resource_manager_evict_all(bdev, man);
}

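/*
 * TTM destroy callback, run once the last reference to the underlying
 * GEM object is gone: unlink the BO from the device's GEM list, release
 * the GEM object and free the embedding lsdc_bo.
 */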
static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);

	mutex_lock(&ldev->gem.mutex);
	list_del_init(&lbo->list);
	mutex_unlock(&ldev->gem.mutex);

	drm_gem_object_release(&tbo->base);

	kfree(lbo);
}

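/*
 * Allocate and validate a BO in @domain.  The size is rounded up to whole
 * pages and the BO type is derived from the arguments: kernel-internal,
 * prime import (sg != NULL) or ordinary device object.
 */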
struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
			       u32 domain,
			       size_t size,
			       bool kernel,
			       struct sg_table *sg,
			       struct dma_resv *resv)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct ttm_device *bdev = &ldev->bdev;
	struct ttm_buffer_object *tbo;
	struct lsdc_bo *lbo;
	enum ttm_bo_type bo_type;
	int ret;

	lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
	if (!lbo)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&lbo->list);

	lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
					LSDC_GEM_DOMAIN_GTT |
					LSDC_GEM_DOMAIN_SYSTEM);

	tbo = &lbo->tbo;

	size = ALIGN(size, PAGE_SIZE);

	ret = drm_gem_object_init(ddev, &tbo->base, size);
	if (ret) {
		kfree(lbo);
		return ERR_PTR(ret);
	}

	tbo->bdev = bdev;

	if (kernel)
		bo_type = ttm_bo_type_kernel;
	else if (sg)
		bo_type = ttm_bo_type_sg;
	else
		bo_type = ttm_bo_type_device;

	lsdc_bo_set_placement(lbo, domain);
	lbo->size = size;

	ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
				   false, sg, resv, lsdc_bo_destroy);
	if (ret) {
		/*
		 * On failure ttm_bo_init_validate() drops the last
		 * reference, which already runs lsdc_bo_destroy() and
		 * frees lbo; freeing it again here would be a double free.
		 */
		return ERR_PTR(ret);
	}

	return lbo;
}

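/*
 * Convenience wrapper: create a kernel BO and leave it pinned in @domain,
 * for buffers the driver itself needs to keep resident.  Undone by
 * lsdc_bo_free_kernel_pinned().
 */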
struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
					     u32 domain,
					     size_t size)
{
	struct lsdc_bo *lbo;
	int ret;

	lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
	if (IS_ERR(lbo))
		return ERR_CAST(lbo);

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret)) {
		lsdc_bo_unref(lbo);
		return ERR_PTR(ret);
	}

	ret = lsdc_bo_pin(lbo, domain, NULL);
	lsdc_bo_unreserve(lbo);
	if (unlikely(ret)) {
		lsdc_bo_unref(lbo);
		return ERR_PTR(ret);
	}

	return lbo;
}

void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
{
	int ret;

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret))
		return;

	lsdc_bo_unpin(lbo);
	lsdc_bo_unreserve(lbo);

	lsdc_bo_unref(lbo);
}

static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
{
	struct lsdc_device *ldev = data;

	ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);

	ttm_device_fini(&ldev->bdev);

	drm_dbg(ddev, "ttm finished\n");
}

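/*
 * Bring up the TTM device and its two range managers: one sized to the
 * detected VRAM, one for a fixed 512 MiB GTT window.  Teardown is
 * registered as a DRM managed action, so lsdc_ttm_fini() runs
 * automatically when the drm_device goes away.
 */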
int lsdc_ttm_init(struct lsdc_device *ldev)
{
	struct drm_device *ddev = &ldev->base;
	unsigned long num_vram_pages;
	unsigned long num_gtt_pages;
	int ret;

	ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
			      ddev->anon_inode->i_mapping,
			      ddev->vma_offset_manager, false, true);
	if (ret)
		return ret;

	num_vram_pages = ldev->vram_size >> PAGE_SHIFT;

	ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
	if (unlikely(ret))
		goto err_ttm_device_fini;

	drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);

	/* 512MiB of GTT is more than enough for now */
	ldev->gtt_size = 512 << 20;

	num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;

	ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
	if (unlikely(ret))
		goto err_vram_man_fini;

	drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);

	return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);

	/* Unwind manually, lsdc_ttm_fini() is not registered yet */
err_vram_man_fini:
	ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
err_ttm_device_fini:
	ttm_device_fini(&ldev->bdev);
	return ret;
}

void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
{
	struct ttm_device *bdev = &ldev->bdev;
	struct drm_device *ddev = &ldev->base;
	struct drm_minor *minor = ddev->primary;
	struct dentry *root = minor->debugfs_root;
	struct ttm_resource_manager *vram_man;
	struct ttm_resource_manager *gtt_man;

	vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
	gtt_man = ttm_manager_type(bdev, TTM_PL_TT);

	ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
	ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");
}