xref: /linux/drivers/gpu/drm/nouveau/nouveau_sgdma.c (revision 172cdcaefea5c297fdb3d20b7d5aff60ae4fbce6)
// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
#include "nouveau_bo.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c work properly, otherwise they would have to move here
	 */
	struct ttm_tt ttm;
	struct nouveau_mem *mem;
};

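/*
 * Tear down the backend: unbind it if it is still bound, run the common
 * ttm_tt destruction, finish the embedded ttm_tt and free the wrapper.
 */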
void
nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		nouveau_sgdma_unbind(bdev, ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

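/*
 * Bind the ttm_tt's pages as a host memory object. On chipsets older than
 * Tesla the object is also mapped into the client's VMM right away.
 * Binding an already bound backend is a no-op.
 */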
int
nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	if (nvbe->mem)
		return 0;

	ret = nouveau_mem_host(reg, &nvbe->ttm);
	if (ret)
		return ret;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
		if (ret) {
			nouveau_mem_fini(mem);
			return ret;
		}
	}

	nvbe->mem = mem;
	return 0;
}

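/*
 * Undo nouveau_sgdma_bind(): release the host memory object, if any.
 */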
void
nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (nvbe->mem) {
		nouveau_mem_fini(nvbe->mem);
		nvbe->mem = NULL;
	}
}

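/*
 * Allocate the backend and initialise its scatter/gather ttm_tt. Caching
 * is derived from the buffer object: uncached when coherency is forced,
 * write-combined behind an AGP bridge, cached otherwise.
 */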
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_sgdma_be *nvbe;
	enum ttm_caching caching;

	if (nvbo->force_coherent)
		caching = ttm_uncached;
	else if (drm->agp.bridge)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm;
}
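
/*
 * A minimal sketch of how this backend is typically hooked up, assuming a
 * struct ttm_device_funcs table like the one nouveau registers elsewhere
 * (e.g. in nouveau_bo.c); the table name and its exact contents below are
 * illustrative, not copied from that file:
 *
 *	static struct ttm_device_funcs example_bo_driver = {
 *		.ttm_tt_create	= &nouveau_sgdma_create_ttm,
 *		.ttm_tt_destroy	= &nouveau_sgdma_destroy,
 *		...
 *	};
 *
 * nouveau_sgdma_bind()/unbind() are not ttm_device_funcs entries; they are
 * called directly from other nouveau code.
 */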