xref: /linux/drivers/gpu/drm/nouveau/nouveau_sgdma.c (revision 33dea5aae0320345af26ae9aba0894a930e0d4ec)
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drv.h"
#include "nouveau_ttm.h"

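/*
 * Per-object state for TTM buffers backed by scattered system pages:
 * a DMA-aware ttm_tt plus the nvkm_mem node recorded at bind time so
 * the pre-Tesla path can unmap it again.
 */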
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c work properly, otherwise they would have to be
	 * moved here
	 */
	struct ttm_dma_tt ttm;
	struct nvkm_mem *node;
};

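/*
 * Release the DMA-aware ttm_tt state and free the wrapper allocated in
 * nouveau_sgdma_create_ttm().
 */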
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

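/*
 * Pre-Tesla bind: describe the backing pages in the nvkm_mem node
 * (an sg table for imported buffers, e.g. dma-buf, otherwise the DMA
 * address array built by TTM) and map them into the GPU VM immediately.
 */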
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = reg->mm_node;

	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* convert from CPU pages to bytes, then to 4KiB units */
	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;

	nvkm_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

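/* Pre-Tesla unbind: drop the VM mapping created by nv04_sgdma_bind(). */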
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nvkm_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

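/*
 * On Tesla and newer the GPU VM (un)mapping is driven from move_notify(),
 * so bind only records where the pages live and unbind is a no-op.
 */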
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = reg->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

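/*
 * Allocate the sgdma backend for a buffer object, select the
 * per-generation backend functions and initialise the DMA-aware ttm_tt.
 * Returns NULL on failure.
 */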
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	/* pre-Tesla chips bind at TTM bind time, Tesla+ in move_notify() */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}