xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/gsp.h>

#include <nvhw/drf.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>

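/*
 * GSP-RM-based framebuffer save/restore (FBSR) for instmem.
 *
 * On suspend, each VRAM region that must survive (preserved instance
 * memory objects, boot objects, and the GSP heap outside the WPR) is
 * described to GSP-RM, which saves its contents into a sysmem buffer
 * allocated here.  On resume, RM restores VRAM itself and the sysmem
 * buffer is simply freed again.
 */

/* A single VRAM region to be saved across suspend. */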
struct fbsr_item {
	const char *type;
	u64 addr;
	u64 size;

	struct list_head head;
};

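/*
 * Per-suspend FBSR state: the list of regions to save, their combined
 * size and count, the RM client/device used for the control calls, the
 * next object handle to hand out (hmemory), and the running offset into
 * the sysmem backup buffer (sys_offset).
 */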
struct fbsr {
	struct list_head items;

	u64 size;
	int regions;

	struct nvkm_gsp_client client;
	struct nvkm_gsp_device device;

	u64 hmemory;
	u64 sys_offset;
};

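/*
 * Build a NV01_MEMORY_LIST_* object via the ALLOC_MEMORY RPC, describing
 * either a contiguous VRAM range (phys/size) or the pages of a sysmem
 * sg_table, with one PTE entry per GSP page.
 */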
static int
fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
	     u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;
	const u32 pages = size / GSP_PAGE_SIZE;
	rpc_alloc_memory_v13_01 *rpc;
	int ret;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
			       sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	rpc->hClient = client->object.handle;
	rpc->hDevice = device->object.handle;
	rpc->hMemory = handle;
	if (aper == NVKM_MEM_TARGET_HOST) {
		rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
		rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
			     NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
			     NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
	} else {
		rpc->hClass = NV01_MEMORY_LIST_FBMEM;
		rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
			     NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
			     NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
		rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
	}
	rpc->pteAdjust = 0;
	rpc->length = size;
	rpc->pageCount = pages;
	rpc->pteDesc.idr = 0;
	rpc->pteDesc.reserved1 = 0;
	rpc->pteDesc.length = pages;

	if (sgt) {
		struct scatterlist *sgl;
		int pte = 0, idx;

		for_each_sgtable_dma_sg(sgt, sgl, idx) {
			for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
				rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
		}
	} else {
		for (int i = 0; i < pages; i++)
			rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
	}

	ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
	if (ret)
		return ret;

	object->client = device->object.client;
	object->parent = &device->object;
	object->handle = handle;
	return 0;
}

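/*
 * Register one VRAM region with RM's FBSR: wrap it in a temporary memlist
 * object and pass it to FBSR_SEND_REGION_INFO along with its offset into
 * the sysmem backup buffer.
 */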
static int
fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
{
	NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fbsr->client.gsp;
	struct nvkm_gsp_object memlist;
	int ret;

	ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
			   item->addr, item->size, NULL, &memlist);
	if (ret)
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
				    sizeof(*ctrl));
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto done;
	}

	ctrl->fbsrType = FBSR_TYPE_DMA;
	ctrl->hClient = fbsr->client.object.handle;
	ctrl->hVidMem = fbsr->hmemory++;
	ctrl->vidOffset = 0;
	ctrl->sysOffset = fbsr->sys_offset;
	ctrl->size = item->size;

	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
done:
	nvkm_gsp_rm_free(&memlist);
	if (ret)
		return ret;

	fbsr->sys_offset += item->size;
	return 0;
}

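/*
 * Start an FBSR cycle: wrap the sysmem backup buffer in a memlist object
 * and issue FBSR_INIT, telling RM how many regions will follow and where
 * RM's own FB allocations should be placed within the buffer.
 */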
static int
fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
{
	NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fbsr->client.gsp;
	struct nvkm_gsp_object memlist;
	int ret;

	ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
			   0, fbsr->size, sgt, &memlist);
	if (ret)
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto done;
	}

	ctrl->fbsrType = FBSR_TYPE_DMA;
	ctrl->numRegions = fbsr->regions;
	ctrl->hClient = fbsr->client.object.handle;
	ctrl->hSysMem = fbsr->hmemory++;
	ctrl->gspFbAllocsSysOffset = items_size;

	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
done:
	nvkm_gsp_rm_free(&memlist);
	return ret;
}

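/* Queue a VRAM range for saving.  Returns false on allocation failure. */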
static bool
fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
{
	struct fbsr_item *item;

	if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
		return false;

	item->type = type;
	item->addr = addr;
	item->size = size;
	list_add_tail(&item->head, &fbsr->items);
	return true;
}

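/* Queue an instance memory object's backing VRAM for saving. */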
static bool
fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
{
	return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}

static void
r535_instmem_resume(struct nvkm_instmem *imem)
{
	/* RM has restored VRAM contents already, so we just need to free the sysmem buffer. */
	if (imem->rm.fbsr_valid) {
		nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
		imem->rm.fbsr_valid = false;
	}
}

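/*
 * Suspend: list the VRAM regions to preserve, size and allocate the sysmem
 * backup buffer (with extra room for RM's reserved memory and the VGA
 * workspace), then create a temporary RM client/device and issue FBSR_INIT
 * followed by one SEND_REGION_INFO per region.  The backup buffer itself
 * is kept until resume.
 */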
static int
r535_instmem_suspend(struct nvkm_instmem *imem)
{
	struct nvkm_subdev *subdev = &imem->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_instobj *iobj;
	struct fbsr fbsr = {};
	struct fbsr_item *item, *temp;
	u64 items_size;
	int ret;

	INIT_LIST_HEAD(&fbsr.items);
	fbsr.hmemory = 0xcaf00003;

	/* Create a list of all regions we need RM to save during suspend. */
	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->preserve) {
			if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) {
				ret = -ENOMEM;
				goto done;
			}
		}
	}

	list_for_each_entry(iobj, &imem->boot, head) {
		if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) {
			ret = -ENOMEM;
			goto done;
		}
	}

	if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) {
		ret = -ENOMEM;
		goto done;
	}

	/* Determine memory requirements. */
	list_for_each_entry(item, &fbsr.items, head) {
		nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
			   item->addr, item->size, item->type);
		fbsr.size += item->size;
		fbsr.regions++;
	}

	items_size = fbsr.size;
	nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);

	fbsr.size += gsp->fb.rsvd_size;
	fbsr.size += gsp->fb.bios.vga_workspace.size;
	nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);

	ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
	if (ret)
		goto done;

	/* Tell RM about the sysmem which will hold VRAM contents across suspend. */
	ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
	if (ret)
		goto done_sgt;

	ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
	if (WARN_ON(ret))
		goto done_sgt;

	/* Send VRAM regions that need saving. */
	list_for_each_entry(item, &fbsr.items, head) {
		ret = fbsr_send(&fbsr, item);
		if (WARN_ON(ret))
			goto done_sgt;
	}

	imem->rm.fbsr_valid = true;

	/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
	if (ret) /* ... unless we failed already. */
		nvkm_gsp_sg_free(device, &imem->rm.fbsr);
done:
	list_for_each_entry_safe(item, temp, &fbsr.items, head) {
		list_del(&item->head);
		kfree(item);
	}

	nvkm_gsp_device_dtor(&fbsr.device);
	nvkm_gsp_client_dtor(&fbsr.client);
	return ret;
}

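/* Free the nvkm_instmem_func allocated by r535_instmem_new(). */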
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
	kfree(imem->func);
	return imem;
}

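/*
 * Create an instmem backend which reuses the HW implementation for object
 * allocation, but routes suspend/resume through GSP-RM's FBSR and leaves
 * ->zero disabled.
 */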
int
r535_instmem_new(const struct nvkm_instmem_func *hw,
		 struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		 struct nvkm_instmem **pinstmem)
{
	struct nvkm_instmem_func *rm;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_instmem_dtor;
	rm->fini = hw->fini;
	rm->suspend = r535_instmem_suspend;
	rm->resume  = r535_instmem_resume;
	rm->memory_new = hw->memory_new;
	rm->memory_wrap = hw->memory_wrap;
	rm->zero = false;

	ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
	if (ret)
		kfree(rm);

	return ret;
}