xref: /linux/drivers/gpu/drm/nouveau/dispnv50/wndw.c (revision ccd27db8c731817ef36e75de2b5fdc2e79550213)
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"

static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}

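/* Find an existing DMA context object matching this framebuffer's memory
 * kind, or create a new one.  The object handle encodes the kind
 * (0xfb000000 | kind), so one ctxdma can be shared by every framebuffer
 * with the same layout; the creation arguments vary by chipset generation.
 */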
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}

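/* If this update armed a notifier, wait for the hardware to signal that it
 * has begun processing the new configuration.
 */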
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}

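/* Push "clear" methods for state being torn down.  A clear is skipped when
 * the same state is about to be set again in this commit, unless a full
 * flush is forced, in which case the window update is also kicked off.
 */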
u32
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	if (asyw->clr.sema && (!asyw->set.sema || flush))
		wndw->func->sema_clr(wndw);
	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
		wndw->func->ntfy_clr(wndw);
	if (asyw->clr.image && (!asyw->set.image || flush))
		wndw->func->image_clr(wndw);

	return flush ? wndw->func->update(wndw, interlock) : 0;
}

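/* Push all pending state (semaphore, notifier, image, LUT, cursor position)
 * to the window's channel and kick an update.  When an interlock is
 * requested, the image is forced to mode 0 / interval 1, presumably so the
 * flip is synchronised with the interlocked update rather than immediate.
 */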
u32
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
	if (asyw->set.point) {
		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	}

	return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
}

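/* Arm the completion notifier for the next update: point it at this window's
 * slot in the shared sync buffer, reset that slot, then toggle the offset
 * (^ 0x10) so consecutive updates alternate between two notifier slots.
 */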
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}

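/* The plane is being disabled: let the window-specific code release its
 * resources and drop the semaphore/notifier handles from the new state.
 */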
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}

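/* Fill in the image parameters from the new framebuffer: size, memory kind,
 * tiling block/pitch (layout 0 appears to mean block-linear, 1 linear) and
 * the flip interval (0 for async page flips), then let the window-specific
 * acquire hook validate the configuration.
 */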
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	asyw->image.w = fb->base.width;
	asyw->image.h = fb->base.height;
	asyw->image.kind = fb->nvbo->kind;

	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
		asyw->interval = 0;
	else
		asyw->interval = 1;

	if (asyw->image.kind) {
		asyw->image.layout = 0;
		if (drm->client.device.info.chipset >= 0xc0)
			asyw->image.block = fb->nvbo->mode >> 4;
		else
			asyw->image.block = fb->nvbo->mode;
		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
	} else {
		asyw->image.layout = 1;
		asyw->image.block  = 0;
		asyw->image.pitch  = fb->base.pitches[0];
	}

	ret = wndw->func->acquire(wndw, asyw, asyh);
	if (ret)
		return ret;

	if (asyw->set.image) {
		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
			asyw->image.interval = asyw->interval;
		else
			asyw->image.interval = 0;
	}

	return 0;
}

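/* Atomic check for a window plane: look up the new and old head state,
 * decide whether the plane is being acquired (visible) or released, and on
 * a modeset or disable work out which hardware state must be cleared.
 */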
int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool varm = false, asyv = false, asym = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyv = asyh->state.active;
	}

	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
		varm = harm->state.crtc->state->active;
	}

	if (asyv) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;

		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
		if (ret)
			return ret;
	} else
	if (varm) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
	} else {
		return 0;
	}

	if (!asyv || asym) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle != 0;
		asyw->set.lut = wndw->func->lut && asyv;
	}

	return 0;
}

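/* Undo prepare_fb: unpin the framebuffer backing the old plane state. */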
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}

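/* Pin the new framebuffer into VRAM, look up (or create) a ctxdma for it,
 * and record the exclusive fence plus image handle/offset in the atomic
 * state.  Windows that implement ->prepare also get called with the new
 * head state.
 */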
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
	if (IS_ERR(ctxdma)) {
		nouveau_bo_unpin(fb->nvbo);
		return PTR_ERR(ctxdma);
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.handle = ctxdma->object.handle;
	asyw->image.offset = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}

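/* Duplicate the plane state for a new commit, carrying over the
 * currently-programmed hardware state (sema/ntfy/image/point/lut) and
 * starting with empty set/clear masks.
 */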
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->interval = 1;
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->lut = armw->lut;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}

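/* Plane destructor: release every ctxdma, the notifier and the wimm/wndw
 * channels before freeing the window.
 */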
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);
	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}

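/* Common constructor for window planes: allocate the nv50_wndw, count the
 * zero-terminated format list, register a universal DRM plane, and hook up
 * the helper funcs and the notifier handler.  New ctxdmas will be parented
 * to the wndw channel's user object.
 */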
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, struct nv50_wndw **pwndw)
{
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}
453