xref: /linux/drivers/gpu/drm/nouveau/dispnv50/wndw.c (revision b05d873808c77fedd25130b0355acc0da1c11e19)
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include "nouveau_bo.h"

static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}

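/* Look up a cached DMA object describing the framebuffer's VRAM placement,
 * or create and cache one if none exists yet.  Objects are keyed on the
 * buffer's memory kind, so framebuffers with identical tiling share them.
 */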
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}

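/* If a completion notifier was armed for this update, wait for it to signal
 * that the update has begun on the hardware.
 */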
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}

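/* Push methods disabling any window state (semaphore, notifier, LUT, image)
 * flagged for clearing, and record this window's interlock data for the
 * caller's update.  When "flush" is set, state that would later be
 * re-programmed is cleared anyway.
 */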
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}

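/* Push methods for any window state flagged for (re)programming, loading the
 * input LUT when one is attached, and record this window's interlock data
 * for the caller's update.
 */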
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut,
					      asyw->xlut.i.mode <= 1,
					      asyw->xlut.i.buffer,
					      asyw->ilut);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.point) {
		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	}

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}

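/* Request a completion notifier for the next update, toggling between two
 * offsets (0x10 apart) in the shared notifier buffer.
 */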
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}

static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}

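/* Translate the DRM fourcc of the new framebuffer into the hardware surface
 * format code programmed into the window.
 */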
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	return 0;
}

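/* Calculate the hardware state (format, tiling or pitch, scan-out mode and,
 * for windows with an immediate channel, position) needed to display the new
 * framebuffer, then hand it to the window implementation to validate.
 */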
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret)
			return ret;

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}

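/* Determine which input LUT (degamma, or legacy gamma when scanning out C8)
 * the window should use, and flag the LUT for reprogramming when it changes.
 */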
static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}

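/* Per-plane atomic check: fetch the assembly state of the heads involved,
 * work out whether the window will be visible, and calculate the hardware
 * state that must be set or cleared to get there.
 */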
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}

static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}

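/* Pin the new framebuffer into VRAM, look up (or create) the ctxdma object
 * used to display it, and record the buffer's exclusive fence so the flip
 * waits for rendering to complete before scan-out.
 */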
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
	if (IS_ERR(ctxdma)) {
		nouveau_bo_unpin(fb->nvbo);
		return PTR_ERR(ctxdma);
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.handle[0] = ctxdma->object.handle;
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}

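/* Duplicate the plane state, carrying over the hardware state of the armed
 * window (semaphore, notifier, LUT, image, position) and starting the new
 * state with empty set/clr masks.
 */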
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}

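/* Tear down a window: release cached ctxdma objects, the notify handler, the
 * window and immediate-update DMA channels and the input LUT, then
 * unregister the plane and free it.
 */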
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}

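/* Common constructor for all window types: allocate the nv50_wndw, register
 * it as a DRM plane with the supplied format list, and initialise the input
 * LUT where the window implementation supports one.
 */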
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}