xref: /linux/drivers/gpu/drm/nouveau/dispnv50/wndw.c (revision 38a72243235ecf2c1359ce66ebed29a7dfb680f7)
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>

#include "nouveau_bo.h"

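/* Destroy a ctxdma: finalise its NVIF object, unlink it from the
 * window's ctxdma list, and free it.
 */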
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}

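/* Look up, or create, the ctxdma used to display a framebuffer through
 * this window.  The object handle encodes the buffer's memory kind
 * (0xfb000000 | kind), so buffers of the same kind share one ctxdma;
 * the DMA object arguments differ by chipset generation (<0x80, <0xc0,
 * <0xd0, and newer).
 */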
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}

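/* If this update armed a completion notifier, wait for the hardware to
 * signal that the update has begun.
 */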
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}

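/* Push the methods that disable window resources.  Unless flushing
 * (evicting) the window entirely, bits that are about to be programmed
 * again by nv50_wndw_flush_set() are skipped.
 */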
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}

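/* Push the methods that program new window state, and accumulate the
 * interlocks the update must synchronise with.  If the core channel is
 * part of the same update, the image update is forced to be a
 * non-async one (mode 0, interval 1).
 */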
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
					      asyw->ilut, asyw->xlut.i.load);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.point) {
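		/* Comma operator: clear set.point first, then test whether
		 * any other state still needs to be pushed via the window
		 * channel.
		 */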
		if (asyw->set.point = false, asyw->set.mask)
			interlock[wndw->interlock.type] |= wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}

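/* Arm a completion notifier for the next update.  wndw->ntfy alternates
 * between two slots 0x10 bytes apart, so consecutive updates never wait
 * on the same notifier memory.
 */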
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}

static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}

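/* Translate a packed-YUV framebuffer format to the hardware format
 * code; anything else reaching here is a driver bug (WARN_ON).
 */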
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}

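/* Translate an RGB (or C8 indexed) framebuffer format to the hardware
 * format code.  X and A variants of a format share the same code.
 */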
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8           : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888     :
	case DRM_FORMAT_ARGB8888     : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565       : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555     :
	case DRM_FORMAT_ARGB1555     : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010  :
	case DRM_FORMAT_ABGR2101010  : asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888     :
	case DRM_FORMAT_ABGR8888     : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010  :
	case DRM_FORMAT_ARGB2101010  : asyw->image.format = 0xdf; break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
	default:
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}

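/* Calculate the window state needed to display asyw->state.fb.  Image
 * parameters are only recalculated when the framebuffer changed, the
 * window was previously invisible, or a modeset forces a full
 * reprogram; scale and cursor position are compared against the armed
 * state to decide what needs pushing.
 */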
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}

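/* Recalculate the window's input LUT state, including the legacy-gamma
 * and C8 special cases documented inline below.
 */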
static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}

		if (wndw->func->ilut)
			asyh->wndw.olut |= BIT(wndw->id);
	} else {
		asyh->wndw.olut &= ~BIT(wndw->id);
	}

	if (!ilut && wndw->func->ilut_identity &&
	    asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
	    asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
		static struct drm_property_blob dummy = {};
		ilut = &dummy;
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	} else {
		asyw->clr.xlut = armw->xlut.handle != 0;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}

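/* Plane atomic_check: resolve the head states involved, then build the
 * set/clr masks that nv50_wndw_flush_set()/flush_clr() consume later.
 */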
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}

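/* Undo the VRAM pin taken by nv50_wndw_prepare_fb(). */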
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}

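/* Pin the new framebuffer into VRAM, look up the ctxdma to display it
 * through, and take the exclusive fence the atomic commit must wait on
 * before flipping to it.
 */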
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}

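/* Duplicate the currently-armed state as the starting point for a new
 * update; the set/clr masks begin empty.
 */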
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}

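/* Replace any existing plane state with freshly-zeroed state. */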
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}

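/* Tear down the window: drop all cached ctxdmas, the notifier, both
 * channels and the input LUT, then unregister the plane.
 */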
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

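/* NVIF notify handler; nothing to do beyond keeping the notifier active. */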
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

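/* nv50_wndw_fini()/nv50_wndw_init() disable and re-enable delivery of
 * the window's NVIF notify events.
 */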
void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}

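/* Common constructor for all window types: allocate the nv50_wndw,
 * register the DRM plane, and, where the hardware has one, allocate the
 * input LUT buffer.
 */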
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

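	/* Count the zero-terminated format list. */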
	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}

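/* Constructor for GV100-and-newer window planes.  nvif_mclass() picks
 * the first class in wndws[] that the display supports, most-preferred
 * first.
 */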
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}