xref: /linux/drivers/gpu/drm/vc4/vc4_plane.c (revision f85f5ae45ad945270a8884261de8249431e8b5a6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Broadcom
4  */
5 
6 /**
7  * DOC: VC4 plane module
8  *
9  * Each DRM plane is a layer of pixels being scanned out by the HVS.
10  *
11  * At atomic modeset check time, we compute the HVS display element
12  * state that would be necessary for displaying the plane (giving us a
13  * chance to figure out if a plane configuration is invalid), then at
14  * atomic flush time the CRTC will ask us to write our element state
15  * into the region of the HVS that it has allocated for us.
16  */
17 
18 #include <drm/drm_atomic.h>
19 #include <drm/drm_atomic_helper.h>
20 #include <drm/drm_atomic_uapi.h>
21 #include <drm/drm_blend.h>
22 #include <drm/drm_drv.h>
23 #include <drm/drm_fb_dma_helper.h>
24 #include <drm/drm_fourcc.h>
25 #include <drm/drm_framebuffer.h>
26 #include <drm/drm_gem_atomic_helper.h>
27 
28 #include "uapi/drm/vc4_drm.h"
29 
30 #include "vc4_drv.h"
31 #include "vc4_regs.h"
32 
/* Table mapping DRM fourcc codes to the matching HVS hardware format
 * descriptors.  Consulted at mode-set time via vc4_get_hvs_format();
 * a format absent from this table is not scanout-capable on this HVS.
 */
static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_FORMAT_* */
	u32 pixel_order; /* HVS_PIXEL_ORDER_* used on HVS4 (BCM2835..2837) */
	u32 pixel_order_hvs5; /* HVS5 (BCM2711) encodes component order differently */
	bool hvs5_only; /* true when only the HVS5 can scan this format out */
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ARGB8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ABGR8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XBGR8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_RGB565,
		.hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR565,
		.hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_ARGB1555,
		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XRGB1555,
		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_RGB888,
		.hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR888,
		.hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV21,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV61,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	/* 10-bit and 30-bit formats below exist only on HVS5; note they
	 * leave .pixel_order at 0 since HVS4 never sees them.
	 */
	{
		.drm = DRM_FORMAT_P030,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_XRGB2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_ARGB2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_ABGR2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_XBGR2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_RGB332,
		.hvs = HVS_PIXEL_FORMAT_RGB332,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_BGR233,
		.hvs = HVS_PIXEL_FORMAT_RGB332,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XRGB4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ARGB4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XBGR4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ABGR4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_BGRX4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_RGBA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
	},
	{
		.drm = DRM_FORMAT_BGRA4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_RGBA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
	},
	{
		.drm = DRM_FORMAT_RGBX4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_BGRA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
	},
	{
		.drm = DRM_FORMAT_RGBA4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_BGRA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
	},
};
239 
240 static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
241 {
242 	unsigned i;
243 
244 	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
245 		if (hvs_formats[i].drm == drm_format)
246 			return &hvs_formats[i];
247 	}
248 
249 	return NULL;
250 }
251 
252 static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
253 {
254 	if (dst == src)
255 		return VC4_SCALING_NONE;
256 	if (3 * dst >= 2 * src)
257 		return VC4_SCALING_PPF;
258 	else
259 		return VC4_SCALING_TPZ;
260 }
261 
262 static bool plane_enabled(struct drm_plane_state *state)
263 {
264 	return state->fb && !WARN_ON(!state->crtc);
265 }
266 
/* Duplicates the plane state for an atomic commit.
 *
 * The whole vc4_plane_state (base DRM state plus vc4 private fields) is
 * shallow-copied first, then the pieces that must not be shared with
 * the old state are reset or deep-copied: the LBM node (owned by the
 * old state until it is destroyed) and the dlist buffer.
 *
 * Returns the new base state pointer, or NULL on allocation failure.
 */
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	if (WARN_ON(!plane->state))
		return NULL;

	vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return NULL;

	/* The LBM allocation still belongs to the old state: clear the
	 * copied drm_mm node so this state doesn't free it.  The dlist
	 * must also be regenerated for the new state.
	 */
	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
	vc4_state->dlist_initialized = 0;

	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

	/* Deep-copy the dlist so old and new state don't share (and
	 * double-free) the buffer.  Entries are u32s, hence the * 4.
	 */
	if (vc4_state->dlist) {
		vc4_state->dlist = kmemdup(vc4_state->dlist,
					   vc4_state->dlist_count * 4,
					   GFP_KERNEL);
		if (!vc4_state->dlist) {
			kfree(vc4_state);
			return NULL;
		}
		/* The fresh copy is exactly dlist_count entries long. */
		vc4_state->dlist_size = vc4_state->dlist_count;
	}

	return &vc4_state->base;
}
296 
/* Frees a plane state: returns its LBM allocation to the HVS LBM pool
 * (under the HVS mm_lock, since the pool is shared across planes and
 * may be touched from interrupt context) and releases the dlist buffer.
 */
static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (drm_mm_node_allocated(&vc4_state->lbm)) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}
315 
316 /* Called during init to allocate the plane's atomic state. */
317 static void vc4_plane_reset(struct drm_plane *plane)
318 {
319 	struct vc4_plane_state *vc4_state;
320 
321 	WARN_ON(plane->state);
322 
323 	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
324 	if (!vc4_state)
325 		return;
326 
327 	__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
328 }
329 
330 static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
331 {
332 	if (vc4_state->dlist_count == vc4_state->dlist_size) {
333 		u32 new_size = max(4u, vc4_state->dlist_count * 2);
334 		u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);
335 
336 		if (!new_dlist)
337 			return;
338 		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
339 
340 		kfree(vc4_state->dlist);
341 		vc4_state->dlist = new_dlist;
342 		vc4_state->dlist_size = new_size;
343 	}
344 
345 	vc4_state->dlist_count++;
346 }
347 
348 static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
349 {
350 	unsigned int idx = vc4_state->dlist_count;
351 
352 	vc4_dlist_counter_increment(vc4_state);
353 	vc4_state->dlist[idx] = val;
354 }
355 
/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 *
 * The switch key packs the horizontal mode in bits [3:2] and the
 * vertical mode in bits [1:0].
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
		return 0;
	}
}
390 
/* Rescales the plane's CRTC rectangle so it fits inside the display
 * area left over after the connector's overscan margins are applied.
 * Position and size are scaled proportionally from the full mode to the
 * margin-adjusted area, then the position is offset by the left/top
 * margin and clamped to the right/bottom edge.
 *
 * Returns 0 on success, -EINVAL when the margins consume the whole
 * display or the scaled plane collapses to zero width/height.
 */
static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
{
	struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
	unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
						   pstate->crtc);

	vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
	/* No margins configured: nothing to adjust. */
	if (!left && !right && !top && !bottom)
		return 0;

	if (left + right >= crtc_state->mode.hdisplay ||
	    top + bottom >= crtc_state->mode.vdisplay)
		return -EINVAL;

	/* Scale the X position into the margin-adjusted width, then
	 * shift by the left margin and clamp to the visible area.
	 */
	adjhdisplay = crtc_state->mode.hdisplay - (left + right);
	vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
					       adjhdisplay,
					       crtc_state->mode.hdisplay);
	vc4_pstate->crtc_x += left;
	if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
		vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;

	/* Same treatment for the Y position. */
	adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
	vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
					       adjvdisplay,
					       crtc_state->mode.vdisplay);
	vc4_pstate->crtc_y += top;
	if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
		vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;

	/* Scale the plane size by the same horizontal/vertical ratios. */
	vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
					       adjhdisplay,
					       crtc_state->mode.hdisplay);
	vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
					       adjvdisplay,
					       crtc_state->mode.vdisplay);

	if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
		return -EINVAL;

	return 0;
}
436 
/* Validates the plane's clipping against the CRTC, caches the per-plane
 * scanout addresses, converts the 16.16 fixed-point source rectangle to
 * integer pixels, applies overscan margins, and derives the per-channel
 * (luma/chroma) scaling modes used by the rest of mode-set.
 *
 * Returns 0 on success or a negative errno on invalid configuration.
 */
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_dma_object *bo;
	int num_planes = fb->format->num_planes;
	struct drm_crtc_state *crtc_state;
	u32 h_subsample = fb->format->hsub;
	u32 v_subsample = fb->format->vsub;
	int i, ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	if (!crtc_state) {
		DRM_DEBUG_KMS("Invalid crtc state\n");
		return -EINVAL;
	}

	/* Allow arbitrary scaling (1 .. INT_MAX) and both disabling and
	 * full clipping of the plane.
	 */
	ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
						  INT_MAX, true, true);
	if (ret)
		return ret;

	/* Record the DMA address of each color plane's start of data. */
	for (i = 0; i < num_planes; i++) {
		bo = drm_fb_dma_get_gem_obj(fb, i);
		vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
	}

	/*
	 * We don't support subpixel source positioning for scaling,
	 * but fractional coordinates can be generated by clipping
	 * so just round for now
	 */
	vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
	vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
	vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
	vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;

	vc4_state->crtc_x = state->dst.x1;
	vc4_state->crtc_y = state->dst.y1;
	vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
	vc4_state->crtc_h = state->dst.y2 - state->dst.y1;

	/* Shrink/offset the destination rect for overscan margins. */
	ret = vc4_plane_margins_adj(state);
	if (ret)
		return ret;

	/* Channel 0 carries the luma (or the only) plane. */
	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
						       vc4_state->crtc_w);
	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
						       vc4_state->crtc_h);

	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[0] == VC4_SCALING_NONE);

	if (num_planes > 1) {
		vc4_state->is_yuv = true;

		/* Chroma source size accounts for format subsampling. */
		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

		vc4_state->x_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_w[1],
					     vc4_state->crtc_w);
		vc4_state->y_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_h[1],
					     vc4_state->crtc_h);

		/* YUV conversion requires that horizontal scaling be enabled
		 * on the UV plane even if vc4_get_scaling_mode() returned
		 * VC4_SCALING_NONE (which can happen when the down-scaling
		 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
		 * case.
		 */
		if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
			vc4_state->x_scaling[1] = VC4_SCALING_PPF;
	} else {
		vc4_state->is_yuv = false;
		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
	}

	return 0;
}
521 
522 static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
523 {
524 	u32 scale, recip;
525 
526 	scale = (1 << 16) * src / dst;
527 
528 	/* The specs note that while the reciprocal would be defined
529 	 * as (1<<32)/scale, ~0 is close enough.
530 	 */
531 	recip = ~0 / scale;
532 
533 	vc4_dlist_write(vc4_state,
534 			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
535 			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
536 	vc4_dlist_write(vc4_state,
537 			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
538 }
539 
540 static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
541 {
542 	u32 scale = (1 << 16) * src / dst;
543 
544 	vc4_dlist_write(vc4_state,
545 			SCALER_PPF_AGC |
546 			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
547 			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
548 }
549 
550 static u32 vc4_lbm_size(struct drm_plane_state *state)
551 {
552 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
553 	struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
554 	u32 pix_per_line;
555 	u32 lbm;
556 
557 	/* LBM is not needed when there's no vertical scaling. */
558 	if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
559 	    vc4_state->y_scaling[1] == VC4_SCALING_NONE)
560 		return 0;
561 
562 	/*
563 	 * This can be further optimized in the RGB/YUV444 case if the PPF
564 	 * decimation factor is between 0.5 and 1.0 by using crtc_w.
565 	 *
566 	 * It's not an issue though, since in that case since src_w[0] is going
567 	 * to be greater than or equal to crtc_w.
568 	 */
569 	if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
570 		pix_per_line = vc4_state->crtc_w;
571 	else
572 		pix_per_line = vc4_state->src_w[0];
573 
574 	if (!vc4_state->is_yuv) {
575 		if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
576 			lbm = pix_per_line * 8;
577 		else {
578 			/* In special cases, this multiplier might be 12. */
579 			lbm = pix_per_line * 16;
580 		}
581 	} else {
582 		/* There are cases for this going down to a multiplier
583 		 * of 2, but according to the firmware source, the
584 		 * table in the docs is somewhat wrong.
585 		 */
586 		lbm = pix_per_line * 16;
587 	}
588 
589 	/* Align it to 64 or 128 (hvs5) bytes */
590 	lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
591 
592 	/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
593 	lbm /= vc4->is_vc5 ? 4 : 2;
594 
595 	return lbm;
596 }
597 
598 static void vc4_write_scaling_parameters(struct drm_plane_state *state,
599 					 int channel)
600 {
601 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
602 
603 	/* Ch0 H-PPF Word 0: Scaling Parameters */
604 	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
605 		vc4_write_ppf(vc4_state,
606 			      vc4_state->src_w[channel], vc4_state->crtc_w);
607 	}
608 
609 	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
610 	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
611 		vc4_write_ppf(vc4_state,
612 			      vc4_state->src_h[channel], vc4_state->crtc_h);
613 		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
614 	}
615 
616 	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
617 	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
618 		vc4_write_tpz(vc4_state,
619 			      vc4_state->src_w[channel], vc4_state->crtc_w);
620 	}
621 
622 	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
623 	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
624 		vc4_write_tpz(vc4_state,
625 			      vc4_state->src_h[channel], vc4_state->crtc_h);
626 		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
627 	}
628 }
629 
/* Estimates the HVS clock load (hvs_load, in cycles/sec) and the memory
 * bus read bandwidth (membus_load, in bytes/sec) this plane will demand,
 * so the core driver can pick clock rates / reject over-committed
 * configurations.
 */
static void vc4_plane_calc_load(struct drm_plane_state *state)
{
	unsigned int hvs_load_shift, vrefresh, i;
	struct drm_framebuffer *fb = state->fb;
	struct vc4_plane_state *vc4_state;
	struct drm_crtc_state *crtc_state;
	unsigned int vscale_factor;

	vc4_state = to_vc4_plane_state(state);
	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);

	/* The HVS is able to process 2 pixels/cycle when scaling the source,
	 * 4 pixels/cycle otherwise.
	 * Alpha blending step seems to be pipelined and it's always operating
	 * at 4 pixels/cycle, so the limiting aspect here seems to be the
	 * scaler block.
	 * HVS load is expressed in clk-cycles/sec (AKA Hz).
	 */
	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
		hvs_load_shift = 1;
	else
		hvs_load_shift = 2;

	vc4_state->membus_load = 0;
	vc4_state->hvs_load = 0;
	for (i = 0; i < fb->format->num_planes; i++) {
		/* Even if the bandwidth/plane required for a single frame is
		 *
		 * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
		 *
		 * when downscaling, we have to read more pixels per line in
		 * the time frame reserved for a single line, so the bandwidth
		 * demand can be punctually higher. To account for that, we
		 * calculate the down-scaling factor and multiply the plane
		 * load by this number. We're likely over-estimating the read
		 * demand, but that's better than under-estimating it.
		 */
		vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
					     vc4_state->crtc_h);
		vc4_state->membus_load += vc4_state->src_w[i] *
					  vc4_state->src_h[i] * vscale_factor *
					  fb->format->cpp[i];
		/* HVS processing cost scales with the output rectangle. */
		vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
	}

	/* Per-frame totals become per-second rates; the shift applies
	 * the 2 or 4 pixels/cycle throughput computed above.
	 */
	vc4_state->hvs_load *= vrefresh;
	vc4_state->hvs_load >>= hvs_load_shift;
	vc4_state->membus_load *= vrefresh;
}
684 
/* Allocates this state's share of the HVS line buffer memory (LBM) and
 * patches the allocation's start address into the already-built dlist
 * at lbm_offset.
 *
 * Returns 0 on success (including when no LBM is needed), or a negative
 * errno when the dlist has no LBM slot or the LBM pool is exhausted.
 */
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	unsigned long irqflags;
	u32 lbm_size;

	lbm_size = vc4_lbm_size(state);
	if (!lbm_size)
		return 0;

	/* lbm_offset == 0 means the dlist was built without reserving a
	 * slot for the LBM pointer, so there is nowhere to patch it in.
	 */
	if (WARN_ON(!vc4_state->lbm_offset))
		return -EINVAL;

	/* Allocate the LBM memory that the HVS will use for temporary
	 * storage due to our scaling/format conversion.
	 */
	if (!drm_mm_node_allocated(&vc4_state->lbm)) {
		int ret;

		/* The LBM pool is shared; take the HVS mm_lock around
		 * the allocator.
		 */
		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
						 &vc4_state->lbm,
						 lbm_size,
						 vc4->is_vc5 ? 64 : 32,
						 0, 0);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);

		if (ret)
			return ret;
	} else {
		/* Re-running with an existing allocation: the required
		 * size must not have changed.
		 */
		WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
	}

	vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;

	return 0;
}
723 
724 /*
725  * The colorspace conversion matrices are held in 3 entries in the dlist.
726  * Create an array of them, with entries for each full and limited mode, and
727  * each supported colorspace.
728  */
/* Indexed as [range][encoding][word]: range is 0 for limited, 1 for
 * full; encoding follows the DRM_COLOR_YCBCR_* order; the three words
 * are the SCALER_CSC0/1/2 dlist entries.
 */
static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
	{
		/* Limited range */
		{
			/* BT601 */
			SCALER_CSC0_ITR_R_601_5,
			SCALER_CSC1_ITR_R_601_5,
			SCALER_CSC2_ITR_R_601_5,
		}, {
			/* BT709 */
			SCALER_CSC0_ITR_R_709_3,
			SCALER_CSC1_ITR_R_709_3,
			SCALER_CSC2_ITR_R_709_3,
		}, {
			/* BT2020 */
			SCALER_CSC0_ITR_R_2020,
			SCALER_CSC1_ITR_R_2020,
			SCALER_CSC2_ITR_R_2020,
		}
	}, {
		/* Full range */
		{
			/* JFIF */
			SCALER_CSC0_JPEG_JFIF,
			SCALER_CSC1_JPEG_JFIF,
			SCALER_CSC2_JPEG_JFIF,
		}, {
			/* BT709 */
			SCALER_CSC0_ITR_R_709_3_FR,
			SCALER_CSC1_ITR_R_709_3_FR,
			SCALER_CSC2_ITR_R_709_3_FR,
		}, {
			/* BT2020 */
			SCALER_CSC0_ITR_R_2020_FR,
			SCALER_CSC1_ITR_R_2020_FR,
			SCALER_CSC2_ITR_R_2020_FR,
		}
	}
};
768 
769 static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
770 {
771 	if (!state->fb->format->has_alpha)
772 		return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
773 				     SCALER_POS2_ALPHA_MODE);
774 
775 	switch (state->pixel_blend_mode) {
776 	case DRM_MODE_BLEND_PIXEL_NONE:
777 		return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
778 				     SCALER_POS2_ALPHA_MODE);
779 	default:
780 	case DRM_MODE_BLEND_PREMULTI:
781 		return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
782 				     SCALER_POS2_ALPHA_MODE) |
783 			SCALER_POS2_ALPHA_PREMULT;
784 	case DRM_MODE_BLEND_COVERAGE:
785 		return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
786 				     SCALER_POS2_ALPHA_MODE);
787 	}
788 }
789 
790 static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
791 {
792 	if (!state->fb->format->has_alpha)
793 		return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
794 				     SCALER5_CTL2_ALPHA_MODE);
795 
796 	switch (state->pixel_blend_mode) {
797 	case DRM_MODE_BLEND_PIXEL_NONE:
798 		return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
799 				     SCALER5_CTL2_ALPHA_MODE);
800 	default:
801 	case DRM_MODE_BLEND_PREMULTI:
802 		return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
803 				     SCALER5_CTL2_ALPHA_MODE) |
804 			SCALER5_CTL2_ALPHA_PREMULT;
805 	case DRM_MODE_BLEND_COVERAGE:
806 		return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
807 				     SCALER5_CTL2_ALPHA_MODE);
808 	}
809 }
810 
811 /* Writes out a full display list for an active plane to the plane's
812  * private dlist state.
813  */
814 static int vc4_plane_mode_set(struct drm_plane *plane,
815 			      struct drm_plane_state *state)
816 {
817 	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
818 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
819 	struct drm_framebuffer *fb = state->fb;
820 	u32 ctl0_offset = vc4_state->dlist_count;
821 	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
822 	u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
823 	int num_planes = fb->format->num_planes;
824 	u32 h_subsample = fb->format->hsub;
825 	u32 v_subsample = fb->format->vsub;
826 	bool mix_plane_alpha;
827 	bool covers_screen;
828 	u32 scl0, scl1, pitch0;
829 	u32 tiling, src_y;
830 	u32 hvs_format = format->hvs;
831 	unsigned int rotation;
832 	int ret, i;
833 
834 	if (vc4_state->dlist_initialized)
835 		return 0;
836 
837 	ret = vc4_plane_setup_clipping_and_scaling(state);
838 	if (ret)
839 		return ret;
840 
841 	/* SCL1 is used for Cb/Cr scaling of planar formats.  For RGB
842 	 * and 4:4:4, scl1 should be set to scl0 so both channels of
843 	 * the scaler do the same thing.  For YUV, the Y plane needs
844 	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
845 	 * the scl fields here.
846 	 */
847 	if (num_planes == 1) {
848 		scl0 = vc4_get_scl_field(state, 0);
849 		scl1 = scl0;
850 	} else {
851 		scl0 = vc4_get_scl_field(state, 1);
852 		scl1 = vc4_get_scl_field(state, 0);
853 	}
854 
855 	rotation = drm_rotation_simplify(state->rotation,
856 					 DRM_MODE_ROTATE_0 |
857 					 DRM_MODE_REFLECT_X |
858 					 DRM_MODE_REFLECT_Y);
859 
860 	/* We must point to the last line when Y reflection is enabled. */
861 	src_y = vc4_state->src_y;
862 	if (rotation & DRM_MODE_REFLECT_Y)
863 		src_y += vc4_state->src_h[0] - 1;
864 
865 	switch (base_format_mod) {
866 	case DRM_FORMAT_MOD_LINEAR:
867 		tiling = SCALER_CTL0_TILING_LINEAR;
868 		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
869 
870 		/* Adjust the base pointer to the first pixel to be scanned
871 		 * out.
872 		 */
873 		for (i = 0; i < num_planes; i++) {
874 			vc4_state->offsets[i] += src_y /
875 						 (i ? v_subsample : 1) *
876 						 fb->pitches[i];
877 
878 			vc4_state->offsets[i] += vc4_state->src_x /
879 						 (i ? h_subsample : 1) *
880 						 fb->format->cpp[i];
881 		}
882 
883 		break;
884 
885 	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
886 		u32 tile_size_shift = 12; /* T tiles are 4kb */
887 		/* Whole-tile offsets, mostly for setting the pitch. */
888 		u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
889 		u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
890 		u32 tile_w_mask = (1 << tile_w_shift) - 1;
891 		/* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
892 		 * the height (in pixels) of a 4k tile.
893 		 */
894 		u32 tile_h_mask = (2 << tile_h_shift) - 1;
895 		/* For T-tiled, the FB pitch is "how many bytes from one row to
896 		 * the next, such that
897 		 *
898 		 *	pitch * tile_h == tile_size * tiles_per_row
899 		 */
900 		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
901 		u32 tiles_l = vc4_state->src_x >> tile_w_shift;
902 		u32 tiles_r = tiles_w - tiles_l;
903 		u32 tiles_t = src_y >> tile_h_shift;
904 		/* Intra-tile offsets, which modify the base address (the
905 		 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
906 		 * base address).
907 		 */
908 		u32 tile_y = (src_y >> 4) & 1;
909 		u32 subtile_y = (src_y >> 2) & 3;
910 		u32 utile_y = src_y & 3;
911 		u32 x_off = vc4_state->src_x & tile_w_mask;
912 		u32 y_off = src_y & tile_h_mask;
913 
914 		/* When Y reflection is requested we must set the
915 		 * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
916 		 * after the initial one should be fetched in descending order,
917 		 * which makes sense since we start from the last line and go
918 		 * backward.
919 		 * Don't know why we need y_off = max_y_off - y_off, but it's
920 		 * definitely required (I guess it's also related to the "going
921 		 * backward" situation).
922 		 */
923 		if (rotation & DRM_MODE_REFLECT_Y) {
924 			y_off = tile_h_mask - y_off;
925 			pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
926 		} else {
927 			pitch0 = 0;
928 		}
929 
930 		tiling = SCALER_CTL0_TILING_256B_OR_T;
931 		pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
932 			   VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
933 			   VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
934 			   VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
935 		vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
936 		vc4_state->offsets[0] += subtile_y << 8;
937 		vc4_state->offsets[0] += utile_y << 4;
938 
939 		/* Rows of tiles alternate left-to-right and right-to-left. */
940 		if (tiles_t & 1) {
941 			pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
942 			vc4_state->offsets[0] += (tiles_w - tiles_l) <<
943 						 tile_size_shift;
944 			vc4_state->offsets[0] -= (1 + !tile_y) << 10;
945 		} else {
946 			vc4_state->offsets[0] += tiles_l << tile_size_shift;
947 			vc4_state->offsets[0] += tile_y << 10;
948 		}
949 
950 		break;
951 	}
952 
953 	case DRM_FORMAT_MOD_BROADCOM_SAND64:
954 	case DRM_FORMAT_MOD_BROADCOM_SAND128:
955 	case DRM_FORMAT_MOD_BROADCOM_SAND256: {
956 		uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
957 
958 		if (param > SCALER_TILE_HEIGHT_MASK) {
959 			DRM_DEBUG_KMS("SAND height too large (%d)\n",
960 				      param);
961 			return -EINVAL;
962 		}
963 
964 		if (fb->format->format == DRM_FORMAT_P030) {
965 			hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
966 			tiling = SCALER_CTL0_TILING_128B;
967 		} else {
968 			hvs_format = HVS_PIXEL_FORMAT_H264;
969 
970 			switch (base_format_mod) {
971 			case DRM_FORMAT_MOD_BROADCOM_SAND64:
972 				tiling = SCALER_CTL0_TILING_64B;
973 				break;
974 			case DRM_FORMAT_MOD_BROADCOM_SAND128:
975 				tiling = SCALER_CTL0_TILING_128B;
976 				break;
977 			case DRM_FORMAT_MOD_BROADCOM_SAND256:
978 				tiling = SCALER_CTL0_TILING_256B_OR_T;
979 				break;
980 			default:
981 				return -EINVAL;
982 			}
983 		}
984 
985 		/* Adjust the base pointer to the first pixel to be scanned
986 		 * out.
987 		 *
988 		 * For P030, y_ptr [31:4] is the 128bit word for the start pixel
989 		 * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
990 		 * word that should be taken as the first pixel.
991 		 * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
992 		 * element within the 128bit word, eg for pixel 3 the value
993 		 * should be 6.
994 		 */
995 		for (i = 0; i < num_planes; i++) {
996 			u32 tile_w, tile, x_off, pix_per_tile;
997 
998 			if (fb->format->format == DRM_FORMAT_P030) {
999 				/*
1000 				 * Spec says: bits [31:4] of the given address
1001 				 * should point to the 128-bit word containing
1002 				 * the desired starting pixel, and bits[3:0]
1003 				 * should be between 0 and 11, indicating which
1004 				 * of the 12-pixels in that 128-bit word is the
1005 				 * first pixel to be used
1006 				 */
1007 				u32 remaining_pixels = vc4_state->src_x % 96;
1008 				u32 aligned = remaining_pixels / 12;
1009 				u32 last_bits = remaining_pixels % 12;
1010 
1011 				x_off = aligned * 16 + last_bits;
1012 				tile_w = 128;
1013 				pix_per_tile = 96;
1014 			} else {
1015 				switch (base_format_mod) {
1016 				case DRM_FORMAT_MOD_BROADCOM_SAND64:
1017 					tile_w = 64;
1018 					break;
1019 				case DRM_FORMAT_MOD_BROADCOM_SAND128:
1020 					tile_w = 128;
1021 					break;
1022 				case DRM_FORMAT_MOD_BROADCOM_SAND256:
1023 					tile_w = 256;
1024 					break;
1025 				default:
1026 					return -EINVAL;
1027 				}
1028 				pix_per_tile = tile_w / fb->format->cpp[0];
1029 				x_off = (vc4_state->src_x % pix_per_tile) /
1030 					(i ? h_subsample : 1) *
1031 					fb->format->cpp[i];
1032 			}
1033 
1034 			tile = vc4_state->src_x / pix_per_tile;
1035 
1036 			vc4_state->offsets[i] += param * tile_w * tile;
1037 			vc4_state->offsets[i] += src_y /
1038 						 (i ? v_subsample : 1) *
1039 						 tile_w;
1040 			vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
1041 		}
1042 
1043 		pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
1044 		break;
1045 	}
1046 
1047 	default:
1048 		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
1049 			      (long long)fb->modifier);
1050 		return -EINVAL;
1051 	}
1052 
1053 	/* Don't waste cycles mixing with plane alpha if the set alpha
1054 	 * is opaque or there is no per-pixel alpha information.
1055 	 * In any case we use the alpha property value as the fixed alpha.
1056 	 */
1057 	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
1058 			  fb->format->has_alpha;
1059 
1060 	if (!vc4->is_vc5) {
1061 	/* Control word */
1062 		vc4_dlist_write(vc4_state,
1063 				SCALER_CTL0_VALID |
1064 				(rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
1065 				(rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
1066 				VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
1067 				(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
1068 				(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
1069 				VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
1070 				(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
1071 				VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
1072 				VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
1073 
1074 		/* Position Word 0: Image Positions and Alpha Value */
1075 		vc4_state->pos0_offset = vc4_state->dlist_count;
1076 		vc4_dlist_write(vc4_state,
1077 				VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
1078 				VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
1079 				VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
1080 
1081 		/* Position Word 1: Scaled Image Dimensions. */
1082 		if (!vc4_state->is_unity) {
1083 			vc4_dlist_write(vc4_state,
1084 					VC4_SET_FIELD(vc4_state->crtc_w,
1085 						      SCALER_POS1_SCL_WIDTH) |
1086 					VC4_SET_FIELD(vc4_state->crtc_h,
1087 						      SCALER_POS1_SCL_HEIGHT));
1088 		}
1089 
1090 		/* Position Word 2: Source Image Size, Alpha */
1091 		vc4_state->pos2_offset = vc4_state->dlist_count;
1092 		vc4_dlist_write(vc4_state,
1093 				(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
1094 				vc4_hvs4_get_alpha_blend_mode(state) |
1095 				VC4_SET_FIELD(vc4_state->src_w[0],
1096 					      SCALER_POS2_WIDTH) |
1097 				VC4_SET_FIELD(vc4_state->src_h[0],
1098 					      SCALER_POS2_HEIGHT));
1099 
1100 		/* Position Word 3: Context.  Written by the HVS. */
1101 		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1102 
1103 	} else {
1104 		/* Control word */
1105 		vc4_dlist_write(vc4_state,
1106 				SCALER_CTL0_VALID |
1107 				(format->pixel_order_hvs5 << SCALER_CTL0_ORDER_SHIFT) |
1108 				(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
1109 				VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
1110 				(vc4_state->is_unity ?
1111 						SCALER5_CTL0_UNITY : 0) |
1112 				VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
1113 				VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) |
1114 				SCALER5_CTL0_ALPHA_EXPAND |
1115 				SCALER5_CTL0_RGB_EXPAND);
1116 
1117 		/* Position Word 0: Image Positions and Alpha Value */
1118 		vc4_state->pos0_offset = vc4_state->dlist_count;
1119 		vc4_dlist_write(vc4_state,
1120 				(rotation & DRM_MODE_REFLECT_Y ?
1121 						SCALER5_POS0_VFLIP : 0) |
1122 				VC4_SET_FIELD(vc4_state->crtc_x,
1123 					      SCALER_POS0_START_X) |
1124 				(rotation & DRM_MODE_REFLECT_X ?
1125 					      SCALER5_POS0_HFLIP : 0) |
1126 				VC4_SET_FIELD(vc4_state->crtc_y,
1127 					      SCALER5_POS0_START_Y)
1128 			       );
1129 
1130 		/* Control Word 2 */
1131 		vc4_dlist_write(vc4_state,
1132 				VC4_SET_FIELD(state->alpha >> 4,
1133 					      SCALER5_CTL2_ALPHA) |
1134 				vc4_hvs5_get_alpha_blend_mode(state) |
1135 				(mix_plane_alpha ?
1136 					SCALER5_CTL2_ALPHA_MIX : 0)
1137 			       );
1138 
1139 		/* Position Word 1: Scaled Image Dimensions. */
1140 		if (!vc4_state->is_unity) {
1141 			vc4_dlist_write(vc4_state,
1142 					VC4_SET_FIELD(vc4_state->crtc_w,
1143 						      SCALER5_POS1_SCL_WIDTH) |
1144 					VC4_SET_FIELD(vc4_state->crtc_h,
1145 						      SCALER5_POS1_SCL_HEIGHT));
1146 		}
1147 
1148 		/* Position Word 2: Source Image Size */
1149 		vc4_state->pos2_offset = vc4_state->dlist_count;
1150 		vc4_dlist_write(vc4_state,
1151 				VC4_SET_FIELD(vc4_state->src_w[0],
1152 					      SCALER5_POS2_WIDTH) |
1153 				VC4_SET_FIELD(vc4_state->src_h[0],
1154 					      SCALER5_POS2_HEIGHT));
1155 
1156 		/* Position Word 3: Context.  Written by the HVS. */
1157 		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1158 	}
1159 
1160 
1161 	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
1162 	 *
1163 	 * The pointers may be any byte address.
1164 	 */
1165 	vc4_state->ptr0_offset = vc4_state->dlist_count;
1166 	for (i = 0; i < num_planes; i++)
1167 		vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
1168 
1169 	/* Pointer Context Word 0/1/2: Written by the HVS */
1170 	for (i = 0; i < num_planes; i++)
1171 		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1172 
1173 	/* Pitch word 0 */
1174 	vc4_dlist_write(vc4_state, pitch0);
1175 
1176 	/* Pitch word 1/2 */
1177 	for (i = 1; i < num_planes; i++) {
1178 		if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
1179 		    hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
1180 			vc4_dlist_write(vc4_state,
1181 					VC4_SET_FIELD(fb->pitches[i],
1182 						      SCALER_SRC_PITCH));
1183 		} else {
1184 			vc4_dlist_write(vc4_state, pitch0);
1185 		}
1186 	}
1187 
1188 	/* Colorspace conversion words */
1189 	if (vc4_state->is_yuv) {
1190 		enum drm_color_encoding color_encoding = state->color_encoding;
1191 		enum drm_color_range color_range = state->color_range;
1192 		const u32 *ccm;
1193 
1194 		if (color_encoding >= DRM_COLOR_ENCODING_MAX)
1195 			color_encoding = DRM_COLOR_YCBCR_BT601;
1196 		if (color_range >= DRM_COLOR_RANGE_MAX)
1197 			color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
1198 
1199 		ccm = colorspace_coeffs[color_range][color_encoding];
1200 
1201 		vc4_dlist_write(vc4_state, ccm[0]);
1202 		vc4_dlist_write(vc4_state, ccm[1]);
1203 		vc4_dlist_write(vc4_state, ccm[2]);
1204 	}
1205 
1206 	vc4_state->lbm_offset = 0;
1207 
1208 	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
1209 	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
1210 	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
1211 	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
1212 		/* Reserve a slot for the LBM Base Address. The real value will
1213 		 * be set when calling vc4_plane_allocate_lbm().
1214 		 */
1215 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
1216 		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
1217 			vc4_state->lbm_offset = vc4_state->dlist_count;
1218 			vc4_dlist_counter_increment(vc4_state);
1219 		}
1220 
1221 		if (num_planes > 1) {
1222 			/* Emit Cb/Cr as channel 0 and Y as channel
1223 			 * 1. This matches how we set up scl0/scl1
1224 			 * above.
1225 			 */
1226 			vc4_write_scaling_parameters(state, 1);
1227 		}
1228 		vc4_write_scaling_parameters(state, 0);
1229 
1230 		/* If any PPF setup was done, then all the kernel
1231 		 * pointers get uploaded.
1232 		 */
1233 		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
1234 		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
1235 		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
1236 		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
1237 			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
1238 						   SCALER_PPF_KERNEL_OFFSET);
1239 
1240 			/* HPPF plane 0 */
1241 			vc4_dlist_write(vc4_state, kernel);
1242 			/* VPPF plane 0 */
1243 			vc4_dlist_write(vc4_state, kernel);
1244 			/* HPPF plane 1 */
1245 			vc4_dlist_write(vc4_state, kernel);
1246 			/* VPPF plane 1 */
1247 			vc4_dlist_write(vc4_state, kernel);
1248 		}
1249 	}
1250 
1251 	vc4_state->dlist[ctl0_offset] |=
1252 		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
1253 
1254 	/* crtc_* are already clipped coordinates. */
1255 	covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
1256 			vc4_state->crtc_w == state->crtc->mode.hdisplay &&
1257 			vc4_state->crtc_h == state->crtc->mode.vdisplay;
1258 	/* Background fill might be necessary when the plane has per-pixel
1259 	 * alpha content or a non-opaque plane alpha and could blend from the
1260 	 * background or does not cover the entire screen.
1261 	 */
1262 	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
1263 				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;
1264 
1265 	/* Flag the dlist as initialized to avoid checking it twice in case
1266 	 * the async update check already called vc4_plane_mode_set() and
1267 	 * decided to fallback to sync update because async update was not
1268 	 * possible.
1269 	 */
1270 	vc4_state->dlist_initialized = 1;
1271 
1272 	vc4_plane_calc_load(state);
1273 
1274 	return 0;
1275 }
1276 
1277 /* If a modeset involves changing the setup of a plane, the atomic
1278  * infrastructure will call this to validate a proposed plane setup.
1279  * However, if a plane isn't getting updated, this (and the
1280  * corresponding vc4_plane_atomic_update) won't get called.  Thus, we
1281  * compute the dlist here and have all active plane dlists get updated
1282  * in the CRTC's flush.
1283  */
1284 static int vc4_plane_atomic_check(struct drm_plane *plane,
1285 				  struct drm_atomic_state *state)
1286 {
1287 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1288 										 plane);
1289 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
1290 	int ret;
1291 
1292 	vc4_state->dlist_count = 0;
1293 
1294 	if (!plane_enabled(new_plane_state))
1295 		return 0;
1296 
1297 	ret = vc4_plane_mode_set(plane, new_plane_state);
1298 	if (ret)
1299 		return ret;
1300 
1301 	return vc4_plane_allocate_lbm(new_plane_state);
1302 }
1303 
static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	/* Intentionally empty: at this point we don't yet know where
	 * in the CRTC's dlist this plane lands, so the actual upload
	 * is deferred to the CRTC's atomic_flush, which calls
	 * vc4_plane_write_dlist().
	 */
}
1313 
1314 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
1315 {
1316 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
1317 	int i;
1318 	int idx;
1319 
1320 	if (!drm_dev_enter(plane->dev, &idx))
1321 		goto out;
1322 
1323 	vc4_state->hw_dlist = dlist;
1324 
1325 	/* Can't memcpy_toio() because it needs to be 32-bit writes. */
1326 	for (i = 0; i < vc4_state->dlist_count; i++)
1327 		writel(vc4_state->dlist[i], &dlist[i]);
1328 
1329 	drm_dev_exit(idx);
1330 
1331 out:
1332 	return vc4_state->dlist_count;
1333 }
1334 
1335 u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
1336 {
1337 	const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
1338 
1339 	return vc4_state->dlist_count;
1340 }
1341 
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
1345 void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
1346 {
1347 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
1348 	struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
1349 	uint32_t addr;
1350 	int idx;
1351 
1352 	if (!drm_dev_enter(plane->dev, &idx))
1353 		return;
1354 
1355 	/* We're skipping the address adjustment for negative origin,
1356 	 * because this is only called on the primary plane.
1357 	 */
1358 	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
1359 	addr = bo->dma_addr + fb->offsets[0];
1360 
1361 	/* Write the new address into the hardware immediately.  The
1362 	 * scanout will start from this address as soon as the FIFO
1363 	 * needs to refill with pixels.
1364 	 */
1365 	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
1366 
1367 	/* Also update the CPU-side dlist copy, so that any later
1368 	 * atomic updates that don't do a new modeset on our plane
1369 	 * also use our updated address.
1370 	 */
1371 	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
1372 
1373 	drm_dev_exit(idx);
1374 }
1375 
/* Apply an async (cursor-style) update: copy the new state's contents
 * into the plane's *current* state by hand, then patch only the dlist
 * words that vc4_plane_atomic_async_check() allowed to differ (pos0,
 * pos2, ptr0) both in the CPU copy and in the live HVS dlist.
 */
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct vc4_plane_state *vc4_state, *new_vc4_state;
	int idx;

	if (!drm_dev_enter(plane->dev, &idx))
		return;

	/* swap() rather than a plain assignment, presumably so the old
	 * fb's reference is dropped when new_plane_state is destroyed
	 * -- NOTE(review): confirm against the async-update helpers.
	 */
	swap(plane->state->fb, new_plane_state->fb);
	plane->state->crtc_x = new_plane_state->crtc_x;
	plane->state->crtc_y = new_plane_state->crtc_y;
	plane->state->crtc_w = new_plane_state->crtc_w;
	plane->state->crtc_h = new_plane_state->crtc_h;
	plane->state->src_x = new_plane_state->src_x;
	plane->state->src_y = new_plane_state->src_y;
	plane->state->src_w = new_plane_state->src_w;
	plane->state->src_h = new_plane_state->src_h;
	plane->state->alpha = new_plane_state->alpha;
	plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
	plane->state->rotation = new_plane_state->rotation;
	plane->state->zpos = new_plane_state->zpos;
	plane->state->normalized_zpos = new_plane_state->normalized_zpos;
	plane->state->color_encoding = new_plane_state->color_encoding;
	plane->state->color_range = new_plane_state->color_range;
	plane->state->src = new_plane_state->src;
	plane->state->dst = new_plane_state->dst;
	plane->state->visible = new_plane_state->visible;

	new_vc4_state = to_vc4_plane_state(new_plane_state);
	vc4_state = to_vc4_plane_state(plane->state);

	/* Mirror the driver-private fields computed by
	 * vc4_plane_mode_set() into the current state as well.
	 */
	vc4_state->crtc_x = new_vc4_state->crtc_x;
	vc4_state->crtc_y = new_vc4_state->crtc_y;
	vc4_state->crtc_h = new_vc4_state->crtc_h;
	vc4_state->crtc_w = new_vc4_state->crtc_w;
	vc4_state->src_x = new_vc4_state->src_x;
	vc4_state->src_y = new_vc4_state->src_y;
	memcpy(vc4_state->src_w, new_vc4_state->src_w,
	       sizeof(vc4_state->src_w));
	memcpy(vc4_state->src_h, new_vc4_state->src_h,
	       sizeof(vc4_state->src_h));
	memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
	       sizeof(vc4_state->x_scaling));
	memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
	       sizeof(vc4_state->y_scaling));
	vc4_state->is_unity = new_vc4_state->is_unity;
	vc4_state->is_yuv = new_vc4_state->is_yuv;
	memcpy(vc4_state->offsets, new_vc4_state->offsets,
	       sizeof(vc4_state->offsets));
	vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;

	/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
	vc4_state->dlist[vc4_state->pos0_offset] =
		new_vc4_state->dlist[vc4_state->pos0_offset];
	vc4_state->dlist[vc4_state->pos2_offset] =
		new_vc4_state->dlist[vc4_state->pos2_offset];
	vc4_state->dlist[vc4_state->ptr0_offset] =
		new_vc4_state->dlist[vc4_state->ptr0_offset];

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	drm_dev_exit(idx);
}
1451 
1452 static int vc4_plane_atomic_async_check(struct drm_plane *plane,
1453 					struct drm_atomic_state *state)
1454 {
1455 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1456 										 plane);
1457 	struct vc4_plane_state *old_vc4_state, *new_vc4_state;
1458 	int ret;
1459 	u32 i;
1460 
1461 	ret = vc4_plane_mode_set(plane, new_plane_state);
1462 	if (ret)
1463 		return ret;
1464 
1465 	old_vc4_state = to_vc4_plane_state(plane->state);
1466 	new_vc4_state = to_vc4_plane_state(new_plane_state);
1467 
1468 	if (!new_vc4_state->hw_dlist)
1469 		return -EINVAL;
1470 
1471 	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
1472 	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
1473 	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
1474 	    old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
1475 	    vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
1476 		return -EINVAL;
1477 
1478 	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update
1479 	 * if anything else has changed, fallback to a sync update.
1480 	 */
1481 	for (i = 0; i < new_vc4_state->dlist_count; i++) {
1482 		if (i == new_vc4_state->pos0_offset ||
1483 		    i == new_vc4_state->pos2_offset ||
1484 		    i == new_vc4_state->ptr0_offset ||
1485 		    (new_vc4_state->lbm_offset &&
1486 		     i == new_vc4_state->lbm_offset))
1487 			continue;
1488 
1489 		if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
1490 			return -EINVAL;
1491 	}
1492 
1493 	return 0;
1494 }
1495 
1496 static int vc4_prepare_fb(struct drm_plane *plane,
1497 			  struct drm_plane_state *state)
1498 {
1499 	struct vc4_bo *bo;
1500 
1501 	if (!state->fb)
1502 		return 0;
1503 
1504 	bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
1505 
1506 	drm_gem_plane_helper_prepare_fb(plane, state);
1507 
1508 	if (plane->state->fb == state->fb)
1509 		return 0;
1510 
1511 	return vc4_bo_inc_usecnt(bo);
1512 }
1513 
1514 static void vc4_cleanup_fb(struct drm_plane *plane,
1515 			   struct drm_plane_state *state)
1516 {
1517 	struct vc4_bo *bo;
1518 
1519 	if (plane->state->fb == state->fb || !state->fb)
1520 		return;
1521 
1522 	bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
1523 	vc4_bo_dec_usecnt(bo);
1524 }
1525 
/* Plane helpers used on the original VC4 generation.  Unlike the VC5
 * set below, these wire up prepare_fb/cleanup_fb to manage the vc4_bo
 * usage count around scanout.
 */
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
	.cleanup_fb = vc4_cleanup_fb,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};
1534 
/* Plane helpers used when vc4->is_vc5.  No prepare_fb/cleanup_fb
 * here, so no vc4_bo usecnt bookkeeping is done on this generation --
 * NOTE(review): presumably intentional, confirm against the VC5 BO
 * handling.
 */
static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};
1541 
1542 static bool vc4_format_mod_supported(struct drm_plane *plane,
1543 				     uint32_t format,
1544 				     uint64_t modifier)
1545 {
1546 	/* Support T_TILING for RGB formats only. */
1547 	switch (format) {
1548 	case DRM_FORMAT_XRGB8888:
1549 	case DRM_FORMAT_ARGB8888:
1550 	case DRM_FORMAT_ABGR8888:
1551 	case DRM_FORMAT_XBGR8888:
1552 	case DRM_FORMAT_RGB565:
1553 	case DRM_FORMAT_BGR565:
1554 	case DRM_FORMAT_ARGB1555:
1555 	case DRM_FORMAT_XRGB1555:
1556 		switch (fourcc_mod_broadcom_mod(modifier)) {
1557 		case DRM_FORMAT_MOD_LINEAR:
1558 		case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
1559 			return true;
1560 		default:
1561 			return false;
1562 		}
1563 	case DRM_FORMAT_NV12:
1564 	case DRM_FORMAT_NV21:
1565 		switch (fourcc_mod_broadcom_mod(modifier)) {
1566 		case DRM_FORMAT_MOD_LINEAR:
1567 		case DRM_FORMAT_MOD_BROADCOM_SAND64:
1568 		case DRM_FORMAT_MOD_BROADCOM_SAND128:
1569 		case DRM_FORMAT_MOD_BROADCOM_SAND256:
1570 			return true;
1571 		default:
1572 			return false;
1573 		}
1574 	case DRM_FORMAT_P030:
1575 		switch (fourcc_mod_broadcom_mod(modifier)) {
1576 		case DRM_FORMAT_MOD_BROADCOM_SAND128:
1577 			return true;
1578 		default:
1579 			return false;
1580 		}
1581 	case DRM_FORMAT_RGBX1010102:
1582 	case DRM_FORMAT_BGRX1010102:
1583 	case DRM_FORMAT_RGBA1010102:
1584 	case DRM_FORMAT_BGRA1010102:
1585 	case DRM_FORMAT_XRGB4444:
1586 	case DRM_FORMAT_ARGB4444:
1587 	case DRM_FORMAT_XBGR4444:
1588 	case DRM_FORMAT_ABGR4444:
1589 	case DRM_FORMAT_RGBX4444:
1590 	case DRM_FORMAT_RGBA4444:
1591 	case DRM_FORMAT_BGRX4444:
1592 	case DRM_FORMAT_BGRA4444:
1593 	case DRM_FORMAT_RGB332:
1594 	case DRM_FORMAT_BGR233:
1595 	case DRM_FORMAT_YUV422:
1596 	case DRM_FORMAT_YVU422:
1597 	case DRM_FORMAT_YUV420:
1598 	case DRM_FORMAT_YVU420:
1599 	case DRM_FORMAT_NV16:
1600 	case DRM_FORMAT_NV61:
1601 	default:
1602 		return (modifier == DRM_FORMAT_MOD_LINEAR);
1603 	}
1604 }
1605 
1606 static const struct drm_plane_funcs vc4_plane_funcs = {
1607 	.update_plane = drm_atomic_helper_update_plane,
1608 	.disable_plane = drm_atomic_helper_disable_plane,
1609 	.reset = vc4_plane_reset,
1610 	.atomic_duplicate_state = vc4_plane_duplicate_state,
1611 	.atomic_destroy_state = vc4_plane_destroy_state,
1612 	.format_mod_supported = vc4_format_mod_supported,
1613 };
1614 
1615 struct drm_plane *vc4_plane_init(struct drm_device *dev,
1616 				 enum drm_plane_type type,
1617 				 uint32_t possible_crtcs)
1618 {
1619 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1620 	struct drm_plane *plane;
1621 	struct vc4_plane *vc4_plane;
1622 	u32 formats[ARRAY_SIZE(hvs_formats)];
1623 	int num_formats = 0;
1624 	unsigned i;
1625 	static const uint64_t modifiers[] = {
1626 		DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
1627 		DRM_FORMAT_MOD_BROADCOM_SAND128,
1628 		DRM_FORMAT_MOD_BROADCOM_SAND64,
1629 		DRM_FORMAT_MOD_BROADCOM_SAND256,
1630 		DRM_FORMAT_MOD_LINEAR,
1631 		DRM_FORMAT_MOD_INVALID
1632 	};
1633 
1634 	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
1635 		if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
1636 			formats[num_formats] = hvs_formats[i].drm;
1637 			num_formats++;
1638 		}
1639 	}
1640 
1641 	vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
1642 					       possible_crtcs,
1643 					       &vc4_plane_funcs,
1644 					       formats, num_formats,
1645 					       modifiers, type, NULL);
1646 	if (IS_ERR(vc4_plane))
1647 		return ERR_CAST(vc4_plane);
1648 	plane = &vc4_plane->base;
1649 
1650 	if (vc4->is_vc5)
1651 		drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
1652 	else
1653 		drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
1654 
1655 	drm_plane_create_alpha_property(plane);
1656 	drm_plane_create_blend_mode_property(plane,
1657 					     BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1658 					     BIT(DRM_MODE_BLEND_PREMULTI) |
1659 					     BIT(DRM_MODE_BLEND_COVERAGE));
1660 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1661 					   DRM_MODE_ROTATE_0 |
1662 					   DRM_MODE_ROTATE_180 |
1663 					   DRM_MODE_REFLECT_X |
1664 					   DRM_MODE_REFLECT_Y);
1665 
1666 	drm_plane_create_color_properties(plane,
1667 					  BIT(DRM_COLOR_YCBCR_BT601) |
1668 					  BIT(DRM_COLOR_YCBCR_BT709) |
1669 					  BIT(DRM_COLOR_YCBCR_BT2020),
1670 					  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1671 					  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1672 					  DRM_COLOR_YCBCR_BT709,
1673 					  DRM_COLOR_YCBCR_LIMITED_RANGE);
1674 
1675 	if (type == DRM_PLANE_TYPE_PRIMARY)
1676 		drm_plane_create_zpos_immutable_property(plane, 0);
1677 
1678 	return plane;
1679 }
1680 
1681 #define VC4_NUM_OVERLAY_PLANES	16
1682 
1683 int vc4_plane_create_additional_planes(struct drm_device *drm)
1684 {
1685 	struct drm_plane *cursor_plane;
1686 	struct drm_crtc *crtc;
1687 	unsigned int i;
1688 
1689 	/* Set up some arbitrary number of planes.  We're not limited
1690 	 * by a set number of physical registers, just the space in
1691 	 * the HVS (16k) and how small an plane can be (28 bytes).
1692 	 * However, each plane we set up takes up some memory, and
1693 	 * increases the cost of looping over planes, which atomic
1694 	 * modesetting does quite a bit.  As a result, we pick a
1695 	 * modest number of planes to expose, that should hopefully
1696 	 * still cover any sane usecase.
1697 	 */
1698 	for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
1699 		struct drm_plane *plane =
1700 			vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
1701 				       GENMASK(drm->mode_config.num_crtc - 1, 0));
1702 
1703 		if (IS_ERR(plane))
1704 			continue;
1705 
1706 		/* Create zpos property. Max of all the overlays + 1 primary +
1707 		 * 1 cursor plane on a crtc.
1708 		 */
1709 		drm_plane_create_zpos_property(plane, i + 1, 1,
1710 					       VC4_NUM_OVERLAY_PLANES + 1);
1711 	}
1712 
1713 	drm_for_each_crtc(crtc, drm) {
1714 		/* Set up the legacy cursor after overlay initialization,
1715 		 * since the zpos fallback is that planes are rendered by plane
1716 		 * ID order, and that then puts the cursor on top.
1717 		 */
1718 		cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
1719 					      drm_crtc_mask(crtc));
1720 		if (!IS_ERR(cursor_plane)) {
1721 			crtc->cursor = cursor_plane;
1722 
1723 			drm_plane_create_zpos_property(cursor_plane,
1724 						       VC4_NUM_OVERLAY_PLANES + 1,
1725 						       1,
1726 						       VC4_NUM_OVERLAY_PLANES + 1);
1727 		}
1728 	}
1729 
1730 	return 0;
1731 }
1732