// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>

#include "dc.h"
#include "plane.h"

static void tegra_plane_destroy(struct drm_plane *plane)
{
	struct tegra_plane *p = to_tegra_plane(plane);

	drm_plane_cleanup(plane);
	kfree(p);
}

static void tegra_plane_reset(struct drm_plane *plane)
{
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_plane_state *state;
	unsigned int i;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(plane->state);
	plane->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		plane->state = &state->base;
		plane->state->plane = plane;
		plane->state->zpos = p->index;
		plane->state->normalized_zpos = p->index;

		for (i = 0; i < 3; i++)
			state->iova[i] = DMA_MAPPING_ERROR;
	}
}

static struct drm_plane_state *
tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
	struct tegra_plane_state *copy;
	unsigned int i;

	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
	copy->tiling = state->tiling;
	copy->format = state->format;
	copy->swap = state->swap;
	copy->reflect_x = state->reflect_x;
	copy->reflect_y = state->reflect_y;
	copy->opaque = state->opaque;

	for (i = 0; i < 2; i++)
		copy->blending[i] = state->blending[i];

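	/*
	 * Pinned buffer mappings are not carried over to the copy: the new
	 * state starts out unmapped and is pinned again by
	 * tegra_plane_prepare_fb().
	 */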
	for (i = 0; i < 3; i++) {
		copy->iova[i] = DMA_MAPPING_ERROR;
		copy->sgt[i] = NULL;
	}

	return &copy->base;
}

static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}

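/*
 * Every format is supported with the linear modifier; non-linear modifiers
 * are only supported for single-plane formats.
 */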
static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
					     uint32_t format,
					     uint64_t modifier)
{
	const struct drm_format_info *info = drm_format_info(format);

	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	if (info->num_planes == 1)
		return true;

	return false;
}

const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};

static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

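		/*
		 * Without an IOMMU domain attached, or when the display
		 * controller is part of a shared IOMMU group, the buffer
		 * address is already known, so ask host1x_bo_pin() for the
		 * address directly instead of an SG table.
		 */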
		if (!domain || dc->client.group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
			if (err)
				goto unpin;

			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (sgt->nents > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = sg_dma_address(sgt->sgl);
			state->sgt[i] = sgt;
		} else {
			state->iova[i] = phys_addr;
		}
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

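	/* Unmap and unpin everything that was successfully pinned so far. */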
	while (i--) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct sg_table *sgt = state->sgt[i];

		if (sgt)
			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);

		host1x_bo_unpin(dc->dev, &bo->base, sgt);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->sgt[i] = NULL;
	}

	return err;
}

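/* Undo the mappings and pins set up by tegra_dc_pin(). */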
static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int i;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct sg_table *sgt = state->sgt[i];

		if (sgt)
			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);

		host1x_bo_unpin(dc->dev, &bo->base, sgt);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->sgt[i] = NULL;
	}
}

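/*
 * Set up implicit fencing for the framebuffer and pin its buffers into the
 * display controller's address space.
 */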
int tegra_plane_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);

	if (!state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, state);

	return tegra_dc_pin(dc, to_tegra_plane_state(state));
}

void tegra_plane_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *state)
{
	struct tegra_dc *dc = to_tegra_dc(state->crtc);

	if (dc)
		tegra_dc_unpin(dc, to_tegra_plane_state(state));
}

int tegra_plane_state_add(struct tegra_plane *plane,
			  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct tegra_dc_state *tegra;
	int err;

	/* Propagate errors from allocation or locking failures. */
	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Check plane state for visibility and calculate clipping bounds */
	err = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (err < 0)
		return err;

	tegra = to_dc_state(crtc_state);

	tegra->planes |= WIN_A_ACT_REQ << plane->index;

	return 0;
}

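/*
 * Translate a DRM fourcc into the corresponding window color depth and,
 * optionally, the byte-swap setting used when fetching the data.
 */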
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
	/* assume no swapping of fetched data */
	if (swap)
		*swap = BYTE_SWAP_NOSWAP;

	switch (fourcc) {
	case DRM_FORMAT_ARGB4444:
		*format = WIN_COLOR_DEPTH_B4G4R4A4;
		break;

	case DRM_FORMAT_ARGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5A1;
		break;

	case DRM_FORMAT_RGB565:
		*format = WIN_COLOR_DEPTH_B5G6R5;
		break;

	case DRM_FORMAT_RGBA5551:
		*format = WIN_COLOR_DEPTH_A1B5G5R5;
		break;

	case DRM_FORMAT_ARGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_ABGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8A8;
		break;

	case DRM_FORMAT_ABGR4444:
		*format = WIN_COLOR_DEPTH_R4G4B4A4;
		break;

	case DRM_FORMAT_ABGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5A;
		break;

	case DRM_FORMAT_BGRA5551:
		*format = WIN_COLOR_DEPTH_AR5G5B5;
		break;

	case DRM_FORMAT_XRGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5X1;
		break;

	case DRM_FORMAT_RGBX5551:
		*format = WIN_COLOR_DEPTH_X1B5G5R5;
		break;

	case DRM_FORMAT_XBGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5X1;
		break;

	case DRM_FORMAT_BGRX5551:
		*format = WIN_COLOR_DEPTH_X1R5G5B5;
		break;

	case DRM_FORMAT_BGR565:
		*format = WIN_COLOR_DEPTH_R5G6B5;
		break;

	case DRM_FORMAT_BGRA8888:
		*format = WIN_COLOR_DEPTH_A8R8G8B8;
		break;

	case DRM_FORMAT_RGBA8888:
		*format = WIN_COLOR_DEPTH_A8B8G8R8;
		break;

	case DRM_FORMAT_XRGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8X8;
		break;

	case DRM_FORMAT_XBGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8X8;
		break;

	case DRM_FORMAT_UYVY:
		*format = WIN_COLOR_DEPTH_YCbCr422;
		break;

	case DRM_FORMAT_YUYV:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP2;
		break;

	case DRM_FORMAT_YUV420:
		*format = WIN_COLOR_DEPTH_YCbCr420P;
		break;

	case DRM_FORMAT_YUV422:
		*format = WIN_COLOR_DEPTH_YCbCr422P;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

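/*
 * Report whether a window color depth is a YUV format. If @planar is
 * non-NULL, it is set to true for planar YUV formats and to false otherwise.
 */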
bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
{
	switch (format) {
	case WIN_COLOR_DEPTH_YCbCr422:
	case WIN_COLOR_DEPTH_YUV422:
		if (planar)
			*planar = false;

		return true;

	case WIN_COLOR_DEPTH_YCbCr420P:
	case WIN_COLOR_DEPTH_YUV420P:
	case WIN_COLOR_DEPTH_YCbCr422P:
	case WIN_COLOR_DEPTH_YUV422P:
	case WIN_COLOR_DEPTH_YCbCr422R:
	case WIN_COLOR_DEPTH_YUV422R:
	case WIN_COLOR_DEPTH_YCbCr422RA:
	case WIN_COLOR_DEPTH_YUV422RA:
		if (planar)
			*planar = true;

		return true;
	}

	if (planar)
		*planar = false;

	return false;
}

static bool __drm_format_has_alpha(u32 format)
{
	switch (format) {
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
		return true;
	}

	return false;
}

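/*
 * Map an opaque (X) window format to its alpha (A) counterpart. YUV formats
 * and B5G6R5 have no alpha variant and are returned unchanged.
 */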
static int tegra_plane_format_get_alpha(unsigned int opaque,
					unsigned int *alpha)
{
	if (tegra_plane_format_is_yuv(opaque, NULL)) {
		*alpha = opaque;
		return 0;
	}

	switch (opaque) {
	case WIN_COLOR_DEPTH_B5G5R5X1:
		*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
		return 0;

	case WIN_COLOR_DEPTH_X1B5G5R5:
		*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
		return 0;

	case WIN_COLOR_DEPTH_R8G8B8X8:
		*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
		return 0;

	case WIN_COLOR_DEPTH_B8G8R8X8:
		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
		return 0;

	case WIN_COLOR_DEPTH_B5G6R5:
		*alpha = opaque;
		return 0;
	}

	return -EINVAL;
}

/*
 * This is only applicable to Tegra20 and Tegra30, where the opaque formats
 * can be emulated by using the corresponding alpha formats with alpha
 * blending disabled.
 */
static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
				     struct tegra_plane_state *state)
{
	unsigned int format;
	int err;

	switch (state->format) {
	case WIN_COLOR_DEPTH_B5G5R5A1:
	case WIN_COLOR_DEPTH_A1B5G5R5:
	case WIN_COLOR_DEPTH_R8G8B8A8:
	case WIN_COLOR_DEPTH_B8G8R8A8:
		state->opaque = false;
		break;

	default:
		err = tegra_plane_format_get_alpha(state->format, &format);
		if (err < 0)
			return err;

		state->format = format;
		state->opaque = true;
		break;
	}

	return 0;
}

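/*
 * Returns 0 if the plane's zpos / transparency is unchanged, 1 if the
 * sibling planes on the same CRTC have been added to the atomic state, or a
 * negative error code on failure.
 */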
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct drm_plane_state *old, *plane_state;
	struct drm_plane *plane;

	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);

	/* check if zpos / transparency changed */
	if (old->normalized_zpos == state->base.normalized_zpos &&
	    to_tegra_plane_state(old)->opaque == state->opaque)
		return 0;

	/* include all sibling planes in this commit */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		plane_state = drm_atomic_get_plane_state(state->base.state,
							 plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 1;
}

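/*
 * Compute the index of @other among the (up to two) other windows on the
 * same CRTC, which is used to address the blending[] array of @plane.
 */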
static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
						  struct tegra_plane *other)
{
	unsigned int index = 0, i;

	WARN_ON(plane == other);

	for (i = 0; i < 3; i++) {
		if (i == plane->index)
			continue;

		if (i == other->index)
			break;

		index++;
	}

	return index;
}

static void tegra_plane_update_transparency(struct tegra_plane *tegra,
					    struct tegra_plane_state *state)
{
	struct drm_plane_state *new;
	struct drm_plane *plane;
	unsigned int i;

	for_each_new_plane_in_state(state->base.state, plane, new, i) {
		struct tegra_plane *p = to_tegra_plane(plane);
		unsigned index;

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		index = tegra_plane_get_overlap_index(tegra, p);

		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
			state->blending[index].alpha = true;
		else
			state->blending[index].alpha = false;

		if (new->normalized_zpos > state->base.normalized_zpos)
			state->blending[index].top = true;
		else
			state->blending[index].top = false;

		/*
		 * A missing framebuffer means that the plane is disabled. In
		 * that case, mark the B / C window as top so that the window
		 * index order relative to the zpos can still be determined
		 * when programming the X / Y registers of the middle window.
		 */
		if (!new->fb)
			state->blending[index].top = (index == 1);
	}
}

static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct tegra_plane_state *tegra_state;
	struct drm_plane_state *new;
	struct drm_plane *plane;
	int err;

	/*
	 * If the plane's zpos / transparency changed, the blending state of
	 * the sibling planes may require adjustment, in which case they are
	 * included in this atomic commit; otherwise the blending state is
	 * unchanged.
	 */
	err = tegra_plane_check_transparency(tegra, state);
	if (err <= 0)
		return err;

	/*
	 * All planes are now part of the atomic state, so walk through them
	 * and update the transparency state of each one.
	 */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip planes on different CRTCs */
		if (p->dc != tegra->dc)
			continue;

		new = drm_atomic_get_new_plane_state(state->base.state, plane);
		tegra_state = to_tegra_plane_state(new);

		/*
		 * There is no need to update the blending state of a
		 * disabled plane.
		 */
		if (new->fb)
			tegra_plane_update_transparency(p, tegra_state);
	}

	return 0;
}

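/*
 * Combined helper that sets up the opacity emulation and the inter-plane
 * blending state for the legacy blending code.
 */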
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
				   struct tegra_plane_state *state)
{
	int err;

	err = tegra_plane_setup_opacity(tegra, state);
	if (err < 0)
		return err;

	err = tegra_plane_setup_transparency(tegra, state);
	if (err < 0)
		return err;

	return 0;
}