xref: /linux/drivers/gpu/drm/tegra/plane.c (revision e058a84bfddc42ba356a2316f2cf1141974625c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/iommu.h>
7 
8 #include <drm/drm_atomic.h>
9 #include <drm/drm_atomic_helper.h>
10 #include <drm/drm_fourcc.h>
11 #include <drm/drm_gem_atomic_helper.h>
12 #include <drm/drm_plane_helper.h>
13 
14 #include "dc.h"
15 #include "plane.h"
16 
/* Free a Tegra plane and detach it from the DRM core. */
static void tegra_plane_destroy(struct drm_plane *plane)
{
	struct tegra_plane *tegra = to_tegra_plane(plane);

	/* unregister from DRM first, then release the containing object */
	drm_plane_cleanup(plane);
	kfree(tegra);
}
24 
25 static void tegra_plane_reset(struct drm_plane *plane)
26 {
27 	struct tegra_plane *p = to_tegra_plane(plane);
28 	struct tegra_plane_state *state;
29 	unsigned int i;
30 
31 	if (plane->state)
32 		__drm_atomic_helper_plane_destroy_state(plane->state);
33 
34 	kfree(plane->state);
35 	plane->state = NULL;
36 
37 	state = kzalloc(sizeof(*state), GFP_KERNEL);
38 	if (state) {
39 		plane->state = &state->base;
40 		plane->state->plane = plane;
41 		plane->state->zpos = p->index;
42 		plane->state->normalized_zpos = p->index;
43 
44 		for (i = 0; i < 3; i++)
45 			state->iova[i] = DMA_MAPPING_ERROR;
46 	}
47 }
48 
49 static struct drm_plane_state *
50 tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
51 {
52 	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
53 	struct tegra_plane_state *copy;
54 	unsigned int i;
55 
56 	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
57 	if (!copy)
58 		return NULL;
59 
60 	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
61 	copy->tiling = state->tiling;
62 	copy->format = state->format;
63 	copy->swap = state->swap;
64 	copy->reflect_x = state->reflect_x;
65 	copy->reflect_y = state->reflect_y;
66 	copy->opaque = state->opaque;
67 
68 	for (i = 0; i < 2; i++)
69 		copy->blending[i] = state->blending[i];
70 
71 	for (i = 0; i < 3; i++) {
72 		copy->iova[i] = DMA_MAPPING_ERROR;
73 		copy->sgt[i] = NULL;
74 	}
75 
76 	return &copy->base;
77 }
78 
/*
 * Release a plane state previously allocated by tegra_plane_reset() or
 * tegra_plane_atomic_duplicate_state().
 */
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}
85 
86 static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
87 {
88 	struct drm_crtc *crtc;
89 
90 	drm_for_each_crtc(crtc, plane->dev) {
91 		if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
92 			struct tegra_dc *dc = to_tegra_dc(crtc);
93 
94 			if (!dc->soc->supports_sector_layout)
95 				return false;
96 		}
97 	}
98 
99 	return true;
100 }
101 
102 static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
103 					     uint32_t format,
104 					     uint64_t modifier)
105 {
106 	const struct drm_format_info *info = drm_format_info(format);
107 
108 	if (modifier == DRM_FORMAT_MOD_LINEAR)
109 		return true;
110 
111 	/* check for the sector layout bit */
112 	if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
113 		if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
114 			if (!tegra_plane_supports_sector_layout(plane))
115 				return false;
116 		}
117 	}
118 
119 	if (info->num_planes == 1)
120 		return true;
121 
122 	return false;
123 }
124 
/*
 * Plane functions shared by all Tegra display-controller plane types.
 * Update/disable go through the generic atomic helpers; state management
 * uses the tegra_plane_state wrappers defined above.
 */
const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};
134 
/*
 * Pin every framebuffer plane referenced by @state so the display
 * controller can scan it out, storing the resulting I/O virtual (or
 * physical) address in state->iova[].
 *
 * Returns 0 on success or a negative error code. On failure, all planes
 * pinned before the failing one are unpinned again.
 */
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		/*
		 * If we're not attached to a domain, we already stored the
		 * physical address when the buffer was allocated. If we're
		 * part of a group that's shared between all display
		 * controllers, we've also already mapped the framebuffer
		 * through the SMMU. In both cases we can short-circuit the
		 * code below and retrieve the stored IOV address.
		 */
		if (!domain || dc->client.group)
			phys = &phys_addr;
		else
			phys = NULL;

		/*
		 * NOTE(review): a non-NULL phys appears to make
		 * host1x_bo_pin() return the stored address instead of an SG
		 * table (sgt == NULL below) — confirm against host1x.
		 */
		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
			if (err)
				goto unpin;

			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (sgt->nents > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = sg_dma_address(sgt->sgl);
			state->sgt[i] = sgt;
		} else {
			state->iova[i] = phys_addr;
		}
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

	/* unwind: release every plane pinned before the failure */
	while (i--) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct sg_table *sgt = state->sgt[i];

		if (sgt)
			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);

		host1x_bo_unpin(dc->dev, &bo->base, sgt);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->sgt[i] = NULL;
	}

	return err;
}
207 
208 static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
209 {
210 	unsigned int i;
211 
212 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
213 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
214 		struct sg_table *sgt = state->sgt[i];
215 
216 		if (sgt)
217 			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
218 
219 		host1x_bo_unpin(dc->dev, &bo->base, sgt);
220 		state->iova[i] = DMA_MAPPING_ERROR;
221 		state->sgt[i] = NULL;
222 	}
223 }
224 
225 int tegra_plane_prepare_fb(struct drm_plane *plane,
226 			   struct drm_plane_state *state)
227 {
228 	struct tegra_dc *dc = to_tegra_dc(state->crtc);
229 
230 	if (!state->fb)
231 		return 0;
232 
233 	drm_gem_plane_helper_prepare_fb(plane, state);
234 
235 	return tegra_dc_pin(dc, to_tegra_plane_state(state));
236 }
237 
238 void tegra_plane_cleanup_fb(struct drm_plane *plane,
239 			    struct drm_plane_state *state)
240 {
241 	struct tegra_dc *dc = to_tegra_dc(state->crtc);
242 
243 	if (dc)
244 		tegra_dc_unpin(dc, to_tegra_plane_state(state));
245 }
246 
247 int tegra_plane_state_add(struct tegra_plane *plane,
248 			  struct drm_plane_state *state)
249 {
250 	struct drm_crtc_state *crtc_state;
251 	struct tegra_dc_state *tegra;
252 	int err;
253 
254 	/* Propagate errors from allocation or locking failures. */
255 	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
256 	if (IS_ERR(crtc_state))
257 		return PTR_ERR(crtc_state);
258 
259 	/* Check plane state for visibility and calculate clipping bounds */
260 	err = drm_atomic_helper_check_plane_state(state, crtc_state,
261 						  0, INT_MAX, true, true);
262 	if (err < 0)
263 		return err;
264 
265 	tegra = to_dc_state(crtc_state);
266 
267 	tegra->planes |= WIN_A_ACT_REQ << plane->index;
268 
269 	return 0;
270 }
271 
272 int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
273 {
274 	/* assume no swapping of fetched data */
275 	if (swap)
276 		*swap = BYTE_SWAP_NOSWAP;
277 
278 	switch (fourcc) {
279 	case DRM_FORMAT_ARGB4444:
280 		*format = WIN_COLOR_DEPTH_B4G4R4A4;
281 		break;
282 
283 	case DRM_FORMAT_ARGB1555:
284 		*format = WIN_COLOR_DEPTH_B5G5R5A1;
285 		break;
286 
287 	case DRM_FORMAT_RGB565:
288 		*format = WIN_COLOR_DEPTH_B5G6R5;
289 		break;
290 
291 	case DRM_FORMAT_RGBA5551:
292 		*format = WIN_COLOR_DEPTH_A1B5G5R5;
293 		break;
294 
295 	case DRM_FORMAT_ARGB8888:
296 		*format = WIN_COLOR_DEPTH_B8G8R8A8;
297 		break;
298 
299 	case DRM_FORMAT_ABGR8888:
300 		*format = WIN_COLOR_DEPTH_R8G8B8A8;
301 		break;
302 
303 	case DRM_FORMAT_ABGR4444:
304 		*format = WIN_COLOR_DEPTH_R4G4B4A4;
305 		break;
306 
307 	case DRM_FORMAT_ABGR1555:
308 		*format = WIN_COLOR_DEPTH_R5G5B5A;
309 		break;
310 
311 	case DRM_FORMAT_BGRA5551:
312 		*format = WIN_COLOR_DEPTH_AR5G5B5;
313 		break;
314 
315 	case DRM_FORMAT_XRGB1555:
316 		*format = WIN_COLOR_DEPTH_B5G5R5X1;
317 		break;
318 
319 	case DRM_FORMAT_RGBX5551:
320 		*format = WIN_COLOR_DEPTH_X1B5G5R5;
321 		break;
322 
323 	case DRM_FORMAT_XBGR1555:
324 		*format = WIN_COLOR_DEPTH_R5G5B5X1;
325 		break;
326 
327 	case DRM_FORMAT_BGRX5551:
328 		*format = WIN_COLOR_DEPTH_X1R5G5B5;
329 		break;
330 
331 	case DRM_FORMAT_BGR565:
332 		*format = WIN_COLOR_DEPTH_R5G6B5;
333 		break;
334 
335 	case DRM_FORMAT_BGRA8888:
336 		*format = WIN_COLOR_DEPTH_A8R8G8B8;
337 		break;
338 
339 	case DRM_FORMAT_RGBA8888:
340 		*format = WIN_COLOR_DEPTH_A8B8G8R8;
341 		break;
342 
343 	case DRM_FORMAT_XRGB8888:
344 		*format = WIN_COLOR_DEPTH_B8G8R8X8;
345 		break;
346 
347 	case DRM_FORMAT_XBGR8888:
348 		*format = WIN_COLOR_DEPTH_R8G8B8X8;
349 		break;
350 
351 	case DRM_FORMAT_UYVY:
352 		*format = WIN_COLOR_DEPTH_YCbCr422;
353 		break;
354 
355 	case DRM_FORMAT_YUYV:
356 		if (!swap)
357 			return -EINVAL;
358 
359 		*format = WIN_COLOR_DEPTH_YCbCr422;
360 		*swap = BYTE_SWAP_SWAP2;
361 		break;
362 
363 	case DRM_FORMAT_YUV420:
364 		*format = WIN_COLOR_DEPTH_YCbCr420P;
365 		break;
366 
367 	case DRM_FORMAT_YUV422:
368 		*format = WIN_COLOR_DEPTH_YCbCr422P;
369 		break;
370 
371 	default:
372 		return -EINVAL;
373 	}
374 
375 	return 0;
376 }
377 
378 bool tegra_plane_format_is_indexed(unsigned int format)
379 {
380 	switch (format) {
381 	case WIN_COLOR_DEPTH_P1:
382 	case WIN_COLOR_DEPTH_P2:
383 	case WIN_COLOR_DEPTH_P4:
384 	case WIN_COLOR_DEPTH_P8:
385 		return true;
386 	}
387 
388 	return false;
389 }
390 
391 bool tegra_plane_format_is_yuv(unsigned int format, bool *planar, unsigned int *bpc)
392 {
393 	switch (format) {
394 	case WIN_COLOR_DEPTH_YCbCr422:
395 	case WIN_COLOR_DEPTH_YUV422:
396 		if (planar)
397 			*planar = false;
398 
399 		if (bpc)
400 			*bpc = 8;
401 
402 		return true;
403 
404 	case WIN_COLOR_DEPTH_YCbCr420P:
405 	case WIN_COLOR_DEPTH_YUV420P:
406 	case WIN_COLOR_DEPTH_YCbCr422P:
407 	case WIN_COLOR_DEPTH_YUV422P:
408 	case WIN_COLOR_DEPTH_YCbCr422R:
409 	case WIN_COLOR_DEPTH_YUV422R:
410 	case WIN_COLOR_DEPTH_YCbCr422RA:
411 	case WIN_COLOR_DEPTH_YUV422RA:
412 		if (planar)
413 			*planar = true;
414 
415 		if (bpc)
416 			*bpc = 8;
417 
418 		return true;
419 	}
420 
421 	if (planar)
422 		*planar = false;
423 
424 	return false;
425 }
426 
427 static bool __drm_format_has_alpha(u32 format)
428 {
429 	switch (format) {
430 	case DRM_FORMAT_ARGB1555:
431 	case DRM_FORMAT_RGBA5551:
432 	case DRM_FORMAT_ABGR8888:
433 	case DRM_FORMAT_ARGB8888:
434 		return true;
435 	}
436 
437 	return false;
438 }
439 
440 static int tegra_plane_format_get_alpha(unsigned int opaque,
441 					unsigned int *alpha)
442 {
443 	if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
444 		*alpha = opaque;
445 		return 0;
446 	}
447 
448 	switch (opaque) {
449 	case WIN_COLOR_DEPTH_B5G5R5X1:
450 		*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
451 		return 0;
452 
453 	case WIN_COLOR_DEPTH_X1B5G5R5:
454 		*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
455 		return 0;
456 
457 	case WIN_COLOR_DEPTH_R8G8B8X8:
458 		*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
459 		return 0;
460 
461 	case WIN_COLOR_DEPTH_B8G8R8X8:
462 		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
463 		return 0;
464 
465 	case WIN_COLOR_DEPTH_B5G6R5:
466 		*alpha = opaque;
467 		return 0;
468 	}
469 
470 	return -EINVAL;
471 }
472 
473 /*
474  * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
475  * be emulated using the alpha formats and alpha blending disabled.
476  */
477 static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
478 				     struct tegra_plane_state *state)
479 {
480 	unsigned int format;
481 	int err;
482 
483 	switch (state->format) {
484 	case WIN_COLOR_DEPTH_B5G5R5A1:
485 	case WIN_COLOR_DEPTH_A1B5G5R5:
486 	case WIN_COLOR_DEPTH_R8G8B8A8:
487 	case WIN_COLOR_DEPTH_B8G8R8A8:
488 		state->opaque = false;
489 		break;
490 
491 	default:
492 		err = tegra_plane_format_get_alpha(state->format, &format);
493 		if (err < 0)
494 			return err;
495 
496 		state->format = format;
497 		state->opaque = true;
498 		break;
499 	}
500 
501 	return 0;
502 }
503 
/*
 * Check whether this plane's zpos or opacity changed relative to the old
 * atomic state.
 *
 * Returns 0 if nothing changed (sibling planes need no update), 1 if all
 * sibling planes on the same display controller were pulled into the
 * commit so their blending state can be recomputed, or a negative error
 * code on failure.
 */
static int tegra_plane_check_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct drm_plane_state *old, *plane_state;
	struct drm_plane *plane;

	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);

	/* check if zpos / transparency changed */
	if (old->normalized_zpos == state->base.normalized_zpos &&
	    to_tegra_plane_state(old)->opaque == state->opaque)
		return 0;

	/* include all sibling planes into this commit */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		/* acquiring the state adds the plane to the commit */
		plane_state = drm_atomic_get_plane_state(state->base.state,
							 plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 1;
}
533 
534 static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
535 						  struct tegra_plane *other)
536 {
537 	unsigned int index = 0, i;
538 
539 	WARN_ON(plane == other);
540 
541 	for (i = 0; i < 3; i++) {
542 		if (i == plane->index)
543 			continue;
544 
545 		if (i == other->index)
546 			break;
547 
548 		index++;
549 	}
550 
551 	return index;
552 }
553 
/*
 * Recompute @state's per-overlap blending entries against every other
 * plane in the new atomic state that shares the same display controller:
 * whether the sibling's format has alpha and whether it sits above this
 * plane in zpos order.
 */
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
					    struct tegra_plane_state *state)
{
	struct drm_plane_state *new;
	struct drm_plane *plane;
	unsigned int i;

	for_each_new_plane_in_state(state->base.state, plane, new, i) {
		struct tegra_plane *p = to_tegra_plane(plane);
		unsigned index;

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		index = tegra_plane_get_overlap_index(tegra, p);

		/* does the overlapping plane's framebuffer carry alpha? */
		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
			state->blending[index].alpha = true;
		else
			state->blending[index].alpha = false;

		/* is the overlapping plane stacked above this one? */
		if (new->normalized_zpos > state->base.normalized_zpos)
			state->blending[index].top = true;
		else
			state->blending[index].top = false;

		/*
		 * Missing framebuffer means that plane is disabled, in this
		 * case mark B / C window as top to be able to differentiate
		 * windows indices order in regards to zPos for the middle
		 * window X / Y registers programming.
		 */
		if (!new->fb)
			state->blending[index].top = (index == 1);
	}
}
591 
/*
 * Update the blending state of this plane and all of its enabled siblings
 * on the same display controller, but only when a zpos or transparency
 * change actually requires it.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
					  struct tegra_plane_state *state)
{
	struct tegra_plane_state *tegra_state;
	struct drm_plane_state *new;
	struct drm_plane *plane;
	int err;

	/*
	 * If planes zpos / transparency changed, sibling planes blending
	 * state may require adjustment and in this case they will be included
	 * into this atom commit, otherwise blending state is unchanged.
	 */
	err = tegra_plane_check_transparency(tegra, state);
	if (err <= 0)
		return err;

	/*
	 * All planes are now in the atomic state, walk them up and update
	 * transparency state for each plane.
	 */
	drm_for_each_plane(plane, tegra->base.dev) {
		struct tegra_plane *p = to_tegra_plane(plane);

		/* skip planes on different CRTCs */
		if (p->dc != tegra->dc)
			continue;

		new = drm_atomic_get_new_plane_state(state->base.state, plane);
		tegra_state = to_tegra_plane_state(new);

		/*
		 * There is no need to update blending state for the disabled
		 * plane.
		 */
		if (new->fb)
			tegra_plane_update_transparency(p, tegra_state);
	}

	return 0;
}
633 
/*
 * Derive the legacy (Tegra20/Tegra30) plane state: emulate opaque formats
 * via their alpha variants and refresh inter-plane blending state.
 *
 * Returns 0 on success or a negative error code.
 */
int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
				   struct tegra_plane_state *state)
{
	int err;

	err = tegra_plane_setup_opacity(tegra, state);
	if (err < 0)
		return err;

	err = tegra_plane_setup_transparency(tegra, state);
	if (err < 0)
		return err;

	return 0;
}
649