// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "dal_asic_id.h"
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal DRM check will succeed, and let DC implement the proper checks.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XRGB16161616,
	DRM_FORMAT_XBGR16161616,
	DRM_FORMAT_ARGB16161616,
	DRM_FORMAT_ABGR16161616,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const uint32_t video_formats[] = {
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV12,
	DRM_FORMAT_P010
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

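/*
 * Thin wrapper around amdgpu_lookup_format_info() so DM callers can resolve
 * the drm_format_info for a pixel-format/modifier pair in one place.
 */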
const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
	return amdgpu_lookup_format_info(pixel_format, modifier);
}

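/*
 * Derive DC blending parameters from a DRM plane state: per-pixel alpha is
 * only enabled for the PREMULTI/COVERAGE blend modes when the fb format
 * actually carries an alpha channel, pre-multiplied alpha is cleared for
 * COVERAGE (straight alpha), and the 16-bit DRM plane alpha is scaled down
 * to the 8-bit global alpha value DC expects whenever it is below 0xffff.
 */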
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
			       bool *global_alpha, int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*pre_multiplied_alpha = true;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
			DRM_FORMAT_ARGB2101010,
			DRM_FORMAT_ABGR2101010,
			DRM_FORMAT_ARGB16161616,
			DRM_FORMAT_ABGR16161616,
			DRM_FORMAT_ARGB16161616F,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}

		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
			*pre_multiplied_alpha = false;
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

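/*
 * Append one modifier to a kmalloc'ed array, doubling its capacity when it
 * is full. On allocation failure the array is freed and *mods set to NULL;
 * callers check for that and report -ENOMEM once the list is complete.
 */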
static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
							     uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		tiling_info->gfxversion = DcGfxVersion8;
		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
							      struct dc_tiling_info *tiling_info)
{
	/* Fill GFX9 params */
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
								struct dc_tiling_info *tiling_info,
								uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2;

	pipes_log2 = min(5u, mod_pipe_xor_bits);

	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

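/*
 * Ask DC whether the DCC configuration implied by the modifier is usable for
 * this format/size/swizzle combination. Returns 0 if DCC is disabled or
 * supported, -EINVAL otherwise; pre-GFX12 video formats never support DCC.
 */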
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
					const enum surface_pixel_format format,
					const enum dc_rotation_angle rotation,
					const struct dc_tiling_info *tiling_info,
					const struct dc_plane_dcc_param *dcc,
					const struct dc_plane_address *address,
					const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
	    format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return -EINVAL;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

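/*
 * Decode an AMD format modifier into GFX9+ tiling info and DCC parameters.
 * When the modifier carries DCC, the metadata plane sits at fb plane index 1,
 * so its address and pitch come from offsets[1]/pitches[1].
 */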
static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								     const struct amdgpu_framebuffer *afb,
								     const enum surface_pixel_format format,
								     const enum dc_rotation_angle rotation,
								     const struct plane_size *plane_size,
								     struct dc_tiling_info *tiling_info,
								     struct dc_plane_dcc_param *dcc,
								     struct dc_plane_address *address)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
	tiling_info->gfxversion = DcGfxVersion9;

	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);

	return ret;
}

static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								      const struct amdgpu_framebuffer *afb,
								      const enum surface_pixel_format format,
								      const enum dc_rotation_angle rotation,
								      const struct plane_size *plane_size,
								      struct dc_tiling_info *tiling_info,
								      struct dc_plane_dcc_param *dcc,
								      struct dc_plane_address *address)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	/* TODO: Most of this function shouldn't be needed on GFX12. */
	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);

	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
	tiling_info->gfxversion = DcGfxAddr3;

	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
		int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);

		dcc->enable = 1;
		dcc->independent_64b_blks = max_compressed_block == 0;

		if (max_compressed_block == 0)
			dcc->dcc_ind_blk = hubp_ind_block_64b;
		else if (max_compressed_block == 1)
			dcc->dcc_ind_blk = hubp_ind_block_128b;
		else
			dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
	}

	/* TODO: This seems wrong because there is no DCC plane on GFX12. */
	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);

	return ret;
}

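/*
 * The amdgpu_dm_plane_add_gfx*_modifiers() helpers below build the per-family
 * modifier lists. They are kept best-first: DCC-capable swizzle modes are
 * added before plain ones, and the caller appends LINEAR plus the
 * DRM_FORMAT_MOD_INVALID terminator at the very end.
 */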
static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
					       uint64_t **mods,
					       uint64_t *size,
					       uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
						     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
						     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
						     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
						     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
						     AMD_FMT_MOD_SET(DCC, 1) |
						     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
						     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
						     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
					     AMD_FMT_MOD_SET(DCC, 1) |
					     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
					     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
					     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
						     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
						     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
						     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
						     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
						     AMD_FMT_MOD_SET(DCC, 1) |
						     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
						     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
						     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
						     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
						     AMD_FMT_MOD_SET(RB, rb) |
						     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
					     AMD_FMT_MOD_SET(DCC, 1) |
					     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
					     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
					     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
					     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
					     AMD_FMT_MOD_SET(RB, rb) |
					     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * amdgpu_dm_plane_format_mod_supported.
	 */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * amdgpu_dm_plane_format_mod_supported.
	 */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

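/*
 * GFX11 keeps the best-first ordering as well: both 256K and 64K R_X swizzles
 * are emitted, with the preferred one (256K when num_pipes > 16) inserted on
 * the first loop iteration.
 */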
static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int num_pipes = 0;
	int pipe_xor_bits = 0;
	int num_pkrs = 0;
	int pkrs = 0;
	u32 gb_addr_config;
	u8 i = 0;
	unsigned int swizzle_r_x;
	uint64_t modifier_r_x;
	uint64_t modifier_dcc_best;
	uint64_t modifier_dcc_4k;

	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read
	 * from adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
	 */
	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	ASSERT(gb_addr_config != 0);

	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
	pkrs = ilog2(num_pkrs);
	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
	pipe_xor_bits = ilog2(num_pipes);

	for (i = 0; i < 2; i++) {
		/* Insert the best one first. */
		/* R_X swizzle modes are the best for rendering and DCC requires them. */
		if (num_pipes > 16)
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
		else
			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;

		modifier_r_x = AMD_FMT_MOD |
			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
			       AMD_FMT_MOD_SET(PACKERS, pkrs);

		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);

		/* DCC settings for 4K and greater resolutions. (required by display hw) */
		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));

		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
	}

	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}

static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
	uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
	uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
	uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
	uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
	uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
	uint8_t max_comp_block[] = {2, 1, 0};
	uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
	uint8_t i = 0, j = 0;
	uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};

	for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
		max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);

	/* With DCC: the best choice should be kept first. Hence, add all 256k
	 * modifiers with the different max compressed block sizes first, then
	 * move on to the next smaller sized layouts. The linear modifier is
	 * not added here, hence the loop stops at ARRAY_SIZE - 1.
	 */
	for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++)
		for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
			amdgpu_dm_plane_add_modifier(mods, size, capacity,
						     ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);

	/* Without DCC: add all modifiers, including linear, at the end. */
	for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
		amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
}

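/*
 * Build the modifier list advertised for one plane. Cursor planes only get
 * LINEAR; everything else gets the per-family list, followed by LINEAR and
 * the DRM_FORMAT_MOD_INVALID terminator that drm_universal_plane_init()
 * expects at the end of a format_modifiers array.
 */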
static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;

	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
			amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_1:
	case AMDGPU_FAMILY_GC_11_5_0:
		amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_12_0_0:
		amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
		break;
	}

	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

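/*
 * Fill @formats with up to @max_formats pixel formats for this plane type.
 * Primary planes and DCN universal planes get the RGB table plus whatever
 * NV12/P010/fp16 support the DC plane caps report; overlay and cursor planes
 * use their fixed tables. Returns the number of formats written.
 */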
static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
					     const struct dc_plane_cap *plane_cap,
					     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
		(plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
	} else {
		switch (plane->type) {
		case DRM_PLANE_TYPE_OVERLAY:
			for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
				if (num_formats >= max_formats)
					break;

				formats[num_formats++] = overlay_formats[i];
			}
			break;

		case DRM_PLANE_TYPE_CURSOR:
			for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
				if (num_formats >= max_formats)
					break;

				formats[num_formats++] = cursor_formats[i];
			}
			break;

		default:
			break;
		}
	}

	return num_formats;
}

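/*
 * Translate an amdgpu framebuffer into the DC surface description: plane
 * size, tiling info, DCC parameters and GPU addresses. RGB surfaces use a
 * single GRAPHICS address; video formats get separate luma/chroma addresses
 * from fb offsets[0]/offsets[1]. Tiling is decoded per family: modifiers on
 * GFX9+ and GFX12, legacy tiling flags on GFX8 and older.
 */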
int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     struct dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
		ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
										 rotation, plane_size,
										 tiling_info, dcc,
										 address);
		if (ret)
			return ret;
	} else if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
										rotation, plane_size,
										tiling_info, dcc,
										address);
		if (ret)
			return ret;
	} else {
		amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

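/*
 * prepare_fb hook: pin the BO backing the new framebuffer (VRAM for cursors,
 * any display-supported domain otherwise), bind it into GART, record the GPU
 * address in the amdgpu framebuffer and let the GEM helper attach implicit
 * fences. The pin is dropped again in the cleanup_fb hook below.
 */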
static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
					     struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = drm_gem_fb_get_obj(new_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return -EINVAL;
	}

	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		drm_err(adev_to_drm(adev), "failed to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (unlikely(r != 0))
		goto error_unpin;

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;

		amdgpu_dm_plane_fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			afb->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			afb->tmz_surface);
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
					      struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use
	 * a scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

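/*
 * Check a plane state against the CRTC it is on: the visible viewport must
 * be at least MIN_VIEWPORT_SIZE high and twice that wide (because of
 * pipe-split), and DC's 1000-based scaling caps are converted into the 16.16
 * fixed-point limits that drm_atomic_helper_check_plane_state() takes.
 */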
int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	struct drm_framebuffer *fb = state->fb;
	int min_downscale, max_upscale;
	int min_scale = 0;
	int max_scale = INT_MAX;

	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
	if (fb && state->crtc) {
		/* Validate viewport to cover the case when only the position changes */
		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
			int viewport_width = state->crtc_w;
			int viewport_height = state->crtc_h;

			if (state->crtc_x < 0)
				viewport_width += state->crtc_x;
			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

			if (state->crtc_y < 0)
				viewport_height += state->crtc_y;
			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

			if (viewport_width < 0 || viewport_height < 0) {
				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width is because of pipe-split. */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
				return -EINVAL;
			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
				return -EINVAL;
			}
		}

		/* Get min/max allowed scaling factors from plane caps. */
		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
							     &min_downscale, &max_upscale);
		/*
		 * Convert to drm convention: 16.16 fixed point, instead of dc's
		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
		 */
		min_scale = (1000 << 16) / max_upscale;
		max_scale = (1000 << 16) / min_downscale;
	}

	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, min_scale, max_scale, true, true);
}

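/*
 * Convert the plane state's 16.16 fixed-point source rectangle and integer
 * CRTC destination rectangle into a dc_scaling_info, rejecting degenerate
 * rectangles and scaling ratios outside the per-format caps.
 */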
int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is 16.16 fixed point; we ignore the fractional part for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
							     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_plane_atomic_check(new_plane_state);

	dm_plane_state = to_dm_plane_state(new_plane_state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state,
					      new_plane_state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
	if (ret)
		return ret;

	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
					      struct drm_atomic_state *state, bool flip)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane_state *new_plane_state;
	struct dm_crtc_state *dm_new_crtc_state;

	if (flip) {
		if (plane->type != DRM_PLANE_TYPE_OVERLAY)
			return -EINVAL;
	} else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
		return -EINVAL;
	}

	new_plane_state = drm_atomic_get_new_plane_state(state, plane);
	new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

	/* Reject overlay cursors for now */
	if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
		return -EINVAL;

	return 0;
}

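/*
 * Compute the DC cursor position for the current plane state. Negative
 * crtc_x/crtc_y are handled by clamping the position to zero and shifting
 * the hotspot instead, so the cursor can slide off the top/left edge.
 */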
int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
					struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	position->enable = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
		position->translate_by_source = true;

	return 0;
}

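/*
 * Program cursor attributes and position for the current plane state. A
 * position that ends up disabled (no fb, or the cursor fully off screen)
 * turns the hardware cursor off; otherwise address, size and pitch are taken
 * from the amdgpu framebuffer backing the plane.
 */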
amdgpu_dm_plane_handle_cursor_update(struct drm_plane * plane,struct drm_plane_state * old_plane_state)1335 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
1336 				 struct drm_plane_state *old_plane_state)
1337 {
1338 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1339 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1340 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1341 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1342 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1343 	uint64_t address = afb ? afb->address : 0;
1344 	struct dc_cursor_position position = {0};
1345 	struct dc_cursor_attributes attributes;
1346 	int ret;
1347 
1348 	if (!plane->state->fb && !old_plane_state->fb)
1349 		return;
1350 
1351 	drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
1352 		       amdgpu_crtc->crtc_id, plane->state->crtc_w,
1353 		       plane->state->crtc_h);
1354 
1355 	ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
1356 	if (ret)
1357 		return;
1358 
1359 	if (!position.enable) {
1360 		/* turn off cursor */
1361 		if (crtc_state && crtc_state->stream) {
1362 			mutex_lock(&adev->dm.dc_lock);
1363 			dc_stream_program_cursor_position(crtc_state->stream,
1364 						      &position);
1365 			mutex_unlock(&adev->dm.dc_lock);
1366 		}
1367 		return;
1368 	}
1369 
1370 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
1371 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
1372 
1373 	memset(&attributes, 0, sizeof(attributes));
1374 	attributes.address.high_part = upper_32_bits(address);
1375 	attributes.address.low_part  = lower_32_bits(address);
1376 	attributes.width             = plane->state->crtc_w;
1377 	attributes.height            = plane->state->crtc_h;
1378 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1379 	attributes.rotation_angle    = 0;
1380 	attributes.attribute_flags.value = 0;
1381 
1382 	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1383 	 * legacy gamma setup.
1384 	 */
1385 	if (crtc_state->cm_is_degamma_srgb &&
1386 	    adev->dm.dc->caps.color.dpp.gamma_corr)
1387 		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1388 
1389 	if (afb)
1390 		attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1391 
1392 	if (crtc_state->stream) {
1393 		mutex_lock(&adev->dm.dc_lock);
1394 		if (!dc_stream_program_cursor_attributes(crtc_state->stream,
1395 							 &attributes))
1396 			DRM_ERROR("DC failed to set cursor attributes\n");
1397 
1398 		if (!dc_stream_program_cursor_position(crtc_state->stream,
1399 						       &position))
1400 			DRM_ERROR("DC failed to set cursor position\n");
1401 		mutex_unlock(&adev->dm.dc_lock);
1402 	}
1403 }
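
/*
 * Illustrative note (editor's example, not part of this file): the pitch
 * programmed above is in pixels, not bytes.  For a 64x64 ARGB8888 cursor FB
 * with a 256-byte row stride:
 *
 *	// afb->base.pitches[0] == 256, afb->base.format->cpp[0] == 4
 *	attributes.pitch = 256 / 4;	// 64 pixels
 *
 * Note the ordering: attributes are programmed before the position, so DC
 * sees a fully described cursor surface before it is placed on screen.
 */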
1404 
1405 static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
1406 						struct drm_atomic_state *state)
1407 {
1408 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1409 									   plane);
1410 	struct drm_plane_state *old_state =
1411 		drm_atomic_get_old_plane_state(state, plane);
1412 
1413 	trace_amdgpu_dm_atomic_update_cursor(new_state);
1414 
1415 	swap(plane->state->fb, new_state->fb);
1416 
1417 	plane->state->src_x = new_state->src_x;
1418 	plane->state->src_y = new_state->src_y;
1419 	plane->state->src_w = new_state->src_w;
1420 	plane->state->src_h = new_state->src_h;
1421 	plane->state->crtc_x = new_state->crtc_x;
1422 	plane->state->crtc_y = new_state->crtc_y;
1423 	plane->state->crtc_w = new_state->crtc_w;
1424 	plane->state->crtc_h = new_state->crtc_h;
1425 
1426 	amdgpu_dm_plane_handle_cursor_update(plane, old_state);
1427 }
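
/*
 * Illustrative note (editor's example, not part of this file): swap() is
 * used above rather than a plain assignment so framebuffer reference counts
 * stay balanced.  The equivalent explicit form would be:
 *
 *	struct drm_framebuffer *tmp = plane->state->fb;
 *
 *	plane->state->fb = new_state->fb;
 *	new_state->fb = tmp;	// released when new_state is destroyed
 */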
1428 
1429 static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
1430 {
1431 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
1432 	struct drm_framebuffer *fb = plane->state->fb;
1433 	struct dc_plane_state *dc_plane_state;
1434 
1435 	if (!dm_plane_state || !dm_plane_state->dc_state)
1436 		return;
1437 
1438 	dc_plane_state = dm_plane_state->dc_state;
1439 
1440 	dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false);
1441 }
1442 
1443 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1444 	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1445 	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1446 	.atomic_check = amdgpu_dm_plane_atomic_check,
1447 	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1448 	.atomic_async_update = amdgpu_dm_plane_atomic_async_update
1449 };
1450 
1451 static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
1452 	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1453 	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1454 	.atomic_check = amdgpu_dm_plane_atomic_check,
1455 	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1456 	.atomic_async_update = amdgpu_dm_plane_atomic_async_update,
1457 	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
1458 	.panic_flush = amdgpu_dm_plane_panic_flush,
1459 };
1460 
1461 static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
1462 {
1463 	struct dm_plane_state *amdgpu_state = NULL;
1464 
1465 	if (plane->state)
1466 		plane->funcs->atomic_destroy_state(plane, plane->state);
1467 
1468 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
1469 	WARN_ON(amdgpu_state == NULL);
1470 
1471 	if (!amdgpu_state)
1472 		return;
1473 
1474 	__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
1475 	amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1476 	amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
1477 	amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1478 	amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1479 }
1480 
1481 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
1482 {
1483 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1484 
1485 	old_dm_plane_state = to_dm_plane_state(plane->state);
1486 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
1487 	if (!dm_plane_state)
1488 		return NULL;
1489 
1490 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
1491 
1492 	if (old_dm_plane_state->dc_state) {
1493 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1494 		dc_plane_state_retain(dm_plane_state->dc_state);
1495 	}
1496 
1497 	if (old_dm_plane_state->degamma_lut)
1498 		dm_plane_state->degamma_lut =
1499 			drm_property_blob_get(old_dm_plane_state->degamma_lut);
1500 	if (old_dm_plane_state->ctm)
1501 		dm_plane_state->ctm =
1502 			drm_property_blob_get(old_dm_plane_state->ctm);
1503 	if (old_dm_plane_state->shaper_lut)
1504 		dm_plane_state->shaper_lut =
1505 			drm_property_blob_get(old_dm_plane_state->shaper_lut);
1506 	if (old_dm_plane_state->lut3d)
1507 		dm_plane_state->lut3d =
1508 			drm_property_blob_get(old_dm_plane_state->lut3d);
1509 	if (old_dm_plane_state->blend_lut)
1510 		dm_plane_state->blend_lut =
1511 			drm_property_blob_get(old_dm_plane_state->blend_lut);
1512 
1513 	dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
1514 	dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
1515 	dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
1516 	dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
1517 
1518 	return &dm_plane_state->base;
1519 }
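
/*
 * Illustrative note (editor's example, not part of this file): every
 * reference taken in the duplicate above has a matching release in
 * amdgpu_dm_plane_drm_plane_destroy_state() below, e.g. for the CTM blob:
 *
 *	// atomic_duplicate_state:
 *	dm_plane_state->ctm = drm_property_blob_get(old_dm_plane_state->ctm);
 *	// atomic_destroy_state:
 *	drm_property_blob_put(dm_plane_state->ctm);
 *
 * The same pairing holds for dc_state via dc_plane_state_retain() and
 * dc_plane_state_release().
 */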
1520 
1521 static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
1522 						 uint32_t format,
1523 						 uint64_t modifier)
1524 {
1525 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1526 	const struct drm_format_info *info = drm_format_info(format);
1527 	int i;
1528 
1529 	if (!info)
1530 		return false;
1531 
1532 	/*
1533 	 * We always have to allow these modifiers:
1534 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1535 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1536 	 */
1537 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
1538 	    modifier == DRM_FORMAT_MOD_INVALID) {
1539 		return true;
1540 	}
1541 
1542 	/* Check that the modifier is on the list of the plane's supported modifiers. */
1543 	for (i = 0; i < plane->modifier_count; i++) {
1544 		if (modifier == plane->modifiers[i])
1545 			break;
1546 	}
1547 	if (i == plane->modifier_count)
1548 		return false;
1549 
1550 	/* GFX12 doesn't have these limitations. */
1551 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
1552 		enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
1553 
1554 		/*
1555 		 * For D swizzle the canonical modifier depends on the bpp, so check
1556 		 * it here.
1557 		 */
1558 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1559 		    adev->family >= AMDGPU_FAMILY_NV) {
1560 			if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1561 				return false;
1562 		}
1563 
1564 		if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1565 		    info->cpp[0] < 8)
1566 			return false;
1567 
1568 		if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
1569 			/* Per radeonsi comments, 16/64 bpp are more complicated. */
1570 			if (info->cpp[0] != 4)
1571 				return false;
1572 			/* We support multi-planar formats, but not when combined with
1573 			 * additional DCC metadata planes.
1574 			 */
1575 			if (info->num_planes > 1)
1576 				return false;
1577 		}
1578 	}
1579 
1580 	return true;
1581 }
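
/*
 * Illustrative sketch (editor's example, not part of this file): how the
 * checks above resolve for XRGB8888 (cpp[0] == 4):
 *
 *	// Always accepted, even if absent from plane->modifiers:
 *	amdgpu_dm_plane_format_mod_supported(plane, DRM_FORMAT_XRGB8888,
 *					     DRM_FORMAT_MOD_LINEAR);
 *
 *	// A GFX9 modifier with MICRO_SWIZZLE_D is rejected for this 32bpp
 *	// format on AMDGPU_FAMILY_RV and newer by the bpp checks above.
 */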
1582 
1583 static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
1584 						    struct drm_plane_state *state)
1585 {
1586 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1587 
1588 	if (dm_plane_state->degamma_lut)
1589 		drm_property_blob_put(dm_plane_state->degamma_lut);
1590 	if (dm_plane_state->ctm)
1591 		drm_property_blob_put(dm_plane_state->ctm);
1592 	if (dm_plane_state->lut3d)
1593 		drm_property_blob_put(dm_plane_state->lut3d);
1594 	if (dm_plane_state->shaper_lut)
1595 		drm_property_blob_put(dm_plane_state->shaper_lut);
1596 	if (dm_plane_state->blend_lut)
1597 		drm_property_blob_put(dm_plane_state->blend_lut);
1598 
1599 	if (dm_plane_state->dc_state)
1600 		dc_plane_state_release(dm_plane_state->dc_state);
1601 
1602 	drm_atomic_helper_plane_destroy_state(plane, state);
1603 }
1604 
1605 #ifdef AMD_PRIVATE_COLOR
1606 static void
1607 dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
1608 					     struct drm_plane *plane)
1609 {
1610 	struct amdgpu_mode_info mode_info = dm->adev->mode_info;
1611 	struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
1612 
1613 	/* Check HW color pipeline capabilities on DPP block (pre-blending)
1614 	 * before exposing related properties.
1615 	 */
1616 	if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
1617 		drm_object_attach_property(&plane->base,
1618 					   mode_info.plane_degamma_lut_property,
1619 					   0);
1620 		drm_object_attach_property(&plane->base,
1621 					   mode_info.plane_degamma_lut_size_property,
1622 					   MAX_COLOR_LUT_ENTRIES);
1623 		drm_object_attach_property(&plane->base,
1624 					   dm->adev->mode_info.plane_degamma_tf_property,
1625 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1626 	}
1627 	/* HDR MULT is always available */
1628 	drm_object_attach_property(&plane->base,
1629 				   dm->adev->mode_info.plane_hdr_mult_property,
1630 				   AMDGPU_HDR_MULT_DEFAULT);
1631 
1632 	/* Only enable plane CTM if both DPP and MPC gamut remap are available. */
1633 	if (dm->dc->caps.color.mpc.gamut_remap)
1634 		drm_object_attach_property(&plane->base,
1635 					   dm->adev->mode_info.plane_ctm_property, 0);
1636 
1637 	if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
1638 		drm_object_attach_property(&plane->base,
1639 					   mode_info.plane_shaper_lut_property, 0);
1640 		drm_object_attach_property(&plane->base,
1641 					   mode_info.plane_shaper_lut_size_property,
1642 					   MAX_COLOR_LUT_ENTRIES);
1643 		drm_object_attach_property(&plane->base,
1644 					   mode_info.plane_shaper_tf_property,
1645 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1646 		drm_object_attach_property(&plane->base,
1647 					   mode_info.plane_lut3d_property, 0);
1648 		drm_object_attach_property(&plane->base,
1649 					   mode_info.plane_lut3d_size_property,
1650 					   MAX_COLOR_3DLUT_SIZE);
1651 	}
1652 
1653 	if (dpp_color_caps.ogam_ram) {
1654 		drm_object_attach_property(&plane->base,
1655 					   mode_info.plane_blend_lut_property, 0);
1656 		drm_object_attach_property(&plane->base,
1657 					   mode_info.plane_blend_lut_size_property,
1658 					   MAX_COLOR_LUT_ENTRIES);
1659 		drm_object_attach_property(&plane->base,
1660 					   mode_info.plane_blend_tf_property,
1661 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1662 	}
1663 }
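
/*
 * Illustrative note (editor's example, not part of this file): the
 * properties attached above follow the DPP/MPC pre-blending pipeline
 * roughly in this order:
 *
 *	FB -> degamma LUT/TF -> HDR mult -> CTM -> shaper LUT/TF
 *	   -> 3D LUT -> blend LUT/TF -> blending
 *
 * Each property is only attached when the matching block is reported in
 * dc->caps.color, so userspace can treat a property's presence as a
 * capability check.
 */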
1664 
1665 static int
1666 dm_atomic_plane_set_property(struct drm_plane *plane,
1667 			     struct drm_plane_state *state,
1668 			     struct drm_property *property,
1669 			     uint64_t val)
1670 {
1671 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1672 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1673 	bool replaced = false;
1674 	int ret;
1675 
1676 	if (property == adev->mode_info.plane_degamma_lut_property) {
1677 		ret = drm_property_replace_blob_from_id(plane->dev,
1678 							&dm_plane_state->degamma_lut,
1679 							val, -1,
1680 							sizeof(struct drm_color_lut),
1681 							&replaced);
1682 		dm_plane_state->base.color_mgmt_changed |= replaced;
1683 		return ret;
1684 	} else if (property == adev->mode_info.plane_degamma_tf_property) {
1685 		if (dm_plane_state->degamma_tf != val) {
1686 			dm_plane_state->degamma_tf = val;
1687 			dm_plane_state->base.color_mgmt_changed = 1;
1688 		}
1689 	} else if (property == adev->mode_info.plane_hdr_mult_property) {
1690 		if (dm_plane_state->hdr_mult != val) {
1691 			dm_plane_state->hdr_mult = val;
1692 			dm_plane_state->base.color_mgmt_changed = 1;
1693 		}
1694 	} else if (property == adev->mode_info.plane_ctm_property) {
1695 		ret = drm_property_replace_blob_from_id(plane->dev,
1696 							&dm_plane_state->ctm,
1697 							val,
1698 							sizeof(struct drm_color_ctm_3x4), -1,
1699 							&replaced);
1700 		dm_plane_state->base.color_mgmt_changed |= replaced;
1701 		return ret;
1702 	} else if (property == adev->mode_info.plane_shaper_lut_property) {
1703 		ret = drm_property_replace_blob_from_id(plane->dev,
1704 							&dm_plane_state->shaper_lut,
1705 							val, -1,
1706 							sizeof(struct drm_color_lut),
1707 							&replaced);
1708 		dm_plane_state->base.color_mgmt_changed |= replaced;
1709 		return ret;
1710 	} else if (property == adev->mode_info.plane_shaper_tf_property) {
1711 		if (dm_plane_state->shaper_tf != val) {
1712 			dm_plane_state->shaper_tf = val;
1713 			dm_plane_state->base.color_mgmt_changed = 1;
1714 		}
1715 	} else if (property == adev->mode_info.plane_lut3d_property) {
1716 		ret = drm_property_replace_blob_from_id(plane->dev,
1717 							&dm_plane_state->lut3d,
1718 							val, -1,
1719 							sizeof(struct drm_color_lut),
1720 							&replaced);
1721 		dm_plane_state->base.color_mgmt_changed |= replaced;
1722 		return ret;
1723 	} else if (property == adev->mode_info.plane_blend_lut_property) {
1724 		ret = drm_property_replace_blob_from_id(plane->dev,
1725 							&dm_plane_state->blend_lut,
1726 							val, -1,
1727 							sizeof(struct drm_color_lut),
1728 							&replaced);
1729 		dm_plane_state->base.color_mgmt_changed |= replaced;
1730 		return ret;
1731 	} else if (property == adev->mode_info.plane_blend_tf_property) {
1732 		if (dm_plane_state->blend_tf != val) {
1733 			dm_plane_state->blend_tf = val;
1734 			dm_plane_state->base.color_mgmt_changed = 1;
1735 		}
1736 	} else {
1737 		drm_dbg_atomic(plane->dev,
1738 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
1739 			       plane->base.id, plane->name,
1740 			       property->base.id, property->name);
1741 		return -EINVAL;
1742 	}
1743 
1744 	return 0;
1745 }
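
/*
 * Illustrative note (editor's example, not part of this file): in
 * drm_property_replace_blob_from_id() the two size arguments are the
 * expected total size and the expected element size; -1 skips the check.
 * LUTs have a variable entry count, so only the element size is validated:
 *
 *	drm_property_replace_blob_from_id(plane->dev, &blob, val,
 *					  -1,			// any total size
 *					  sizeof(struct drm_color_lut),
 *					  &replaced);
 *
 * The CTM is the opposite: it must be exactly one struct drm_color_ctm_3x4,
 * so the total size is checked and the element size is not.
 */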
1746 
1747 static int
1748 dm_atomic_plane_get_property(struct drm_plane *plane,
1749 			     const struct drm_plane_state *state,
1750 			     struct drm_property *property,
1751 			     uint64_t *val)
1752 {
1753 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1754 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1755 
1756 	if (property == adev->mode_info.plane_degamma_lut_property) {
1757 		*val = (dm_plane_state->degamma_lut) ?
1758 			dm_plane_state->degamma_lut->base.id : 0;
1759 	} else if (property == adev->mode_info.plane_degamma_tf_property) {
1760 		*val = dm_plane_state->degamma_tf;
1761 	} else if (property == adev->mode_info.plane_hdr_mult_property) {
1762 		*val = dm_plane_state->hdr_mult;
1763 	} else if (property == adev->mode_info.plane_ctm_property) {
1764 		*val = (dm_plane_state->ctm) ?
1765 			dm_plane_state->ctm->base.id : 0;
1766 	} else if (property == adev->mode_info.plane_shaper_lut_property) {
1767 		*val = (dm_plane_state->shaper_lut) ?
1768 			dm_plane_state->shaper_lut->base.id : 0;
1769 	} else if (property == adev->mode_info.plane_shaper_tf_property) {
1770 		*val = dm_plane_state->shaper_tf;
1771 	} else if (property == adev->mode_info.plane_lut3d_property) {
1772 		*val = (dm_plane_state->lut3d) ?
1773 			dm_plane_state->lut3d->base.id : 0;
1774 	} else if (property == adev->mode_info.plane_blend_lut_property) {
1775 		*val = (dm_plane_state->blend_lut) ?
1776 			dm_plane_state->blend_lut->base.id : 0;
1777 	} else if (property == adev->mode_info.plane_blend_tf_property) {
1778 		*val = dm_plane_state->blend_tf;
1779 
1780 	} else {
1781 		return -EINVAL;
1782 	}
1783 
1784 	return 0;
1785 }
1786 #else
1787 
1788 #define MAX_COLOR_PIPELINES 5
1789 
1790 static int
1791 dm_plane_init_colorops(struct drm_plane *plane)
1792 {
1793 	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
1794 	struct drm_device *dev = plane->dev;
1795 	struct amdgpu_device *adev = drm_to_adev(dev);
1796 	struct dc *dc = adev->dm.dc;
1797 	int len = 0;
1798 	int ret;
1799 
1800 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
1801 		return 0;
1802 
1803 	/* initialize pipeline */
1804 	if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
1805 		ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
1806 		if (ret) {
1807 			drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
1808 				plane->base.id, ret);
1809 			return ret;
1810 		}
1811 		len++;
1812 
1813 		/* Create the COLOR_PIPELINE property and attach it */
1814 		drm_plane_create_color_pipeline_property(plane, pipelines, len);
1815 	}
1816 
1817 	return 0;
1818 }
1819 #endif
1820 
1821 static const struct drm_plane_funcs dm_plane_funcs = {
1822 	.update_plane	= drm_atomic_helper_update_plane,
1823 	.disable_plane	= drm_atomic_helper_disable_plane,
1824 	.destroy	= drm_plane_helper_destroy,
1825 	.reset = amdgpu_dm_plane_drm_plane_reset,
1826 	.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
1827 	.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
1828 	.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
1829 #ifdef AMD_PRIVATE_COLOR
1830 	.atomic_set_property = dm_atomic_plane_set_property,
1831 	.atomic_get_property = dm_atomic_plane_get_property,
1832 #endif
1833 };
1834 
1835 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1836 				struct drm_plane *plane,
1837 				unsigned long possible_crtcs,
1838 				const struct dc_plane_cap *plane_cap)
1839 {
1840 	uint32_t formats[32];
1841 	int num_formats;
1842 	int res = -EPERM;
1843 	unsigned int supported_rotations;
1844 	uint64_t *modifiers = NULL;
1845 	unsigned int primary_zpos = dm->dc->caps.max_slave_planes;
1846 
1847 	num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
1848 							ARRAY_SIZE(formats));
1849 
1850 	res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
1851 	if (res)
1852 		return res;
1853 
1854 	if (modifiers == NULL)
1855 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
1856 
1857 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
1858 				       &dm_plane_funcs, formats, num_formats,
1859 				       modifiers, plane->type, NULL);
1860 	kfree(modifiers);
1861 	if (res)
1862 		return res;
1863 
1864 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1865 	    plane_cap && plane_cap->per_pixel_alpha) {
1866 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1867 					  BIT(DRM_MODE_BLEND_PREMULTI) |
1868 					  BIT(DRM_MODE_BLEND_COVERAGE);
1869 
1870 		drm_plane_create_alpha_property(plane);
1871 		drm_plane_create_blend_mode_property(plane, blend_caps);
1872 	}
1873 
1874 	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
1875 		/*
1876 		 * Allow OVERLAY planes to be used as underlays by assigning an
1877 		 * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
1878 		 */
1879 		drm_plane_create_zpos_immutable_property(plane, primary_zpos);
1880 	} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
1881 		/*
1882 		 * OVERLAY planes can be below or above the PRIMARY, but cannot
1883 		 * be above the CURSOR plane.
1884 		 */
1885 		unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);
1886 
1887 		drm_plane_create_zpos_property(plane, zpos, 0, 254);
1888 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1889 		drm_plane_create_zpos_immutable_property(plane, 255);
1890 	}
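
	/*
	 * Illustrative example (editor's example, not part of this file):
	 * with dm->dc->caps.max_slave_planes == 2 the zpos layout becomes:
	 *
	 *	PRIMARY:  zpos = 2 (immutable)
	 *	OVERLAYs: initial zpos = 3 + plane index, mutable in [0, 254]
	 *	CURSOR:   zpos = 255 (immutable)
	 *
	 * so every overlay can be moved below the primary as an underlay.
	 */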
1891 
1892 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1893 	    plane_cap &&
1894 	    (plane_cap->pixel_format_support.nv12 ||
1895 	     plane_cap->pixel_format_support.p010)) {
1896 		/* This only affects YUV formats. */
1897 		drm_plane_create_color_properties(
1898 			plane,
1899 			BIT(DRM_COLOR_YCBCR_BT601) |
1900 			BIT(DRM_COLOR_YCBCR_BT709) |
1901 			BIT(DRM_COLOR_YCBCR_BT2020),
1902 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1903 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1904 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
1905 	}
1906 
1907 	supported_rotations =
1908 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1909 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1910 
1911 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
1912 	    plane->type != DRM_PLANE_TYPE_CURSOR)
1913 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1914 						   supported_rotations);
1915 
1916 	if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&
1917 	    plane->type != DRM_PLANE_TYPE_CURSOR)
1918 		drm_plane_enable_fb_damage_clips(plane);
1919 
1920 	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1921 		drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
1922 	else
1923 		drm_plane_helper_add(plane, &dm_plane_helper_funcs);
1924 
1925 #ifdef AMD_PRIVATE_COLOR
1926 	dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
1927 #else
1928 	res = dm_plane_init_colorops(plane);
1929 	if (res)
1930 		return res;
1931 #endif
1932 
1933 	/* Create (reset) the plane state */
1934 	if (plane->funcs->reset)
1935 		plane->funcs->reset(plane);
1936 
1937 	return 0;
1938 }
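
/*
 * Illustrative usage sketch (editor's example; names and error codes are
 * assumptions, not code from this driver).  A caller typically allocates
 * the plane, sets its type, and hands it here with the CRTC mask it may be
 * bound to:
 *
 *	struct drm_plane *plane;
 *
 *	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 *	if (!plane)
 *		return -ENOMEM;
 *
 *	plane->type = DRM_PLANE_TYPE_OVERLAY;
 *	if (amdgpu_dm_plane_init(dm, plane, 1 << crtc_index, plane_cap)) {
 *		kfree(plane);
 *		return -EINVAL;
 *	}
 */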
1939 
1940 bool amdgpu_dm_plane_is_video_format(uint32_t format)
1941 {
1942 	int i;
1943 
1944 	for (i = 0; i < ARRAY_SIZE(video_formats); i++)
1945 		if (format == video_formats[i])
1946 			return true;
1947 
1948 	return false;
1949 }
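
/*
 * Illustrative example (editor's example, not part of this file):
 *
 *	amdgpu_dm_plane_is_video_format(DRM_FORMAT_NV12);	// true
 *	amdgpu_dm_plane_is_video_format(DRM_FORMAT_XRGB8888);	// false
 */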
1950 
1951