xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_blend.h>
29 #include "drm/drm_framebuffer.h"
30 #include <drm/drm_gem_atomic_helper.h>
31 #include <drm/drm_plane_helper.h>
32 #include <drm/drm_gem_framebuffer_helper.h>
33 #include <drm/drm_fourcc.h>
34 
35 #include "amdgpu.h"
36 #include "dal_asic_id.h"
37 #include "amdgpu_display.h"
38 #include "amdgpu_dm_trace.h"
39 #include "amdgpu_dm_plane.h"
40 #include "gc/gc_11_0_0_offset.h"
41 #include "gc/gc_11_0_0_sh_mask.h"
42 
43 /*
44  * TODO: these are currently initialized to rgb formats only.
45  * For future use cases we should either initialize them dynamically based on
46  * plane capabilities, or initialize this array to all formats, so the internal
47  * drm check will succeed, and let DC implement the proper check.
48  */
49 static const uint32_t rgb_formats[] = {
50 	DRM_FORMAT_XRGB8888,
51 	DRM_FORMAT_ARGB8888,
52 	DRM_FORMAT_RGBA8888,
53 	DRM_FORMAT_XRGB2101010,
54 	DRM_FORMAT_XBGR2101010,
55 	DRM_FORMAT_ARGB2101010,
56 	DRM_FORMAT_ABGR2101010,
57 	DRM_FORMAT_XRGB16161616,
58 	DRM_FORMAT_XBGR16161616,
59 	DRM_FORMAT_ARGB16161616,
60 	DRM_FORMAT_ABGR16161616,
61 	DRM_FORMAT_XBGR8888,
62 	DRM_FORMAT_ABGR8888,
63 	DRM_FORMAT_RGB565,
64 };
65 
66 static const uint32_t overlay_formats[] = {
67 	DRM_FORMAT_XRGB8888,
68 	DRM_FORMAT_ARGB8888,
69 	DRM_FORMAT_RGBA8888,
70 	DRM_FORMAT_XBGR8888,
71 	DRM_FORMAT_ABGR8888,
72 	DRM_FORMAT_RGB565,
73 	DRM_FORMAT_NV21,
74 	DRM_FORMAT_NV12,
75 	DRM_FORMAT_P010
76 };
77 
78 static const uint32_t video_formats[] = {
79 	DRM_FORMAT_NV21,
80 	DRM_FORMAT_NV12,
81 	DRM_FORMAT_P010
82 };
83 
84 static const u32 cursor_formats[] = {
85 	DRM_FORMAT_ARGB8888
86 };
87 
88 enum dm_micro_swizzle {
89 	MICRO_SWIZZLE_Z = 0,
90 	MICRO_SWIZZLE_S = 1,
91 	MICRO_SWIZZLE_D = 2,
92 	MICRO_SWIZZLE_R = 3
93 };
94 
95 const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
96 {
97 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
98 }
99 
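/*
 * Derive DC blending parameters from a DRM plane state: per-pixel alpha is
 * only honoured for formats that carry an alpha channel, pre-multiplied vs.
 * coverage blending follows pixel_blend_mode, and the 16-bit DRM plane alpha
 * (0x0000..0xffff) is reduced to the 8-bit global alpha value DC expects.
 */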
100 void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
101 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
102 			       bool *global_alpha, int *global_alpha_value)
103 {
104 	*per_pixel_alpha = false;
105 	*pre_multiplied_alpha = true;
106 	*global_alpha = false;
107 	*global_alpha_value = 0xff;
108 
109 
110 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
111 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
112 		static const uint32_t alpha_formats[] = {
113 			DRM_FORMAT_ARGB8888,
114 			DRM_FORMAT_RGBA8888,
115 			DRM_FORMAT_ABGR8888,
116 			DRM_FORMAT_ARGB2101010,
117 			DRM_FORMAT_ABGR2101010,
118 			DRM_FORMAT_ARGB16161616,
119 			DRM_FORMAT_ABGR16161616,
120 			DRM_FORMAT_ARGB16161616F,
121 		};
122 		uint32_t format = plane_state->fb->format->format;
123 		unsigned int i;
124 
125 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
126 			if (format == alpha_formats[i]) {
127 				*per_pixel_alpha = true;
128 				break;
129 			}
130 		}
131 
132 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
133 			*pre_multiplied_alpha = false;
134 	}
135 
136 	if (plane_state->alpha < 0xffff) {
137 		*global_alpha = true;
138 		*global_alpha_value = plane_state->alpha >> 8;
139 	}
140 }
141 
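/*
 * Append one modifier to a dynamically grown array, doubling the capacity
 * when it runs out.  On allocation failure the array is freed and *mods is
 * set to NULL, so later calls become no-ops and the caller can report
 * -ENOMEM once at the end (see amdgpu_dm_plane_get_plane_modifiers()).
 */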
142 static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
143 {
144 	if (!*mods)
145 		return;
146 
147 	if (*cap - *size < 1) {
148 		uint64_t new_cap = *cap * 2;
149 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
150 
151 		if (!new_mods) {
152 			kfree(*mods);
153 			*mods = NULL;
154 			return;
155 		}
156 
157 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
158 		kfree(*mods);
159 		*mods = new_mods;
160 		*cap = new_cap;
161 	}
162 
163 	(*mods)[*size] = mod;
164 	*size += 1;
165 }
166 
167 static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
168 {
169 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
170 }
171 
172 static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
173 {
174 	if (modifier == DRM_FORMAT_MOD_LINEAR)
175 		return 0;
176 
177 	return AMD_FMT_MOD_GET(TILE, modifier);
178 }
179 
180 static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
181 							     uint64_t tiling_flags)
182 {
183 	/* Fill GFX8 params */
184 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
185 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
186 
187 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
188 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
189 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
190 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
191 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
192 
193 		tiling_info->gfxversion = DcGfxVersion8;
194 		/* XXX fix me for VI */
195 		tiling_info->gfx8.num_banks = num_banks;
196 		tiling_info->gfx8.array_mode =
197 				DC_ARRAY_2D_TILED_THIN1;
198 		tiling_info->gfx8.tile_split = tile_split;
199 		tiling_info->gfx8.bank_width = bankw;
200 		tiling_info->gfx8.bank_height = bankh;
201 		tiling_info->gfx8.tile_aspect = mtaspect;
202 		tiling_info->gfx8.tile_mode =
203 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
204 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
205 			== DC_ARRAY_1D_TILED_THIN1) {
206 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
207 	}
208 
209 	tiling_info->gfx8.pipe_config =
210 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
211 }
212 
213 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
214 							      struct dc_tiling_info *tiling_info)
215 {
216 	/* Fill GFX9 params */
217 	tiling_info->gfx9.num_pipes =
218 		adev->gfx.config.gb_addr_config_fields.num_pipes;
219 	tiling_info->gfx9.num_banks =
220 		adev->gfx.config.gb_addr_config_fields.num_banks;
221 	tiling_info->gfx9.pipe_interleave =
222 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
223 	tiling_info->gfx9.num_shader_engines =
224 		adev->gfx.config.gb_addr_config_fields.num_se;
225 	tiling_info->gfx9.max_compressed_frags =
226 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
227 	tiling_info->gfx9.num_rb_per_se =
228 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
229 	tiling_info->gfx9.shaderEnable = 1;
230 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
231 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
232 }
233 
234 static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
235 								struct dc_tiling_info *tiling_info,
236 								uint64_t modifier)
237 {
238 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
239 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
240 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
241 	unsigned int pipes_log2;
242 
243 	pipes_log2 = min(5u, mod_pipe_xor_bits);
244 
245 	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
246 
247 	if (!IS_AMD_FMT_MOD(modifier))
248 		return;
249 
250 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
251 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
252 
253 	if (adev->family >= AMDGPU_FAMILY_NV) {
254 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
255 	} else {
256 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
257 
258 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
259 	}
260 }
261 
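/*
 * Ask DC whether the DCC layout implied by the framebuffer is usable for
 * scanout with this format, surface size and swizzle mode.  DCC on video
 * formats is rejected outright, and a modifier without independent 64B
 * blocks is rejected when the display hardware requires them.
 */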
262 static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
263 					const enum surface_pixel_format format,
264 					const enum dc_rotation_angle rotation,
265 					const struct dc_tiling_info *tiling_info,
266 					const struct dc_plane_dcc_param *dcc,
267 					const struct dc_plane_address *address,
268 					const struct plane_size *plane_size)
269 {
270 	struct dc *dc = adev->dm.dc;
271 	struct dc_dcc_surface_param input;
272 	struct dc_surface_dcc_cap output;
273 
274 	memset(&input, 0, sizeof(input));
275 	memset(&output, 0, sizeof(output));
276 
277 	if (!dcc->enable)
278 		return 0;
279 
280 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
281 	    !dc->cap_funcs.get_dcc_compression_cap)
282 		return -EINVAL;
283 
284 	input.format = format;
285 	input.surface_size.width = plane_size->surface_size.width;
286 	input.surface_size.height = plane_size->surface_size.height;
287 	input.swizzle_mode = tiling_info->gfx9.swizzle;
288 
289 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
290 		input.scan = SCAN_DIRECTION_HORIZONTAL;
291 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
292 		input.scan = SCAN_DIRECTION_VERTICAL;
293 
294 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
295 		return -EINVAL;
296 
297 	if (!output.capable)
298 		return -EINVAL;
299 
300 	if (dcc->independent_64b_blks == 0 &&
301 	    output.grph.rgb.independent_64b_blks != 0)
302 		return -EINVAL;
303 
304 	return 0;
305 }
306 
307 static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
308 								     const struct amdgpu_framebuffer *afb,
309 								     const enum surface_pixel_format format,
310 								     const enum dc_rotation_angle rotation,
311 								     const struct plane_size *plane_size,
312 								     struct dc_tiling_info *tiling_info,
313 								     struct dc_plane_dcc_param *dcc,
314 								     struct dc_plane_address *address)
315 {
316 	const uint64_t modifier = afb->base.modifier;
317 	int ret = 0;
318 
319 	amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
320 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
321 	tiling_info->gfxversion = DcGfxVersion9;
322 
323 	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
324 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
325 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
326 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
327 
328 		dcc->enable = 1;
329 		dcc->meta_pitch = afb->base.pitches[1];
330 		dcc->independent_64b_blks = independent_64b_blks;
331 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
332 			if (independent_64b_blks && independent_128b_blks)
333 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
334 			else if (independent_128b_blks)
335 				dcc->dcc_ind_blk = hubp_ind_block_128b;
336 			else if (independent_64b_blks && !independent_128b_blks)
337 				dcc->dcc_ind_blk = hubp_ind_block_64b;
338 			else
339 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
340 		} else {
341 			if (independent_64b_blks)
342 				dcc->dcc_ind_blk = hubp_ind_block_64b;
343 			else
344 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
345 		}
346 
347 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
348 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
349 	}
350 
351 	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
352 	if (ret)
353 		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
354 
355 	return ret;
356 }
357 
358 static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
359 								      const struct amdgpu_framebuffer *afb,
360 								      const enum surface_pixel_format format,
361 								      const enum dc_rotation_angle rotation,
362 								      const struct plane_size *plane_size,
363 								      struct dc_tiling_info *tiling_info,
364 								      struct dc_plane_dcc_param *dcc,
365 								      struct dc_plane_address *address)
366 {
367 	const uint64_t modifier = afb->base.modifier;
368 	int ret = 0;
369 
370 	/* TODO: Most of this function shouldn't be needed on GFX12. */
371 	amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
372 
373 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
374 	tiling_info->gfxversion = DcGfxAddr3;
375 
376 	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
377 		int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
378 
379 		dcc->enable = 1;
380 		dcc->independent_64b_blks = max_compressed_block == 0;
381 
382 		if (max_compressed_block == 0)
383 			dcc->dcc_ind_blk = hubp_ind_block_64b;
384 		else if (max_compressed_block == 1)
385 			dcc->dcc_ind_blk = hubp_ind_block_128b;
386 		else
387 			dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
388 	}
389 
390 	/* TODO: This seems wrong because there is no DCC plane on GFX12. */
391 	ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
392 	if (ret)
393 		drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
394 
395 	return ret;
396 }
397 
398 static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
399 						  uint64_t **mods,
400 						  uint64_t *size,
401 						  uint64_t *capacity)
402 {
403 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
404 
405 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
406 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
407 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
408 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
409 				     AMD_FMT_MOD_SET(DCC, 1) |
410 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
411 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
412 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
413 
414 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
415 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
416 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
417 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
418 				     AMD_FMT_MOD_SET(DCC, 1) |
419 				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
420 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
421 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
422 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
423 
424 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
425 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
426 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
427 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
428 
429 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
430 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
431 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
432 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
433 
434 
435 	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
436 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
437 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
438 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
439 
440 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
441 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
442 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
443 }
444 
445 static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
446 					       uint64_t **mods,
447 					       uint64_t *size,
448 					       uint64_t *capacity)
449 {
450 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
451 	int pipe_xor_bits = min(8, pipes +
452 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
453 	int bank_xor_bits = min(8 - pipe_xor_bits,
454 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
455 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
456 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
457 
458 
459 	if (adev->family == AMDGPU_FAMILY_RV) {
460 		/* Raven2 and later */
461 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
462 
463 		/*
464 		 * No _D DCC swizzles yet because we only allow 32bpp, which
465 		 * doesn't support _D on DCN
466 		 */
467 
468 		if (has_constant_encode) {
469 			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
470 						     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
471 						     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
472 						     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
473 						     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
474 						     AMD_FMT_MOD_SET(DCC, 1) |
475 						     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
476 						     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
477 						     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
478 		}
479 
480 		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
481 					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
482 					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
483 					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
484 					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
485 					     AMD_FMT_MOD_SET(DCC, 1) |
486 					     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
487 					     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
488 					     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
489 
490 		if (has_constant_encode) {
491 			amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
492 						     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
493 						     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
494 						     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
495 						     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
496 						     AMD_FMT_MOD_SET(DCC, 1) |
497 						     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
498 						     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
499 						     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
500 						     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
501 						     AMD_FMT_MOD_SET(RB, rb) |
502 						     AMD_FMT_MOD_SET(PIPE, pipes));
503 		}
504 
505 		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
506 					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
507 					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
508 					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
509 					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
510 					     AMD_FMT_MOD_SET(DCC, 1) |
511 					     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
512 					     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
513 					     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
514 					     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
515 					     AMD_FMT_MOD_SET(RB, rb) |
516 					     AMD_FMT_MOD_SET(PIPE, pipes));
517 	}
518 
519 	/*
520 	 * Only supported for 64bpp on Raven, will be filtered on format in
521 	 * amdgpu_dm_plane_format_mod_supported.
522 	 */
523 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
524 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
525 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
526 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
527 				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
528 
529 	if (adev->family == AMDGPU_FAMILY_RV) {
530 		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
531 					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
532 					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
533 					     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
534 					     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
535 	}
536 
537 	/*
538 	 * Only supported for 64bpp on Raven, will be filtered on format in
539 	 * amdgpu_dm_plane_format_mod_supported.
540 	 */
541 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
542 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
543 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
544 
545 	if (adev->family == AMDGPU_FAMILY_RV) {
546 		amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
547 					     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
548 					     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
549 	}
550 }
551 
552 static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
553 						  uint64_t **mods,
554 						  uint64_t *size,
555 						  uint64_t *capacity)
556 {
557 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
558 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
559 
560 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
561 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
562 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
563 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
564 				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
565 				     AMD_FMT_MOD_SET(DCC, 1) |
566 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
567 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
568 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
569 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
570 
571 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
572 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
573 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
574 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
575 				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
576 				     AMD_FMT_MOD_SET(DCC, 1) |
577 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
578 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
579 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
580 
581 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
582 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
583 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
584 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
585 				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
586 				     AMD_FMT_MOD_SET(DCC, 1) |
587 				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
588 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
589 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
590 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
591 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
592 
593 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
594 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
595 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
596 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
597 				     AMD_FMT_MOD_SET(PACKERS, pkrs) |
598 				     AMD_FMT_MOD_SET(DCC, 1) |
599 				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
600 				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
601 				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
602 				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
603 
604 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
605 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
606 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
607 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
608 				     AMD_FMT_MOD_SET(PACKERS, pkrs));
609 
610 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
611 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
612 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
613 				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
614 				     AMD_FMT_MOD_SET(PACKERS, pkrs));
615 
616 	/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
617 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
618 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
619 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
620 
621 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
622 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
623 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
624 }
625 
626 static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
627 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
628 {
629 	int num_pipes = 0;
630 	int pipe_xor_bits = 0;
631 	int num_pkrs = 0;
632 	int pkrs = 0;
633 	u32 gb_addr_config;
634 	u8 i = 0;
635 	unsigned int swizzle_r_x;
636 	uint64_t modifier_r_x;
637 	uint64_t modifier_dcc_best;
638 	uint64_t modifier_dcc_4k;
639 
640 	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
641 	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
642 	 */
643 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
644 	ASSERT(gb_addr_config != 0);
645 
646 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
647 	pkrs = ilog2(num_pkrs);
648 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
649 	pipe_xor_bits = ilog2(num_pipes);
650 
651 	for (i = 0; i < 2; i++) {
652 		/* Insert the best one first. */
653 		/* R_X swizzle modes are the best for rendering and DCC requires them. */
654 		if (num_pipes > 16)
655 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
656 		else
657 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
658 
659 		modifier_r_x = AMD_FMT_MOD |
660 			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
661 			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
662 			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
663 			       AMD_FMT_MOD_SET(PACKERS, pkrs);
664 
665 		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
666 		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
667 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
668 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
669 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
670 
671 		/* DCC settings for 4K and greater resolutions. (required by display hw) */
672 		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
673 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
674 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
675 				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
676 
677 		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
678 		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);
679 
680 		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
681 		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
682 
683 		amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
684 	}
685 
686 	amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
687 				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
688 				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
689 }
690 
691 static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
692 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
693 {
694 	uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
695 	uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
696 	uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
697 	uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
698 	uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
699 	uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
700 	uint8_t max_comp_block[] = {1, 0};
701 	uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
702 	uint8_t i = 0, j = 0;
703 	uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
704 
705 	for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
706 		max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);
707 
708 	/* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different
709 	 * max compressed blocks first and then move on to the next smaller sized layouts.
710 	 * Do not add the linear modifier here, and hence the condition of size-1 for the loop
711 	 */
712 	for (j = 0; j < ARRAY_SIZE(gfx12_modifiers) - 1; j++)
713 		for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
714 			amdgpu_dm_plane_add_modifier(mods, size, capacity,
715 						     ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);
716 
717 	/* Without DCC. Add all modifiers including linear at the end */
718 	for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
719 		amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
720 
721 }
722 
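/*
 * Build the modifier list advertised for a plane.  Per-family helpers add
 * entries best-first; every list ends with DRM_FORMAT_MOD_LINEAR and the
 * DRM_FORMAT_MOD_INVALID terminator, and pre-GFX9 parts expose no
 * modifiers at all.
 */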
723 static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
724 {
725 	uint64_t size = 0, capacity = 128;
726 	*mods = NULL;
727 
728 	/* We have not hooked up any pre-GFX9 modifiers. */
729 	if (adev->family < AMDGPU_FAMILY_AI)
730 		return 0;
731 
732 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
733 
734 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
735 		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
736 		amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
737 		return *mods ? 0 : -ENOMEM;
738 	}
739 
740 	switch (adev->family) {
741 	case AMDGPU_FAMILY_AI:
742 	case AMDGPU_FAMILY_RV:
743 		amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
744 		break;
745 	case AMDGPU_FAMILY_NV:
746 	case AMDGPU_FAMILY_VGH:
747 	case AMDGPU_FAMILY_YC:
748 	case AMDGPU_FAMILY_GC_10_3_6:
749 	case AMDGPU_FAMILY_GC_10_3_7:
750 		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
751 			amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
752 		else
753 			amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
754 		break;
755 	case AMDGPU_FAMILY_GC_11_0_0:
756 	case AMDGPU_FAMILY_GC_11_0_1:
757 	case AMDGPU_FAMILY_GC_11_5_0:
758 		amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
759 		break;
760 	case AMDGPU_FAMILY_GC_12_0_0:
761 		amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
762 		break;
763 	}
764 
765 	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
766 
767 	/* INVALID marks the end of the list. */
768 	amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
769 
770 	if (!*mods)
771 		return -ENOMEM;
772 
773 	return 0;
774 }
775 
776 static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
777 					     const struct dc_plane_cap *plane_cap,
778 					     uint32_t *formats, int max_formats)
779 {
780 	int i, num_formats = 0;
781 
782 	/*
783 	 * TODO: Query support for each group of formats directly from
784 	 * DC plane caps. This will require adding more formats to the
785 	 * caps list.
786 	 */
787 
788 	if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
789 		(plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
790 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
791 			if (num_formats >= max_formats)
792 				break;
793 
794 			formats[num_formats++] = rgb_formats[i];
795 		}
796 
797 		if (plane_cap && plane_cap->pixel_format_support.nv12)
798 			formats[num_formats++] = DRM_FORMAT_NV12;
799 		if (plane_cap && plane_cap->pixel_format_support.p010)
800 			formats[num_formats++] = DRM_FORMAT_P010;
801 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
802 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
803 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
804 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
805 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
806 		}
807 	} else {
808 		switch (plane->type) {
809 		case DRM_PLANE_TYPE_OVERLAY:
810 			for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
811 				if (num_formats >= max_formats)
812 					break;
813 
814 				formats[num_formats++] = overlay_formats[i];
815 			}
816 			break;
817 
818 		case DRM_PLANE_TYPE_CURSOR:
819 			for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
820 				if (num_formats >= max_formats)
821 					break;
822 
823 				formats[num_formats++] = cursor_formats[i];
824 			}
825 			break;
826 
827 		default:
828 			break;
829 		}
830 	}
831 
832 	return num_formats;
833 }
834 
835 int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
836 			     const struct amdgpu_framebuffer *afb,
837 			     const enum surface_pixel_format format,
838 			     const enum dc_rotation_angle rotation,
839 			     const uint64_t tiling_flags,
840 			     struct dc_tiling_info *tiling_info,
841 			     struct plane_size *plane_size,
842 			     struct dc_plane_dcc_param *dcc,
843 			     struct dc_plane_address *address,
844 			     bool tmz_surface)
845 {
846 	const struct drm_framebuffer *fb = &afb->base;
847 	int ret;
848 
849 	memset(tiling_info, 0, sizeof(*tiling_info));
850 	memset(plane_size, 0, sizeof(*plane_size));
851 	memset(dcc, 0, sizeof(*dcc));
852 	memset(address, 0, sizeof(*address));
853 
854 	address->tmz_surface = tmz_surface;
855 
856 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
857 		uint64_t addr = afb->address + fb->offsets[0];
858 
859 		plane_size->surface_size.x = 0;
860 		plane_size->surface_size.y = 0;
861 		plane_size->surface_size.width = fb->width;
862 		plane_size->surface_size.height = fb->height;
863 		plane_size->surface_pitch =
864 			fb->pitches[0] / fb->format->cpp[0];
865 
866 		address->type = PLN_ADDR_TYPE_GRAPHICS;
867 		address->grph.addr.low_part = lower_32_bits(addr);
868 		address->grph.addr.high_part = upper_32_bits(addr);
869 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
870 		uint64_t luma_addr = afb->address + fb->offsets[0];
871 		uint64_t chroma_addr = afb->address + fb->offsets[1];
872 
873 		plane_size->surface_size.x = 0;
874 		plane_size->surface_size.y = 0;
875 		plane_size->surface_size.width = fb->width;
876 		plane_size->surface_size.height = fb->height;
877 		plane_size->surface_pitch =
878 			fb->pitches[0] / fb->format->cpp[0];
879 
880 		plane_size->chroma_size.x = 0;
881 		plane_size->chroma_size.y = 0;
882 		/* TODO: set these based on surface format */
883 		plane_size->chroma_size.width = fb->width / 2;
884 		plane_size->chroma_size.height = fb->height / 2;
885 
886 		plane_size->chroma_pitch =
887 			fb->pitches[1] / fb->format->cpp[1];
888 
889 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
890 		address->video_progressive.luma_addr.low_part =
891 			lower_32_bits(luma_addr);
892 		address->video_progressive.luma_addr.high_part =
893 			upper_32_bits(luma_addr);
894 		address->video_progressive.chroma_addr.low_part =
895 			lower_32_bits(chroma_addr);
896 		address->video_progressive.chroma_addr.high_part =
897 			upper_32_bits(chroma_addr);
898 	}
899 
900 	if (adev->family >= AMDGPU_FAMILY_GC_12_0_0) {
901 		ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
902 										 rotation, plane_size,
903 										 tiling_info, dcc,
904 										 address);
905 		if (ret)
906 			return ret;
907 	} else if (adev->family >= AMDGPU_FAMILY_AI) {
908 		ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
909 										rotation, plane_size,
910 										tiling_info, dcc,
911 										address);
912 		if (ret)
913 			return ret;
914 	} else {
915 		amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
916 	}
917 
918 	return 0;
919 }
920 
921 static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
922 					     struct drm_plane_state *new_state)
923 {
924 	struct amdgpu_framebuffer *afb;
925 	struct drm_gem_object *obj;
926 	struct amdgpu_device *adev;
927 	struct amdgpu_bo *rbo;
928 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
929 	uint32_t domain;
930 	int r;
931 
932 	if (!new_state->fb) {
933 		DRM_DEBUG_KMS("No FB bound\n");
934 		return 0;
935 	}
936 
937 	afb = to_amdgpu_framebuffer(new_state->fb);
938 	obj = drm_gem_fb_get_obj(new_state->fb, 0);
939 	if (!obj) {
940 		DRM_ERROR("Failed to get obj from framebuffer\n");
941 		return -EINVAL;
942 	}
943 
944 	rbo = gem_to_amdgpu_bo(obj);
945 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
946 	r = amdgpu_bo_reserve(rbo, true);
947 	if (r) {
948 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
949 		return r;
950 	}
951 
952 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
953 	if (r) {
954 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
955 		goto error_unlock;
956 	}
957 
958 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
959 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
960 	else
961 		domain = AMDGPU_GEM_DOMAIN_VRAM;
962 
963 	rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
964 	r = amdgpu_bo_pin(rbo, domain);
965 	if (unlikely(r != 0)) {
966 		if (r != -ERESTARTSYS)
967 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
968 		goto error_unlock;
969 	}
970 
971 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
972 	if (unlikely(r != 0)) {
973 		DRM_ERROR("%p bind failed\n", rbo);
974 		goto error_unpin;
975 	}
976 
977 	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
978 	if (unlikely(r != 0))
979 		goto error_unpin;
980 
981 	amdgpu_bo_unreserve(rbo);
982 
983 	afb->address = amdgpu_bo_gpu_offset(rbo);
984 
985 	amdgpu_bo_ref(rbo);
986 
987 	/**
988 	 * We don't do surface updates on planes that have been newly created,
989 	 * but we also don't have the afb->address during atomic check.
990 	 *
991 	 * Fill in buffer attributes depending on the address here, but only on
992 	 * newly created planes since they're not being used by DC yet and this
993 	 * won't modify global state.
994 	 */
995 	dm_plane_state_old = to_dm_plane_state(plane->state);
996 	dm_plane_state_new = to_dm_plane_state(new_state);
997 
998 	if (dm_plane_state_new->dc_state &&
999 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
1000 		struct dc_plane_state *plane_state =
1001 			dm_plane_state_new->dc_state;
1002 
1003 		amdgpu_dm_plane_fill_plane_buffer_attributes(
1004 			adev, afb, plane_state->format, plane_state->rotation,
1005 			afb->tiling_flags,
1006 			&plane_state->tiling_info, &plane_state->plane_size,
1007 			&plane_state->dcc, &plane_state->address,
1008 			afb->tmz_surface);
1009 	}
1010 
1011 	return 0;
1012 
1013 error_unpin:
1014 	amdgpu_bo_unpin(rbo);
1015 
1016 error_unlock:
1017 	amdgpu_bo_unreserve(rbo);
1018 	return r;
1019 }
1020 
1021 static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
1022 					      struct drm_plane_state *old_state)
1023 {
1024 	struct amdgpu_bo *rbo;
1025 	int r;
1026 
1027 	if (!old_state->fb)
1028 		return;
1029 
1030 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
1031 	r = amdgpu_bo_reserve(rbo, false);
1032 	if (unlikely(r)) {
1033 		DRM_ERROR("failed to reserve rbo before unpin\n");
1034 		return;
1035 	}
1036 
1037 	amdgpu_bo_unpin(rbo);
1038 	amdgpu_bo_unreserve(rbo);
1039 	amdgpu_bo_unref(&rbo);
1040 }
1041 
1042 static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
1043 					 struct drm_framebuffer *fb,
1044 					 int *min_downscale, int *max_upscale)
1045 {
1046 	struct amdgpu_device *adev = drm_to_adev(dev);
1047 	struct dc *dc = adev->dm.dc;
1048 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
1049 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
1050 
1051 	switch (fb->format->format) {
1052 	case DRM_FORMAT_P010:
1053 	case DRM_FORMAT_NV12:
1054 	case DRM_FORMAT_NV21:
1055 		*max_upscale = plane_cap->max_upscale_factor.nv12;
1056 		*min_downscale = plane_cap->max_downscale_factor.nv12;
1057 		break;
1058 
1059 	case DRM_FORMAT_XRGB16161616F:
1060 	case DRM_FORMAT_ARGB16161616F:
1061 	case DRM_FORMAT_XBGR16161616F:
1062 	case DRM_FORMAT_ABGR16161616F:
1063 		*max_upscale = plane_cap->max_upscale_factor.fp16;
1064 		*min_downscale = plane_cap->max_downscale_factor.fp16;
1065 		break;
1066 
1067 	default:
1068 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
1069 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
1070 		break;
1071 	}
1072 
1073 	/*
1074 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
1075 	 * scaling factor of 1.0 == 1000 units.
1076 	 */
1077 	if (*max_upscale == 1)
1078 		*max_upscale = 1000;
1079 
1080 	if (*min_downscale == 1)
1081 		*min_downscale = 1000;
1082 }
1083 
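/*
 * Worked example of the unit conversion done below: DC reports scaling
 * limits in 1/1000 units (1000 == 1.0), while DRM uses 16.16 fixed point
 * with src/dst semantics.  A max_upscale of 16000 (16x) therefore becomes
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 0.0625 = 1/16 in 16.16.
 */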
1084 int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
1085 				       struct drm_crtc_state *new_crtc_state)
1086 {
1087 	struct drm_framebuffer *fb = state->fb;
1088 	int min_downscale, max_upscale;
1089 	int min_scale = 0;
1090 	int max_scale = INT_MAX;
1091 
1092 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1093 	if (fb && state->crtc) {
1094 		/* Validate viewport to cover the case when only the position changes */
1095 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1096 			int viewport_width = state->crtc_w;
1097 			int viewport_height = state->crtc_h;
1098 
1099 			if (state->crtc_x < 0)
1100 				viewport_width += state->crtc_x;
1101 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1102 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1103 
1104 			if (state->crtc_y < 0)
1105 				viewport_height += state->crtc_y;
1106 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1107 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1108 
1109 			if (viewport_width < 0 || viewport_height < 0) {
1110 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1111 				return -EINVAL;
1112 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1113 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1114 				return -EINVAL;
1115 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
1116 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1117 				return -EINVAL;
1118 			}
1119 
1120 		}
1121 
1122 		/* Get min/max allowed scaling factors from plane caps. */
1123 		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
1124 							     &min_downscale, &max_upscale);
1125 		/*
1126 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
1127 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1128 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1129 		 */
1130 		min_scale = (1000 << 16) / max_upscale;
1131 		max_scale = (1000 << 16) / min_downscale;
1132 	}
1133 
1134 	return drm_atomic_helper_check_plane_state(
1135 		state, new_crtc_state, min_scale, max_scale, true, true);
1136 }
1137 
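/*
 * Example of the per-format scaling check below: a 960x540 source rect
 * scanned out at 1920x1080 gives scale_w = 1920 * 1000 / 960 = 2000, a 2x
 * upscale, which must fall within [min_downscale, max_upscale] from the
 * plane caps (defaults 250..16000 when no fb/plane is available).
 */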
1138 int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
1139 				const struct drm_plane_state *state,
1140 				struct dc_scaling_info *scaling_info)
1141 {
1142 	int scale_w, scale_h, min_downscale, max_upscale;
1143 
1144 	memset(scaling_info, 0, sizeof(*scaling_info));
1145 
1146 	/* Source is fixed 16.16 but we ignore mantissa for now... */
1147 	scaling_info->src_rect.x = state->src_x >> 16;
1148 	scaling_info->src_rect.y = state->src_y >> 16;
1149 
1150 	/*
1151 	 * For reasons we don't (yet) fully understand a non-zero
1152 	 * src_y coordinate into an NV12 buffer can cause a
1153 	 * system hang on DCN1x.
1154 	 * To avoid hangs (and maybe be overly cautious)
1155 	 * let's reject both non-zero src_x and src_y.
1156 	 *
1157 	 * We currently know of only one use-case to reproduce a
1158 	 * scenario with non-zero src_x and src_y for NV12, which
1159 	 * is to gesture the YouTube Android app into full screen
1160 	 * on ChromeOS.
1161 	 */
1162 	if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1163 	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&
1164 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1165 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1166 		return -EINVAL;
1167 
1168 	scaling_info->src_rect.width = state->src_w >> 16;
1169 	if (scaling_info->src_rect.width == 0)
1170 		return -EINVAL;
1171 
1172 	scaling_info->src_rect.height = state->src_h >> 16;
1173 	if (scaling_info->src_rect.height == 0)
1174 		return -EINVAL;
1175 
1176 	scaling_info->dst_rect.x = state->crtc_x;
1177 	scaling_info->dst_rect.y = state->crtc_y;
1178 
1179 	if (state->crtc_w == 0)
1180 		return -EINVAL;
1181 
1182 	scaling_info->dst_rect.width = state->crtc_w;
1183 
1184 	if (state->crtc_h == 0)
1185 		return -EINVAL;
1186 
1187 	scaling_info->dst_rect.height = state->crtc_h;
1188 
1189 	/* DRM doesn't specify clipping on destination output. */
1190 	scaling_info->clip_rect = scaling_info->dst_rect;
1191 
1192 	/* Validate scaling per-format with DC plane caps */
1193 	if (state->plane && state->plane->dev && state->fb) {
1194 		amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
1195 							     &min_downscale, &max_upscale);
1196 	} else {
1197 		min_downscale = 250;
1198 		max_upscale = 16000;
1199 	}
1200 
1201 	scale_w = scaling_info->dst_rect.width * 1000 /
1202 		  scaling_info->src_rect.width;
1203 
1204 	if (scale_w < min_downscale || scale_w > max_upscale)
1205 		return -EINVAL;
1206 
1207 	scale_h = scaling_info->dst_rect.height * 1000 /
1208 		  scaling_info->src_rect.height;
1209 
1210 	if (scale_h < min_downscale || scale_h > max_upscale)
1211 		return -EINVAL;
1212 
1213 	/*
1214 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
1215 	 * assume reasonable defaults based on the format.
1216 	 */
1217 
1218 	return 0;
1219 }
1220 
1221 static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
1222 					struct drm_atomic_state *state)
1223 {
1224 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1225 										 plane);
1226 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1227 	struct dc *dc = adev->dm.dc;
1228 	struct dm_plane_state *dm_plane_state;
1229 	struct dc_scaling_info scaling_info;
1230 	struct drm_crtc_state *new_crtc_state;
1231 	int ret;
1232 
1233 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
1234 
1235 	dm_plane_state = to_dm_plane_state(new_plane_state);
1236 
1237 	if (!dm_plane_state->dc_state)
1238 		return 0;
1239 
1240 	new_crtc_state =
1241 		drm_atomic_get_new_crtc_state(state,
1242 					      new_plane_state->crtc);
1243 	if (!new_crtc_state)
1244 		return -EINVAL;
1245 
1246 	ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
1247 	if (ret)
1248 		return ret;
1249 
1250 	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
1251 	if (ret)
1252 		return ret;
1253 
1254 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
1255 		return 0;
1256 
1257 	return -EINVAL;
1258 }
1259 
1260 static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
1261 					      struct drm_atomic_state *state)
1262 {
1263 	struct drm_crtc_state *new_crtc_state;
1264 	struct drm_plane_state *new_plane_state;
1265 	struct dm_crtc_state *dm_new_crtc_state;
1266 
1267 	/* Only support async updates on cursor planes. */
1268 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
1269 		return -EINVAL;
1270 
1271 	new_plane_state = drm_atomic_get_new_plane_state(state, plane);
1272 	new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
1273 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1274 	/* Reject overlay cursors for now */
1275 	if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
1276 		return -EINVAL;
1277 
1278 	return 0;
1279 }
1280 
1281 int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
1282 					struct dc_cursor_position *position)
1283 {
1284 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1285 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1286 	int x, y;
1287 	int xorigin = 0, yorigin = 0;
1288 
1289 	if (!crtc || !plane->state->fb)
1290 		return 0;
1291 
1292 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1293 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1294 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1295 			  __func__,
1296 			  plane->state->crtc_w,
1297 			  plane->state->crtc_h);
1298 		return -EINVAL;
1299 	}
1300 
1301 	x = plane->state->crtc_x;
1302 	y = plane->state->crtc_y;
1303 
1304 	if (x <= -amdgpu_crtc->max_cursor_width ||
1305 	    y <= -amdgpu_crtc->max_cursor_height)
1306 		return 0;
1307 
1308 	if (x < 0) {
1309 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1310 		x = 0;
1311 	}
1312 	if (y < 0) {
1313 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1314 		y = 0;
1315 	}
1316 	position->enable = true;
1317 	position->x = x;
1318 	position->y = y;
1319 	position->x_hotspot = xorigin;
1320 	position->y_hotspot = yorigin;
1321 
1322 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
1323 		position->translate_by_source = true;
1324 
1325 	return 0;
1326 }
1327 
1328 void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
1329 				 struct drm_plane_state *old_plane_state)
1330 {
1331 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1332 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1333 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1334 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1335 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1336 	uint64_t address = afb ? afb->address : 0;
1337 	struct dc_cursor_position position = {0};
1338 	struct dc_cursor_attributes attributes;
1339 	int ret;
1340 
1341 	if (!plane->state->fb && !old_plane_state->fb)
1342 		return;
1343 
1344 	drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
1345 		       amdgpu_crtc->crtc_id, plane->state->crtc_w,
1346 		       plane->state->crtc_h);
1347 
1348 	ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
1349 	if (ret)
1350 		return;
1351 
1352 	if (!position.enable) {
1353 		/* turn off cursor */
1354 		if (crtc_state && crtc_state->stream) {
1355 			mutex_lock(&adev->dm.dc_lock);
1356 			dc_stream_program_cursor_position(crtc_state->stream,
1357 						      &position);
1358 			mutex_unlock(&adev->dm.dc_lock);
1359 		}
1360 		return;
1361 	}
1362 
1363 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
1364 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
1365 
1366 	memset(&attributes, 0, sizeof(attributes));
1367 	attributes.address.high_part = upper_32_bits(address);
1368 	attributes.address.low_part  = lower_32_bits(address);
1369 	attributes.width             = plane->state->crtc_w;
1370 	attributes.height            = plane->state->crtc_h;
1371 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1372 	attributes.rotation_angle    = 0;
1373 	attributes.attribute_flags.value = 0;
1374 
1375 	/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1376 	 * legacy gamma setup.
1377 	 */
1378 	if (crtc_state->cm_is_degamma_srgb &&
1379 	    adev->dm.dc->caps.color.dpp.gamma_corr)
1380 		attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1381 
1382 	if (afb)
1383 		attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1384 
1385 	if (crtc_state->stream) {
1386 		mutex_lock(&adev->dm.dc_lock);
1387 		if (!dc_stream_program_cursor_attributes(crtc_state->stream,
1388 							 &attributes))
1389 			DRM_ERROR("DC failed to set cursor attributes\n");
1390 
1391 		if (!dc_stream_program_cursor_position(crtc_state->stream,
1392 						   &position))
1393 			DRM_ERROR("DC failed to set cursor position\n");
1394 		mutex_unlock(&adev->dm.dc_lock);
1395 	}
1396 }
1397 
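/*
 * Fast-path cursor update: swap in the new framebuffer, copy the src/crtc
 * rectangles into the committed plane state, and push the result to DC via
 * amdgpu_dm_plane_handle_cursor_update().
 */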
1398 static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
1399 						struct drm_atomic_state *state)
1400 {
1401 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1402 									   plane);
1403 	struct drm_plane_state *old_state =
1404 		drm_atomic_get_old_plane_state(state, plane);
1405 
1406 	trace_amdgpu_dm_atomic_update_cursor(new_state);
1407 
1408 	swap(plane->state->fb, new_state->fb);
1409 
1410 	plane->state->src_x = new_state->src_x;
1411 	plane->state->src_y = new_state->src_y;
1412 	plane->state->src_w = new_state->src_w;
1413 	plane->state->src_h = new_state->src_h;
1414 	plane->state->crtc_x = new_state->crtc_x;
1415 	plane->state->crtc_y = new_state->crtc_y;
1416 	plane->state->crtc_w = new_state->crtc_w;
1417 	plane->state->crtc_h = new_state->crtc_h;
1418 
1419 	amdgpu_dm_plane_handle_cursor_update(plane, old_state);
1420 }
1421 
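/*
 * drm_panic flush hook: force DC to update the plane so the panic screen
 * becomes visible, passing along whether the framebuffer uses a non-linear
 * modifier.
 */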
1422 static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
1423 {
1424 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
1425 	struct drm_framebuffer *fb = plane->state->fb;
1426 	struct dc_plane_state *dc_plane_state;
1427 
1428 	if (!dm_plane_state || !dm_plane_state->dc_state)
1429 		return;
1430 
1431 	dc_plane_state = dm_plane_state->dc_state;
1432 
1433 	dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false);
1434 }
1435 
1436 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1437 	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1438 	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1439 	.atomic_check = amdgpu_dm_plane_atomic_check,
1440 	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1441 	.atomic_async_update = amdgpu_dm_plane_atomic_async_update
1442 };
1443 
1444 static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
1445 	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1446 	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1447 	.atomic_check = amdgpu_dm_plane_atomic_check,
1448 	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1449 	.atomic_async_update = amdgpu_dm_plane_atomic_async_update,
1450 	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
1451 	.panic_flush = amdgpu_dm_plane_panic_flush,
1452 };
1453 
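/*
 * .reset hook: free any previous state and install a zeroed dm_plane_state
 * with the AMD color-management fields (degamma/shaper/blend transfer
 * functions, HDR multiplier) set to their defaults.
 */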
1454 static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
1455 {
1456 	struct dm_plane_state *amdgpu_state = NULL;
1457 
1458 	if (plane->state)
1459 		plane->funcs->atomic_destroy_state(plane, plane->state);
1460 
1461 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
1462 	WARN_ON(amdgpu_state == NULL);
1463 
1464 	if (!amdgpu_state)
1465 		return;
1466 
1467 	__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
1468 	amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1469 	amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
1470 	amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1471 	amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
1472 }
1473 
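/*
 * .atomic_duplicate_state hook: copy the base state, retain the attached
 * dc_plane_state, take references on the color-management blobs and carry
 * over the transfer-function and HDR-multiplier values.
 */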
1474 static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
1475 {
1476 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1477 
1478 	old_dm_plane_state = to_dm_plane_state(plane->state);
1479 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
1480 	if (!dm_plane_state)
1481 		return NULL;
1482 
1483 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
1484 
1485 	if (old_dm_plane_state->dc_state) {
1486 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1487 		dc_plane_state_retain(dm_plane_state->dc_state);
1488 	}
1489 
1490 	if (old_dm_plane_state->degamma_lut)
1491 		dm_plane_state->degamma_lut =
1492 			drm_property_blob_get(old_dm_plane_state->degamma_lut);
1493 	if (old_dm_plane_state->ctm)
1494 		dm_plane_state->ctm =
1495 			drm_property_blob_get(old_dm_plane_state->ctm);
1496 	if (old_dm_plane_state->shaper_lut)
1497 		dm_plane_state->shaper_lut =
1498 			drm_property_blob_get(old_dm_plane_state->shaper_lut);
1499 	if (old_dm_plane_state->lut3d)
1500 		dm_plane_state->lut3d =
1501 			drm_property_blob_get(old_dm_plane_state->lut3d);
1502 	if (old_dm_plane_state->blend_lut)
1503 		dm_plane_state->blend_lut =
1504 			drm_property_blob_get(old_dm_plane_state->blend_lut);
1505 
1506 	dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
1507 	dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
1508 	dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
1509 	dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
1510 
1511 	return &dm_plane_state->base;
1512 }
1513 
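/*
 * Validate a format/modifier pair for this plane. LINEAR and INVALID are
 * always accepted; anything else must appear in the plane's modifier list
 * and, for pre-GFX12 tiling, also pass the micro-swizzle and DCC checks
 * below.
 */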
1514 static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
1515 						 uint32_t format,
1516 						 uint64_t modifier)
1517 {
1518 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1519 	const struct drm_format_info *info = drm_format_info(format);
1520 	int i;
1521 
1522 	if (!info)
1523 		return false;
1524 
1525 	/*
1526 	 * We always have to allow these modifiers:
1527 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1528 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1529 	 */
1530 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
1531 	    modifier == DRM_FORMAT_MOD_INVALID) {
1532 		return true;
1533 	}
1534 
1535 	/* Check that the modifier is on the list of the plane's supported modifiers. */
1536 	for (i = 0; i < plane->modifier_count; i++) {
1537 		if (modifier == plane->modifiers[i])
1538 			break;
1539 	}
1540 	if (i == plane->modifier_count)
1541 		return false;
1542 
1543 	/* GFX12 doesn't have these limitations. */
1544 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
1545 		enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
1546 
1547 		/*
1548 		 * For D swizzle the canonical modifier depends on the bpp, so check
1549 		 * it here.
1550 		 */
1551 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1552 		    adev->family >= AMDGPU_FAMILY_NV) {
1553 			if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1554 				return false;
1555 		}
1556 
1557 		if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1558 		    info->cpp[0] < 8)
1559 			return false;
1560 
1561 		if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
1562 			/* Per radeonsi comments, 16/64 bpp are more complicated. */
1563 			if (info->cpp[0] != 4)
1564 				return false;
1565 			/* We support multi-planar formats, but not when combined with
1566 			 * additional DCC metadata planes.
1567 			 */
1568 			if (info->num_planes > 1)
1569 				return false;
1570 		}
1571 	}
1572 
1573 	return true;
1574 }
1575 
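/*
 * .atomic_destroy_state hook: drop the state's references to the
 * color-management property blobs and the dc_plane_state before freeing
 * the base state.
 */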
1576 static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
1577 						    struct drm_plane_state *state)
1578 {
1579 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1580 
1581 	if (dm_plane_state->degamma_lut)
1582 		drm_property_blob_put(dm_plane_state->degamma_lut);
1583 	if (dm_plane_state->ctm)
1584 		drm_property_blob_put(dm_plane_state->ctm);
1585 	if (dm_plane_state->lut3d)
1586 		drm_property_blob_put(dm_plane_state->lut3d);
1587 	if (dm_plane_state->shaper_lut)
1588 		drm_property_blob_put(dm_plane_state->shaper_lut);
1589 	if (dm_plane_state->blend_lut)
1590 		drm_property_blob_put(dm_plane_state->blend_lut);
1591 
1592 	if (dm_plane_state->dc_state)
1593 		dc_plane_state_release(dm_plane_state->dc_state);
1594 
1595 	drm_atomic_helper_plane_destroy_state(plane, state);
1596 }
1597 
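/*
 * Per-plane AMD "private color" properties (degamma/shaper/blend LUTs and
 * transfer functions, CTM, 3D LUT, HDR multiplier). They are only compiled
 * in when AMD_PRIVATE_COLOR is defined and are attached per plane according
 * to the DPP/MPC color capabilities reported by DC.
 */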
1598 #ifdef AMD_PRIVATE_COLOR
1599 static void
1600 dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
1601 					     struct drm_plane *plane)
1602 {
1603 	struct amdgpu_mode_info mode_info = dm->adev->mode_info;
1604 	struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
1605 
1606 	/* Check HW color pipeline capabilities on DPP block (pre-blending)
1607 	 * before exposing related properties.
1608 	 */
1609 	if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
1610 		drm_object_attach_property(&plane->base,
1611 					   mode_info.plane_degamma_lut_property,
1612 					   0);
1613 		drm_object_attach_property(&plane->base,
1614 					   mode_info.plane_degamma_lut_size_property,
1615 					   MAX_COLOR_LUT_ENTRIES);
1616 		drm_object_attach_property(&plane->base,
1617 					   dm->adev->mode_info.plane_degamma_tf_property,
1618 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1619 	}
1620 	/* HDR MULT is always available */
1621 	drm_object_attach_property(&plane->base,
1622 				   dm->adev->mode_info.plane_hdr_mult_property,
1623 				   AMDGPU_HDR_MULT_DEFAULT);
1624 
1625 	/* Only enable plane CTM if both DPP and MPC gamut remap are available. */
1626 	if (dm->dc->caps.color.mpc.gamut_remap)
1627 		drm_object_attach_property(&plane->base,
1628 					   dm->adev->mode_info.plane_ctm_property, 0);
1629 
1630 	if (dpp_color_caps.hw_3d_lut) {
1631 		drm_object_attach_property(&plane->base,
1632 					   mode_info.plane_shaper_lut_property, 0);
1633 		drm_object_attach_property(&plane->base,
1634 					   mode_info.plane_shaper_lut_size_property,
1635 					   MAX_COLOR_LUT_ENTRIES);
1636 		drm_object_attach_property(&plane->base,
1637 					   mode_info.plane_shaper_tf_property,
1638 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1639 		drm_object_attach_property(&plane->base,
1640 					   mode_info.plane_lut3d_property, 0);
1641 		drm_object_attach_property(&plane->base,
1642 					   mode_info.plane_lut3d_size_property,
1643 					   MAX_COLOR_3DLUT_SIZE);
1644 	}
1645 
1646 	if (dpp_color_caps.ogam_ram) {
1647 		drm_object_attach_property(&plane->base,
1648 					   mode_info.plane_blend_lut_property, 0);
1649 		drm_object_attach_property(&plane->base,
1650 					   mode_info.plane_blend_lut_size_property,
1651 					   MAX_COLOR_LUT_ENTRIES);
1652 		drm_object_attach_property(&plane->base,
1653 					   mode_info.plane_blend_tf_property,
1654 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
1655 	}
1656 }
1657 
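/*
 * .atomic_set_property hook for the private color properties: blob-backed
 * properties are swapped via drm_property_replace_blob_from_id(), scalar
 * properties are stored directly, and color_mgmt_changed is set whenever
 * the value actually changes.
 */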
1658 static int
1659 dm_atomic_plane_set_property(struct drm_plane *plane,
1660 			     struct drm_plane_state *state,
1661 			     struct drm_property *property,
1662 			     uint64_t val)
1663 {
1664 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1665 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1666 	bool replaced = false;
1667 	int ret;
1668 
1669 	if (property == adev->mode_info.plane_degamma_lut_property) {
1670 		ret = drm_property_replace_blob_from_id(plane->dev,
1671 							&dm_plane_state->degamma_lut,
1672 							val, -1,
1673 							sizeof(struct drm_color_lut),
1674 							&replaced);
1675 		dm_plane_state->base.color_mgmt_changed |= replaced;
1676 		return ret;
1677 	} else if (property == adev->mode_info.plane_degamma_tf_property) {
1678 		if (dm_plane_state->degamma_tf != val) {
1679 			dm_plane_state->degamma_tf = val;
1680 			dm_plane_state->base.color_mgmt_changed = 1;
1681 		}
1682 	} else if (property == adev->mode_info.plane_hdr_mult_property) {
1683 		if (dm_plane_state->hdr_mult != val) {
1684 			dm_plane_state->hdr_mult = val;
1685 			dm_plane_state->base.color_mgmt_changed = 1;
1686 		}
1687 	} else if (property == adev->mode_info.plane_ctm_property) {
1688 		ret = drm_property_replace_blob_from_id(plane->dev,
1689 							&dm_plane_state->ctm,
1690 							val,
1691 							sizeof(struct drm_color_ctm_3x4), -1,
1692 							&replaced);
1693 		dm_plane_state->base.color_mgmt_changed |= replaced;
1694 		return ret;
1695 	} else if (property == adev->mode_info.plane_shaper_lut_property) {
1696 		ret = drm_property_replace_blob_from_id(plane->dev,
1697 							&dm_plane_state->shaper_lut,
1698 							val, -1,
1699 							sizeof(struct drm_color_lut),
1700 							&replaced);
1701 		dm_plane_state->base.color_mgmt_changed |= replaced;
1702 		return ret;
1703 	} else if (property == adev->mode_info.plane_shaper_tf_property) {
1704 		if (dm_plane_state->shaper_tf != val) {
1705 			dm_plane_state->shaper_tf = val;
1706 			dm_plane_state->base.color_mgmt_changed = 1;
1707 		}
1708 	} else if (property == adev->mode_info.plane_lut3d_property) {
1709 		ret = drm_property_replace_blob_from_id(plane->dev,
1710 							&dm_plane_state->lut3d,
1711 							val, -1,
1712 							sizeof(struct drm_color_lut),
1713 							&replaced);
1714 		dm_plane_state->base.color_mgmt_changed |= replaced;
1715 		return ret;
1716 	} else if (property == adev->mode_info.plane_blend_lut_property) {
1717 		ret = drm_property_replace_blob_from_id(plane->dev,
1718 							&dm_plane_state->blend_lut,
1719 							val, -1,
1720 							sizeof(struct drm_color_lut),
1721 							&replaced);
1722 		dm_plane_state->base.color_mgmt_changed |= replaced;
1723 		return ret;
1724 	} else if (property == adev->mode_info.plane_blend_tf_property) {
1725 		if (dm_plane_state->blend_tf != val) {
1726 			dm_plane_state->blend_tf = val;
1727 			dm_plane_state->base.color_mgmt_changed = 1;
1728 		}
1729 	} else {
1730 		drm_dbg_atomic(plane->dev,
1731 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
1732 			       plane->base.id, plane->name,
1733 			       property->base.id, property->name);
1734 		return -EINVAL;
1735 	}
1736 
1737 	return 0;
1738 }
1739 
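/*
 * .atomic_get_property hook: return the blob ID (or 0 when unset) for the
 * LUT/CTM properties and the stored value for the transfer-function and
 * HDR-multiplier properties.
 */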
1740 static int
1741 dm_atomic_plane_get_property(struct drm_plane *plane,
1742 			     const struct drm_plane_state *state,
1743 			     struct drm_property *property,
1744 			     uint64_t *val)
1745 {
1746 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1747 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
1748 
1749 	if (property == adev->mode_info.plane_degamma_lut_property) {
1750 		*val = (dm_plane_state->degamma_lut) ?
1751 			dm_plane_state->degamma_lut->base.id : 0;
1752 	} else if (property == adev->mode_info.plane_degamma_tf_property) {
1753 		*val = dm_plane_state->degamma_tf;
1754 	} else if (property == adev->mode_info.plane_hdr_mult_property) {
1755 		*val = dm_plane_state->hdr_mult;
1756 	} else if (property == adev->mode_info.plane_ctm_property) {
1757 		*val = (dm_plane_state->ctm) ?
1758 			dm_plane_state->ctm->base.id : 0;
1759 	} else if (property == adev->mode_info.plane_shaper_lut_property) {
1760 		*val = (dm_plane_state->shaper_lut) ?
1761 			dm_plane_state->shaper_lut->base.id : 0;
1762 	} else if (property == adev->mode_info.plane_shaper_tf_property) {
1763 		*val = dm_plane_state->shaper_tf;
1764 	} else if (property == adev->mode_info.plane_lut3d_property) {
1765 		*val = (dm_plane_state->lut3d) ?
1766 			dm_plane_state->lut3d->base.id : 0;
1767 	} else if (property == adev->mode_info.plane_blend_lut_property) {
1768 		*val = (dm_plane_state->blend_lut) ?
1769 			dm_plane_state->blend_lut->base.id : 0;
1770 	} else if (property == adev->mode_info.plane_blend_tf_property) {
1771 		*val = dm_plane_state->blend_tf;
1772 
1773 	} else {
1774 		return -EINVAL;
1775 	}
1776 
1777 	return 0;
1778 }
1779 #endif
1780 
1781 static const struct drm_plane_funcs dm_plane_funcs = {
1782 	.update_plane	= drm_atomic_helper_update_plane,
1783 	.disable_plane	= drm_atomic_helper_disable_plane,
1784 	.destroy	= drm_plane_helper_destroy,
1785 	.reset = amdgpu_dm_plane_drm_plane_reset,
1786 	.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
1787 	.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
1788 	.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
1789 #ifdef AMD_PRIVATE_COLOR
1790 	.atomic_set_property = dm_atomic_plane_set_property,
1791 	.atomic_get_property = dm_atomic_plane_get_property,
1792 #endif
1793 };
1794 
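/*
 * Create and register a DRM plane for DM: gather the supported formats and
 * modifiers, initialize the universal plane, and attach the blending, zpos,
 * color-encoding/range, rotation and damage-clip properties appropriate for
 * the plane type (primary, overlay or cursor).
 */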
1795 int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1796 				struct drm_plane *plane,
1797 				unsigned long possible_crtcs,
1798 				const struct dc_plane_cap *plane_cap)
1799 {
1800 	uint32_t formats[32];
1801 	int num_formats;
1802 	int res = -EPERM;
1803 	unsigned int supported_rotations;
1804 	uint64_t *modifiers = NULL;
1805 	unsigned int primary_zpos = dm->dc->caps.max_slave_planes;
1806 
1807 	num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
1808 							ARRAY_SIZE(formats));
1809 
1810 	res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
1811 	if (res)
1812 		return res;
1813 
1814 	if (modifiers == NULL)
1815 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
1816 
1817 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
1818 				       &dm_plane_funcs, formats, num_formats,
1819 				       modifiers, plane->type, NULL);
1820 	kfree(modifiers);
1821 	if (res)
1822 		return res;
1823 
1824 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1825 	    plane_cap && plane_cap->per_pixel_alpha) {
1826 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1827 					  BIT(DRM_MODE_BLEND_PREMULTI) |
1828 					  BIT(DRM_MODE_BLEND_COVERAGE);
1829 
1830 		drm_plane_create_alpha_property(plane);
1831 		drm_plane_create_blend_mode_property(plane, blend_caps);
1832 	}
1833 
1834 	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
1835 		/*
1836 		 * Allow OVERLAY planes to be used as underlays by assigning an
1837 		 * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
1838 		 */
1839 		drm_plane_create_zpos_immutable_property(plane, primary_zpos);
1840 	} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
1841 		/*
1842 		 * OVERLAY planes can be below or above the PRIMARY, but cannot
1843 		 * be above the CURSOR plane.
1844 		 */
1845 		unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);
1846 
1847 		drm_plane_create_zpos_property(plane, zpos, 0, 254);
1848 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1849 		drm_plane_create_zpos_immutable_property(plane, 255);
1850 	}
1851 
1852 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1853 	    plane_cap &&
1854 	    (plane_cap->pixel_format_support.nv12 ||
1855 	     plane_cap->pixel_format_support.p010)) {
1856 		/* This only affects YUV formats. */
1857 		drm_plane_create_color_properties(
1858 			plane,
1859 			BIT(DRM_COLOR_YCBCR_BT601) |
1860 			BIT(DRM_COLOR_YCBCR_BT709) |
1861 			BIT(DRM_COLOR_YCBCR_BT2020),
1862 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1863 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1864 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
1865 	}
1866 
1867 	supported_rotations =
1868 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1869 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1870 
1871 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
1872 	    plane->type != DRM_PLANE_TYPE_CURSOR)
1873 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1874 						   supported_rotations);
1875 
1876 	if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&
1877 	    plane->type != DRM_PLANE_TYPE_CURSOR)
1878 		drm_plane_enable_fb_damage_clips(plane);
1879 
1880 	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1881 		drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
1882 	else
1883 		drm_plane_helper_add(plane, &dm_plane_helper_funcs);
1884 
1885 #ifdef AMD_PRIVATE_COLOR
1886 	dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
1887 #endif
1888 	/* Create (reset) the plane state */
1889 	if (plane->funcs->reset)
1890 		plane->funcs->reset(plane);
1891 
1892 	return 0;
1893 }
1894 
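/* Return true if @format is in the video_formats[] (NV12/NV21/P010) list. */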
1895 bool amdgpu_dm_plane_is_video_format(uint32_t format)
1896 {
1897 	int i;
1898 
1899 	for (i = 0; i < ARRAY_SIZE(video_formats); i++)
1900 		if (format == video_formats[i])
1901 			return true;
1902 
1903 	return false;
1904 }
1905 
1906