1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2022-2026 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_blend.h>
29 #include "drm/drm_framebuffer.h"
30 #include <drm/drm_gem_atomic_helper.h>
31 #include <drm/drm_plane_helper.h>
32 #include <drm/drm_gem_framebuffer_helper.h>
33 #include <drm/drm_fourcc.h>
34
35 #include "amdgpu.h"
36 #include "dal_asic_id.h"
37 #include "amdgpu_display.h"
38 #include "amdgpu_dm_trace.h"
39 #include "amdgpu_dm_plane.h"
40 #include "amdgpu_dm_colorop.h"
41 #include "gc/gc_11_0_0_offset.h"
42 #include "gc/gc_11_0_0_sh_mask.h"
43
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
50 static const uint32_t rgb_formats[] = {
51 DRM_FORMAT_XRGB8888,
52 DRM_FORMAT_ARGB8888,
53 DRM_FORMAT_RGBA8888,
54 DRM_FORMAT_XRGB2101010,
55 DRM_FORMAT_XBGR2101010,
56 DRM_FORMAT_ARGB2101010,
57 DRM_FORMAT_ABGR2101010,
58 DRM_FORMAT_XRGB16161616,
59 DRM_FORMAT_XBGR16161616,
60 DRM_FORMAT_ARGB16161616,
61 DRM_FORMAT_ABGR16161616,
62 DRM_FORMAT_XBGR8888,
63 DRM_FORMAT_ABGR8888,
64 DRM_FORMAT_RGB565,
65 };
66
67 static const uint32_t overlay_formats[] = {
68 DRM_FORMAT_XRGB8888,
69 DRM_FORMAT_ARGB8888,
70 DRM_FORMAT_RGBA8888,
71 DRM_FORMAT_XBGR8888,
72 DRM_FORMAT_ABGR8888,
73 DRM_FORMAT_RGB565,
74 DRM_FORMAT_NV21,
75 DRM_FORMAT_NV12,
76 DRM_FORMAT_P010
77 };
78
79 static const uint32_t video_formats[] = {
80 DRM_FORMAT_NV21,
81 DRM_FORMAT_NV12,
82 DRM_FORMAT_P010
83 };
84
85 static const u32 cursor_formats[] = {
86 DRM_FORMAT_ARGB8888
87 };
88
89 enum dm_micro_swizzle {
90 MICRO_SWIZZLE_Z = 0,
91 MICRO_SWIZZLE_S = 1,
92 MICRO_SWIZZLE_D = 2,
93 MICRO_SWIZZLE_R = 3
94 };
95
const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
97 {
98 return amdgpu_lookup_format_info(pixel_format, modifier);
99 }
100
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
						    bool *per_pixel_alpha, bool *pre_multiplied_alpha,
						    bool *global_alpha, int *global_alpha_value)
104 {
105 *per_pixel_alpha = false;
106 *pre_multiplied_alpha = true;
107 *global_alpha = false;
108 *global_alpha_value = 0xff;
109
110
111 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
112 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
113 static const uint32_t alpha_formats[] = {
114 DRM_FORMAT_ARGB8888,
115 DRM_FORMAT_RGBA8888,
116 DRM_FORMAT_ABGR8888,
117 DRM_FORMAT_ARGB2101010,
118 DRM_FORMAT_ABGR2101010,
119 DRM_FORMAT_ARGB16161616,
120 DRM_FORMAT_ABGR16161616,
121 DRM_FORMAT_ARGB16161616F,
122 };
123 uint32_t format = plane_state->fb->format->format;
124 unsigned int i;
125
126 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
127 if (format == alpha_formats[i]) {
128 *per_pixel_alpha = true;
129 break;
130 }
131 }
132
133 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
134 *pre_multiplied_alpha = false;
135 }
136
137 if (plane_state->alpha < 0xffff) {
138 *global_alpha = true;
139 *global_alpha_value = plane_state->alpha >> 8;
140 }
141 }
142
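/*
 * Append @mod to a caller-owned, heap-allocated modifier array, doubling the
 * capacity whenever it runs out of room. On allocation failure the array is
 * freed and *mods is set to NULL so that callers can report -ENOMEM later.
 *
 * Hypothetical usage sketch (mirrors amdgpu_dm_plane_get_plane_modifiers()
 * below; the values are illustrative only):
 *
 *	uint64_t size = 0, capacity = 128;
 *	uint64_t *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
 *
 *	amdgpu_dm_plane_add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 *	amdgpu_dm_plane_add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 */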
static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
144 {
145 if (!*mods)
146 return;
147
148 if (*cap - *size < 1) {
149 uint64_t new_cap = *cap * 2;
150 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
151
152 if (!new_mods) {
153 kfree(*mods);
154 *mods = NULL;
155 return;
156 }
157
158 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
159 kfree(*mods);
160 *mods = new_mods;
161 *cap = new_cap;
162 }
163
164 (*mods)[*size] = mod;
165 *size += 1;
166 }
167
static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
169 {
170 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
171 }
172
static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
174 {
175 if (modifier == DRM_FORMAT_MOD_LINEAR)
176 return 0;
177
178 return AMD_FMT_MOD_GET(TILE, modifier);
179 }
180
static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
							     uint64_t tiling_flags)
183 {
184 /* Fill GFX8 params */
185 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
186 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
187
188 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
189 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
190 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
191 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
192 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
193
194 tiling_info->gfxversion = DcGfxVersion8;
195 /* XXX fix me for VI */
196 tiling_info->gfx8.num_banks = num_banks;
197 tiling_info->gfx8.array_mode =
198 DC_ARRAY_2D_TILED_THIN1;
199 tiling_info->gfx8.tile_split = tile_split;
200 tiling_info->gfx8.bank_width = bankw;
201 tiling_info->gfx8.bank_height = bankh;
202 tiling_info->gfx8.tile_aspect = mtaspect;
203 tiling_info->gfx8.tile_mode =
204 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
205 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
206 == DC_ARRAY_1D_TILED_THIN1) {
207 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
208 }
209
210 tiling_info->gfx8.pipe_config =
211 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
212 }
213
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
							      struct dc_tiling_info *tiling_info)
216 {
217 /* Fill GFX9 params */
218 tiling_info->gfx9.num_pipes =
219 adev->gfx.config.gb_addr_config_fields.num_pipes;
220 tiling_info->gfx9.num_banks =
221 adev->gfx.config.gb_addr_config_fields.num_banks;
222 tiling_info->gfx9.pipe_interleave =
223 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
224 tiling_info->gfx9.num_shader_engines =
225 adev->gfx.config.gb_addr_config_fields.num_se;
226 tiling_info->gfx9.max_compressed_frags =
227 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
228 tiling_info->gfx9.num_rb_per_se =
229 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
230 tiling_info->gfx9.shaderEnable = 1;
231 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
232 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
233 }
234
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
								 struct dc_tiling_info *tiling_info,
								 uint64_t modifier)
238 {
239 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
240 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
241 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
242 unsigned int pipes_log2;
243
244 pipes_log2 = min(5u, mod_pipe_xor_bits);
245
246 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
247
248 if (!IS_AMD_FMT_MOD(modifier))
249 return;
250
251 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
252 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
253
254 if (adev->family >= AMDGPU_FAMILY_NV) {
255 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
256 } else {
257 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
258
259 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
260 }
261 }
262
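/*
 * Sanity-check the DCC parameters requested through the framebuffer modifier
 * against what DC reports for this surface. In sketch form, the checks below
 * are: DCC on video formats is only accepted on GFX12-family parts, DC must
 * expose get_dcc_compression_cap(), the surface must be reported as capable,
 * and a request for non-independent 64B blocks is rejected when the hardware
 * requires independent 64B blocks.
 */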
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
					const enum surface_pixel_format format,
					const enum dc_rotation_angle rotation,
					const struct dc_tiling_info *tiling_info,
					const struct dc_plane_dcc_param *dcc,
					const struct dc_plane_address *address,
					const struct plane_size *plane_size)
270 {
271 struct dc *dc = adev->dm.dc;
272 struct dc_dcc_surface_param input;
273 struct dc_surface_dcc_cap output;
274
275 memset(&input, 0, sizeof(input));
276 memset(&output, 0, sizeof(output));
277
278 if (!dcc->enable)
279 return 0;
280
281 if (adev->family != AMDGPU_FAMILY_GC_12_0_0 &&
282 format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
283 return -EINVAL;
284
285 if (!dc->cap_funcs.get_dcc_compression_cap)
286 return -EINVAL;
287
288 input.format = format;
289 input.surface_size.width = plane_size->surface_size.width;
290 input.surface_size.height = plane_size->surface_size.height;
291 input.swizzle_mode = tiling_info->gfx9.swizzle;
292
293 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
294 input.scan = SCAN_DIRECTION_HORIZONTAL;
295 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
296 input.scan = SCAN_DIRECTION_VERTICAL;
297
298 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
299 return -EINVAL;
300
301 if (!output.capable)
302 return -EINVAL;
303
304 if (dcc->independent_64b_blks == 0 &&
305 output.grph.rgb.independent_64b_blks != 0)
306 return -EINVAL;
307
308 return 0;
309 }
310
static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								     const struct amdgpu_framebuffer *afb,
								     const enum surface_pixel_format format,
								     const enum dc_rotation_angle rotation,
								     const struct plane_size *plane_size,
								     struct dc_tiling_info *tiling_info,
								     struct dc_plane_dcc_param *dcc,
								     struct dc_plane_address *address)
319 {
320 const uint64_t modifier = afb->base.modifier;
321 int ret = 0;
322
323 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
324 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
325 tiling_info->gfxversion = DcGfxVersion9;
326
327 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
328 uint64_t dcc_address = afb->address + afb->base.offsets[1];
329 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
330 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
331
332 dcc->enable = 1;
333 dcc->meta_pitch = afb->base.pitches[1];
334 dcc->independent_64b_blks = independent_64b_blks;
335 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
336 if (independent_64b_blks && independent_128b_blks)
337 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
338 else if (independent_128b_blks)
339 dcc->dcc_ind_blk = hubp_ind_block_128b;
340 else if (independent_64b_blks && !independent_128b_blks)
341 dcc->dcc_ind_blk = hubp_ind_block_64b;
342 else
343 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
344 } else {
345 if (independent_64b_blks)
346 dcc->dcc_ind_blk = hubp_ind_block_64b;
347 else
348 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
349 }
350
351 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
352 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
353 }
354
355 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
356 if (ret)
357 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
358
359 return ret;
360 }
361
static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amdgpu_device *adev,
								      const struct amdgpu_framebuffer *afb,
								      const enum surface_pixel_format format,
								      const enum dc_rotation_angle rotation,
								      const struct plane_size *plane_size,
								      struct dc_tiling_info *tiling_info,
								      struct dc_plane_dcc_param *dcc,
								      struct dc_plane_address *address)
370 {
371 const uint64_t modifier = afb->base.modifier;
372 int ret = 0;
373
374 /* TODO: Most of this function shouldn't be needed on GFX12. */
375 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
376
377 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
378 tiling_info->gfxversion = DcGfxAddr3;
379
380 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
381 int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
382
383 dcc->enable = 1;
384 dcc->independent_64b_blks = max_compressed_block == 0;
385
386 if (max_compressed_block == 0)
387 dcc->dcc_ind_blk = hubp_ind_block_64b;
388 else if (max_compressed_block == 1)
389 dcc->dcc_ind_blk = hubp_ind_block_128b;
390 else
391 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
392 }
393
394 /* TODO: This seems wrong because there is no DCC plane on GFX12. */
395 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
396 if (ret)
397 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
398
399 return ret;
400 }
401
static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
406 {
407 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
408
409 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
410 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
411 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
412 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
413 AMD_FMT_MOD_SET(DCC, 1) |
414 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
415 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
416 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
417
418 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
419 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
420 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
421 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
422 AMD_FMT_MOD_SET(DCC, 1) |
423 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
424 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
425 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
426 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
427
428 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
429 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
430 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
431 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
432
433 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
434 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
435 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
436 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
437
438
439 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
440 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
441 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
442 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
443
444 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
445 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
446 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
447 }
448
static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
					       uint64_t **mods,
					       uint64_t *size,
					       uint64_t *capacity)
453 {
454 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
455 int pipe_xor_bits = min(8, pipes +
456 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
457 int bank_xor_bits = min(8 - pipe_xor_bits,
458 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
459 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
460 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
461
462
463 if (adev->family == AMDGPU_FAMILY_RV) {
464 /* Raven2 and later */
465 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
466
467 /*
468 * No _D DCC swizzles yet because we only allow 32bpp, which
469 * doesn't support _D on DCN
470 */
471
472 if (has_constant_encode) {
473 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
474 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
475 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
476 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
477 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
478 AMD_FMT_MOD_SET(DCC, 1) |
479 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
480 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
481 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
482 }
483
484 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
485 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
486 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
487 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
488 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
489 AMD_FMT_MOD_SET(DCC, 1) |
490 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
491 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
492 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
493
494 if (has_constant_encode) {
495 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
496 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
497 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
498 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
499 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
500 AMD_FMT_MOD_SET(DCC, 1) |
501 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
502 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
503 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
504 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
505 AMD_FMT_MOD_SET(RB, rb) |
506 AMD_FMT_MOD_SET(PIPE, pipes));
507 }
508
509 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
510 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
511 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
512 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
513 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
514 AMD_FMT_MOD_SET(DCC, 1) |
515 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
516 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
517 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
518 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
519 AMD_FMT_MOD_SET(RB, rb) |
520 AMD_FMT_MOD_SET(PIPE, pipes));
521 }
522
523 /*
524 * Only supported for 64bpp on Raven, will be filtered on format in
525 * amdgpu_dm_plane_format_mod_supported.
526 */
527 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
528 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
529 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
530 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
531 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
532
533 if (adev->family == AMDGPU_FAMILY_RV) {
534 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
535 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
536 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
537 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
538 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
539 }
540
541 /*
542 * Only supported for 64bpp on Raven, will be filtered on format in
543 * amdgpu_dm_plane_format_mod_supported.
544 */
545 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
546 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
547 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
548
549 if (adev->family == AMDGPU_FAMILY_RV) {
550 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
551 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
552 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
553 }
554 }
555
static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
						  uint64_t **mods,
						  uint64_t *size,
						  uint64_t *capacity)
560 {
561 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
562 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
563
564 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
565 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
566 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
567 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
568 AMD_FMT_MOD_SET(PACKERS, pkrs) |
569 AMD_FMT_MOD_SET(DCC, 1) |
570 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
571 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
572 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
573 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
574
575 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
576 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
577 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
578 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
579 AMD_FMT_MOD_SET(PACKERS, pkrs) |
580 AMD_FMT_MOD_SET(DCC, 1) |
581 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
582 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
583 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
584
585 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
586 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
587 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
588 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
589 AMD_FMT_MOD_SET(PACKERS, pkrs) |
590 AMD_FMT_MOD_SET(DCC, 1) |
591 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
592 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
593 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
594 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
595 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
596
597 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
598 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
599 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
600 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
601 AMD_FMT_MOD_SET(PACKERS, pkrs) |
602 AMD_FMT_MOD_SET(DCC, 1) |
603 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
604 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
605 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
606 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
607
608 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
609 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
610 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
611 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
612 AMD_FMT_MOD_SET(PACKERS, pkrs));
613
614 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
615 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
616 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
617 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
618 AMD_FMT_MOD_SET(PACKERS, pkrs));
619
620 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
621 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
622 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
623 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
624
625 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
626 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
627 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
628 }
629
static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
						uint64_t **mods, uint64_t *size, uint64_t *capacity)
632 {
633 int num_pipes = 0;
634 int pipe_xor_bits = 0;
635 int num_pkrs = 0;
636 int pkrs = 0;
637 u32 gb_addr_config;
638 u8 i = 0;
639 unsigned int swizzle_r_x;
640 uint64_t modifier_r_x;
641 uint64_t modifier_dcc_best;
642 uint64_t modifier_dcc_4k;
643
	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
	 */
647 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
648 ASSERT(gb_addr_config != 0);
649
650 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
651 pkrs = ilog2(num_pkrs);
652 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
653 pipe_xor_bits = ilog2(num_pipes);
654
655 for (i = 0; i < 2; i++) {
656 /* Insert the best one first. */
657 /* R_X swizzle modes are the best for rendering and DCC requires them. */
658 if (num_pipes > 16)
659 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
660 else
661 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
662
663 modifier_r_x = AMD_FMT_MOD |
664 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
665 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
666 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
667 AMD_FMT_MOD_SET(PACKERS, pkrs);
668
669 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
670 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
671 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
672 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
673 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
674
675 /* DCC settings for 4K and greater resolutions. (required by display hw) */
676 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
677 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
678 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
679 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
680
681 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
682 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);
683
684 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
685 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
686
687 amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
688 }
689
690 amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
691 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
692 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
693 }
694
static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
						uint64_t **mods, uint64_t *size, uint64_t *capacity)
697 {
698 uint64_t ver = AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
699 uint64_t mod_256k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
700 uint64_t mod_64k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
701 uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
702 uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
703 uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
704 uint8_t max_comp_block[] = {2, 1, 0};
705 uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
706 uint8_t i = 0, j = 0;
707 /* Note, linear (no DCC) gets added to the modifier list for all chips by the caller. */
708 uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b};
709
710 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
711 max_comp_block_mod[i] = AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block[i]);
712
713 /* With DCC: Best choice should be kept first. Hence, add all 256k modifiers of different
714 * max compressed blocks first and then move on to the next smaller sized layouts.
715 */
716 for (j = 0; j < ARRAY_SIZE(gfx12_modifiers); j++)
717 for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
718 amdgpu_dm_plane_add_modifier(mods, size, capacity,
719 ver | dcc | max_comp_block_mod[i] | gfx12_modifiers[j]);
720
721 /* Without DCC. */
722 for (i = 0; i < ARRAY_SIZE(gfx12_modifiers); i++)
723 amdgpu_dm_plane_add_modifier(mods, size, capacity, gfx12_modifiers[i]);
724
725 }
726
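/*
 * Build the modifier list advertised for a plane of the given type. The
 * per-family helpers above append the tiled/DCC modifiers (best choice first),
 * LINEAR is always appended afterwards, and DRM_FORMAT_MOD_INVALID terminates
 * the list. Cursor planes only advertise LINEAR. Pre-GFX9 parts get no
 * modifier list at all (*mods stays NULL).
 */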
static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
728 {
729 uint64_t size = 0, capacity = 128;
730 *mods = NULL;
731
732 /* We have not hooked up any pre-GFX9 modifiers. */
733 if (adev->family < AMDGPU_FAMILY_AI)
734 return 0;
735
736 *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
737
738 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
739 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
740 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
741 return *mods ? 0 : -ENOMEM;
742 }
743
744 switch (adev->family) {
745 case AMDGPU_FAMILY_AI:
746 case AMDGPU_FAMILY_RV:
747 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
748 break;
749 case AMDGPU_FAMILY_NV:
750 case AMDGPU_FAMILY_VGH:
751 case AMDGPU_FAMILY_YC:
752 case AMDGPU_FAMILY_GC_10_3_6:
753 case AMDGPU_FAMILY_GC_10_3_7:
754 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
755 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
756 else
757 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
758 break;
759 case AMDGPU_FAMILY_GC_11_0_0:
760 case AMDGPU_FAMILY_GC_11_0_1:
761 case AMDGPU_FAMILY_GC_11_5_0:
762 case AMDGPU_FAMILY_GC_11_5_4:
763 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
764 break;
765 case AMDGPU_FAMILY_GC_12_0_0:
766 amdgpu_dm_plane_add_gfx12_modifiers(adev, mods, &size, &capacity);
767 break;
768 }
769
770 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
771
772 /* INVALID marks the end of the list. */
773 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
774
775 if (!*mods)
776 return -ENOMEM;
777
778 return 0;
779 }
780
static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
					     const struct dc_plane_cap *plane_cap,
					     uint32_t *formats, int max_formats)
784 {
785 int i, num_formats = 0;
786
787 /*
788 * TODO: Query support for each group of formats directly from
789 * DC plane caps. This will require adding more formats to the
790 * caps list.
791 */
792
793 if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
794 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
795 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
796 if (num_formats >= max_formats)
797 break;
798
799 formats[num_formats++] = rgb_formats[i];
800 }
801
802 if (plane_cap && plane_cap->pixel_format_support.nv12)
803 formats[num_formats++] = DRM_FORMAT_NV12;
804 if (plane_cap && plane_cap->pixel_format_support.p010)
805 formats[num_formats++] = DRM_FORMAT_P010;
806 if (plane_cap && plane_cap->pixel_format_support.fp16) {
807 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
808 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
809 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
810 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
811 }
812 } else {
813 switch (plane->type) {
814 case DRM_PLANE_TYPE_OVERLAY:
815 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
816 if (num_formats >= max_formats)
817 break;
818
819 formats[num_formats++] = overlay_formats[i];
820 }
821 break;
822
823 case DRM_PLANE_TYPE_CURSOR:
824 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
825 if (num_formats >= max_formats)
826 break;
827
828 formats[num_formats++] = cursor_formats[i];
829 }
830 break;
831
832 default:
833 break;
834 }
835 }
836
837 return num_formats;
838 }
839
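/*
 * Translate a DRM framebuffer into DC's buffer description: surface/chroma
 * sizes, pitches in pixels, tiling info, DCC parameters and GPU addresses.
 * Pitches are converted from bytes to pixels, e.g. for a hypothetical
 * 1920-wide XRGB8888 surface with pitches[0] == 7680 bytes and cpp[0] == 4,
 * surface_pitch below ends up as 1920.
 */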
int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
						 const struct amdgpu_framebuffer *afb,
						 const enum surface_pixel_format format,
						 const enum dc_rotation_angle rotation,
						 const uint64_t tiling_flags,
						 struct dc_tiling_info *tiling_info,
						 struct plane_size *plane_size,
						 struct dc_plane_dcc_param *dcc,
						 struct dc_plane_address *address,
						 bool tmz_surface)
850 {
851 const struct drm_framebuffer *fb = &afb->base;
852 int ret;
853
854 memset(tiling_info, 0, sizeof(*tiling_info));
855 memset(plane_size, 0, sizeof(*plane_size));
856 memset(dcc, 0, sizeof(*dcc));
857 memset(address, 0, sizeof(*address));
858
859 address->tmz_surface = tmz_surface;
860
861 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
862 uint64_t addr = afb->address + fb->offsets[0];
863
864 plane_size->surface_size.x = 0;
865 plane_size->surface_size.y = 0;
866 plane_size->surface_size.width = fb->width;
867 plane_size->surface_size.height = fb->height;
868 plane_size->surface_pitch =
869 fb->pitches[0] / fb->format->cpp[0];
870
871 address->type = PLN_ADDR_TYPE_GRAPHICS;
872 address->grph.addr.low_part = lower_32_bits(addr);
873 address->grph.addr.high_part = upper_32_bits(addr);
874 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
875 uint64_t luma_addr = afb->address + fb->offsets[0];
876 uint64_t chroma_addr = afb->address + fb->offsets[1];
877
878 plane_size->surface_size.x = 0;
879 plane_size->surface_size.y = 0;
880 plane_size->surface_size.width = fb->width;
881 plane_size->surface_size.height = fb->height;
882 plane_size->surface_pitch =
883 fb->pitches[0] / fb->format->cpp[0];
884
885 plane_size->chroma_size.x = 0;
886 plane_size->chroma_size.y = 0;
887 /* TODO: set these based on surface format */
888 plane_size->chroma_size.width = fb->width / 2;
889 plane_size->chroma_size.height = fb->height / 2;
890
891 plane_size->chroma_pitch =
892 fb->pitches[1] / fb->format->cpp[1];
893
894 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
895 address->video_progressive.luma_addr.low_part =
896 lower_32_bits(luma_addr);
897 address->video_progressive.luma_addr.high_part =
898 upper_32_bits(luma_addr);
899 address->video_progressive.chroma_addr.low_part =
900 lower_32_bits(chroma_addr);
901 address->video_progressive.chroma_addr.high_part =
902 upper_32_bits(chroma_addr);
903 }
904
905 if (adev->family == AMDGPU_FAMILY_GC_12_0_0) {
906 ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
907 rotation, plane_size,
908 tiling_info, dcc,
909 address);
910 if (ret)
911 return ret;
912 } else if (adev->family >= AMDGPU_FAMILY_AI) {
913 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
914 rotation, plane_size,
915 tiling_info, dcc,
916 address);
917 if (ret)
918 return ret;
919 } else {
920 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
921 }
922
923 return 0;
924 }
925
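/*
 * prepare_fb hook: take the GEM BO backing the new framebuffer, reserve it,
 * reserve fence slots, pin it (VRAM for cursors, otherwise any display-capable
 * domain), bind it to GART and record the resulting GPU address in the
 * amdgpu_framebuffer. The pin is undone in the cleanup_fb hook below.
 */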
static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
					     struct drm_plane_state *new_state)
928 {
929 struct amdgpu_framebuffer *afb;
930 struct drm_gem_object *obj;
931 struct amdgpu_device *adev;
932 struct amdgpu_bo *rbo;
933 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
934 uint32_t domain;
935 int r;
936
937 if (!new_state->fb) {
938 DRM_DEBUG_KMS("No FB bound\n");
939 return 0;
940 }
941
942 afb = to_amdgpu_framebuffer(new_state->fb);
943 obj = drm_gem_fb_get_obj(new_state->fb, 0);
944 if (!obj) {
945 DRM_ERROR("Failed to get obj from framebuffer\n");
946 return -EINVAL;
947 }
948
949 rbo = gem_to_amdgpu_bo(obj);
950 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
951 r = amdgpu_bo_reserve(rbo, true);
952 if (r) {
953 drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
954 return r;
955 }
956
957 r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES);
958 if (r)
959 goto error_unlock;
960
961 if (plane->type != DRM_PLANE_TYPE_CURSOR)
962 domain = amdgpu_display_supported_domains(adev, rbo->flags);
963 else
964 domain = AMDGPU_GEM_DOMAIN_VRAM;
965
966 rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
967 r = amdgpu_bo_pin(rbo, domain);
968 if (unlikely(r != 0)) {
969 if (r != -ERESTARTSYS)
970 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
971 goto error_unlock;
972 }
973
974 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
975 if (unlikely(r != 0)) {
976 DRM_ERROR("%p bind failed\n", rbo);
977 goto error_unpin;
978 }
979
980 r = drm_gem_plane_helper_prepare_fb(plane, new_state);
981 if (unlikely(r != 0))
982 goto error_unpin;
983
984 amdgpu_bo_unreserve(rbo);
985
986 afb->address = amdgpu_bo_gpu_offset(rbo);
987
988 amdgpu_bo_ref(rbo);
989
	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
998 dm_plane_state_old = to_dm_plane_state(plane->state);
999 dm_plane_state_new = to_dm_plane_state(new_state);
1000
1001 if (dm_plane_state_new->dc_state &&
1002 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
1003 struct dc_plane_state *plane_state =
1004 dm_plane_state_new->dc_state;
1005
1006 amdgpu_dm_plane_fill_plane_buffer_attributes(
1007 adev, afb, plane_state->format, plane_state->rotation,
1008 afb->tiling_flags,
1009 &plane_state->tiling_info, &plane_state->plane_size,
1010 &plane_state->dcc, &plane_state->address,
1011 afb->tmz_surface);
1012 }
1013
1014 return 0;
1015
1016 error_unpin:
1017 amdgpu_bo_unpin(rbo);
1018
1019 error_unlock:
1020 amdgpu_bo_unreserve(rbo);
1021 return r;
1022 }
1023
static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
					      struct drm_plane_state *old_state)
1026 {
1027 struct amdgpu_bo *rbo;
1028 int r;
1029
1030 if (!old_state->fb)
1031 return;
1032
1033 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
1034 r = amdgpu_bo_reserve(rbo, false);
1035 if (unlikely(r)) {
1036 DRM_ERROR("failed to reserve rbo before unpin\n");
1037 return;
1038 }
1039
1040 amdgpu_bo_unpin(rbo);
1041 amdgpu_bo_unreserve(rbo);
1042 amdgpu_bo_unref(&rbo);
1043 }
1044
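/*
 * Look up the per-format scaling limits from the DC plane caps. The factors
 * are expressed in units of 1/1000, so for example a (hypothetical)
 * max_upscale_factor of 16000 means up to 16x upscaling and a
 * max_downscale_factor of 250 means downscaling to 25% of the source size.
 * A factor of 1 is the caps' way of saying "no scaling" and is normalized to
 * 1000 (1.0) below.
 */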
static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
							 struct drm_framebuffer *fb,
							 int *min_downscale, int *max_upscale)
1048 {
1049 struct amdgpu_device *adev = drm_to_adev(dev);
1050 struct dc *dc = adev->dm.dc;
1051 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
1052 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
1053
1054 switch (fb->format->format) {
1055 case DRM_FORMAT_P010:
1056 case DRM_FORMAT_NV12:
1057 case DRM_FORMAT_NV21:
1058 *max_upscale = plane_cap->max_upscale_factor.nv12;
1059 *min_downscale = plane_cap->max_downscale_factor.nv12;
1060 break;
1061
1062 /* All 64 bpp formats have the same fp16 scaling limits */
1063 case DRM_FORMAT_XRGB16161616F:
1064 case DRM_FORMAT_ARGB16161616F:
1065 case DRM_FORMAT_XBGR16161616F:
1066 case DRM_FORMAT_ABGR16161616F:
1067 case DRM_FORMAT_XRGB16161616:
1068 case DRM_FORMAT_ARGB16161616:
1069 case DRM_FORMAT_XBGR16161616:
1070 case DRM_FORMAT_ABGR16161616:
1071 *max_upscale = plane_cap->max_upscale_factor.fp16;
1072 *min_downscale = plane_cap->max_downscale_factor.fp16;
1073 break;
1074
1075 default:
1076 *max_upscale = plane_cap->max_upscale_factor.argb8888;
1077 *min_downscale = plane_cap->max_downscale_factor.argb8888;
1078 break;
1079 }
1080
1081 /*
1082 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
1083 * scaling factor of 1.0 == 1000 units.
1084 */
1085 if (*max_upscale == 1)
1086 *max_upscale = 1000;
1087
1088 if (*min_downscale == 1)
1089 *min_downscale = 1000;
1090 }
1091
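/*
 * Plane check helper shared with atomic_check: validates the viewport and then
 * converts DC's 1/1000-based scaling limits into the 16.16 fixed-point
 * src/dst ratios drm_atomic_helper_check_plane_state() expects. As a worked
 * example with hypothetical caps of max_upscale = 16000 and
 * min_downscale = 250: min_scale = (1000 << 16) / 16000 = 4096 (i.e. 1/16)
 * and max_scale = (1000 << 16) / 250 = 262144 (i.e. 4.0).
 */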
int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
1094 {
1095 struct drm_framebuffer *fb = state->fb;
1096 int min_downscale, max_upscale;
1097 int min_scale = 0;
1098 int max_scale = INT_MAX;
1099
1100 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1101 if (fb && state->crtc) {
1102 /* Validate viewport to cover the case when only the position changes */
1103 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1104 int viewport_width = state->crtc_w;
1105 int viewport_height = state->crtc_h;
1106
1107 if (state->crtc_x < 0)
1108 viewport_width += state->crtc_x;
1109 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1110 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1111
1112 if (state->crtc_y < 0)
1113 viewport_height += state->crtc_y;
1114 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1115 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1116
1117 if (viewport_width < 0 || viewport_height < 0) {
1118 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1119 return -EINVAL;
1120 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1121 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1122 return -EINVAL;
1123 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
1124 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1125 return -EINVAL;
1126 }
1127
1128 }
1129
1130 /* Get min/max allowed scaling factors from plane caps. */
1131 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
1132 &min_downscale, &max_upscale);
1133 /*
1134 * Convert to drm convention: 16.16 fixed point, instead of dc's
1135 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1136 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1137 */
1138 min_scale = (1000 << 16) / max_upscale;
1139 max_scale = (1000 << 16) / min_downscale;
1140 }
1141
1142 return drm_atomic_helper_check_plane_state(
1143 state, new_crtc_state, min_scale, max_scale, true, true);
1144 }
1145
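/*
 * Convert a DRM plane state into DC's dc_scaling_info. DRM source coordinates
 * are 16.16 fixed point, so e.g. a hypothetical src_w of (1920 << 16) becomes
 * a 1920-pixel wide src_rect after the shifts below. The resulting per-axis
 * scale is then checked in units of 1/1000 against the per-format caps, e.g.
 * a 1920 -> 3840 upscale gives scale_w = 2000.
 */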
int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
					 const struct drm_plane_state *state,
					 struct dc_scaling_info *scaling_info)
1149 {
1150 int scale_w, scale_h, min_downscale, max_upscale;
1151
1152 memset(scaling_info, 0, sizeof(*scaling_info));
1153
1154 /* Source is fixed 16.16 but we ignore mantissa for now... */
1155 scaling_info->src_rect.x = state->src_x >> 16;
1156 scaling_info->src_rect.y = state->src_y >> 16;
1157
1158 /*
1159 * For reasons we don't (yet) fully understand a non-zero
1160 * src_y coordinate into an NV12 buffer can cause a
1161 * system hang on DCN1x.
1162 * To avoid hangs (and maybe be overly cautious)
1163 * let's reject both non-zero src_x and src_y.
1164 *
1165 * We currently know of only one use-case to reproduce a
1166 * scenario with non-zero src_x and src_y for NV12, which
1167 * is to gesture the YouTube Android app into full screen
1168 * on ChromeOS.
1169 */
1170 if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1171 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) &&
1172 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1173 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1174 return -EINVAL;
1175
1176 scaling_info->src_rect.width = state->src_w >> 16;
1177 if (scaling_info->src_rect.width == 0)
1178 return -EINVAL;
1179
1180 scaling_info->src_rect.height = state->src_h >> 16;
1181 if (scaling_info->src_rect.height == 0)
1182 return -EINVAL;
1183
1184 scaling_info->dst_rect.x = state->crtc_x;
1185 scaling_info->dst_rect.y = state->crtc_y;
1186
1187 if (state->crtc_w == 0)
1188 return -EINVAL;
1189
1190 scaling_info->dst_rect.width = state->crtc_w;
1191
1192 if (state->crtc_h == 0)
1193 return -EINVAL;
1194
1195 scaling_info->dst_rect.height = state->crtc_h;
1196
1197 /* DRM doesn't specify clipping on destination output. */
1198 scaling_info->clip_rect = scaling_info->dst_rect;
1199
1200 /* Validate scaling per-format with DC plane caps */
1201 if (state->plane && state->plane->dev && state->fb) {
1202 amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
1203 &min_downscale, &max_upscale);
1204 } else {
1205 min_downscale = 250;
1206 max_upscale = 16000;
1207 }
1208
1209 scale_w = scaling_info->dst_rect.width * 1000 /
1210 scaling_info->src_rect.width;
1211
1212 if (scale_w < min_downscale || scale_w > max_upscale)
1213 return -EINVAL;
1214
1215 scale_h = scaling_info->dst_rect.height * 1000 /
1216 scaling_info->src_rect.height;
1217
1218 if (scale_h < min_downscale || scale_h > max_upscale)
1219 return -EINVAL;
1220
1221 /*
1222 * The "scaling_quality" can be ignored for now, quality = 0 has DC
1223 * assume reasonable defaults based on the format.
1224 */
1225
1226 return 0;
1227 }
1228
static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
1231 {
1232 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1233 plane);
1234 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1235 struct dc *dc = adev->dm.dc;
1236 struct dm_plane_state *dm_plane_state;
1237 struct dc_scaling_info scaling_info;
1238 struct drm_crtc_state *new_crtc_state;
1239 int ret;
1240
1241 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
1242
1243 dm_plane_state = to_dm_plane_state(new_plane_state);
1244
1245 if (!dm_plane_state->dc_state)
1246 return 0;
1247
1248 new_crtc_state =
1249 drm_atomic_get_new_crtc_state(state,
1250 new_plane_state->crtc);
1251 if (!new_crtc_state)
1252 return -EINVAL;
1253
1254 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
1255 if (ret)
1256 return ret;
1257
1258 /* Reject commits that attempt to use both COLOR_PIPELINE and CRTC DEGAMMA_LUT */
1259 if (new_plane_state->color_pipeline && new_crtc_state->degamma_lut) {
1260 drm_dbg_atomic(plane->dev,
1261 "[PLANE:%d:%s] COLOR_PIPELINE and CRTC DEGAMMA_LUT cannot be enabled simultaneously\n",
1262 plane->base.id, plane->name);
1263 return -EINVAL;
1264 }
1265
1266 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
1267 if (ret)
1268 return ret;
1269
1270 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
1271 return 0;
1272
1273 return -EINVAL;
1274 }
1275
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
					      struct drm_atomic_state *state, bool flip)
1278 {
1279 struct drm_crtc_state *new_crtc_state;
1280 struct drm_plane_state *new_plane_state;
1281 struct dm_crtc_state *dm_new_crtc_state;
1282
1283 if (flip) {
1284 if (plane->type != DRM_PLANE_TYPE_OVERLAY)
1285 return -EINVAL;
1286 } else if (plane->type != DRM_PLANE_TYPE_CURSOR) {
1287 return -EINVAL;
1288 }
1289
1290 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
1291 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
1292 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	/* Reject overlay cursors for now */
1294 if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE)
1295 return -EINVAL;
1296
1297 return 0;
1298 }
1299
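/*
 * Compute the DC cursor position from the plane state. Negative CRTC
 * coordinates are folded into the hotspot so the cursor can hang off the
 * top/left edge: for a hypothetical crtc_x of -10, x becomes 0 and
 * x_hotspot becomes 10.
 */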
int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
					struct dc_cursor_position *position)
1302 {
1303 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1304 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1305 int x, y;
1306 int xorigin = 0, yorigin = 0;
1307
1308 if (!crtc || !plane->state->fb)
1309 return 0;
1310
1311 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1312 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1313 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1314 __func__,
1315 plane->state->crtc_w,
1316 plane->state->crtc_h);
1317 return -EINVAL;
1318 }
1319
1320 x = plane->state->crtc_x;
1321 y = plane->state->crtc_y;
1322
1323 if (x <= -amdgpu_crtc->max_cursor_width ||
1324 y <= -amdgpu_crtc->max_cursor_height)
1325 return 0;
1326
1327 if (x < 0) {
1328 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1329 x = 0;
1330 }
1331 if (y < 0) {
1332 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1333 y = 0;
1334 }
1335 position->enable = true;
1336 position->x = x;
1337 position->y = y;
1338 position->x_hotspot = xorigin;
1339 position->y_hotspot = yorigin;
1340
1341 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
1342 position->translate_by_source = true;
1343
1344 return 0;
1345 }
1346
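/*
 * Program the cursor through DC. When the computed position is disabled the
 * cursor is simply turned off by programming the position; otherwise cursor
 * attributes (address, size, pre-multiplied-alpha color format) and position
 * are both programmed. The DC calls are wrapped in dc_lock and in ISM
 * begin/end cursor-update events.
 */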
void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
					  struct drm_plane_state *old_plane_state)
1349 {
1350 struct amdgpu_device *adev = drm_to_adev(plane->dev);
1351 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1352 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1353 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1354 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1355 uint64_t address = afb ? afb->address : 0;
1356 struct dc_cursor_position position = {0};
1357 struct dc_cursor_attributes attributes;
1358 int ret;
1359
1360 if (!plane->state->fb && !old_plane_state->fb)
1361 return;
1362
1363 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
1364 amdgpu_crtc->crtc_id, plane->state->crtc_w,
1365 plane->state->crtc_h);
1366
1367 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
1368 if (ret)
1369 return;
1370
1371 if (!position.enable) {
1372 /* turn off cursor */
1373 if (crtc_state && crtc_state->stream) {
1374 mutex_lock(&adev->dm.dc_lock);
1375 amdgpu_dm_ism_commit_event(
1376 &amdgpu_crtc->ism,
1377 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE);
1378
1379 dc_stream_program_cursor_position(crtc_state->stream,
1380 &position);
1381
1382 amdgpu_dm_ism_commit_event(
1383 &amdgpu_crtc->ism,
1384 DM_ISM_EVENT_END_CURSOR_UPDATE);
1385 mutex_unlock(&adev->dm.dc_lock);
1386 }
1387 return;
1388 }
1389
1390 amdgpu_crtc->cursor_width = plane->state->crtc_w;
1391 amdgpu_crtc->cursor_height = plane->state->crtc_h;
1392
1393 memset(&attributes, 0, sizeof(attributes));
1394 attributes.address.high_part = upper_32_bits(address);
1395 attributes.address.low_part = lower_32_bits(address);
1396 attributes.width = plane->state->crtc_w;
1397 attributes.height = plane->state->crtc_h;
1398 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1399 attributes.rotation_angle = 0;
1400 attributes.attribute_flags.value = 0;
1401
1402 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1403 * legacy gamma setup.
1404 */
1405 if (crtc_state->cm_is_degamma_srgb &&
1406 adev->dm.dc->caps.color.dpp.gamma_corr)
1407 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1408
1409 if (afb)
1410 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1411
1412 if (crtc_state->stream) {
1413 mutex_lock(&adev->dm.dc_lock);
1414 amdgpu_dm_ism_commit_event(
1415 &amdgpu_crtc->ism,
1416 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE);
1417
1418 if (!dc_stream_program_cursor_attributes(crtc_state->stream,
1419 &attributes))
1420 DRM_ERROR("DC failed to set cursor attributes\n");
1421
1422 if (!dc_stream_program_cursor_position(crtc_state->stream,
1423 &position))
1424 DRM_ERROR("DC failed to set cursor position\n");
1425
1426 amdgpu_dm_ism_commit_event(
1427 &amdgpu_crtc->ism,
1428 DM_ISM_EVENT_END_CURSOR_UPDATE);
1429 mutex_unlock(&adev->dm.dc_lock);
1430 }
1431 }
1432
static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
						struct drm_atomic_state *state)
1435 {
1436 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1437 plane);
1438 struct drm_plane_state *old_state =
1439 drm_atomic_get_old_plane_state(state, plane);
1440
1441 trace_amdgpu_dm_atomic_update_cursor(new_state);
1442
1443 swap(plane->state->fb, new_state->fb);
1444
1445 plane->state->src_x = new_state->src_x;
1446 plane->state->src_y = new_state->src_y;
1447 plane->state->src_w = new_state->src_w;
1448 plane->state->src_h = new_state->src_h;
1449 plane->state->crtc_x = new_state->crtc_x;
1450 plane->state->crtc_y = new_state->crtc_y;
1451 plane->state->crtc_w = new_state->crtc_w;
1452 plane->state->crtc_h = new_state->crtc_h;
1453
1454 amdgpu_dm_plane_handle_cursor_update(plane, old_state);
1455 }
1456
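/*
 * drm_panic flush hook. Presumably the panic handler draws to the scanout
 * buffer with the CPU, so force DCC off (and tiling too, when the framebuffer
 * uses a non-linear modifier) on the current DC plane state.
 */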
static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
1458 {
1459 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
1460 struct drm_framebuffer *fb = plane->state->fb;
1461 struct dc_plane_state *dc_plane_state;
1462
1463 if (!dm_plane_state || !dm_plane_state->dc_state)
1464 return;
1465
1466 dc_plane_state = dm_plane_state->dc_state;
1467
1468 dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false);
1469 }
1470
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
	.atomic_check = amdgpu_dm_plane_atomic_check,
	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
	.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};

static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
	.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
	.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
	.atomic_check = amdgpu_dm_plane_atomic_check,
	.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
	.atomic_async_update = amdgpu_dm_plane_atomic_async_update,
	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
	.panic_flush = amdgpu_dm_plane_panic_flush,
};

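/*
 * Reset the plane to its default software state: free any existing state
 * and allocate a fresh dm_plane_state with default transfer functions and
 * HDR multiplier.
 */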
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc_obj(*amdgpu_state);
	WARN_ON(amdgpu_state == NULL);

	if (!amdgpu_state)
		return;

	__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
	amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
	amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
	amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
	amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
}

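/*
 * Duplicate the plane state for a new atomic commit, taking references on
 * the DC plane state and on every color-management blob carried by the
 * old state.
 */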
static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc_obj(*dm_plane_state);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	if (old_dm_plane_state->degamma_lut)
		dm_plane_state->degamma_lut =
			drm_property_blob_get(old_dm_plane_state->degamma_lut);
	if (old_dm_plane_state->ctm)
		dm_plane_state->ctm =
			drm_property_blob_get(old_dm_plane_state->ctm);
	if (old_dm_plane_state->shaper_lut)
		dm_plane_state->shaper_lut =
			drm_property_blob_get(old_dm_plane_state->shaper_lut);
	if (old_dm_plane_state->lut3d)
		dm_plane_state->lut3d =
			drm_property_blob_get(old_dm_plane_state->lut3d);
	if (old_dm_plane_state->blend_lut)
		dm_plane_state->blend_lut =
			drm_property_blob_get(old_dm_plane_state->blend_lut);

	dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
	dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
	dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
	dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;

	return &dm_plane_state->base;
}

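/*
 * Validate a format/modifier pair for this plane. LINEAR and INVALID are
 * always accepted; anything else must be on the plane's modifier list and,
 * for pre-GFX12 tiling versions, must satisfy the swizzle and DCC
 * restrictions checked below.
 */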
static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
						 uint32_t format,
						 uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/* GFX12 doesn't have these limitations. */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11) {
		enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;

		/*
		 * For D swizzle the canonical modifier depends on the bpp, so check
		 * it here.
		 */
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
		    adev->family >= AMDGPU_FAMILY_NV) {
			if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
				return false;
		}

		if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
		    info->cpp[0] < 8)
			return false;

		if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
			/* Per radeonsi comments 16/64 bpp are more complicated. */
			if (info->cpp[0] != 4)
				return false;
			/* We support multi-planar formats, but not when combined with
			 * additional DCC metadata planes.
			 */
			if (info->num_planes > 1)
				return false;
		}
	}

	return true;
}

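/*
 * Release a duplicated plane state: drop the color-management blob
 * references, release the DC plane state, and free the DRM state itself.
 */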
static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
						    struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->degamma_lut)
		drm_property_blob_put(dm_plane_state->degamma_lut);
	if (dm_plane_state->ctm)
		drm_property_blob_put(dm_plane_state->ctm);
	if (dm_plane_state->lut3d)
		drm_property_blob_put(dm_plane_state->lut3d);
	if (dm_plane_state->shaper_lut)
		drm_property_blob_put(dm_plane_state->shaper_lut);
	if (dm_plane_state->blend_lut)
		drm_property_blob_put(dm_plane_state->blend_lut);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

#ifdef AMD_PRIVATE_COLOR
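/*
 * Attach the AMD private plane color-management properties (degamma, HDR
 * multiplier, CTM, shaper, 3D LUT and blend gamma), gated on the
 * corresponding DPP/MPC hardware capabilities.
 */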
static void
dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
					     struct drm_plane *plane)
{
	struct amdgpu_mode_info mode_info = dm->adev->mode_info;
	struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;

	/* Check HW color pipeline capabilities on DPP block (pre-blending)
	 * before exposing related properties.
	 */
	if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_degamma_lut_property,
					   0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_degamma_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   dm->adev->mode_info.plane_degamma_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
	}
	/* HDR MULT is always available */
	drm_object_attach_property(&plane->base,
				   dm->adev->mode_info.plane_hdr_mult_property,
				   AMDGPU_HDR_MULT_DEFAULT);

	/* Only enable plane CTM if both DPP and MPC gamut remap is available. */
	if (dm->dc->caps.color.mpc.gamut_remap)
		drm_object_attach_property(&plane->base,
					   dm->adev->mode_info.plane_ctm_property, 0);

	if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_lut_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_shaper_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_lut3d_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_lut3d_size_property,
					   MAX_COLOR_3DLUT_SIZE);
	}

	if (dpp_color_caps.ogam_ram || dm->dc->caps.color.mpc.preblend) {
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_lut_property, 0);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_lut_size_property,
					   MAX_COLOR_LUT_ENTRIES);
		drm_object_attach_property(&plane->base,
					   mode_info.plane_blend_tf_property,
					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
	}
}

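/*
 * Store a private color-management property into the plane state. Blob
 * properties are replaced by reference; scalar properties are compared so
 * that color_mgmt_changed is only set when the value actually changes.
 */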
static int
dm_atomic_plane_set_property(struct drm_plane *plane,
			     struct drm_plane_state *state,
			     struct drm_property *property,
			     uint64_t val)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	bool replaced = false;
	int ret;

	if (property == adev->mode_info.plane_degamma_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->degamma_lut,
							val,
							-1, -1, sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_degamma_tf_property) {
		if (dm_plane_state->degamma_tf != val) {
			dm_plane_state->degamma_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_hdr_mult_property) {
		if (dm_plane_state->hdr_mult != val) {
			dm_plane_state->hdr_mult = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_ctm_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->ctm,
							val,
							-1, sizeof(struct drm_color_ctm_3x4), -1,
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_shaper_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->shaper_lut,
							val,
							-1, -1, sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_shaper_tf_property) {
		if (dm_plane_state->shaper_tf != val) {
			dm_plane_state->shaper_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else if (property == adev->mode_info.plane_lut3d_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->lut3d,
							val,
							-1, -1, sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_blend_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&dm_plane_state->blend_lut,
							val,
							-1, -1, sizeof(struct drm_color_lut),
							&replaced);
		dm_plane_state->base.color_mgmt_changed |= replaced;
		return ret;
	} else if (property == adev->mode_info.plane_blend_tf_property) {
		if (dm_plane_state->blend_tf != val) {
			dm_plane_state->blend_tf = val;
			dm_plane_state->base.color_mgmt_changed = 1;
		}
	} else {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
			       plane->base.id, plane->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

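/*
 * Read back a private color-management property from the plane state.
 * Blob properties return the blob object ID, or 0 when unset.
 */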
static int
dm_atomic_plane_get_property(struct drm_plane *plane,
			     const struct drm_plane_state *state,
			     struct drm_property *property,
			     uint64_t *val)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
	struct amdgpu_device *adev = drm_to_adev(plane->dev);

	if (property == adev->mode_info.plane_degamma_lut_property) {
		*val = (dm_plane_state->degamma_lut) ?
			dm_plane_state->degamma_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_degamma_tf_property) {
		*val = dm_plane_state->degamma_tf;
	} else if (property == adev->mode_info.plane_hdr_mult_property) {
		*val = dm_plane_state->hdr_mult;
	} else if (property == adev->mode_info.plane_ctm_property) {
		*val = (dm_plane_state->ctm) ?
			dm_plane_state->ctm->base.id : 0;
	} else if (property == adev->mode_info.plane_shaper_lut_property) {
		*val = (dm_plane_state->shaper_lut) ?
			dm_plane_state->shaper_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_shaper_tf_property) {
		*val = dm_plane_state->shaper_tf;
	} else if (property == adev->mode_info.plane_lut3d_property) {
		*val = (dm_plane_state->lut3d) ?
			dm_plane_state->lut3d->base.id : 0;
	} else if (property == adev->mode_info.plane_blend_lut_property) {
		*val = (dm_plane_state->blend_lut) ?
			dm_plane_state->blend_lut->base.id : 0;
	} else if (property == adev->mode_info.plane_blend_tf_property) {
		*val = dm_plane_state->blend_tf;
	} else {
		return -EINVAL;
	}

	return 0;
}
#else

#define MAX_COLOR_PIPELINES 5

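/*
 * Build and attach the DRM COLOR_PIPELINE property for non-cursor planes
 * on DCN 3.0+ hardware, exposing the default pre-blending pipeline created
 * by amdgpu_dm_initialize_default_pipeline().
 */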
static int
dm_plane_init_colorops(struct drm_plane *plane)
{
	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {};
	struct drm_device *dev = plane->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int len = 0;
	int ret = 0;
	int i;

	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	/* initialize pipeline */
	if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
		ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
		if (ret) {
			drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
				plane->base.id, ret);
			goto out;
		}
		len++;

		/* Create COLOR_PIPELINE property and attach */
		drm_plane_create_color_pipeline_property(plane, pipelines, len);
	}

out:
	for (i = 0; i < len; i++)
		kfree(pipelines[i].name);

	return ret;
}
#endif

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_helper_destroy,
	.reset = amdgpu_dm_plane_drm_plane_reset,
	.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
	.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
	.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
#ifdef AMD_PRIVATE_COLOR
	.atomic_set_property = dm_atomic_plane_set_property,
	.atomic_get_property = dm_atomic_plane_get_property,
#endif
};

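/*
 * Create and initialize a DRM plane for the given hardware plane
 * capabilities: register the supported formats and modifiers, attach
 * blending, zpos, rotation, color and damage-clip properties as
 * appropriate for the plane type, and hook up the helper vtables.
 */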
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
			 struct drm_plane *plane,
			 unsigned long possible_crtcs,
			 const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;
	unsigned int primary_zpos = dm->dc->caps.max_slave_planes;

	num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
							ARRAY_SIZE(formats));

	res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	if (modifiers == NULL)
		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		/*
		 * Allow OVERLAY planes to be used as underlays by assigning an
		 * immutable zpos = # of OVERLAY planes to the PRIMARY plane.
		 */
		drm_plane_create_zpos_immutable_property(plane, primary_zpos);
	} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
		/*
		 * OVERLAY planes can be below or above the PRIMARY, but cannot
		 * be above the CURSOR plane.
		 */
		unsigned int zpos = primary_zpos + 1 + drm_plane_index(plane);

		drm_plane_create_zpos_property(plane, zpos, 0, 254);
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		drm_plane_create_zpos_immutable_property(plane, 255);
	}

	if ((plane->type == DRM_PLANE_TYPE_PRIMARY ||
	     plane->type == DRM_PLANE_TYPE_OVERLAY) &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_enable_fb_damage_clips(plane);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
	else
		drm_plane_helper_add(plane, &dm_plane_helper_funcs);

#ifdef AMD_PRIVATE_COLOR
	dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
#else
	res = dm_plane_init_colorops(plane);
	if (res)
		return res;
#endif

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

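/*
 * Return true if the fourcc is one of the YUV video formats (NV12/NV21/P010)
 * listed in the video_formats table above.
 */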
bool amdgpu_dm_plane_is_video_format(uint32_t format)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(video_formats); i++)
		if (format == video_formats[i])
			return true;

	return false;
}